///////////////////////////////////////////////////////////////////////////////
//
/// \file       simple_coder.c
/// \brief      Wrapper for simple filters
///
/// Simple filters don't change the size of the data i.e. number of bytes
/// in equals the number of bytes out.
//
//  Author:     Lasse Collin
//
//  This file has been put into the public domain.
//  You can do whatever you want with this file.
//
///////////////////////////////////////////////////////////////////////////////


#include "simple_private.h"


/// Copies or encodes/decodes more data to out[].
static lzma_ret
copy_or_code(lzma_simple_coder *coder, const lzma_allocator *allocator,
		const uint8_t *restrict in, size_t *restrict in_pos,
		size_t in_size, uint8_t *restrict out,
		size_t *restrict out_pos, size_t out_size, lzma_action action)
{
	assert(!coder->end_was_reached);

	if (coder->next.code == NULL) {
		lzma_bufcpy(in, in_pos, in_size, out, out_pos, out_size);

		// Check if end of stream was reached.
		if (coder->is_encoder && action == LZMA_FINISH
				&& *in_pos == in_size)
			coder->end_was_reached = true;

	} else {
		// Call the next coder in the chain to provide us some data.
		const lzma_ret ret = coder->next.code(
				coder->next.coder, allocator,
				in, in_pos, in_size,
				out, out_pos, out_size, action);

		if (ret == LZMA_STREAM_END) {
			assert(!coder->is_encoder
					|| action == LZMA_FINISH);
			coder->end_was_reached = true;

		} else if (ret != LZMA_OK) {
			return ret;
		}
	}

	return LZMA_OK;
}
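

/// Runs the filter-specific function on the given buffer and updates
/// coder->now_pos, which tracks the position in the uncompressed stream.
/// Returns the number of bytes that were actually filtered; any bytes at
/// the end of the buffer that couldn't be filtered yet are left unchanged.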
static size_t
call_filter(lzma_simple_coder *coder, uint8_t *buffer, size_t size)
{
	const size_t filtered = coder->filter(coder->simple,
			coder->now_pos, coder->is_encoder,
			buffer, size);
	coder->now_pos += filtered;
	return filtered;
}
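

/// The main coding function: copies/codes new data into out[] or
/// coder->buffer[], filters it, and keeps the bytes that couldn't be
/// filtered yet in coder->buffer[] until more input arrives. Once the
/// end of the data is reached, the remaining bytes are passed through
/// as is.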
static lzma_ret
simple_code(void *coder_ptr, const lzma_allocator *allocator,
		const uint8_t *restrict in, size_t *restrict in_pos,
		size_t in_size, uint8_t *restrict out,
		size_t *restrict out_pos, size_t out_size, lzma_action action)
{
	lzma_simple_coder *coder = coder_ptr;

	// TODO: Add partial support for LZMA_SYNC_FLUSH. We can support it
	// in cases where the filter is able to filter everything. With most
	// simple filters it can be done at an offset that is a multiple of
	// 2, 4, or 16. With the x86 filter, it needs good luck, and thus
	// cannot be made to work predictably.
	if (action == LZMA_SYNC_FLUSH)
		return LZMA_OPTIONS_ERROR;

	// Flush already filtered data from coder->buffer[] to out[].
	if (coder->pos < coder->filtered) {
		lzma_bufcpy(coder->buffer, &coder->pos, coder->filtered,
				out, out_pos, out_size);

		// If we couldn't flush all the filtered data, return to
		// the application immediately.
		if (coder->pos < coder->filtered)
			return LZMA_OK;

		if (coder->end_was_reached) {
			assert(coder->filtered == coder->size);
			return LZMA_STREAM_END;
		}
	}

	// If we get here, there is no filtered data left in the buffer.
	coder->filtered = 0;

	assert(!coder->end_was_reached);

	// If there is more output space left than there is unfiltered data
	// in coder->buffer[], flush coder->buffer[] to out[], and copy/code
	// more data to out[] hopefully filling it completely. Then filter
	// the data in out[]. This step is where most of the data gets
	// filtered if the buffer sizes used by the application are reasonable.
	const size_t out_avail = out_size - *out_pos;
	const size_t buf_avail = coder->size - coder->pos;
	if (out_avail > buf_avail || buf_avail == 0) {
		// Store the old position so that we know from which byte
		// to start filtering.
		const size_t out_start = *out_pos;

		// Flush data from coder->buffer[] to out[], but don't reset
		// coder->pos and coder->size yet. This way the coder can be
		// restarted if the next filter in the chain returns e.g.
		// LZMA_MEM_ERROR.
		//
		// Do the memcpy() conditionally because out can be NULL
		// (in which case buf_avail is always 0). Calling memcpy()
		// with a null-pointer is undefined even if the third
		// argument is 0.
		if (buf_avail > 0)
			memcpy(out + *out_pos, coder->buffer + coder->pos,
					buf_avail);

		*out_pos += buf_avail;

		// Copy/Encode/Decode more data to out[].
		{
			const lzma_ret ret = copy_or_code(coder, allocator,
					in, in_pos, in_size,
					out, out_pos, out_size, action);
			assert(ret != LZMA_STREAM_END);
			if (ret != LZMA_OK)
				return ret;
		}

		// Filter out[].
		const size_t size = *out_pos - out_start;
		const size_t filtered = call_filter(
				coder, out + out_start, size);

		const size_t unfiltered = size - filtered;
		assert(unfiltered <= coder->allocated / 2);

		// Now we can update coder->pos and coder->size, because
		// the next coder in the chain (if any) was successful.
		coder->pos = 0;
		coder->size = unfiltered;

		if (coder->end_was_reached) {
			// The last bytes have been copied to out[] already.
			// They are left as is.
			coder->size = 0;

		} else if (unfiltered > 0) {
			// There is unfiltered data left in out[]. Copy it to
			// coder->buffer[] and rewind *out_pos appropriately.
			*out_pos -= unfiltered;
			memcpy(coder->buffer, out + *out_pos, unfiltered);
		}

	} else if (coder->pos > 0) {
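		// out[] doesn't have room for more data than what is already
		// left in coder->buffer[], so keep working in coder->buffer[]:
		// move the remaining data to the beginning of the buffer to
		// make room for new input.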
		memmove(coder->buffer, coder->buffer + coder->pos, buf_avail);
		coder->size -= coder->pos;
		coder->pos = 0;
	}

	assert(coder->pos == 0);

	// If coder->buffer[] isn't empty, try to fill it by copying/decoding
	// more data. Then filter coder->buffer[] and copy the successfully
	// filtered data to out[]. It is probable that some filtered and
	// unfiltered data will be left in coder->buffer[].
	if (coder->size > 0) {
		{
			const lzma_ret ret = copy_or_code(coder, allocator,
					in, in_pos, in_size,
					coder->buffer, &coder->size,
					coder->allocated, action);
			assert(ret != LZMA_STREAM_END);
			if (ret != LZMA_OK)
				return ret;
		}

		coder->filtered = call_filter(
				coder, coder->buffer, coder->size);

		// Everything is considered to be filtered if coder->buffer[]
		// contains the last bytes of the data.
		if (coder->end_was_reached)
			coder->filtered = coder->size;

		// Flush as much as possible.
		lzma_bufcpy(coder->buffer, &coder->pos, coder->filtered,
				out, out_pos, out_size);
	}

	// Check if we got everything done.
	if (coder->end_was_reached && coder->pos == coder->size)
		return LZMA_STREAM_END;

	return LZMA_OK;
}
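

/// Frees the coder and its filter-specific data, and ends the next
/// coder in the chain.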
static void
simple_coder_end(void *coder_ptr, const lzma_allocator *allocator)
{
	lzma_simple_coder *coder = coder_ptr;
	lzma_next_end(&coder->next, allocator);
	lzma_free(coder->simple, allocator);
	lzma_free(coder, allocator);
	return;
}
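

/// Simple filters don't support updating their options, so this only
/// passes the request to the next filter in the chain.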
static lzma_ret
simple_coder_update(void *coder_ptr, const lzma_allocator *allocator,
		const lzma_filter *filters_null lzma_attribute((__unused__)),
		const lzma_filter *reversed_filters)
{
	lzma_simple_coder *coder = coder_ptr;

	// No update support, just call the next filter in the chain.
	return lzma_next_filter_update(
			&coder->next, allocator, reversed_filters + 1);
}
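

/// Initializes a simple filter coder. The filter-specific init function
/// provides the callback that does the actual filtering (filter), the
/// size of its private state (simple_size), the maximum number of bytes
/// the filter may leave unfiltered at the end of a buffer (unfiltered_max),
/// and the required alignment of the start offset (alignment).
///
/// A filter-specific init function is expected to call this roughly like
/// the sketch below (the names and sizes here are only illustrative):
///
///     return lzma_simple_coder_init(next, allocator, filters,
///             &foo_code, sizeof(lzma_simple_foo), 4, 4, is_encoder);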
extern lzma_ret
lzma_simple_coder_init(lzma_next_coder *next, const lzma_allocator *allocator,
		const lzma_filter_info *filters,
		size_t (*filter)(void *simple, uint32_t now_pos,
			bool is_encoder, uint8_t *buffer, size_t size),
		size_t simple_size, size_t unfiltered_max,
		uint32_t alignment, bool is_encoder)
{
	// Allocate memory for the lzma_simple_coder structure if needed.
	lzma_simple_coder *coder = next->coder;
	if (coder == NULL) {
		// Here we allocate space also for the temporary buffer. We
		// need twice the size of unfiltered_max, because then it
		// is always possible to filter at least unfiltered_max bytes
		// more data in coder->buffer[] if it can be filled completely.
		coder = lzma_alloc(sizeof(lzma_simple_coder)
				+ 2 * unfiltered_max, allocator);
		if (coder == NULL)
			return LZMA_MEM_ERROR;

		next->coder = coder;
		next->code = &simple_code;
		next->end = &simple_coder_end;
		next->update = &simple_coder_update;

		coder->next = LZMA_NEXT_CODER_INIT;
		coder->filter = filter;
		coder->allocated = 2 * unfiltered_max;

		// Allocate memory for filter-specific data structure.
		if (simple_size > 0) {
			coder->simple = lzma_alloc(simple_size, allocator);
			if (coder->simple == NULL)
				return LZMA_MEM_ERROR;
		} else {
			coder->simple = NULL;
		}
	}

	if (filters[0].options != NULL) {
		const lzma_options_bcj *simple = filters[0].options;
		coder->now_pos = simple->start_offset;
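		// The start offset must be a multiple of the filter's
		// alignment; other values are rejected.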
		if (coder->now_pos & (alignment - 1))
			return LZMA_OPTIONS_ERROR;
	} else {
		coder->now_pos = 0;
	}

	// Reset variables.
	coder->is_encoder = is_encoder;
	coder->end_was_reached = false;
	coder->pos = 0;
	coder->filtered = 0;
	coder->size = 0;

	return lzma_next_filter_init(&coder->next, allocator, filters + 1);
}