@@ -52,7 +52,7 @@
 #include "deflate.h"
 
 const char deflate_copyright[] =
-   " deflate 1.2.12 Copyright 1995-2022 Jean-loup Gailly and Mark Adler ";
+   " deflate 1.2.13 Copyright 1995-2022 Jean-loup Gailly and Mark Adler ";
 /*
   If you use the zlib library in a product, an acknowledgment is welcome
   in the documentation of your product. If for some reason you cannot
@@ -87,13 +87,7 @@ local void lm_init        OF((deflate_state *s));
 local void putShortMSB    OF((deflate_state *s, uInt b));
 local void flush_pending  OF((z_streamp strm));
 local unsigned read_buf   OF((z_streamp strm, Bytef *buf, unsigned size));
-#ifdef ASMV
-#  pragma message("Assembler code may have bugs -- use at your own risk")
-      void match_init OF((void)); /* asm code initialization */
-      uInt longest_match  OF((deflate_state *s, IPos cur_match));
-#else
 local uInt longest_match  OF((deflate_state *s, IPos cur_match));
-#endif
 
 #ifdef ZLIB_DEBUG
 local void check_match OF((deflate_state *s, IPos start, IPos match,
@@ -285,6 +279,8 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
 
     if (windowBits < 0) { /* suppress zlib wrapper */
         wrap = 0;
+        if (windowBits < -15)
+            return Z_STREAM_ERROR;
         windowBits = -windowBits;
     }
 #ifdef GZIP
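
[Review note -- not part of the patch] The two added lines validate a raw-deflate request before windowBits is negated, so any value below -15 (including extremes such as INT_MIN, whose negation is undefined behavior) now fails fast with Z_STREAM_ERROR. A minimal caller-side sketch, assuming a zlib build containing this change:

#include <stdio.h>
#include <zlib.h>

int main(void) {
    z_stream strm = {0};   /* zalloc/zfree/opaque default to Z_NULL */
    int ret;

    /* negative windowBits requests raw deflate; a value below -15 is
       now rejected up front rather than negated and checked later */
    ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                       -16, 8, Z_DEFAULT_STRATEGY);
    printf("deflateInit2(windowBits=-16) -> %d (Z_STREAM_ERROR is %d)\n",
           ret, Z_STREAM_ERROR);
    if (ret == Z_OK)
        deflateEnd(&strm);
    return 0;
}
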
@@ -674,36 +670,50 @@ int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain)
 }
 
 /* =========================================================================
- * For the default windowBits of 15 and memLevel of 8, this function returns
- * a close to exact, as well as small, upper bound on the compressed size.
- * They are coded as constants here for a reason--if the #define's are
- * changed, then this function needs to be changed as well. The return
- * value for 15 and 8 only works for those exact settings.
+ * For the default windowBits of 15 and memLevel of 8, this function returns a
+ * close to exact, as well as small, upper bound on the compressed size. This
+ * is an expansion of ~0.03%, plus a small constant.
+ *
+ * For any setting other than those defaults for windowBits and memLevel, one
+ * of two worst case bounds is returned. This is at most an expansion of ~4% or
+ * ~13%, plus a small constant.
  *
- * For any setting other than those defaults for windowBits and memLevel,
- * the value returned is a conservative worst case for the maximum expansion
- * resulting from using fixed blocks instead of stored blocks, which deflate
- * can emit on compressed data for some combinations of the parameters.
+ * Both the 0.03% and 4% derive from the overhead of stored blocks. The first
+ * one is for stored blocks of 16383 bytes (memLevel == 8), whereas the second
+ * is for stored blocks of 127 bytes (the worst case memLevel == 1). The
+ * expansion results from five bytes of header for each stored block.
  *
- * This function could be more sophisticated to provide closer upper bounds for
- * every combination of windowBits and memLevel. But even the conservative
- * upper bound of about 14% expansion does not seem onerous for output buffer
- * allocation.
+ * The larger expansion of 13% results from a window size less than or equal to
+ * the symbols buffer size (windowBits <= memLevel + 7). In that case some of
+ * the data being compressed may have slid out of the sliding window, impeding
+ * a stored block from being emitted. Then the only choice is a fixed or
+ * dynamic block, where a fixed block limits the maximum expansion to 9 bits
+ * per 8-bit byte, plus 10 bits for every block. The smallest block size for
+ * which this can occur is 255 (memLevel == 2).
+ *
+ * Shifts are used to approximate divisions, for speed.
 */
 uLong ZEXPORT deflateBound(strm, sourceLen)
     z_streamp strm;
     uLong sourceLen;
 {
     deflate_state *s;
-    uLong complen, wraplen;
+    uLong fixedlen, storelen, wraplen;
+
+    /* upper bound for fixed blocks with 9-bit literals and length 255
+       (memLevel == 2, which is the lowest that may not use stored blocks) --
+       ~13% overhead plus a small constant */
+    fixedlen = sourceLen + (sourceLen >> 3) + (sourceLen >> 8) +
+               (sourceLen >> 9) + 4;
 
-    /* conservative upper bound for compressed data */
-    complen = sourceLen +
-              ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5;
+    /* upper bound for stored blocks with length 127 (memLevel == 1) --
+       ~4% overhead plus a small constant */
+    storelen = sourceLen + (sourceLen >> 5) + (sourceLen >> 7) +
+               (sourceLen >> 11) + 7;
 
-    /* if can't get parameters, return conservative bound plus zlib wrapper */
+    /* if can't get parameters, return larger bound plus a zlib wrapper */
     if (deflateStateCheck(strm))
-        return complen + 6;
+        return (fixedlen > storelen ? fixedlen : storelen) + 6;
 
     /* compute wrapper length */
     s = strm->state;
@@ -740,11 +750,12 @@ uLong ZEXPORT deflateBound(strm, sourceLen)
         wraplen = 6;
     }
 
-    /* if not default parameters, return conservative bound */
+    /* if not default parameters, return one of the conservative bounds */
     if (s->w_bits != 15 || s->hash_bits != 8 + 7)
-        return complen + wraplen;
+        return (s->w_bits <= s->hash_bits ? fixedlen : storelen) + wraplen;
 
-    /* default settings: return tight bound for that case */
+    /* default settings: return tight bound for that case -- ~0.03% overhead
+       plus a small constant */
     return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
            (sourceLen >> 25) + 13 - 6 + wraplen;
 }
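
[Review note -- not part of the patch] A quick numeric check of the shift approximations behind the new bounds. The shift sums below mirror the patch arithmetic (wrapper bytes omitted); the program itself is purely illustrative:

#include <stdio.h>

int main(void) {
    unsigned long n = 1UL << 20;   /* 1 MiB of source data */

    /* same shift sums as the patch: ~13%, ~4%, and ~0.03% overhead */
    unsigned long fixedlen = n + (n >> 3) + (n >> 8) + (n >> 9) + 4;
    unsigned long storelen = n + (n >> 5) + (n >> 7) + (n >> 11) + 7;
    unsigned long tight    = n + (n >> 12) + (n >> 14) + (n >> 25) + 13;

    printf("fixed blocks:  +%.3f%%\n", 100.0 * (fixedlen - n) / n); /* 13.086 */
    printf("stored blocks: +%.3f%%\n", 100.0 * (storelen - n) / n); /*  3.956 */
    printf("default case:  +%.4f%%\n", 100.0 * (tight - n) / n);    /* 0.0318 */
    return 0;
}

The sums safely over-approximate the stated worst cases: 1/8 + 1/256 + 1/512 ~= 13.09% covers 9-bit literals plus 10 bits per 255-byte fixed block, and 1/32 + 1/128 + 1/2048 ~= 3.96% covers five bytes of header per 127-byte stored block (5/127 ~= 3.94%).
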
@@ -1252,11 +1263,6 @@ local void lm_init (s)
     s->match_length = s->prev_length = MIN_MATCH-1;
     s->match_available = 0;
     s->ins_h = 0;
-#ifndef FASTEST
-#ifdef ASMV
-    match_init(); /* initialize the asm code */
-#endif
-#endif
 }
 
 #ifndef FASTEST
@@ -1269,10 +1275,6 @@ local void lm_init (s)
  * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
  * OUT assertion: the match length is not greater than s->lookahead.
  */
-#ifndef ASMV
-/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
- * match.S. The code will be functionally equivalent.
- */
 local uInt longest_match(s, cur_match)
     deflate_state *s;
     IPos cur_match;                             /* current match */
@@ -1318,7 +1320,8 @@ local uInt longest_match(s, cur_match)
      */
     if ((uInt)nice_match > s->lookahead) nice_match = (int)s->lookahead;
 
-    Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+    Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
+           "need lookahead");
 
     do {
         Assert(cur_match < s->strstart, "no future");
@@ -1342,7 +1345,7 @@ local uInt longest_match(s, cur_match)
         /* It is not necessary to compare scan[2] and match[2] since they are
          * always equal when the other bytes match, given that the hash keys
          * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
-         * strstart+3, +5, ... up to strstart+257. We check for insufficient
+         * strstart + 3, + 5, up to strstart + 257. We check for insufficient
          * lookahead only every 4th comparison; the 128th check will be made
          * at strstart + 257. If MAX_MATCH-2 is not a multiple of 8, it is
          * necessary to put more guard bytes at the end of the window, or
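
[Review note -- not part of the patch] The comment reworded above describes the UNALIGNED_OK strategy: compare two bytes per step starting at strstart + 3, checking lookahead only every 4th comparison. A simplified, self-contained sketch of that idea (the helper name match_len16 is illustrative, not the actual zlib loop):

#include <string.h>

/* Return the length of the common prefix of a and b (at most maxlen),
 * comparing two bytes per step; a simplified stand-in for the
 * UNALIGNED_OK fast path in longest_match(). */
static unsigned match_len16(const unsigned char *a, const unsigned char *b,
                            unsigned maxlen)
{
    unsigned len = 0;
    while (len + 2 <= maxlen) {
        unsigned short wa, wb;
        memcpy(&wa, a + len, 2);   /* memcpy makes the 16-bit load legal */
        memcpy(&wb, b + len, 2);   /* even at odd addresses */
        if (wa != wb)
            break;
        len += 2;
    }
    /* the pair that stopped the loop may still share its first byte */
    if (len < maxlen && a[len] == b[len])
        len++;
    return len;
}
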
@@ -1359,7 +1362,8 @@ local uInt longest_match(s, cur_match)
         /* The funny "do {}" generates better code on most compilers */
 
         /* Here, scan <= window + strstart + 257 */
-        Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+        Assert(scan <= s->window + (unsigned)(s->window_size - 1),
+               "wild scan");
         if (*scan == *match) scan++;
 
         len = (MAX_MATCH - 1) - (int)(strend - scan);
@@ -1391,7 +1395,8 @@ local uInt longest_match(s, cur_match)
                  *++scan == *++match && *++scan == *++match &&
                  scan < strend);
 
-        Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+        Assert(scan <= s->window + (unsigned)(s->window_size - 1),
+               "wild scan");
 
         len = MAX_MATCH - (int)(strend - scan);
         scan = strend - MAX_MATCH;
@@ -1415,7 +1420,6 @@ local uInt longest_match(s, cur_match)
     if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
     return s->lookahead;
 }
-#endif /* ASMV */
 
 #else /* FASTEST */
 
@@ -1436,7 +1440,8 @@ local uInt longest_match(s, cur_match)
      */
     Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
 
-    Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+    Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
+           "need lookahead");
 
     Assert(cur_match < s->strstart, "no future");
 
@@ -1680,7 +1685,7 @@ local void fill_window(s)
  *
  * deflate_stored() is written to minimize the number of times an input byte is
  * copied. It is most efficient with large input and output buffers, which
- * maximizes the opportunites to have a single copy from next_in to next_out.
+ * maximizes the opportunities to have a single copy from next_in to next_out.
  */
 local block_state deflate_stored(s, flush)
     deflate_state *s;
@@ -2140,7 +2145,8 @@ local block_state deflate_rle(s, flush)
             if (s->match_length > s->lookahead)
                 s->match_length = s->lookahead;
         }
-        Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan");
+        Assert(scan <= s->window + (uInt)(s->window_size - 1),
+               "wild scan");
     }
 
     /* Emit match if have run of MIN_MATCH or longer, else emit literal */