/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/* deflate.c -- compress data using the deflation algorithm
 * Copyright (C) 1995-2005 Jean-loup Gailly.
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#pragma ident "%Z%%M% %I% %E% SMI"

/*
 *  ALGORITHM
 *
 *      The "deflation" process depends on being able to identify portions
 *      of the input text which are identical to earlier input (within a
 *      sliding window trailing behind the input currently being processed).
 *
 *      The most straightforward technique turns out to be the fastest for
 *      most input files: try all possible matches and select the longest.
 *      The key feature of this algorithm is that insertions into the string
 *      dictionary are very simple and thus fast, and deletions are avoided
 *      completely. Insertions are performed at each input character, whereas
 *      string matches are performed only when the previous match ends. So it
 *      is preferable to spend more time in matches to allow very fast string
 *      insertions and avoid deletions. The matching algorithm for small
 *      strings is inspired from that of Rabin & Karp. A brute force approach
 *      is used to find longer strings when a small match has been found.
 *      A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
 *      (by Leonid Broukhis).
 *      A previous version of this file used a more sophisticated algorithm
 *      (by Fiala and Greene) which is guaranteed to run in linear amortized
 *      time, but has a larger average cost, uses more memory and is patented.
 *      However the F&G algorithm may be faster for some highly redundant
 *      files if the parameter max_chain_length (described below) is too large.
 *
 *  ACKNOWLEDGEMENTS
 *
 *      The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
 *      I found it in 'freeze' written by Leonid Broukhis.
 *      Thanks to many people for bug reports and testing.
 *
 *  REFERENCES
 *
 *      Deutsch, L.P., "DEFLATE Compressed Data Format Specification".
 *      Available at http://www.ietf.org/rfc/rfc1951.txt
 *
 *      A description of the Rabin and Karp algorithm is given in the book
 *         "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
 *
 *      Fiala,E.R., and Greene,D.H.
 *         Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595
 *
 */
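/*
 * Illustrative note (added commentary, not part of the original algorithm
 * description): lazy evaluation means that after a match of length L is
 * found at position p, the compressor also evaluates position p+1 before
 * emitting anything.  If the match starting at p+1 is longer, the byte at
 * p is emitted as a literal and the longer match is taken; otherwise the
 * match at p is emitted.  deflate_slow() below implements exactly this
 * one-position look-ahead; deflate_fast() skips it for speed.
 */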

#include "deflate.h"

static const char deflate_copyright[] =
   " deflate 1.2.3 Copyright 1995-2005 Jean-loup Gailly ";
/*
  If you use the zlib library in a product, an acknowledgment is welcome
  in the documentation of your product. If for some reason you cannot
  include such an acknowledgment, I would appreciate that you keep this
  copyright string in the executable of your product.
 */

/* ===========================================================================
 *  Function prototypes.
 */
typedef enum {
    need_more,      /* block not completed, need more input or more output */
    block_done,     /* block flush performed */
    finish_started, /* finish started, need only more output at next deflate */
    finish_done     /* finish done, accept no more input or output */
} block_state;

typedef block_state (*compress_func) OF((deflate_state *s, int flush));
/* Compression function. Returns the block state after the call. */

local void fill_window    OF((deflate_state *s));
local block_state deflate_stored OF((deflate_state *s, int flush));
local block_state deflate_fast   OF((deflate_state *s, int flush));
#ifndef FASTEST
local block_state deflate_slow   OF((deflate_state *s, int flush));
#endif
local void lm_init        OF((deflate_state *s));
local void putShortMSB    OF((deflate_state *s, uInt b));
local void flush_pending  OF((z_streamp strm));
local int read_buf        OF((z_streamp strm, Bytef *buf, unsigned size));
#ifndef FASTEST
#ifdef ASMV
      void match_init OF((void)); /* asm code initialization */
      uInt longest_match  OF((deflate_state *s, IPos cur_match));
#else
local uInt longest_match  OF((deflate_state *s, IPos cur_match));
#endif
#endif
local uInt longest_match_fast OF((deflate_state *s, IPos cur_match));

#ifdef DEBUG
local void check_match OF((deflate_state *s, IPos start, IPos match,
                           int length));
#endif

/* ===========================================================================
 * Local data
 */

#define NIL 0
/* Tail of hash chains */

#ifndef TOO_FAR
#  define TOO_FAR 4096
#endif
/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */

#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
/* Minimum amount of lookahead, except at the end of the input file.
 * See deflate.c for comments about the MIN_MATCH+1.
 */

/* Values for max_lazy_match, good_match and max_chain_length, depending on
 * the desired pack level (0..9). The values given below have been tuned to
 * exclude worst case performance for pathological files. Better values may be
 * found for specific files.
 */
typedef struct config_s {
   ush good_length; /* reduce lazy search above this match length */
   ush max_lazy;    /* do not perform lazy search above this match length */
   ush nice_length; /* quit search above this match length */
   ush max_chain;
   compress_func func;
} config;

#ifdef FASTEST
local const config configuration_table[2] = {
/*      good lazy nice chain */
/* 0 */ {0,    0,  0,    0, deflate_stored},  /* store only */
/* 1 */ {4,    4,  8,    4, deflate_fast}};   /* max speed, no lazy matches */
#else
local const config configuration_table[10] = {
/*      good lazy nice chain */
/* 0 */ {0,    0,  0,    0, deflate_stored},  /* store only */
/* 1 */ {4,    4,  8,    4, deflate_fast},    /* max speed, no lazy matches */
/* 2 */ {4,    5, 16,    8, deflate_fast},
/* 3 */ {4,    6, 32,   32, deflate_fast},

/* 4 */ {4,    4, 16,   16, deflate_slow},  /* lazy matches */
/* 5 */ {8,   16, 32,   32, deflate_slow},
/* 6 */ {8,   16, 128, 128, deflate_slow},
/* 7 */ {8,   32, 128, 256, deflate_slow},
/* 8 */ {32, 128, 258, 1024, deflate_slow},
/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */
#endif

/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
 * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
 * meaning.
 */

#define EQUAL 0
/* result of memcmp for equal strings */

#ifndef NO_DUMMY_DECL
struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
#endif

/* ===========================================================================
 * Update a hash value with the given input byte
 * IN  assertion: all calls to UPDATE_HASH are made with consecutive
 *    input characters, so that a running hash key can be computed from the
 *    previous key instead of complete recalculation each time.
 */
#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)


/* ===========================================================================
 * Insert string str in the dictionary and set match_head to the previous head
 * of the hash chain (the most recent string with same hash key). Return
 * the previous length of the hash chain.
 * If this file is compiled with -DFASTEST, the compression level is forced
 * to 1, and no hash chains are maintained.
 * IN  assertion: all calls to INSERT_STRING are made with consecutive
 *    input characters and the first MIN_MATCH bytes of str are valid
 *    (except for the last MIN_MATCH-1 bytes of the input file).
 */
#ifdef FASTEST
#define INSERT_STRING(s, str, match_head) \
   (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
    match_head = s->head[s->ins_h], \
    s->head[s->ins_h] = (Pos)(str))
#else
#define INSERT_STRING(s, str, match_head) \
   (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
    match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \
    s->head[s->ins_h] = (Pos)(str))
#endif

/* ===========================================================================
 * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
 * prev[] will be initialized on the fly.
 */
#define CLEAR_HASH(s) \
    s->head[s->hash_size-1] = NIL; \
    (void) zmemzero((Bytef *)s->head, \
        (unsigned)(s->hash_size-1)*sizeof(*s->head));
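
/* Illustrative sketch (added commentary, not part of the original source):
 * for MIN_MATCH == 3 the hash of a string b0 b1 b2 is built incrementally,
 * exactly the way fill_window() and INSERT_STRING() do it:
 *
 *     s->ins_h = b0;                     // seed with the first byte
 *     UPDATE_HASH(s, s->ins_h, b1);      // fold in the second byte
 *     UPDATE_HASH(s, s->ins_h, b2);      // done inside INSERT_STRING()
 *
 * Because hash_shift == (hash_bits+MIN_MATCH-1)/MIN_MATCH, after MIN_MATCH
 * updates the oldest byte has been shifted past hash_mask, so advancing the
 * window by one byte costs a single UPDATE_HASH call.
 */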
/* ========================================================================= */
int ZEXPORT deflateInit_(strm, level, version, stream_size)
    z_streamp strm;
    int level;
    const char *version;
    int stream_size;
{
    return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
                         Z_DEFAULT_STRATEGY, version, stream_size);
    /* To do: ignore strm->next_in if we use it as window */
}

/* ========================================================================= */
int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
                          version, stream_size)
    z_streamp strm;
    int level;
    int method;
    int windowBits;
    int memLevel;
    int strategy;
    const char *version;
    int stream_size;
{
    deflate_state *s;
    int wrap = 1;
    static const char my_version[] = ZLIB_VERSION;

    ushf *overlay;
    /* We overlay pending_buf and d_buf+l_buf. This works since the average
     * output size for (length,distance) codes is <= 24 bits.
     */

    if (version == Z_NULL || version[0] != my_version[0] ||
        stream_size != sizeof(z_stream)) {
        return Z_VERSION_ERROR;
    }
    if (strm == Z_NULL) return Z_STREAM_ERROR;

    strm->msg = Z_NULL;
    if (strm->zalloc == (alloc_func)0) {
        strm->zalloc = zcalloc;
        strm->opaque = (voidpf)0;
    }
    if (strm->zfree == (free_func)0) strm->zfree = zcfree;

#ifdef FASTEST
    if (level != 0) level = 1;
#else
    if (level == Z_DEFAULT_COMPRESSION) level = 6;
#endif

    if (windowBits < 0) { /* suppress zlib wrapper */
        wrap = 0;
        windowBits = -windowBits;
    }
#ifdef GZIP
    else if (windowBits > 15) {
        wrap = 2;       /* write gzip wrapper instead */
        windowBits -= 16;
    }
#endif
    if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
        windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
        strategy < 0 || strategy > Z_FIXED) {
        return Z_STREAM_ERROR;
    }
    if (windowBits == 8) windowBits = 9;  /* until 256-byte window bug fixed */
    s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
    if (s == Z_NULL) return Z_MEM_ERROR;
    strm->state = (struct internal_state FAR *)s;
    s->strm = strm;

    s->wrap = wrap;
    s->gzhead = Z_NULL;
    s->w_bits = windowBits;
    s->w_size = 1 << s->w_bits;
    s->w_mask = s->w_size - 1;

    s->hash_bits = memLevel + 7;
    s->hash_size = 1 << s->hash_bits;
    s->hash_mask = s->hash_size - 1;
    s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);

    s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
    s->prev   = (Posf *)  ZALLOC(strm, s->w_size, sizeof(Pos));
    s->head   = (Posf *)  ZALLOC(strm, s->hash_size, sizeof(Pos));

    s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */

    overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
    s->pending_buf = (uchf *) overlay;
    s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);

    if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
        s->pending_buf == Z_NULL) {
        s->status = FINISH_STATE;
        strm->msg = (char*)ERR_MSG(Z_MEM_ERROR);
        (void) deflateEnd (strm);
        return Z_MEM_ERROR;
    }
    s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
    s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;

    s->level = level;
    s->strategy = strategy;
    s->method = (Byte)method;

    return deflateReset(strm);
}
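
#if 0
/* Illustrative, hypothetical caller (added for documentation; not part of
 * this file and not compiled): sets up a raw deflate stream -- a negative
 * windowBits suppresses the zlib wrapper -- using the built-in allocators.
 * The function name is an assumption for the sketch.
 */
local int example_raw_deflate_init(z_streamp strm)
{
    strm->zalloc = (alloc_func)0;   /* fall back to zcalloc/zcfree */
    strm->zfree  = (free_func)0;
    strm->opaque = (voidpf)0;
    return deflateInit2(strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -MAX_WBITS,
                        DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
}
#endif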
/* ========================================================================= */
int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
    z_streamp strm;
    const Bytef *dictionary;
    uInt dictLength;
{
    deflate_state *s;
    uInt length = dictLength;
    uInt n;
    IPos hash_head = 0;

    if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL ||
        strm->state->wrap == 2 ||
        (strm->state->wrap == 1 && strm->state->status != INIT_STATE))
        return Z_STREAM_ERROR;

    s = strm->state;
    if (s->wrap)
        strm->adler = adler32(strm->adler, dictionary, dictLength);

    if (length < MIN_MATCH) return Z_OK;
    if (length > MAX_DIST(s)) {
        length = MAX_DIST(s);
        dictionary += dictLength - length; /* use the tail of the dictionary */
    }
    (void) zmemcpy(s->window, dictionary, length);
    s->strstart = length;
    s->block_start = (long)length;

    /* Insert all strings in the hash table (except for the last two bytes).
     * s->lookahead stays null, so s->ins_h will be recomputed at the next
     * call of fill_window.
     */
    s->ins_h = s->window[0];
    UPDATE_HASH(s, s->ins_h, s->window[1]);
    for (n = 0; n <= length - MIN_MATCH; n++) {
        INSERT_STRING(s, n, hash_head);
    }
    if (hash_head) hash_head = 0;  /* to make compiler happy */
    return Z_OK;
}

/* ========================================================================= */
int ZEXPORT deflateReset (strm)
    z_streamp strm;
{
    deflate_state *s;

    if (strm == Z_NULL || strm->state == Z_NULL ||
        strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0) {
        return Z_STREAM_ERROR;
    }

    strm->total_in = strm->total_out = 0;
    strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
    strm->data_type = Z_UNKNOWN;

    s = (deflate_state *)strm->state;
    s->pending = 0;
    s->pending_out = s->pending_buf;

    if (s->wrap < 0) {
        s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */
    }
    s->status = s->wrap ? INIT_STATE : BUSY_STATE;
    strm->adler =
#ifdef GZIP
        s->wrap == 2 ? crc32(0L, Z_NULL, 0) :
#endif
        adler32(0L, Z_NULL, 0);
    s->last_flush = Z_NO_FLUSH;

    _tr_init(s);
    lm_init(s);

    return Z_OK;
}

/* ========================================================================= */
int ZEXPORT deflateSetHeader (strm, head)
    z_streamp strm;
    gz_headerp head;
{
    if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
    if (strm->state->wrap != 2) return Z_STREAM_ERROR;
    strm->state->gzhead = head;
    return Z_OK;
}

/* ========================================================================= */
int ZEXPORT deflatePrime (strm, bits, value)
    z_streamp strm;
    int bits;
    int value;
{
    if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
    strm->state->bi_valid = bits;
    strm->state->bi_buf = (ush)(value & ((1 << bits) - 1));
    return Z_OK;
}

/* ========================================================================= */
int ZEXPORT deflateParams(strm, level, strategy)
    z_streamp strm;
    int level;
    int strategy;
{
    deflate_state *s;
    compress_func func;
    int err = Z_OK;

    if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
    s = strm->state;

#ifdef FASTEST
    if (level != 0) level = 1;
#else
    if (level == Z_DEFAULT_COMPRESSION) level = 6;
#endif
    if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) {
        return Z_STREAM_ERROR;
    }
    func = configuration_table[s->level].func;

    if (func != configuration_table[level].func && strm->total_in != 0) {
        /* Flush the last buffer: */
        err = deflate(strm, Z_PARTIAL_FLUSH);
    }
    if (s->level != level) {
        s->level = level;
        s->max_lazy_match   = configuration_table[level].max_lazy;
        s->good_match       = configuration_table[level].good_length;
        s->nice_match       = configuration_table[level].nice_length;
        s->max_chain_length = configuration_table[level].max_chain;
    }
    s->strategy = strategy;
    return err;
}
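
/* Illustrative calling sequence (added commentary, not part of the original
 * source): a preset dictionary is installed after deflateInit*() and before
 * the first call to deflate().  Variable names are assumptions.
 *
 *     deflateInit(&strm, Z_DEFAULT_COMPRESSION);
 *     deflateSetDictionary(&strm, dict, dict_len);
 *     // strm.adler now holds the Adler-32 of the dictionary; the same bytes
 *     // must be passed to inflateSetDictionary() when inflate() returns
 *     // Z_NEED_DICT on the decompressing side.
 *     ... deflate(&strm, ...) ...
 */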
/* ========================================================================= */
int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain)
    z_streamp strm;
    int good_length;
    int max_lazy;
    int nice_length;
    int max_chain;
{
    deflate_state *s;

    if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
    s = strm->state;
    s->good_match = good_length;
    s->max_lazy_match = max_lazy;
    s->nice_match = nice_length;
    s->max_chain_length = max_chain;
    return Z_OK;
}

/* =========================================================================
 * For the default windowBits of 15 and memLevel of 8, this function returns
 * a close to exact, as well as small, upper bound on the compressed size.
 * They are coded as constants here for a reason--if the #define's are
 * changed, then this function needs to be changed as well.  The return
 * value for 15 and 8 only works for those exact settings.
 *
 * For any setting other than those defaults for windowBits and memLevel,
 * the value returned is a conservative worst case for the maximum expansion
 * resulting from using fixed blocks instead of stored blocks, which deflate
 * can emit on compressed data for some combinations of the parameters.
 *
 * This function could be more sophisticated to provide closer upper bounds
 * for every combination of windowBits and memLevel, as well as wrap.
 * But even the conservative upper bound of about 14% expansion does not
 * seem onerous for output buffer allocation.
 */
uLong ZEXPORT deflateBound(strm, sourceLen)
    z_streamp strm;
    uLong sourceLen;
{
    deflate_state *s;
    uLong destLen;

    /* conservative upper bound */
    destLen = sourceLen +
              ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 11;

    /* if can't get parameters, return conservative bound */
    if (strm == Z_NULL || strm->state == Z_NULL)
        return destLen;

    /* if not default parameters, return conservative bound */
    s = strm->state;
    if (s->w_bits != 15 || s->hash_bits != 8 + 7)
        return destLen;

    /* default settings: return tight bound for that case */
    return compressBound(sourceLen);
}
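
/* Worked example (added commentary, not part of the original source): for
 * sourceLen == 1000 bytes the conservative bound above evaluates to
 *
 *     1000 + ((1000 + 7) >> 3) + ((1000 + 63) >> 6) + 11
 *   = 1000 + 125 + 16 + 11 = 1152 bytes,
 *
 * i.e. roughly 1/8 + 1/64 (about 14%) of expansion plus a small constant.
 */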
/* =========================================================================
 * Put a short in the pending buffer. The 16-bit value is put in MSB order.
 * IN assertion: the stream state is correct and there is enough room in
 * pending_buf.
 */
local void putShortMSB (s, b)
    deflate_state *s;
    uInt b;
{
    put_byte(s, (Byte)(b >> 8));
    put_byte(s, (Byte)(b & 0xff));
}

/* =========================================================================
 * Flush as much pending output as possible. All deflate() output goes
 * through this function so some applications may wish to modify it
 * to avoid allocating a large strm->next_out buffer and copying into it.
 * (See also read_buf()).
 */
local void flush_pending(strm)
    z_streamp strm;
{
    unsigned len = strm->state->pending;

    if (len > strm->avail_out) len = strm->avail_out;
    if (len == 0) return;

    zmemcpy(strm->next_out, strm->state->pending_out, len);
    strm->next_out            += len;
    strm->state->pending_out  += len;
    strm->total_out           += len;
    strm->avail_out           -= len;
    strm->state->pending      -= len;
    if (strm->state->pending == 0) {
        strm->state->pending_out = strm->state->pending_buf;
    }
}

/* ========================================================================= */
int ZEXPORT deflate (strm, flush)
    z_streamp strm;
    int flush;
{
    int old_flush; /* value of flush param for previous deflate call */
    deflate_state *s;

    if (strm == Z_NULL || strm->state == Z_NULL ||
        flush > Z_FINISH || flush < 0) {
        return Z_STREAM_ERROR;
    }
    s = strm->state;

    if (strm->next_out == Z_NULL ||
        (strm->next_in == Z_NULL && strm->avail_in != 0) ||
        (s->status == FINISH_STATE && flush != Z_FINISH)) {
        ERR_RETURN(strm, Z_STREAM_ERROR);
    }
    if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);

    s->strm = strm; /* just in case */
    old_flush = s->last_flush;
    s->last_flush = flush;

    /* Write the header */
    if (s->status == INIT_STATE) {
#ifdef GZIP
        if (s->wrap == 2) {
            strm->adler = crc32(0L, Z_NULL, 0);
            put_byte(s, 31);
            put_byte(s, 139);
            put_byte(s, 8);
            if (s->gzhead == NULL) {
                put_byte(s, 0);
                put_byte(s, 0);
                put_byte(s, 0);
                put_byte(s, 0);
                put_byte(s, 0);
                put_byte(s, s->level == 9 ? 2 :
                            (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
                             4 : 0));
                put_byte(s, OS_CODE);
                s->status = BUSY_STATE;
            }
            else {
                put_byte(s, (s->gzhead->text ? 1 : 0) +
                            (s->gzhead->hcrc ? 2 : 0) +
                            (s->gzhead->extra == Z_NULL ? 0 : 4) +
                            (s->gzhead->name == Z_NULL ? 0 : 8) +
                            (s->gzhead->comment == Z_NULL ? 0 : 16)
                        );
                put_byte(s, (Byte)(s->gzhead->time & 0xff));
                put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff));
                put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff));
                put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff));
                put_byte(s, s->level == 9 ? 2 :
                            (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
                             4 : 0));
                put_byte(s, s->gzhead->os & 0xff);
                if (s->gzhead->extra != NULL) {
                    put_byte(s, s->gzhead->extra_len & 0xff);
                    put_byte(s, (s->gzhead->extra_len >> 8) & 0xff);
                }
                if (s->gzhead->hcrc)
                    strm->adler = crc32(strm->adler, s->pending_buf,
                                        s->pending);
                s->gzindex = 0;
                s->status = EXTRA_STATE;
            }
        }
        else
#endif
        {
            uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
            uInt level_flags;

            if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2)
                level_flags = 0;
            else if (s->level < 6)
                level_flags = 1;
            else if (s->level == 6)
                level_flags = 2;
            else
                level_flags = 3;
            header |= (level_flags << 6);
            if (s->strstart != 0) header |= PRESET_DICT;
            header += 31 - (header % 31);

            s->status = BUSY_STATE;
            putShortMSB(s, header);

            /* Save the adler32 of the preset dictionary: */
            if (s->strstart != 0) {
                putShortMSB(s, (uInt)(strm->adler >> 16));
                putShortMSB(s, (uInt)(strm->adler & 0xffff));
            }
            strm->adler = adler32(0L, Z_NULL, 0);
        }
    }
#ifdef GZIP
    if (s->status == EXTRA_STATE) {
        if (s->gzhead->extra != NULL) {
            uInt beg = s->pending;  /* start of bytes to update crc */

            while (s->gzindex < (s->gzhead->extra_len & 0xffff)) {
                if (s->pending == s->pending_buf_size) {
                    if (s->gzhead->hcrc && s->pending > beg)
                        strm->adler = crc32(strm->adler, s->pending_buf + beg,
                                            s->pending - beg);
                    flush_pending(strm);
                    beg = s->pending;
                    if (s->pending == s->pending_buf_size)
                        break;
                }
                put_byte(s, s->gzhead->extra[s->gzindex]);
                s->gzindex++;
            }
            if (s->gzhead->hcrc && s->pending > beg)
                strm->adler = crc32(strm->adler, s->pending_buf + beg,
                                    s->pending - beg);
            if (s->gzindex == s->gzhead->extra_len) {
                s->gzindex = 0;
                s->status = NAME_STATE;
            }
        }
        else
            s->status = NAME_STATE;
    }
    if (s->status == NAME_STATE) {
        if (s->gzhead->name != NULL) {
            uInt beg = s->pending;  /* start of bytes to update crc */
            int val;

            do {
                if (s->pending == s->pending_buf_size) {
                    if (s->gzhead->hcrc && s->pending > beg)
                        strm->adler = crc32(strm->adler, s->pending_buf + beg,
                                            s->pending - beg);
                    flush_pending(strm);
                    beg = s->pending;
                    if (s->pending == s->pending_buf_size) {
                        val = 1;
                        break;
                    }
                }
                val = s->gzhead->name[s->gzindex++];
                put_byte(s, val);
            } while (val != 0);
            if (s->gzhead->hcrc && s->pending > beg)
                strm->adler = crc32(strm->adler, s->pending_buf + beg,
                                    s->pending - beg);
            if (val == 0) {
                s->gzindex = 0;
                s->status = COMMENT_STATE;
            }
        }
        else
            s->status = COMMENT_STATE;
    }
    if (s->status == COMMENT_STATE) {
        if (s->gzhead->comment != NULL) {
            uInt beg = s->pending;  /* start of bytes to update crc */
            int val;

            do {
                if (s->pending == s->pending_buf_size) {
                    if (s->gzhead->hcrc && s->pending > beg)
                        strm->adler = crc32(strm->adler, s->pending_buf + beg,
                                            s->pending - beg);
                    flush_pending(strm);
                    beg = s->pending;
                    if (s->pending == s->pending_buf_size) {
                        val = 1;
                        break;
                    }
                }
                val = s->gzhead->comment[s->gzindex++];
                put_byte(s, val);
            } while (val != 0);
            if (s->gzhead->hcrc && s->pending > beg)
                strm->adler = crc32(strm->adler, s->pending_buf + beg,
                                    s->pending - beg);
            if (val == 0)
                s->status = HCRC_STATE;
        }
        else
            s->status = HCRC_STATE;
    }
    if (s->status == HCRC_STATE) {
        if (s->gzhead->hcrc) {
            if (s->pending + 2 > s->pending_buf_size)
                flush_pending(strm);
            if (s->pending + 2 <= s->pending_buf_size) {
                put_byte(s, (Byte)(strm->adler & 0xff));
                put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
                strm->adler = crc32(0L, Z_NULL, 0);
                s->status = BUSY_STATE;
            }
        }
        else
            s->status = BUSY_STATE;
    }
#endif

    /* Flush as much pending output as possible */
    if (s->pending != 0) {
        flush_pending(strm);
        if (strm->avail_out == 0) {
            /* Since avail_out is 0, deflate will be called again with
             * more output space, but possibly with both pending and
             * avail_in equal to zero. There won't be anything to do,
             * but this is not an error situation so make sure we
             * return OK instead of BUF_ERROR at next call of deflate:
             */
            s->last_flush = -1;
            return Z_OK;
        }

    /* Make sure there is something to do and avoid duplicate consecutive
     * flushes. For repeated and useless calls with Z_FINISH, we keep
     * returning Z_STREAM_END instead of Z_BUF_ERROR.
     */
    } else if (strm->avail_in == 0 && flush <= old_flush &&
               flush != Z_FINISH) {
        ERR_RETURN(strm, Z_BUF_ERROR);
    }

    /* User must not provide more input after the first FINISH: */
    if (s->status == FINISH_STATE && strm->avail_in != 0) {
        ERR_RETURN(strm, Z_BUF_ERROR);
    }

    /* Start a new block or continue the current one.
     */
    if (strm->avail_in != 0 || s->lookahead != 0 ||
        (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
        block_state bstate;

        bstate = (*(configuration_table[s->level].func))(s, flush);

        if (bstate == finish_started || bstate == finish_done) {
            s->status = FINISH_STATE;
        }
        if (bstate == need_more || bstate == finish_started) {
            if (strm->avail_out == 0) {
                s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
            }
            return Z_OK;
            /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
             * of deflate should use the same flush parameter to make sure
             * that the flush is complete. So we don't have to output an
             * empty block here, this will be done at next call. This also
             * ensures that for a very small output buffer, we emit at most
             * one empty block.
             */
        }
        if (bstate == block_done) {
            if (flush == Z_PARTIAL_FLUSH) {
                _tr_align(s);
            } else { /* FULL_FLUSH or SYNC_FLUSH */
                _tr_stored_block(s, (char*)0, 0L, 0);
                /* For a full flush, this empty block will be recognized
                 * as a special marker by inflate_sync().
                 */
                if (flush == Z_FULL_FLUSH) {
                    CLEAR_HASH(s);             /* forget history */
                }
            }
            flush_pending(strm);
            if (strm->avail_out == 0) {
                s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
                return Z_OK;
            }
        }
    }
    Assert(strm->avail_out > 0, "bug2");

    if (flush != Z_FINISH) return Z_OK;
    if (s->wrap <= 0) return Z_STREAM_END;

    /* Write the trailer */
#ifdef GZIP
    if (s->wrap == 2) {
        put_byte(s, (Byte)(strm->adler & 0xff));
        put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
        put_byte(s, (Byte)((strm->adler >> 16) & 0xff));
        put_byte(s, (Byte)((strm->adler >> 24) & 0xff));
        put_byte(s, (Byte)(strm->total_in & 0xff));
        put_byte(s, (Byte)((strm->total_in >> 8) & 0xff));
        put_byte(s, (Byte)((strm->total_in >> 16) & 0xff));
        put_byte(s, (Byte)((strm->total_in >> 24) & 0xff));
    }
    else
#endif
    {
        putShortMSB(s, (uInt)(strm->adler >> 16));
        putShortMSB(s, (uInt)(strm->adler & 0xffff));
    }
    flush_pending(strm);
    /* If avail_out is zero, the application will call deflate again
     * to flush the rest.
     */
    if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */
    return s->pending != 0 ? Z_OK : Z_STREAM_END;
}

/* ========================================================================= */
int ZEXPORT deflateEnd (strm)
    z_streamp strm;
{
    int status;

    if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;

    status = strm->state->status;
    if (status != INIT_STATE &&
        status != EXTRA_STATE &&
        status != NAME_STATE &&
        status != COMMENT_STATE &&
        status != HCRC_STATE &&
        status != BUSY_STATE &&
        status != FINISH_STATE) {
        return Z_STREAM_ERROR;
    }

    /* Deallocate in reverse order of allocations: */
    TRY_FREE(strm, strm->state->pending_buf);
    TRY_FREE(strm, strm->state->head);
    TRY_FREE(strm, strm->state->prev);
    TRY_FREE(strm, strm->state->window);

    ZFREE(strm, strm->state);
    strm->state = Z_NULL;

    return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
}
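
#if 0
/* Illustrative, hypothetical helper (added for documentation; not part of
 * this file and not compiled): compresses one complete in-memory buffer with
 * the zlib wrapper.  It shows the intended calling pattern for deflate():
 * call with Z_FINISH until Z_STREAM_END, then release the state with
 * deflateEnd().  All names are assumptions; the destination buffer is
 * assumed to be at least deflateBound() bytes, so one call suffices.
 */
local int example_compress_buffer(Bytef *dest, uLongf *dest_len,
                                  const Bytef *src, uLong src_len)
{
    z_stream strm;
    int err;

    strm.zalloc = (alloc_func)0;
    strm.zfree  = (free_func)0;
    strm.opaque = (voidpf)0;
    err = deflateInit(&strm, Z_DEFAULT_COMPRESSION);
    if (err != Z_OK) return err;

    strm.next_in   = (Bytef *)src;
    strm.avail_in  = (uInt)src_len;
    strm.next_out  = dest;
    strm.avail_out = (uInt)*dest_len;

    err = deflate(&strm, Z_FINISH);
    if (err != Z_STREAM_END) {
        (void) deflateEnd(&strm);
        return err == Z_OK ? Z_BUF_ERROR : err; /* output buffer too small */
    }
    *dest_len = strm.total_out;
    return deflateEnd(&strm);
}
#endif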

/* =========================================================================
 * Copy the source state to the destination state.
 * To simplify the source, this is not supported for 16-bit MSDOS (which
 * doesn't have enough memory anyway to duplicate compression states).
 */
int ZEXPORT deflateCopy (dest, source)
    z_streamp dest;
    z_streamp source;
{
#ifdef MAXSEG_64K
    return Z_STREAM_ERROR;
#else
    deflate_state *ds;
    deflate_state *ss;
    ushf *overlay;


    if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) {
        return Z_STREAM_ERROR;
    }

    ss = source->state;

    zmemcpy(dest, source, sizeof(z_stream));

    ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
    if (ds == Z_NULL) return Z_MEM_ERROR;
    dest->state = (struct internal_state FAR *) ds;
    zmemcpy(ds, ss, sizeof(deflate_state));
    ds->strm = dest;

    ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
    ds->prev   = (Posf *)  ZALLOC(dest, ds->w_size, sizeof(Pos));
    ds->head   = (Posf *)  ZALLOC(dest, ds->hash_size, sizeof(Pos));
    overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
    ds->pending_buf = (uchf *) overlay;

    if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
        ds->pending_buf == Z_NULL) {
        deflateEnd (dest);
        return Z_MEM_ERROR;
    }
    /* following zmemcpy do not work for 16-bit MSDOS */
    zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
    zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
    zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
    zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);

    ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
    ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
    ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;

    ds->l_desc.dyn_tree = ds->dyn_ltree;
    ds->d_desc.dyn_tree = ds->dyn_dtree;
    ds->bl_desc.dyn_tree = ds->bl_tree;

    return Z_OK;
#endif /* MAXSEG_64K */
}

/* ===========================================================================
 * Read a new buffer from the current input stream, update the adler32
 * and total number of bytes read.  All deflate() input goes through
 * this function so some applications may wish to modify it to avoid
 * allocating a large strm->next_in buffer and copying from it.
 * (See also flush_pending()).
 */
local int read_buf(strm, buf, size)
    z_streamp strm;
    Bytef *buf;
    unsigned size;
{
    unsigned len = strm->avail_in;

    if (len > size) len = size;
    if (len == 0) return 0;

    strm->avail_in -= len;

    if (strm->state->wrap == 1) {
        strm->adler = adler32(strm->adler, strm->next_in, len);
    }
#ifdef GZIP
    else if (strm->state->wrap == 2) {
        strm->adler = crc32(strm->adler, strm->next_in, len);
    }
#endif
    zmemcpy(buf, strm->next_in, len);
    strm->next_in  += len;
    strm->total_in += len;

    return (int)len;
}

/* ===========================================================================
 * Initialize the "longest match" routines for a new zlib stream
 */
local void lm_init (s)
    deflate_state *s;
{
    s->window_size = (ulg)2L*s->w_size;

    CLEAR_HASH(s);

    /* Set the default configuration parameters:
     */
    s->max_lazy_match   = configuration_table[s->level].max_lazy;
    s->good_match       = configuration_table[s->level].good_length;
    s->nice_match       = configuration_table[s->level].nice_length;
    s->max_chain_length = configuration_table[s->level].max_chain;

    s->strstart = 0;
    s->block_start = 0L;
    s->lookahead = 0;
    s->match_length = s->prev_length = MIN_MATCH-1;
    s->match_available = 0;
    s->ins_h = 0;
#ifndef FASTEST
#ifdef ASMV
    match_init(); /* initialize the asm code */
#endif
#endif
}

#ifndef FASTEST
/* ===========================================================================
 * Set match_start to the longest match starting at the given string and
 * return its length. Matches shorter or equal to prev_length are discarded,
 * in which case the result is equal to prev_length and match_start is
 * garbage.
 * IN assertions: cur_match is the head of the hash chain for the current
 *   string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
 * OUT assertion: the match length is not greater than s->lookahead.
 */
#ifndef ASMV
/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
 * match.S. The code will be functionally equivalent.
 */
local uInt longest_match(s, cur_match)
    deflate_state *s;
    IPos cur_match;                             /* current match */
{
    unsigned chain_length = s->max_chain_length;/* max hash chain length */
    register Bytef *scan = s->window + s->strstart; /* current string */
    register Bytef *match;                      /* matched string */
    register int len;                           /* length of current match */
    int best_len = s->prev_length;              /* best match length so far */
    int nice_match = s->nice_match;             /* stop if match long enough */
    IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
        s->strstart - (IPos)MAX_DIST(s) : NIL;
    /* Stop when cur_match becomes <= limit. To simplify the code,
     * we prevent matches with the string of window index 0.
     */
    Posf *prev = s->prev;
    uInt wmask = s->w_mask;

#ifdef UNALIGNED_OK
    /* Compare two bytes at a time. Note: this is not always beneficial.
     * Try with and without -DUNALIGNED_OK to check.
     */
    register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
    register ush scan_start = *(ushf*)scan;
    register ush scan_end   = *(ushf*)(scan+best_len-1);
#else
    register Bytef *strend = s->window + s->strstart + MAX_MATCH;
    register Byte scan_end1 = scan[best_len-1];
    register Byte scan_end  = scan[best_len];
#endif

    /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
     * It is easy to get rid of this optimization if necessary.
     */
    Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");

    /* Do not waste too much time if we already have a good match: */
    if (s->prev_length >= s->good_match) {
        chain_length >>= 2;
    }
    /* Do not look for matches beyond the end of the input. This is necessary
     * to make deflate deterministic.
     */
    if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;

    Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");

    do {
        Assert(cur_match < s->strstart, "no future");
        match = s->window + cur_match;

        /* Skip to next match if the match length cannot increase
         * or if the match length is less than 2.  Note that the checks below
         * for insufficient lookahead only occur occasionally for performance
         * reasons.  Therefore uninitialized memory will be accessed, and
         * conditional jumps will be made that depend on those values.
         * However the length of the match is limited to the lookahead, so
         * the output of deflate is not affected by the uninitialized values.
         */
#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
        /* This code assumes sizeof(unsigned short) == 2. Do not use
         * UNALIGNED_OK if your compiler uses a different size.
         */
        if (*(ushf*)(match+best_len-1) != scan_end ||
            *(ushf*)match != scan_start) continue;

        /* It is not necessary to compare scan[2] and match[2] since they are
         * always equal when the other bytes match, given that the hash keys
         * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
         * strstart+3, +5, ... up to strstart+257. We check for insufficient
         * lookahead only every 4th comparison; the 128th check will be made
         * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
         * necessary to put more guard bytes at the end of the window, or
         * to check more often for insufficient lookahead.
         */
        Assert(scan[2] == match[2], "scan[2]?");
        scan++, match++;
        do {
        } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 scan < strend);
        /* The funny "do {}" generates better code on most compilers */

        /* Here, scan <= window+strstart+257 */
        Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
        if (*scan == *match) scan++;

        len = (MAX_MATCH - 1) - (int)(strend-scan);
        scan = strend - (MAX_MATCH-1);

#else /* UNALIGNED_OK */

        if (match[best_len]   != scan_end  ||
            match[best_len-1] != scan_end1 ||
            *match            != *scan     ||
            *++match          != scan[1])      continue;

        /* The check at best_len-1 can be removed because it will be made
         * again later. (This heuristic is not always a win.)
         * It is not necessary to compare scan[2] and match[2] since they
         * are always equal when the other bytes match, given that
         * the hash keys are equal and that HASH_BITS >= 8.
         */
        scan += 2, match++;
        Assert(*scan == *match, "match[2]?");

        /* We check for insufficient lookahead only every 8th comparison;
         * the 256th check will be made at strstart+258.
         */
        do {
        } while (*++scan == *++match && *++scan == *++match &&
                 *++scan == *++match && *++scan == *++match &&
                 *++scan == *++match && *++scan == *++match &&
                 *++scan == *++match && *++scan == *++match &&
                 scan < strend);

        Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");

        len = MAX_MATCH - (int)(strend - scan);
        scan = strend - MAX_MATCH;

#endif /* UNALIGNED_OK */

        if (len > best_len) {
            s->match_start = cur_match;
            best_len = len;
            if (len >= nice_match) break;
#ifdef UNALIGNED_OK
            scan_end = *(ushf*)(scan+best_len-1);
#else
            scan_end1 = scan[best_len-1];
            scan_end  = scan[best_len];
#endif
        }
    } while ((cur_match = prev[cur_match & wmask]) > limit
             && --chain_length != 0);

    if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
    return s->lookahead;
}
#endif /* ASMV */
#endif /* FASTEST */

/* ---------------------------------------------------------------------------
 * Optimized version for level == 1 or strategy == Z_RLE only
 */
local uInt longest_match_fast(s, cur_match)
    deflate_state *s;
    IPos cur_match;                             /* current match */
{
    register Bytef *scan = s->window + s->strstart; /* current string */
    register Bytef *match;                      /* matched string */
    register int len;                           /* length of current match */
    register Bytef *strend = s->window + s->strstart + MAX_MATCH;

    /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
     * It is easy to get rid of this optimization if necessary.
     */
    Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");

    Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");

    Assert(cur_match < s->strstart, "no future");

    match = s->window + cur_match;

    /* Return failure if the match length is less than 2:
     */
    if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;

    /* The check at best_len-1 can be removed because it will be made
     * again later. (This heuristic is not always a win.)
     * It is not necessary to compare scan[2] and match[2] since they
     * are always equal when the other bytes match, given that
     * the hash keys are equal and that HASH_BITS >= 8.
     */
    scan += 2, match += 2;
    Assert(*scan == *match, "match[2]?");

    /* We check for insufficient lookahead only every 8th comparison;
     * the 256th check will be made at strstart+258.
     */
    do {
    } while (*++scan == *++match && *++scan == *++match &&
             *++scan == *++match && *++scan == *++match &&
             *++scan == *++match && *++scan == *++match &&
             *++scan == *++match && *++scan == *++match &&
             scan < strend);

    Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");

    len = MAX_MATCH - (int)(strend - scan);

    if (len < MIN_MATCH) return MIN_MATCH - 1;

    s->match_start = cur_match;
    return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead;
}

#ifdef DEBUG
/* ===========================================================================
 * Check that the match at match_start is indeed a match.
 */
local void check_match(s, start, match, length)
    deflate_state *s;
    IPos start, match;
    int length;
{
    /* check that the match is indeed a match */
    if (zmemcmp(s->window + match,
                s->window + start, length) != EQUAL) {
        fprintf(stderr, " start %u, match %u, length %d\n",
                start, match, length);
        do {
            fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
        } while (--length != 0);
        z_error("invalid match");
    }
    if (z_verbose > 1) {
        fprintf(stderr,"\\[%d,%d]", start-match, length);
        do { putc(s->window[start++], stderr); } while (--length != 0);
    }
}
#else
#  define check_match(s, start, match, length)
#endif /* DEBUG */

/* ===========================================================================
 * Fill the window when the lookahead becomes insufficient.
 * Updates strstart and lookahead.
 *
 * IN assertion: lookahead < MIN_LOOKAHEAD
 * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
 *    At least one byte has been read, or avail_in == 0; reads are
 *    performed for at least two bytes (required for the zip translate_eol
 *    option -- not supported here).
 */
local void fill_window(s)
    deflate_state *s;
{
    register unsigned n, m;
    register Posf *p;
    unsigned more;    /* Amount of free space at the end of the window. */
    uInt wsize = s->w_size;

    do {
        more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);

        /* Deal with !@#$% 64K limit: */
        if (sizeof(int) <= 2) {
            if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
                more = wsize;

            } else if (more == (unsigned)(-1)) {
                /* Very unlikely, but possible on 16 bit machine if
                 * strstart == 0 && lookahead == 1 (input done a byte at time)
                 */
                more--;
            }
        }

        /* If the window is almost full and there is insufficient lookahead,
         * move the upper half to the lower one to make room in the upper half.
         */
        if (s->strstart >= wsize+MAX_DIST(s)) {

            zmemcpy(s->window, s->window+wsize, (unsigned)wsize);
            s->match_start -= wsize;
            s->strstart    -= wsize; /* we now have strstart >= MAX_DIST */
            s->block_start -= (long) wsize;

            /* Slide the hash table (could be avoided with 32 bit values
               at the expense of memory usage). We slide even when level == 0
               to keep the hash table consistent if we switch back to level > 0
               later. (Using level 0 permanently is not an optimal usage of
               zlib, so we don't care about this pathological case.)
             */
            /* %%% avoid this when Z_RLE */
            n = s->hash_size;
            p = &s->head[n];
            do {
                m = *--p;
                *p = (Pos)(m >= wsize ? m-wsize : NIL);
            } while (--n);

            n = wsize;
#ifndef FASTEST
            p = &s->prev[n];
            do {
                m = *--p;
                *p = (Pos)(m >= wsize ? m-wsize : NIL);
                /* If n is not on any hash chain, prev[n] is garbage but
                 * its value will never be used.
                 */
            } while (--n);
#endif
            more += wsize;
        }
        if (s->strm->avail_in == 0) return;

        /* If there was no sliding:
         *    strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
         *    more == window_size - lookahead - strstart
         * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
         * => more >= window_size - 2*WSIZE + 2
         * In the BIG_MEM or MMAP case (not yet supported),
         *   window_size == input_size + MIN_LOOKAHEAD  &&
         *   strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
         * Otherwise, window_size == 2*WSIZE so more >= 2.
         * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
         */
        Assert(more >= 2, "more < 2");

        n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
        s->lookahead += n;

        /* Initialize the hash value now that we have some input: */
        if (s->lookahead >= MIN_MATCH) {
            s->ins_h = s->window[s->strstart];
            UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
#if MIN_MATCH != 3
            Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
        }
        /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
         * but this is not important since only literal bytes will be emitted.
         */

    } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
}

/* ===========================================================================
 * Flush the current block, with given end-of-file flag.
 * IN assertion: strstart is set to the end of the current match.
 */
#define FLUSH_BLOCK_ONLY(s, eof) { \
   _tr_flush_block(s, (s->block_start >= 0L ? \
                   (charf *)&s->window[(unsigned)s->block_start] : \
                   (charf *)Z_NULL), \
                (ulg)((long)s->strstart - s->block_start), \
                (eof)); \
   s->block_start = s->strstart; \
   flush_pending(s->strm); \
   Tracev((stderr,"[FLUSH]")); \
}

/* Same but force premature exit if necessary. */
#define FLUSH_BLOCK(s, eof) { \
   FLUSH_BLOCK_ONLY(s, eof); \
   if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
}

/* ===========================================================================
 * Copy without compression as much as possible from the input stream, return
 * the current block state.
 * This function does not insert new strings in the dictionary since
 * uncompressible data is probably not useful. This function is used
 * only for the level=0 compression option.
 * NOTE: this function should be optimized to avoid extra copying from
 * window to pending_buf.
 */
local block_state deflate_stored(s, flush)
    deflate_state *s;
    int flush;
{
    /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
     * to pending_buf_size, and each stored block has a 5 byte header:
     */
    ulg max_block_size = 0xffff;
    ulg max_start;

    if (max_block_size > s->pending_buf_size - 5) {
        max_block_size = s->pending_buf_size - 5;
    }

    /* Copy as much as possible from input to output: */
    for (;;) {
        /* Fill the window as much as possible: */
        if (s->lookahead <= 1) {

            Assert(s->strstart < s->w_size+MAX_DIST(s) ||
                   s->block_start >= (long)s->w_size, "slide too late");

            fill_window(s);
            if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;

            if (s->lookahead == 0) break; /* flush the current block */
        }
        Assert(s->block_start >= 0L, "block gone");

        s->strstart += s->lookahead;
        s->lookahead = 0;

        /* Emit a stored block if pending_buf will be full: */
        max_start = s->block_start + max_block_size;
        if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
            /* strstart == 0 is possible when wraparound on 16-bit machine */
            s->lookahead = (uInt)(s->strstart - max_start);
            s->strstart = (uInt)max_start;
            FLUSH_BLOCK(s, 0);
        }
        /* Flush if we may have to slide, otherwise block_start may become
         * negative and the data will be gone:
         */
        if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
            FLUSH_BLOCK(s, 0);
        }
    }
    FLUSH_BLOCK(s, flush == Z_FINISH);
    return flush == Z_FINISH ? finish_done : block_done;
}

/* ===========================================================================
 * Compress as much as possible from the input stream, return the current
 * block state.
 * This function does not perform lazy evaluation of matches and inserts
 * new strings in the dictionary only for unmatched strings or for short
 * matches. It is used only for the fast compression options.
 */
local block_state deflate_fast(s, flush)
    deflate_state *s;
    int flush;
{
    IPos hash_head = NIL; /* head of the hash chain */
    int bflush;           /* set if current block must be flushed */

    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the next match, plus MIN_MATCH bytes to insert the
         * string following the next match.
         */
        if (s->lookahead < MIN_LOOKAHEAD) {
            fill_window(s);
            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* Insert the string window[strstart .. strstart+2] in the
         * dictionary, and set hash_head to the head of the hash chain:
         */
        if (s->lookahead >= MIN_MATCH) {
            INSERT_STRING(s, s->strstart, hash_head);
        }

        /* Find the longest match, discarding those <= prev_length.
         * At this point we have always match_length < MIN_MATCH
         */
        if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
            /* To simplify the code, we prevent matches with the string
             * of window index 0 (in particular we have to avoid a match
             * of the string with itself at the start of the input file).
             */
#ifdef FASTEST
            if ((s->strategy != Z_HUFFMAN_ONLY && s->strategy != Z_RLE) ||
                (s->strategy == Z_RLE && s->strstart - hash_head == 1)) {
                s->match_length = longest_match_fast (s, hash_head);
            }
#else
            if (s->strategy != Z_HUFFMAN_ONLY && s->strategy != Z_RLE) {
                s->match_length = longest_match (s, hash_head);
            } else if (s->strategy == Z_RLE && s->strstart - hash_head == 1) {
                s->match_length = longest_match_fast (s, hash_head);
            }
#endif
            /* longest_match() or longest_match_fast() sets match_start */
        }
        if (s->match_length >= MIN_MATCH) {
            check_match(s, s->strstart, s->match_start, s->match_length);

            _tr_tally_dist(s, s->strstart - s->match_start,
                           s->match_length - MIN_MATCH, bflush);

            s->lookahead -= s->match_length;

            /* Insert new strings in the hash table only if the match length
             * is not too large. This saves time but degrades compression.
             */
#ifndef FASTEST
            if (s->match_length <= s->max_insert_length &&
                s->lookahead >= MIN_MATCH) {
                s->match_length--; /* string at strstart already in table */
                do {
                    s->strstart++;
                    INSERT_STRING(s, s->strstart, hash_head);
                    /* strstart never exceeds WSIZE-MAX_MATCH, so there are
                     * always MIN_MATCH bytes ahead.
                     */
                } while (--s->match_length != 0);
                s->strstart++;
            } else
#endif
            {
                s->strstart += s->match_length;
                s->match_length = 0;
                s->ins_h = s->window[s->strstart];
                UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
#if MIN_MATCH != 3
                Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
                /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
                 * matter since it will be recomputed at next deflate call.
                 */
            }
        } else {
            /* No match, output a literal byte */
            Tracevv((stderr,"%c", s->window[s->strstart]));
            _tr_tally_lit (s, s->window[s->strstart], bflush);
            s->lookahead--;
            s->strstart++;
        }
        if (bflush) FLUSH_BLOCK(s, 0);
    }
    FLUSH_BLOCK(s, flush == Z_FINISH);
    return flush == Z_FINISH ? finish_done : block_done;
}

#ifndef FASTEST
/* ===========================================================================
 * Same as above, but achieves better compression. We use a lazy
 * evaluation for matches: a match is finally adopted only if there is
 * no better match at the next window position.
 */
local block_state deflate_slow(s, flush)
    deflate_state *s;
    int flush;
{
    IPos hash_head = NIL;    /* head of hash chain */
    int bflush;              /* set if current block must be flushed */

    /* Process the input block. */
    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the next match, plus MIN_MATCH bytes to insert the
         * string following the next match.
         */
        if (s->lookahead < MIN_LOOKAHEAD) {
            fill_window(s);
            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* Insert the string window[strstart .. strstart+2] in the
         * dictionary, and set hash_head to the head of the hash chain:
         */
        if (s->lookahead >= MIN_MATCH) {
            INSERT_STRING(s, s->strstart, hash_head);
        }

        /* Find the longest match, discarding those <= prev_length.
         */
        s->prev_length = s->match_length, s->prev_match = s->match_start;
        s->match_length = MIN_MATCH-1;

        if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
            s->strstart - hash_head <= MAX_DIST(s)) {
            /* To simplify the code, we prevent matches with the string
             * of window index 0 (in particular we have to avoid a match
             * of the string with itself at the start of the input file).
             */
            if (s->strategy != Z_HUFFMAN_ONLY && s->strategy != Z_RLE) {
                s->match_length = longest_match (s, hash_head);
            } else if (s->strategy == Z_RLE && s->strstart - hash_head == 1) {
                s->match_length = longest_match_fast (s, hash_head);
            }
            /* longest_match() or longest_match_fast() sets match_start */

            if (s->match_length <= 5 && (s->strategy == Z_FILTERED
#if TOO_FAR <= 32767
                || (s->match_length == MIN_MATCH &&
                    s->strstart - s->match_start > TOO_FAR)
#endif
                )) {

                /* If prev_match is also MIN_MATCH, match_start is garbage
                 * but we will ignore the current match anyway.
                 */
                s->match_length = MIN_MATCH-1;
            }
        }
        /* If there was a match at the previous step and the current
         * match is not better, output the previous match:
         */
        if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
            uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
            /* Do not insert strings in hash table beyond this. */

            check_match(s, s->strstart-1, s->prev_match, s->prev_length);

            _tr_tally_dist(s, s->strstart -1 - s->prev_match,
                           s->prev_length - MIN_MATCH, bflush);

            /* Insert in hash table all strings up to the end of the match.
             * strstart-1 and strstart are already inserted. If there is not
             * enough lookahead, the last two strings are not inserted in
             * the hash table.
             */
            s->lookahead -= s->prev_length-1;
            s->prev_length -= 2;
            do {
                if (++s->strstart <= max_insert) {
                    INSERT_STRING(s, s->strstart, hash_head);
                }
            } while (--s->prev_length != 0);
            s->match_available = 0;
            s->match_length = MIN_MATCH-1;
            s->strstart++;

            if (bflush) FLUSH_BLOCK(s, 0);

        } else if (s->match_available) {
            /* If there was no match at the previous position, output a
             * single literal. If there was a match but the current match
             * is longer, truncate the previous match to a single literal.
             */
            Tracevv((stderr,"%c", s->window[s->strstart-1]));
            _tr_tally_lit(s, s->window[s->strstart-1], bflush);
            if (bflush) {
                FLUSH_BLOCK_ONLY(s, 0);
            }
            s->strstart++;
            s->lookahead--;
            if (s->strm->avail_out == 0) return need_more;
        } else {
            /* There is no previous match to compare with, wait for
             * the next step to decide.
             */
            s->match_available = 1;
            s->strstart++;
            s->lookahead--;
        }
    }
    Assert (flush != Z_NO_FLUSH, "no flush?");
    if (s->match_available) {
        Tracevv((stderr,"%c", s->window[s->strstart-1]));
        _tr_tally_lit(s, s->window[s->strstart-1], bflush);
        s->match_available = 0;
    }
    FLUSH_BLOCK(s, flush == Z_FINISH);
    return flush == Z_FINISH ? finish_done : block_done;
}
#endif /* FASTEST */

#if 0
/* ===========================================================================
 * For Z_RLE, simply look for runs of bytes, generate matches only of distance
 * one.  Do not maintain a hash table.
 * (It will be regenerated if this run of deflate switches away from Z_RLE.)
 */
local block_state deflate_rle(s, flush)
    deflate_state *s;
    int flush;
{
    int bflush;         /* set if current block must be flushed */
    uInt run;           /* length of run */
    uInt max;           /* maximum length of run */
    uInt prev;          /* byte at distance one to match */
    Bytef *scan;        /* scan for end of run */

    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the longest encodable run.
         */
        if (s->lookahead < MAX_MATCH) {
            fill_window(s);
            if (s->lookahead < MAX_MATCH && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* See how many times the previous byte repeats */
        run = 0;
        if (s->strstart > 0) {      /* if there is a previous byte, that is */
            max = s->lookahead < MAX_MATCH ? s->lookahead : MAX_MATCH;
            scan = s->window + s->strstart - 1;
            prev = *scan++;
            do {
                if (*scan++ != prev)
                    break;
            } while (++run < max);
        }

        /* Emit match if have run of MIN_MATCH or longer, else emit literal */
        if (run >= MIN_MATCH) {
            check_match(s, s->strstart, s->strstart - 1, run);
            _tr_tally_dist(s, 1, run - MIN_MATCH, bflush);
            s->lookahead -= run;
            s->strstart += run;
        } else {
            /* No match, output a literal byte */
            Tracevv((stderr,"%c", s->window[s->strstart]));
            _tr_tally_lit (s, s->window[s->strstart], bflush);
            s->lookahead--;
            s->strstart++;
        }
        if (bflush) FLUSH_BLOCK(s, 0);
    }
    FLUSH_BLOCK(s, flush == Z_FINISH);
    return flush == Z_FINISH ? finish_done : block_done;
}
#endif