1 /* deflate.c -- compress data using the deflation algorithm 2 * Copyright (C) 1995-2010 Jean-loup Gailly and Mark Adler 3 * For conditions of distribution and use, see copyright notice in zlib.h 4 */ 5 6 /* 7 * ALGORITHM 8 * 9 * The "deflation" process depends on being able to identify portions 10 * of the input text which are identical to earlier input (within a 11 * sliding window trailing behind the input currently being processed). 12 * 13 * The most straightforward technique turns out to be the fastest for 14 * most input files: try all possible matches and select the longest. 15 * The key feature of this algorithm is that insertions into the string 16 * dictionary are very simple and thus fast, and deletions are avoided 17 * completely. Insertions are performed at each input character, whereas 18 * string matches are performed only when the previous match ends. So it 19 * is preferable to spend more time in matches to allow very fast string 20 * insertions and avoid deletions. The matching algorithm for small 21 * strings is inspired from that of Rabin & Karp. A brute force approach 22 * is used to find longer strings when a small match has been found. 23 * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze 24 * (by Leonid Broukhis). 25 * A previous version of this file used a more sophisticated algorithm 26 * (by Fiala and Greene) which is guaranteed to run in linear amortized 27 * time, but has a larger average cost, uses more memory and is patented. 28 * However the F&G algorithm may be faster for some highly redundant 29 * files if the parameter max_chain_length (described below) is too large. 30 * 31 * ACKNOWLEDGEMENTS 32 * 33 * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and 34 * I found it in 'freeze' written by Leonid Broukhis. 35 * Thanks to many people for bug reports and testing. 36 * 37 * REFERENCES 38 * 39 * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification". 
 *      Available in http://www.ietf.org/rfc/rfc1951.txt
 *
 *      A description of the Rabin and Karp algorithm is given in the book
 *         "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
 *
 *      Fiala,E.R., and Greene,D.H.
 *         Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595
 *
 */

/* @(#) $Id$ */

#include "deflate.h"

const char deflate_copyright[] =
   " deflate 1.2.5 Copyright 1995-2010 Jean-loup Gailly and Mark Adler ";
/*
  If you use the zlib library in a product, an acknowledgment is welcome
  in the documentation of your product. If for some reason you cannot
  include such an acknowledgment, I would appreciate that you keep this
  copyright string in the executable of your product.
 */

/* ===========================================================================
 *  Function prototypes.
 */
typedef enum {
    need_more,      /* block not completed, need more input or more output */
    block_done,     /* block flush performed */
    finish_started, /* finish started, need only more output at next deflate */
    finish_done     /* finish done, accept no more input or output */
} block_state;

/* Compression function. Returns the block state after the call.
 * NOTE(review): this tree has the Chromium "class" patch — compress
 * functions take an extra 'clas' argument tagging the data class of the
 * current input (0 = standard).
 */
typedef block_state (*compress_func) OF((deflate_state *s, int flush,
                                         int clas));

local void fill_window    OF((deflate_state *s));
local block_state deflate_stored OF((deflate_state *s, int flush, int clas));
local block_state deflate_fast   OF((deflate_state *s, int flush, int clas));
#ifndef FASTEST
local block_state deflate_slow   OF((deflate_state *s, int flush, int clas));
#endif
local block_state deflate_rle    OF((deflate_state *s, int flush));
local block_state deflate_huff   OF((deflate_state *s, int flush));
local void lm_init        OF((deflate_state *s));
local void putShortMSB    OF((deflate_state *s, uInt b));
local void flush_pending  OF((z_streamp strm));
local int read_buf        OF((z_streamp strm, Bytef *buf, unsigned size));
#ifdef ASMV
      void match_init OF((void)); /* asm code initialization */
      uInt longest_match  OF((deflate_state *s, IPos cur_match, int clas));
#else
local uInt longest_match  OF((deflate_state *s, IPos cur_match, int clas));
#endif

#ifdef DEBUG
local  void check_match OF((deflate_state *s, IPos start, IPos match,
                            int length));
#endif

/* ===========================================================================
 * Local data
 */

#define NIL 0
/* Tail of hash chains */

#ifndef TOO_FAR
#  define TOO_FAR 4096
#endif
/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */

/* Values for max_lazy_match, good_match and max_chain_length, depending on
 * the desired pack level (0..9). The values given below have been tuned to
 * exclude worst case performance for pathological files. Better values may be
 * found for specific files.
 */
typedef struct config_s {
   ush good_length; /* reduce lazy search above this match length */
   ush max_lazy;    /* do not perform lazy search above this match length */
   ush nice_length; /* quit search above this match length */
   ush max_chain;   /* limit on hash-chain traversal per match attempt */
   compress_func func; /* block-compression strategy for this level */
} config;

#ifdef FASTEST
local const config configuration_table[2] = {
/*      good lazy nice chain */
/* 0 */ {0,    0,  0,    0, deflate_stored},  /* store only */
/* 1 */ {4,    4,  8,    4, deflate_fast}};   /* max speed, no lazy matches */
#else
local const config configuration_table[10] = {
/*      good lazy nice chain */
/* 0 */ {0,    0,  0,    0, deflate_stored},  /* store only */
/* 1 */ {4,    4,  8,    4, deflate_fast},    /* max speed, no lazy matches */
/* 2 */ {4,    5, 16,    8, deflate_fast},
/* 3 */ {4,    6, 32,   32, deflate_fast},

/* 4 */ {4,    4, 16,   16, deflate_slow},    /* lazy matches */
/* 5 */ {8,   16, 32,   32, deflate_slow},
/* 6 */ {8,   16, 128, 128, deflate_slow},
/* 7 */ {8,   32, 128, 256, deflate_slow},
/* 8 */ {32, 128, 258, 1024, deflate_slow},
/* 9 */ {32, 258, 258, 4096, deflate_slow}};  /* max compression */
#endif

/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
 * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
 * meaning.
 */

#define EQUAL 0
/* result of memcmp for equal strings */

#ifndef NO_DUMMY_DECL
struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
#endif

/* ===========================================================================
 * Update a hash value with the given input byte
 * IN  assertion: all calls to UPDATE_HASH are made with consecutive
 *    input characters, so that a running hash key can be computed from the
 *    previous key instead of complete recalculation each time.
164 */ 165 #define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask) 166 167 168 /* =========================================================================== 169 * Insert string str in the dictionary and set match_head to the previous head 170 * of the hash chain (the most recent string with same hash key). Return 171 * the previous length of the hash chain. 172 * If this file is compiled with -DFASTEST, the compression level is forced 173 * to 1, and no hash chains are maintained. 174 * IN assertion: all calls to to INSERT_STRING are made with consecutive 175 * input characters and the first MIN_MATCH bytes of str are valid 176 * (except for the last MIN_MATCH-1 bytes of the input file). 177 */ 178 #ifdef FASTEST 179 #define INSERT_STRING(s, str, match_head) \ 180 (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ 181 match_head = s->head[s->ins_h], \ 182 s->head[s->ins_h] = (Pos)(str)) 183 #else 184 #define INSERT_STRING(s, str, match_head) \ 185 (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ 186 match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \ 187 s->head[s->ins_h] = (Pos)(str)) 188 #endif 189 190 /* =========================================================================== 191 * Initialize the hash table (avoiding 64K overflow for 16 bit systems). 192 * prev[] will be initialized on the fly. 
193 */ 194 #define CLEAR_HASH(s) \ 195 s->head[s->hash_size-1] = NIL; \ 196 zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head)); 197 198 /* ========================================================================= */ 199 int ZEXPORT deflateInit_(strm, level, version, stream_size) 200 z_streamp strm; 201 int level; 202 const char *version; 203 int stream_size; 204 { 205 return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, 206 Z_DEFAULT_STRATEGY, version, stream_size); 207 /* To do: ignore strm->next_in if we use it as window */ 208 } 209 210 /* ========================================================================= */ 211 int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, 212 version, stream_size) 213 z_streamp strm; 214 int level; 215 int method; 216 int windowBits; 217 int memLevel; 218 int strategy; 219 const char *version; 220 int stream_size; 221 { 222 deflate_state *s; 223 int wrap = 1; 224 static const char my_version[] = ZLIB_VERSION; 225 226 ushf *overlay; 227 /* We overlay pending_buf and d_buf+l_buf. This works since the average 228 * output size for (length,distance) codes is <= 24 bits. 
229 */ 230 231 if (version == Z_NULL || version[0] != my_version[0] || 232 stream_size != sizeof(z_stream)) { 233 return Z_VERSION_ERROR; 234 } 235 if (strm == Z_NULL) return Z_STREAM_ERROR; 236 237 strm->msg = Z_NULL; 238 if (strm->zalloc == (alloc_func)0) { 239 strm->zalloc = zcalloc; 240 strm->opaque = (voidpf)0; 241 } 242 if (strm->zfree == (free_func)0) strm->zfree = zcfree; 243 244 #ifdef FASTEST 245 if (level != 0) level = 1; 246 #else 247 if (level == Z_DEFAULT_COMPRESSION) level = 6; 248 #endif 249 250 if (windowBits < 0) { /* suppress zlib wrapper */ 251 wrap = 0; 252 windowBits = -windowBits; 253 } 254 #ifdef GZIP 255 else if (windowBits > 15) { 256 wrap = 2; /* write gzip wrapper instead */ 257 windowBits -= 16; 258 } 259 #endif 260 if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED || 261 windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || 262 strategy < 0 || strategy > Z_FIXED) { 263 return Z_STREAM_ERROR; 264 } 265 if (windowBits == 8) windowBits = 9; /* until 256-byte window bug fixed */ 266 s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state)); 267 if (s == Z_NULL) return Z_MEM_ERROR; 268 strm->state = (struct internal_state FAR *)s; 269 s->strm = strm; 270 271 s->wrap = wrap; 272 s->gzhead = Z_NULL; 273 s->w_bits = windowBits; 274 s->w_size = 1 << s->w_bits; 275 s->w_mask = s->w_size - 1; 276 277 s->hash_bits = memLevel + 7; 278 s->hash_size = 1 << s->hash_bits; 279 s->hash_mask = s->hash_size - 1; 280 s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH); 281 282 s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte)); 283 s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); 284 s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); 285 s->class_bitmap = NULL; 286 zmemzero(&s->cookie_locations, sizeof(s->cookie_locations)); 287 strm->clas = 0; 288 289 s->high_water = 0; /* nothing written to s->window yet */ 290 291 s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ 292 293 
overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); 294 s->pending_buf = (uchf *) overlay; 295 s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L); 296 297 if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || 298 s->pending_buf == Z_NULL) { 299 s->status = FINISH_STATE; 300 strm->msg = (char*)ERR_MSG(Z_MEM_ERROR); 301 deflateEnd (strm); 302 return Z_MEM_ERROR; 303 } 304 s->d_buf = overlay + s->lit_bufsize/sizeof(ush); 305 s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; 306 307 s->level = level; 308 s->strategy = strategy; 309 s->method = (Byte)method; 310 311 return deflateReset(strm); 312 } 313 314 /* ========================================================================= */ 315 int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength) 316 z_streamp strm; 317 const Bytef *dictionary; 318 uInt dictLength; 319 { 320 deflate_state *s; 321 uInt length = dictLength; 322 uInt n; 323 IPos hash_head = 0; 324 325 if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL || 326 strm->state->wrap == 2 || 327 (strm->state->wrap == 1 && strm->state->status != INIT_STATE)) 328 return Z_STREAM_ERROR; 329 330 s = strm->state; 331 if (s->wrap) 332 strm->adler = adler32(strm->adler, dictionary, dictLength); 333 334 if (length < MIN_MATCH) return Z_OK; 335 if (length > s->w_size) { 336 length = s->w_size; 337 dictionary += dictLength - length; /* use the tail of the dictionary */ 338 } 339 zmemcpy(s->window, dictionary, length); 340 s->strstart = length; 341 s->block_start = (long)length; 342 343 /* Insert all strings in the hash table (except for the last two bytes). 344 * s->lookahead stays null, so s->ins_h will be recomputed at the next 345 * call of fill_window. 
346 */ 347 s->ins_h = s->window[0]; 348 UPDATE_HASH(s, s->ins_h, s->window[1]); 349 for (n = 0; n <= length - MIN_MATCH; n++) { 350 INSERT_STRING(s, n, hash_head); 351 } 352 if (hash_head) hash_head = 0; /* to make compiler happy */ 353 return Z_OK; 354 } 355 356 /* ========================================================================= */ 357 int ZEXPORT deflateReset (strm) 358 z_streamp strm; 359 { 360 deflate_state *s; 361 362 if (strm == Z_NULL || strm->state == Z_NULL || 363 strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0) { 364 return Z_STREAM_ERROR; 365 } 366 367 strm->total_in = strm->total_out = 0; 368 strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */ 369 strm->data_type = Z_UNKNOWN; 370 371 s = (deflate_state *)strm->state; 372 s->pending = 0; 373 s->pending_out = s->pending_buf; 374 TRY_FREE(strm, s->class_bitmap); 375 s->class_bitmap = NULL; 376 377 if (s->wrap < 0) { 378 s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */ 379 } 380 s->status = s->wrap ? INIT_STATE : BUSY_STATE; 381 strm->adler = 382 #ifdef GZIP 383 s->wrap == 2 ? 
crc32(0L, Z_NULL, 0) : 384 #endif 385 adler32(0L, Z_NULL, 0); 386 s->last_flush = Z_NO_FLUSH; 387 388 _tr_init(s); 389 lm_init(s); 390 391 return Z_OK; 392 } 393 394 /* ========================================================================= */ 395 int ZEXPORT deflateSetHeader (strm, head) 396 z_streamp strm; 397 gz_headerp head; 398 { 399 if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; 400 if (strm->state->wrap != 2) return Z_STREAM_ERROR; 401 strm->state->gzhead = head; 402 return Z_OK; 403 } 404 405 /* ========================================================================= */ 406 int ZEXPORT deflatePrime (strm, bits, value) 407 z_streamp strm; 408 int bits; 409 int value; 410 { 411 if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; 412 strm->state->bi_valid = bits; 413 strm->state->bi_buf = (ush)(value & ((1 << bits) - 1)); 414 return Z_OK; 415 } 416 417 /* ========================================================================= */ 418 int ZEXPORT deflateParams(strm, level, strategy) 419 z_streamp strm; 420 int level; 421 int strategy; 422 { 423 deflate_state *s; 424 compress_func func; 425 int err = Z_OK; 426 427 if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; 428 s = strm->state; 429 430 #ifdef FASTEST 431 if (level != 0) level = 1; 432 #else 433 if (level == Z_DEFAULT_COMPRESSION) level = 6; 434 #endif 435 if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) { 436 return Z_STREAM_ERROR; 437 } 438 func = configuration_table[s->level].func; 439 440 if ((strategy != s->strategy || func != configuration_table[level].func) && 441 strm->total_in != 0) { 442 /* Flush the last buffer: */ 443 err = deflate(strm, Z_BLOCK); 444 } 445 if (s->level != level) { 446 s->level = level; 447 s->max_lazy_match = configuration_table[level].max_lazy; 448 s->good_match = configuration_table[level].good_length; 449 s->nice_match = configuration_table[level].nice_length; 450 s->max_chain_length = 
configuration_table[level].max_chain; 451 } 452 s->strategy = strategy; 453 return err; 454 } 455 456 /* ========================================================================= */ 457 int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain) 458 z_streamp strm; 459 int good_length; 460 int max_lazy; 461 int nice_length; 462 int max_chain; 463 { 464 deflate_state *s; 465 466 if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; 467 s = strm->state; 468 s->good_match = good_length; 469 s->max_lazy_match = max_lazy; 470 s->nice_match = nice_length; 471 s->max_chain_length = max_chain; 472 return Z_OK; 473 } 474 475 /* ========================================================================= 476 * For the default windowBits of 15 and memLevel of 8, this function returns 477 * a close to exact, as well as small, upper bound on the compressed size. 478 * They are coded as constants here for a reason--if the #define's are 479 * changed, then this function needs to be changed as well. The return 480 * value for 15 and 8 only works for those exact settings. 481 * 482 * For any setting other than those defaults for windowBits and memLevel, 483 * the value returned is a conservative worst case for the maximum expansion 484 * resulting from using fixed blocks instead of stored blocks, which deflate 485 * can emit on compressed data for some combinations of the parameters. 486 * 487 * This function could be more sophisticated to provide closer upper bounds for 488 * every combination of windowBits and memLevel. But even the conservative 489 * upper bound of about 14% expansion does not seem onerous for output buffer 490 * allocation. 
491 */ 492 uLong ZEXPORT deflateBound(strm, sourceLen) 493 z_streamp strm; 494 uLong sourceLen; 495 { 496 deflate_state *s; 497 uLong complen, wraplen; 498 Bytef *str; 499 500 /* conservative upper bound for compressed data */ 501 complen = sourceLen + 502 ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5; 503 504 /* if can't get parameters, return conservative bound plus zlib wrapper */ 505 if (strm == Z_NULL || strm->state == Z_NULL) 506 return complen + 6; 507 508 /* compute wrapper length */ 509 s = strm->state; 510 switch (s->wrap) { 511 case 0: /* raw deflate */ 512 wraplen = 0; 513 break; 514 case 1: /* zlib wrapper */ 515 wraplen = 6 + (s->strstart ? 4 : 0); 516 break; 517 case 2: /* gzip wrapper */ 518 wraplen = 18; 519 if (s->gzhead != Z_NULL) { /* user-supplied gzip header */ 520 if (s->gzhead->extra != Z_NULL) 521 wraplen += 2 + s->gzhead->extra_len; 522 str = s->gzhead->name; 523 if (str != Z_NULL) 524 do { 525 wraplen++; 526 } while (*str++); 527 str = s->gzhead->comment; 528 if (str != Z_NULL) 529 do { 530 wraplen++; 531 } while (*str++); 532 if (s->gzhead->hcrc) 533 wraplen += 2; 534 } 535 break; 536 default: /* for compiler happiness */ 537 wraplen = 6; 538 } 539 540 /* if not default parameters, return conservative bound */ 541 if (s->w_bits != 15 || s->hash_bits != 8 + 7) 542 return complen + wraplen; 543 544 /* default settings: return tight bound for that case */ 545 return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + 546 (sourceLen >> 25) + 13 - 6 + wraplen; 547 } 548 549 /* ========================================================================= 550 * Put a short in the pending buffer. The 16-bit value is put in MSB order. 551 * IN assertion: the stream state is correct and there is enough room in 552 * pending_buf. 
553 */ 554 local void putShortMSB (s, b) 555 deflate_state *s; 556 uInt b; 557 { 558 put_byte(s, (Byte)(b >> 8)); 559 put_byte(s, (Byte)(b & 0xff)); 560 } 561 562 /* ========================================================================= 563 * Flush as much pending output as possible. All deflate() output goes 564 * through this function so some applications may wish to modify it 565 * to avoid allocating a large strm->next_out buffer and copying into it. 566 * (See also read_buf()). 567 */ 568 local void flush_pending(strm) 569 z_streamp strm; 570 { 571 unsigned len = strm->state->pending; 572 573 if (len > strm->avail_out) len = strm->avail_out; 574 if (len == 0) return; 575 576 zmemcpy(strm->next_out, strm->state->pending_out, len); 577 strm->next_out += len; 578 strm->state->pending_out += len; 579 strm->total_out += len; 580 strm->avail_out -= len; 581 strm->state->pending -= len; 582 if (strm->state->pending == 0) { 583 strm->state->pending_out = strm->state->pending_buf; 584 } 585 } 586 587 /* ========================================================================= */ 588 int ZEXPORT deflate (strm, flush) 589 z_streamp strm; 590 int flush; 591 { 592 int old_flush; /* value of flush param for previous deflate call */ 593 deflate_state *s; 594 595 if (strm == Z_NULL || strm->state == Z_NULL || 596 flush > Z_BLOCK || flush < 0) { 597 return Z_STREAM_ERROR; 598 } 599 s = strm->state; 600 601 if (strm->next_out == Z_NULL || 602 (strm->next_in == Z_NULL && strm->avail_in != 0) || 603 (s->status == FINISH_STATE && flush != Z_FINISH)) { 604 ERR_RETURN(strm, Z_STREAM_ERROR); 605 } 606 if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR); 607 608 s->strm = strm; /* just in case */ 609 old_flush = s->last_flush; 610 s->last_flush = flush; 611 612 /* Write the header */ 613 if (s->status == INIT_STATE) { 614 #ifdef GZIP 615 if (s->wrap == 2) { 616 strm->adler = crc32(0L, Z_NULL, 0); 617 put_byte(s, 31); 618 put_byte(s, 139); 619 put_byte(s, 8); 620 if (s->gzhead 
== Z_NULL) { 621 put_byte(s, 0); 622 put_byte(s, 0); 623 put_byte(s, 0); 624 put_byte(s, 0); 625 put_byte(s, 0); 626 put_byte(s, s->level == 9 ? 2 : 627 (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? 628 4 : 0)); 629 put_byte(s, OS_CODE); 630 s->status = BUSY_STATE; 631 } 632 else { 633 put_byte(s, (s->gzhead->text ? 1 : 0) + 634 (s->gzhead->hcrc ? 2 : 0) + 635 (s->gzhead->extra == Z_NULL ? 0 : 4) + 636 (s->gzhead->name == Z_NULL ? 0 : 8) + 637 (s->gzhead->comment == Z_NULL ? 0 : 16) 638 ); 639 put_byte(s, (Byte)(s->gzhead->time & 0xff)); 640 put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff)); 641 put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff)); 642 put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff)); 643 put_byte(s, s->level == 9 ? 2 : 644 (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? 645 4 : 0)); 646 put_byte(s, s->gzhead->os & 0xff); 647 if (s->gzhead->extra != Z_NULL) { 648 put_byte(s, s->gzhead->extra_len & 0xff); 649 put_byte(s, (s->gzhead->extra_len >> 8) & 0xff); 650 } 651 if (s->gzhead->hcrc) 652 strm->adler = crc32(strm->adler, s->pending_buf, 653 s->pending); 654 s->gzindex = 0; 655 s->status = EXTRA_STATE; 656 } 657 } 658 else 659 #endif 660 { 661 uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8; 662 uInt level_flags; 663 664 if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2) 665 level_flags = 0; 666 else if (s->level < 6) 667 level_flags = 1; 668 else if (s->level == 6) 669 level_flags = 2; 670 else 671 level_flags = 3; 672 header |= (level_flags << 6); 673 if (s->strstart != 0) header |= PRESET_DICT; 674 header += 31 - (header % 31); 675 676 s->status = BUSY_STATE; 677 putShortMSB(s, header); 678 679 /* Save the adler32 of the preset dictionary: */ 680 if (s->strstart != 0) { 681 putShortMSB(s, (uInt)(strm->adler >> 16)); 682 putShortMSB(s, (uInt)(strm->adler & 0xffff)); 683 } 684 strm->adler = adler32(0L, Z_NULL, 0); 685 } 686 } 687 #ifdef GZIP 688 if (s->status == EXTRA_STATE) { 689 if (s->gzhead->extra != Z_NULL) { 690 uInt beg = 
s->pending; /* start of bytes to update crc */ 691 692 while (s->gzindex < (s->gzhead->extra_len & 0xffff)) { 693 if (s->pending == s->pending_buf_size) { 694 if (s->gzhead->hcrc && s->pending > beg) 695 strm->adler = crc32(strm->adler, s->pending_buf + beg, 696 s->pending - beg); 697 flush_pending(strm); 698 beg = s->pending; 699 if (s->pending == s->pending_buf_size) 700 break; 701 } 702 put_byte(s, s->gzhead->extra[s->gzindex]); 703 s->gzindex++; 704 } 705 if (s->gzhead->hcrc && s->pending > beg) 706 strm->adler = crc32(strm->adler, s->pending_buf + beg, 707 s->pending - beg); 708 if (s->gzindex == s->gzhead->extra_len) { 709 s->gzindex = 0; 710 s->status = NAME_STATE; 711 } 712 } 713 else 714 s->status = NAME_STATE; 715 } 716 if (s->status == NAME_STATE) { 717 if (s->gzhead->name != Z_NULL) { 718 uInt beg = s->pending; /* start of bytes to update crc */ 719 int val; 720 721 do { 722 if (s->pending == s->pending_buf_size) { 723 if (s->gzhead->hcrc && s->pending > beg) 724 strm->adler = crc32(strm->adler, s->pending_buf + beg, 725 s->pending - beg); 726 flush_pending(strm); 727 beg = s->pending; 728 if (s->pending == s->pending_buf_size) { 729 val = 1; 730 break; 731 } 732 } 733 val = s->gzhead->name[s->gzindex++]; 734 put_byte(s, val); 735 } while (val != 0); 736 if (s->gzhead->hcrc && s->pending > beg) 737 strm->adler = crc32(strm->adler, s->pending_buf + beg, 738 s->pending - beg); 739 if (val == 0) { 740 s->gzindex = 0; 741 s->status = COMMENT_STATE; 742 } 743 } 744 else 745 s->status = COMMENT_STATE; 746 } 747 if (s->status == COMMENT_STATE) { 748 if (s->gzhead->comment != Z_NULL) { 749 uInt beg = s->pending; /* start of bytes to update crc */ 750 int val; 751 752 do { 753 if (s->pending == s->pending_buf_size) { 754 if (s->gzhead->hcrc && s->pending > beg) 755 strm->adler = crc32(strm->adler, s->pending_buf + beg, 756 s->pending - beg); 757 flush_pending(strm); 758 beg = s->pending; 759 if (s->pending == s->pending_buf_size) { 760 val = 1; 761 break; 762 } 
763 } 764 val = s->gzhead->comment[s->gzindex++]; 765 put_byte(s, val); 766 } while (val != 0); 767 if (s->gzhead->hcrc && s->pending > beg) 768 strm->adler = crc32(strm->adler, s->pending_buf + beg, 769 s->pending - beg); 770 if (val == 0) 771 s->status = HCRC_STATE; 772 } 773 else 774 s->status = HCRC_STATE; 775 } 776 if (s->status == HCRC_STATE) { 777 if (s->gzhead->hcrc) { 778 if (s->pending + 2 > s->pending_buf_size) 779 flush_pending(strm); 780 if (s->pending + 2 <= s->pending_buf_size) { 781 put_byte(s, (Byte)(strm->adler & 0xff)); 782 put_byte(s, (Byte)((strm->adler >> 8) & 0xff)); 783 strm->adler = crc32(0L, Z_NULL, 0); 784 s->status = BUSY_STATE; 785 } 786 } 787 else 788 s->status = BUSY_STATE; 789 } 790 #endif 791 792 /* Flush as much pending output as possible */ 793 if (s->pending != 0) { 794 flush_pending(strm); 795 if (strm->avail_out == 0) { 796 /* Since avail_out is 0, deflate will be called again with 797 * more output space, but possibly with both pending and 798 * avail_in equal to zero. There won't be anything to do, 799 * but this is not an error situation so make sure we 800 * return OK instead of BUF_ERROR at next call of deflate: 801 */ 802 s->last_flush = -1; 803 return Z_OK; 804 } 805 806 /* Make sure there is something to do and avoid duplicate consecutive 807 * flushes. For repeated and useless calls with Z_FINISH, we keep 808 * returning Z_STREAM_END instead of Z_BUF_ERROR. 809 */ 810 } else if (strm->avail_in == 0 && flush <= old_flush && 811 flush != Z_FINISH) { 812 ERR_RETURN(strm, Z_BUF_ERROR); 813 } 814 815 /* User must not provide more input after the first FINISH: */ 816 if (s->status == FINISH_STATE && strm->avail_in != 0) { 817 ERR_RETURN(strm, Z_BUF_ERROR); 818 } 819 820 /* Start a new block or continue the current one. 
821 */ 822 if (strm->avail_in != 0 || s->lookahead != 0 || 823 (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) { 824 block_state bstate; 825 826 if (strm->clas && s->class_bitmap == NULL) { 827 /* This is the first time that we have seen alternative class 828 * data. All data up till this point has been standard class. */ 829 s->class_bitmap = (Bytef*) ZALLOC(strm, s->w_size/4, sizeof(Byte)); 830 zmemzero(s->class_bitmap, s->w_size/4); 831 } 832 833 if (strm->clas && s->strategy == Z_RLE) { 834 /* We haven't patched deflate_rle. */ 835 ERR_RETURN(strm, Z_BUF_ERROR); 836 } 837 838 if (s->strategy == Z_HUFFMAN_ONLY) { 839 bstate = deflate_huff(s, flush); 840 } else if (s->strategy == Z_RLE) { 841 bstate = deflate_rle(s, flush); 842 } else { 843 bstate = (*(configuration_table[s->level].func)) 844 (s, flush, strm->clas); 845 } 846 847 if (bstate == finish_started || bstate == finish_done) { 848 s->status = FINISH_STATE; 849 } 850 if (bstate == need_more || bstate == finish_started) { 851 if (strm->avail_out == 0) { 852 s->last_flush = -1; /* avoid BUF_ERROR next call, see above */ 853 } 854 return Z_OK; 855 /* If flush != Z_NO_FLUSH && avail_out == 0, the next call 856 * of deflate should use the same flush parameter to make sure 857 * that the flush is complete. So we don't have to output an 858 * empty block here, this will be done at next call. This also 859 * ensures that for a very small output buffer, we emit at most 860 * one empty block. 861 */ 862 } 863 if (bstate == block_done) { 864 if (flush == Z_PARTIAL_FLUSH) { 865 _tr_align(s); 866 } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */ 867 _tr_stored_block(s, (char*)0, 0L, 0); 868 /* For a full flush, this empty block will be recognized 869 * as a special marker by inflate_sync(). 
870 */ 871 if (flush == Z_FULL_FLUSH) { 872 CLEAR_HASH(s); /* forget history */ 873 if (s->lookahead == 0) { 874 s->strstart = 0; 875 s->block_start = 0L; 876 } 877 } 878 } 879 flush_pending(strm); 880 if (strm->avail_out == 0) { 881 s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */ 882 return Z_OK; 883 } 884 } 885 } 886 Assert(strm->avail_out > 0, "bug2"); 887 888 if (flush != Z_FINISH) return Z_OK; 889 if (s->wrap <= 0) return Z_STREAM_END; 890 891 /* Write the trailer */ 892 #ifdef GZIP 893 if (s->wrap == 2) { 894 put_byte(s, (Byte)(strm->adler & 0xff)); 895 put_byte(s, (Byte)((strm->adler >> 8) & 0xff)); 896 put_byte(s, (Byte)((strm->adler >> 16) & 0xff)); 897 put_byte(s, (Byte)((strm->adler >> 24) & 0xff)); 898 put_byte(s, (Byte)(strm->total_in & 0xff)); 899 put_byte(s, (Byte)((strm->total_in >> 8) & 0xff)); 900 put_byte(s, (Byte)((strm->total_in >> 16) & 0xff)); 901 put_byte(s, (Byte)((strm->total_in >> 24) & 0xff)); 902 } 903 else 904 #endif 905 { 906 putShortMSB(s, (uInt)(strm->adler >> 16)); 907 putShortMSB(s, (uInt)(strm->adler & 0xffff)); 908 } 909 flush_pending(strm); 910 /* If avail_out is zero, the application will call deflate again 911 * to flush the rest. 912 */ 913 if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */ 914 return s->pending != 0 ? 
Z_OK : Z_STREAM_END; 915 } 916 917 /* ========================================================================= */ 918 int ZEXPORT deflateEnd (strm) 919 z_streamp strm; 920 { 921 int status; 922 923 if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; 924 925 status = strm->state->status; 926 if (status != INIT_STATE && 927 status != EXTRA_STATE && 928 status != NAME_STATE && 929 status != COMMENT_STATE && 930 status != HCRC_STATE && 931 status != BUSY_STATE && 932 status != FINISH_STATE) { 933 return Z_STREAM_ERROR; 934 } 935 936 /* Deallocate in reverse order of allocations: */ 937 TRY_FREE(strm, strm->state->pending_buf); 938 TRY_FREE(strm, strm->state->head); 939 TRY_FREE(strm, strm->state->prev); 940 TRY_FREE(strm, strm->state->window); 941 TRY_FREE(strm, strm->state->class_bitmap); 942 943 ZFREE(strm, strm->state); 944 strm->state = Z_NULL; 945 946 return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK; 947 } 948 949 /* ========================================================================= 950 * Copy the source state to the destination state. 951 * To simplify the source, this is not supported for 16-bit MSDOS (which 952 * doesn't have enough memory anyway to duplicate compression states). 
953 */ 954 int ZEXPORT deflateCopy (dest, source) 955 z_streamp dest; 956 z_streamp source; 957 { 958 #ifdef MAXSEG_64K 959 return Z_STREAM_ERROR; 960 #else 961 deflate_state *ds; 962 deflate_state *ss; 963 ushf *overlay; 964 965 966 if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) { 967 return Z_STREAM_ERROR; 968 } 969 970 ss = source->state; 971 972 zmemcpy(dest, source, sizeof(z_stream)); 973 974 ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state)); 975 if (ds == Z_NULL) return Z_MEM_ERROR; 976 dest->state = (struct internal_state FAR *) ds; 977 zmemcpy(ds, ss, sizeof(deflate_state)); 978 ds->strm = dest; 979 980 ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); 981 ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); 982 ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); 983 overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2); 984 ds->pending_buf = (uchf *) overlay; 985 986 if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL || 987 ds->pending_buf == Z_NULL) { 988 deflateEnd (dest); 989 return Z_MEM_ERROR; 990 } 991 /* following zmemcpy do not work for 16-bit MSDOS */ 992 zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte)); 993 zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos)); 994 zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos)); 995 zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); 996 997 ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); 998 ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); 999 ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; 1000 1001 ds->l_desc.dyn_tree = ds->dyn_ltree; 1002 ds->d_desc.dyn_tree = ds->dyn_dtree; 1003 ds->bl_desc.dyn_tree = ds->bl_tree; 1004 1005 return Z_OK; 1006 #endif /* MAXSEG_64K */ 1007 } 1008 1009 /* =========================================================================== 1010 * Read a new buffer from the current input stream, update the adler32 
 * and total number of bytes read. All deflate() input goes through
 * this function so some applications may wish to modify it to avoid
 * allocating a large strm->next_in buffer and copying from it.
 * (See also flush_pending()).
 */
local int read_buf(strm, buf, size)
    z_streamp strm;
    Bytef *buf;
    unsigned size;
{
    unsigned len = strm->avail_in;

    if (len > size) len = size;
    if (len == 0) return 0;

    strm->avail_in -= len;

    /* Fold the bytes about to be consumed into the running checksum:
     * adler32 for a zlib stream (wrap == 1), crc32 for a gzip stream
     * (wrap == 2), nothing for a raw stream. */
    if (strm->state->wrap == 1) {
        strm->adler = adler32(strm->adler, strm->next_in, len);
    }
#ifdef GZIP
    else if (strm->state->wrap == 2) {
        strm->adler = crc32(strm->adler, strm->next_in, len);
    }
#endif
    zmemcpy(buf, strm->next_in, len);
    strm->next_in += len;
    strm->total_in += len;

    return (int)len;
}

/* ===========================================================================
 * Initialize the "longest match" routines for a new zlib stream
 */
local void lm_init (s)
    deflate_state *s;
{
    s->window_size = (ulg)2L*s->w_size;

    CLEAR_HASH(s);

    /* Set the default configuration parameters:
     */
    s->max_lazy_match   = configuration_table[s->level].max_lazy;
    s->good_match       = configuration_table[s->level].good_length;
    s->nice_match       = configuration_table[s->level].nice_length;
    s->max_chain_length = configuration_table[s->level].max_chain;

    s->strstart = 0;
    s->block_start = 0L;
    s->lookahead = 0;
    s->match_length = s->prev_length = MIN_MATCH-1;
    s->match_available = 0;
    s->ins_h = 0;
#ifndef FASTEST
#ifdef ASMV
    match_init(); /* initialize the asm code */
#endif
#endif
}

/* class_set sets bits [offset,offset+len) in s->class_bitmap to either 1 (if
 * class != 0) or 0 (otherwise). */
local void class_set(s, offset, len, clas)
    deflate_state *s;
    IPos offset;
    uInt len;
    int clas;
{
    IPos byte = offset >> 3;
    IPos bit = offset & 7;
    Bytef class_byte_value = clas ? 0xff : 0x00;
    Bytef class_bit_value = clas ? 1 : 0;
    /* mask[i] has every bit set except bit i, so ANDing clears that bit
     * before the new value is ORed in. */
    static const Bytef mask[8] = {0xfe, 0xfd, 0xfb, 0xf7,
                                  0xef, 0xdf, 0xbf, 0x7f};

    /* Head: set bits one at a time until a byte boundary is reached (or
     * len is exhausted). */
    if (bit) {
        while (len) {
            s->class_bitmap[byte] &= mask[bit];
            s->class_bitmap[byte] |= class_bit_value << bit;
            bit++;
            len--;
            if (bit == 8) {
                bit = 0;
                byte++;
                break;
            }
        }
    }

    /* Body: whole bytes at a time. */
    while (len >= 8) {
        s->class_bitmap[byte++] = class_byte_value;
        len -= 8;
    }

    /* Tail: remaining bits of the final partial byte (bit is 0 here). */
    while (len) {
        s->class_bitmap[byte] &= mask[bit];
        s->class_bitmap[byte] |= class_bit_value << bit;
        bit++;
        len--;
    }
}

/* class_at returns the class bit (0 or 1) recorded in s->class_bitmap for
 * the window byte at window_offset. */
local int class_at(s, window_offset)
    deflate_state *s;
    IPos window_offset;
{
    IPos byte = window_offset >> 3;
    IPos bit = window_offset & 7;
    return (s->class_bitmap[byte] >> bit) & 1;
}

#ifndef FASTEST
/* ===========================================================================
 * Set match_start to the longest match starting at the given string and
 * return its length. Matches shorter or equal to prev_length are discarded,
 * in which case the result is equal to prev_length and match_start is
 * garbage.
 * IN assertions: cur_match is the head of the hash chain for the current
 *   string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
 * OUT assertion: the match length is not greater than s->lookahead.
 */
#ifndef ASMV
/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
 * match.S. The code will be functionally equivalent.
 */
local uInt longest_match(s, cur_match, clas)
    deflate_state *s;
    IPos cur_match;                             /* current match */
    int clas;
{
    unsigned chain_length = s->max_chain_length;/* max hash chain length */
    register Bytef *scan = s->window + s->strstart; /* current string */
    register Bytef *match;                      /* matched string */
    register int len;                           /* length of current match */
    int best_len = s->prev_length;              /* best match length so far */
    int nice_match = s->nice_match;             /* stop if match long enough */
    IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
        s->strstart - (IPos)MAX_DIST(s) : NIL;
    /* Stop when cur_match becomes <= limit. To simplify the code,
     * we prevent matches with the string of window index 0.
     */
    Posf *prev = s->prev;
    uInt wmask = s->w_mask;

#ifdef UNALIGNED_OK
    /* Compare two bytes at a time. Note: this is not always beneficial.
     * Try with and without -DUNALIGNED_OK to check.
     */
    register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
    register ush scan_start = *(ushf*)scan;
    register ush scan_end   = *(ushf*)(scan+best_len-1);
#else
    register Bytef *strend = s->window + s->strstart + MAX_MATCH;
    register Byte scan_end1  = scan[best_len-1];
    register Byte scan_end   = scan[best_len];
#endif

    /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
     * It is easy to get rid of this optimization if necessary.
     */
    Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");

    /* Do not waste too much time if we already have a good match: */
    if (s->prev_length >= s->good_match) {
        chain_length >>= 2;
    }
    /* Do not look for matches beyond the end of the input. This is necessary
     * to make deflate deterministic.
     */
    if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;

    Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");

    do {
        Assert(cur_match < s->strstart, "no future");
        match = s->window + cur_match;
        /* If the matched data is in the wrong class, skip it. */
        if (s->class_bitmap && class_at(s, cur_match) != clas)
            continue;

        /* Skip to next match if the match length cannot increase
         * or if the match length is less than 2. Note that the checks below
         * for insufficient lookahead only occur occasionally for performance
         * reasons. Therefore uninitialized memory will be accessed, and
         * conditional jumps will be made that depend on those values.
         * However the length of the match is limited to the lookahead, so
         * the output of deflate is not affected by the uninitialized values.
         */
#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
        /* This code assumes sizeof(unsigned short) == 2. Do not use
         * UNALIGNED_OK if your compiler uses a different size.
         */
        if (*(ushf*)(match+best_len-1) != scan_end ||
            *(ushf*)match != scan_start) continue;

        /* It is not necessary to compare scan[2] and match[2] since they are
         * always equal when the other bytes match, given that the hash keys
         * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
         * strstart+3, +5, ... up to strstart+257. We check for insufficient
         * lookahead only every 4th comparison; the 128th check will be made
         * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
         * necessary to put more guard bytes at the end of the window, or
         * to check more often for insufficient lookahead.
         */
        Assert(scan[2] == match[2], "scan[2]?");
        scan++, match++;
        do {
        } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 scan < strend);
        /* The funny "do {}" generates better code on most compilers */

        /* Here, scan <= window+strstart+257 */
        Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
        if (*scan == *match) scan++;

        len = (MAX_MATCH - 1) - (int)(strend-scan);
        scan = strend - (MAX_MATCH-1);

/* This path has not been updated for class-aware matching; building with
 * UNALIGNED_OK is deliberately blocked: */
#error "UNALIGNED_OK hasn't been patched."

#else /* UNALIGNED_OK */

        if (match[best_len]   != scan_end  ||
            match[best_len-1] != scan_end1 ||
            *match            != *scan     ||
            *++match          != scan[1])      continue;

        /* The check at best_len-1 can be removed because it will be made
         * again later. (This heuristic is not always a win.)
         * It is not necessary to compare scan[2] and match[2] since they
         * are always equal when the other bytes match, given that
         * the hash keys are equal and that HASH_BITS >= 8.
         */
        scan += 2, match++;
        Assert(*scan == *match, "match[2]?");

        if (!s->class_bitmap) {
            /* We check for insufficient lookahead only every 8th comparison;
             * the 256th check will be made at strstart+258.
             */
            do {
            } while (*++scan == *++match && *++scan == *++match &&
                     *++scan == *++match && *++scan == *++match &&
                     *++scan == *++match && *++scan == *++match &&
                     *++scan == *++match && *++scan == *++match &&
                     scan < strend);
        } else {
            /* We have to be mindful of the class of the data and not stray. */
            do {
            } while (*++scan == *++match &&
                     class_at(s, match - s->window) == clas &&
                     scan < strend);
        }

        Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");

        len = MAX_MATCH - (int)(strend - scan);
        scan = strend - MAX_MATCH;

#endif /* UNALIGNED_OK */

        if (len > best_len) {
            s->match_start = cur_match;
            best_len = len;
            if (len >= nice_match) break;
#ifdef UNALIGNED_OK
            scan_end = *(ushf*)(scan+best_len-1);
#else
            scan_end1  = scan[best_len-1];
            scan_end   = scan[best_len];
#endif
        }
    } while ((cur_match = prev[cur_match & wmask]) > limit
             && --chain_length != 0);

    /* Never report more bytes than the valid lookahead. */
    if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
    return s->lookahead;
}
#endif /* ASMV */

/* cookie_match is a replacement for longest_match in the case of cookie data.
 * Here we only wish to match the entire value so trying the partial matches in
 * longest_match is both wasteful and often fails to find the correct match.
 *
 * So we take the djb2 hash of the cookie and look up the last position for a
 * match in a special hash table. */
local uInt cookie_match(s, start, len)
    deflate_state *s;
    IPos start;
    unsigned len;
{
    unsigned hash = 5381;
    Bytef *str = s->window + start;
    unsigned i;
    IPos cookie_location;

    if (len >= MAX_MATCH || len == 0)
        return 0;

    /* djb2: hash = hash*33 + c over the candidate cookie value. */
    for (i = 0; i < len; i++)
        hash = ((hash << 5) + hash) + str[i];

    hash &= Z_COOKIE_HASH_MASK;
    cookie_location = s->cookie_locations[hash];
    /* Record this value as the most recent occupant of the slot, whether or
     * not it ends up matching. */
    s->cookie_locations[hash] = start;
    s->match_start = 0;
    if (cookie_location &&
        (start - cookie_location) > len &&
        (start - cookie_location) < MAX_DIST(s) &&
        len <= s->lookahead) {
        for (i = 0; i < len; i++) {
            /* Each matched byte must also be classed as cookie data. */
            if (s->window[start+i] != s->window[cookie_location+i] ||
                class_at(s, cookie_location+i) != 1) {
                return 0;
            }
        }
        /* Check that we aren't matching a prefix of another cookie by ensuring
         * that the final byte is either a semicolon (which cannot appear in a
         * cookie value), or the match is followed by non-cookie data. */
        if (s->window[cookie_location+len-1] != ';' &&
            class_at(s, cookie_location+len) != 0) {
            return 0;
        }
        s->match_start = cookie_location;
        return len;
    }

    return 0;
}


#else /* FASTEST */

/* ---------------------------------------------------------------------------
 * Optimized version for FASTEST only
 */
local uInt longest_match(s, cur_match, clas)
    deflate_state *s;
    IPos cur_match;                             /* current match */
    int clas;
{
    register Bytef *scan = s->window + s->strstart; /* current string */
    register Bytef *match;                      /* matched string */
    register int len;                           /* length of current match */
    register Bytef *strend = s->window + s->strstart + MAX_MATCH;

/* The FASTEST variant has not been updated for class-aware matching;
 * building it is deliberately blocked: */
#error "This code not patched"

    /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
     * It is easy to get rid of this optimization if necessary.
     */
    Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");

    Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");

    Assert(cur_match < s->strstart, "no future");

    match = s->window + cur_match;

    /* Return failure if the match length is less than 2:
     */
    if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;

    /* The check at best_len-1 can be removed because it will be made
     * again later. (This heuristic is not always a win.)
     * It is not necessary to compare scan[2] and match[2] since they
     * are always equal when the other bytes match, given that
     * the hash keys are equal and that HASH_BITS >= 8.
     */
    scan += 2, match += 2;
    Assert(*scan == *match, "match[2]?");

    /* We check for insufficient lookahead only every 8th comparison;
     * the 256th check will be made at strstart+258.
     */
    do {
    } while (*++scan == *++match && *++scan == *++match &&
             *++scan == *++match && *++scan == *++match &&
             *++scan == *++match && *++scan == *++match &&
             *++scan == *++match && *++scan == *++match &&
             scan < strend);

    Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");

    len = MAX_MATCH - (int)(strend - scan);

    if (len < MIN_MATCH) return MIN_MATCH - 1;

    s->match_start = cur_match;
    /* Clamp to the valid lookahead so no bytes past the input are reported. */
    return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead;
}

#endif /* FASTEST */

#ifdef DEBUG
/* ===========================================================================
 * Check that the match at match_start is indeed a match.
 */
local void check_match(s, start, match, length)
    deflate_state *s;
    IPos start, match;
    int length;
{
    /* check that the match is indeed a match */
    if (zmemcmp(s->window + match,
                s->window + start, length) != EQUAL) {
        fprintf(stderr, " start %u, match %u, length %d\n",
                start, match, length);
        do {
            fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
        } while (--length != 0);
        z_error("invalid match");
    }
    if (z_verbose > 1) {
        fprintf(stderr,"\\[%d,%d]", start-match, length);
        do { putc(s->window[start++], stderr); } while (--length != 0);
    }
}
#else
#  define check_match(s, start, match, length)
#endif /* DEBUG */

/* ===========================================================================
 * Fill the window when the lookahead becomes insufficient.
 * Updates strstart and lookahead.
 *
 * IN assertion: lookahead < MIN_LOOKAHEAD
 * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
 *    At least one byte has been read, or avail_in == 0; reads are
 *    performed for at least two bytes (required for the zip translate_eol
 *    option -- not supported here).
 */
local void fill_window(s)
    deflate_state *s;
{
    register unsigned n, m;
    register Posf *p;
    unsigned more;    /* Amount of free space at the end of the window. */
    uInt wsize = s->w_size;

    do {
        more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);

        /* Deal with !@#$% 64K limit: */
        if (sizeof(int) <= 2) {
            if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
                more = wsize;

            } else if (more == (unsigned)(-1)) {
                /* Very unlikely, but possible on 16 bit machine if
                 * strstart == 0 && lookahead == 1 (input done a byte at time)
                 */
                more--;
            }
        }

        /* If the window is almost full and there is insufficient lookahead,
         * move the upper half to the lower one to make room in the upper half.
         */
        if (s->strstart >= wsize+MAX_DIST(s)) {

            zmemcpy(s->window, s->window+wsize, (unsigned)wsize);
            s->match_start -= wsize;
            s->strstart    -= wsize; /* we now have strstart >= MAX_DIST */
            s->block_start -= (long) wsize;

            /* Slide the hash table (could be avoided with 32 bit values
               at the expense of memory usage). We slide even when level == 0
               to keep the hash table consistent if we switch back to level > 0
               later. (Using level 0 permanently is not an optimal usage of
               zlib, so we don't care about this pathological case.)
             */
            n = s->hash_size;
            p = &s->head[n];
            do {
                m = *--p;
                *p = (Pos)(m >= wsize ? m-wsize : NIL);
            } while (--n);

            n = wsize;
#ifndef FASTEST
            p = &s->prev[n];
            do {
                m = *--p;
                *p = (Pos)(m >= wsize ? m-wsize : NIL);
                /* If n is not on any hash chain, prev[n] is garbage but
                 * its value will never be used.
                 */
            } while (--n);
#endif

            /* Rebase the cookie-location table to the slid window; entries
             * that fell out of the window are reset to 0 (empty). */
            for (n = 0; n < Z_COOKIE_HASH_SIZE; n++) {
                if (s->cookie_locations[n] > wsize) {
                    s->cookie_locations[n] -= wsize;
                } else {
                    s->cookie_locations[n] = 0;
                }
            }

            /* Slide the per-byte class bitmap in step with the window: the
             * upper half (w_size bytes = w_size/8 bitmap bytes) becomes the
             * lower half and the new upper half is cleared. */
            if (s->class_bitmap) {
                zmemcpy(s->class_bitmap, s->class_bitmap + s->w_size/8,
                        s->w_size/8);
                zmemzero(s->class_bitmap + s->w_size/8, s->w_size/8);
            }

            more += wsize;
        }
        if (s->strm->avail_in == 0) return;

        /* If there was no sliding:
         *    strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
         *    more == window_size - lookahead - strstart
         * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
         * => more >= window_size - 2*WSIZE + 2
         * In the BIG_MEM or MMAP case (not yet supported),
         *   window_size == input_size + MIN_LOOKAHEAD &&
         *   strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
         * Otherwise, window_size == 2*WSIZE so more >= 2.
         * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
         */
        Assert(more >= 2, "more < 2");

        n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
        /* Record the class of the freshly read bytes so the matchers can
         * respect class boundaries. */
        if (s->class_bitmap != NULL) {
            class_set(s, s->strstart + s->lookahead, n, s->strm->clas);
        }
        s->lookahead += n;

        /* Initialize the hash value now that we have some input: */
        if (s->lookahead >= MIN_MATCH) {
            s->ins_h = s->window[s->strstart];
            UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
#if MIN_MATCH != 3
            Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
        }
        /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
         * but this is not important since only literal bytes will be emitted.
         */

    } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);

    /* If the WIN_INIT bytes after the end of the current data have never been
     * written, then zero those bytes in order to avoid memory check reports of
     * the use of uninitialized (or uninitialised as Julian writes) bytes by
     * the longest match routines. Update the high water mark for the next
     * time through here. WIN_INIT is set to MAX_MATCH since the longest match
     * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
     */
    if (s->high_water < s->window_size) {
        ulg curr = s->strstart + (ulg)(s->lookahead);
        ulg init;

        if (s->high_water < curr) {
            /* Previous high water mark below current data -- zero WIN_INIT
             * bytes or up to end of window, whichever is less.
             */
            init = s->window_size - curr;
            if (init > WIN_INIT)
                init = WIN_INIT;
            zmemzero(s->window + curr, (unsigned)init);
            s->high_water = curr + init;
        }
        else if (s->high_water < (ulg)curr + WIN_INIT) {
            /* High water mark at or above current data, but below current data
             * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
             * to end of window, whichever is less.
             */
            init = (ulg)curr + WIN_INIT - s->high_water;
            if (init > s->window_size - s->high_water)
                init = s->window_size - s->high_water;
            zmemzero(s->window + s->high_water, (unsigned)init);
            s->high_water += init;
        }
    }
}

/* ===========================================================================
 * Flush the current block, with given end-of-file flag.
 * IN assertion: strstart is set to the end of the current match.
 */
#define FLUSH_BLOCK_ONLY(s, last) { \
   _tr_flush_block(s, (s->block_start >= 0L ? \
                   (charf *)&s->window[(unsigned)s->block_start] : \
                   (charf *)Z_NULL), \
                (ulg)((long)s->strstart - s->block_start), \
                (last)); \
   s->block_start = s->strstart; \
   flush_pending(s->strm); \
   Tracev((stderr,"[FLUSH]")); \
}

/* Same but force premature exit if necessary.  Note: expands to a `return`
 * out of the enclosing compress function when the output buffer fills. */
#define FLUSH_BLOCK(s, last) { \
   FLUSH_BLOCK_ONLY(s, last); \
   if (s->strm->avail_out == 0) return (last) ? finish_started : need_more; \
}

/* ===========================================================================
 * Copy without compression as much as possible from the input stream, return
 * the current block state.
 * This function does not insert new strings in the dictionary since
 * uncompressible data is probably not useful. This function is used
 * only for the level=0 compression option.
 * NOTE: this function should be optimized to avoid extra copying from
 * window to pending_buf.
 */
local block_state deflate_stored(s, flush, clas)
    deflate_state *s;
    int flush;
    int clas;
{
    /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
     * to pending_buf_size, and each stored block has a 5 byte header:
     */
    ulg max_block_size = 0xffff;
    ulg max_start;

    if (max_block_size > s->pending_buf_size - 5) {
        max_block_size = s->pending_buf_size - 5;
    }

    /* Copy as much as possible from input to output: */
    for (;;) {
        /* Fill the window as much as possible: */
        if (s->lookahead <= 1) {

            Assert(s->strstart < s->w_size+MAX_DIST(s) ||
                   s->block_start >= (long)s->w_size, "slide too late");

            fill_window(s);
            if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;

            if (s->lookahead == 0) break; /* flush the current block */
        }
        Assert(s->block_start >= 0L, "block gone");

        s->strstart += s->lookahead;
        s->lookahead = 0;

        /* Emit a stored block if pending_buf will be full: */
        max_start = s->block_start + max_block_size;
        if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
            /* strstart == 0 is possible when wraparound on 16-bit machine */
            s->lookahead = (uInt)(s->strstart - max_start);
            s->strstart = (uInt)max_start;
            FLUSH_BLOCK(s, 0);
        }
        /* Flush if we may have to slide, otherwise block_start may become
         * negative and the data will be gone:
         */
        if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
            FLUSH_BLOCK(s, 0);
        }
    }
    FLUSH_BLOCK(s, flush == Z_FINISH);
    return flush == Z_FINISH ? finish_done : block_done;
}

/* ===========================================================================
 * Compress as much as possible from the input stream, return the current
 * block state.
 * This function does not perform lazy evaluation of matches and inserts
 * new strings in the dictionary only for unmatched strings or for short
 * matches. It is used only for the fast compression options.
 */
local block_state deflate_fast(s, flush, clas)
    deflate_state *s;
    int flush;
    int clas;
{
    IPos hash_head;       /* head of the hash chain */
    int bflush;           /* set if current block must be flushed */

    if (clas != 0) {
        /* We haven't patched this code for alternative class data. */
        /* NOTE(review): Z_BUF_ERROR is a zlib error code, not a block_state
         * value; presumably the caller treats anything outside the four
         * block_state constants as an error -- confirm at the call site. */
        return Z_BUF_ERROR;
    }

    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the next match, plus MIN_MATCH bytes to insert the
         * string following the next match.
         */
        if (s->lookahead < MIN_LOOKAHEAD) {
            fill_window(s);
            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* Insert the string window[strstart .. strstart+2] in the
         * dictionary, and set hash_head to the head of the hash chain:
         */
        hash_head = NIL;
        if (s->lookahead >= MIN_MATCH) {
            INSERT_STRING(s, s->strstart, hash_head);
        }

        /* Find the longest match, discarding those <= prev_length.
         * At this point we have always match_length < MIN_MATCH
         */
        if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
            /* To simplify the code, we prevent matches with the string
             * of window index 0 (in particular we have to avoid a match
             * of the string with itself at the start of the input file).
             */
            s->match_length = longest_match (s, hash_head, clas);
            /* longest_match() sets match_start */
        }
        if (s->match_length >= MIN_MATCH) {
            check_match(s, s->strstart, s->match_start, s->match_length);

            _tr_tally_dist(s, s->strstart - s->match_start,
                           s->match_length - MIN_MATCH, bflush);

            s->lookahead -= s->match_length;

            /* Insert new strings in the hash table only if the match length
             * is not too large. This saves time but degrades compression.
             */
#ifndef FASTEST
            if (s->match_length <= s->max_insert_length &&
                s->lookahead >= MIN_MATCH) {
                s->match_length--; /* string at strstart already in table */
                do {
                    s->strstart++;
                    INSERT_STRING(s, s->strstart, hash_head);
                    /* strstart never exceeds WSIZE-MAX_MATCH, so there are
                     * always MIN_MATCH bytes ahead.
                     */
                } while (--s->match_length != 0);
                s->strstart++;
            } else
#endif
            {
                s->strstart += s->match_length;
                s->match_length = 0;
                s->ins_h = s->window[s->strstart];
                UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
#if MIN_MATCH != 3
                Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
                /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
                 * matter since it will be recomputed at next deflate call.
                 */
            }
        } else {
            /* No match, output a literal byte */
            Tracevv((stderr,"%c", s->window[s->strstart]));
            _tr_tally_lit (s, s->window[s->strstart], bflush);
            s->lookahead--;
            s->strstart++;
        }
        if (bflush) FLUSH_BLOCK(s, 0);
    }
    FLUSH_BLOCK(s, flush == Z_FINISH);
    return flush == Z_FINISH ? finish_done : block_done;
}

#ifndef FASTEST
/* ===========================================================================
 * Same as above, but achieves better compression. We use a lazy
 * evaluation for matches: a match is finally adopted only if there is
 * no better match at the next window position.
 */
local block_state deflate_slow(s, flush, clas)
    deflate_state *s;
    int flush;
    int clas;
{
    IPos hash_head;          /* head of hash chain */
    int bflush;              /* set if current block must be flushed */
    uInt input_length ;      /* length of the cookie value; assigned and read
                                only when clas == Z_CLASS_COOKIE */
    int first = 1;           /* first says whether this is the first iteration
                                of the loop, below. */

    if (clas == Z_CLASS_COOKIE) {
        if (s->lookahead) {
            /* Alternative class data must always be presented at the beginning
             * of a block. */
            /* NOTE(review): Z_BUF_ERROR is a zlib error code, not a
             * block_state value -- confirm the caller's handling. */
            return Z_BUF_ERROR;
        }
        input_length = s->strm->avail_in;
    }

    /* Process the input block. */
    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the next match, plus MIN_MATCH bytes to insert the
         * string following the next match.
         */
        if (s->lookahead < MIN_LOOKAHEAD) {
            fill_window(s);
            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* Insert the string window[strstart .. strstart+2] in the
         * dictionary, and set hash_head to the head of the hash chain:
         */
        hash_head = NIL;
        if (s->lookahead >= MIN_MATCH) {
            INSERT_STRING(s, s->strstart, hash_head);
        }

        /* Find the longest match, discarding those <= prev_length.
         */
        s->prev_length = s->match_length, s->prev_match = s->match_start;
        s->match_length = MIN_MATCH-1;

        if (clas == Z_CLASS_COOKIE && first) {
            s->match_length = cookie_match(s, s->strstart, input_length);
        } else if (clas == Z_CLASS_STANDARD &&
                   hash_head != NIL &&
                   s->prev_length < s->max_lazy_match &&
                   s->strstart - hash_head <= MAX_DIST(s)) {
            /* To simplify the code, we prevent matches with the string
             * of window index 0 (in particular we have to avoid a match
             * of the string with itself at the start of the input file).
             */
            s->match_length = longest_match (s, hash_head, clas);

            /* longest_match() sets match_start */

            if (s->match_length <= 5 && (s->strategy == Z_FILTERED
#if TOO_FAR <= 32767
                || (s->match_length == MIN_MATCH &&
                    s->strstart - s->match_start > TOO_FAR)
#endif
                )) {

                /* If prev_match is also MIN_MATCH, match_start is garbage
                 * but we will ignore the current match anyway.
                 */
                s->match_length = MIN_MATCH-1;
            }
        }
        /* If there was a match at the previous step and the current
         * match is not better, output the previous match:
         */
        first = 0;
        if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length &&
            /* We will only accept an exact match for Z_CLASS_COOKIE data and
             * we won't match Z_CLASS_HUFFMAN_ONLY data at all. */
            (clas == Z_CLASS_STANDARD || (clas == Z_CLASS_COOKIE &&
                                          s->prev_length == input_length &&
                                          s->prev_match > 0 &&
                                          /* We require that a Z_CLASS_COOKIE match be
                                           * preceded by either a semicolon (which cannot be
                                           * part of a cookie), or non-cookie data. This is
                                           * to prevent a cookie from being a suffix of
                                           * another. */
                                          (class_at(s, s->prev_match-1) == Z_CLASS_STANDARD ||
                                           *(s->window + s->prev_match-1) == ';')))) {
            uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
            /* Do not insert strings in hash table beyond this. */

            check_match(s, s->strstart-1, s->prev_match, s->prev_length);

            _tr_tally_dist(s, s->strstart -1 - s->prev_match,
                           s->prev_length - MIN_MATCH, bflush);

            /* Insert in hash table all strings up to the end of the match.
             * strstart-1 and strstart are already inserted. If there is not
             * enough lookahead, the last two strings are not inserted in
             * the hash table.
             */
            s->lookahead -= s->prev_length-1;
            s->prev_length -= 2;
            do {
                if (++s->strstart <= max_insert) {
                    INSERT_STRING(s, s->strstart, hash_head);
                }
            } while (--s->prev_length != 0);
            s->match_available = 0;
            s->match_length = MIN_MATCH-1;
            s->strstart++;

            if (bflush) FLUSH_BLOCK(s, 0);

        } else if (s->match_available) {
            /* If there was no match at the previous position, output a
             * single literal. If there was a match but the current match
             * is longer, truncate the previous match to a single literal.
             */
            Tracevv((stderr,"%c", s->window[s->strstart-1]));
            _tr_tally_lit(s, s->window[s->strstart-1], bflush);
            if (bflush) {
                FLUSH_BLOCK_ONLY(s, 0);
            }
            s->strstart++;
            s->lookahead--;
            if (s->strm->avail_out == 0) return need_more;
        } else {
            /* There is no previous match to compare with, wait for
             * the next step to decide.
*/
            s->match_available = 1;
            s->strstart++;
            s->lookahead--;
        }
    }
    Assert (flush != Z_NO_FLUSH, "no flush?");
    /* Flush the last deferred literal, if any, before closing the block. */
    if (s->match_available) {
        Tracevv((stderr,"%c", s->window[s->strstart-1]));
        _tr_tally_lit(s, s->window[s->strstart-1], bflush);
        s->match_available = 0;
    }
    FLUSH_BLOCK(s, flush == Z_FINISH);
    return flush == Z_FINISH ? finish_done : block_done;
}
#endif /* FASTEST */

/* ===========================================================================
 * For Z_RLE, simply look for runs of bytes, generate matches only of distance
 * one.  Do not maintain a hash table.  (It will be regenerated if this run of
 * deflate switches away from Z_RLE.)
 */
local block_state deflate_rle(s, flush)
    deflate_state *s;
    int flush;
{
    int bflush;             /* set if current block must be flushed */
    uInt prev;              /* byte at distance one to match */
    Bytef *scan, *strend;   /* scan goes up to strend for length of run */

    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the longest encodable run.
*/
        if (s->lookahead < MAX_MATCH) {
            fill_window(s);
            if (s->lookahead < MAX_MATCH && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* See how many times the previous byte repeats */
        s->match_length = 0;
        if (s->lookahead >= MIN_MATCH && s->strstart > 0) {
            scan = s->window + s->strstart - 1;
            prev = *scan;
            /* Only bother scanning a full run if the first three bytes
             * already repeat (the minimum emittable match is MIN_MATCH).
             */
            if (prev == *++scan && prev == *++scan && prev == *++scan) {
                strend = s->window + s->strstart + MAX_MATCH;
                /* Unrolled x8: scan may step up to 7 bytes past strend
                 * before the bound is tested, so match_length below can
                 * exceed MAX_MATCH transiently.
                 * NOTE(review): assumes the window is readable somewhat
                 * beyond strstart+MAX_MATCH — confirm fill_window's
                 * padding guarantees.
                 */
                do {
                } while (prev == *++scan && prev == *++scan &&
                         prev == *++scan && prev == *++scan &&
                         prev == *++scan && prev == *++scan &&
                         prev == *++scan && prev == *++scan &&
                         scan < strend);
                s->match_length = MAX_MATCH - (int)(strend - scan);
                /* Clamp the run to the data we actually have. */
                if (s->match_length > s->lookahead)
                    s->match_length = s->lookahead;
            }
        }

        /* Emit match if have run of MIN_MATCH or longer, else emit literal */
        if (s->match_length >= MIN_MATCH) {
            check_match(s, s->strstart, s->strstart - 1, s->match_length);

            /* Distance is always 1 for RLE; bflush is set by the macro
             * when the pending block fills up.
             */
            _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush);

            s->lookahead -= s->match_length;
            s->strstart += s->match_length;
            s->match_length = 0;
        } else {
            /* No match, output a literal byte */
            Tracevv((stderr,"%c", s->window[s->strstart]));
            _tr_tally_lit (s, s->window[s->strstart], bflush);
            s->lookahead--;
            s->strstart++;
        }
        if (bflush) FLUSH_BLOCK(s, 0);
    }
    FLUSH_BLOCK(s, flush == Z_FINISH);
    return flush == Z_FINISH ? finish_done : block_done;
}

/* ===========================================================================
 * For Z_HUFFMAN_ONLY, do not look for matches.  Do not maintain a hash table.
 * (It will be regenerated if this run of deflate switches away from Huffman.)
2005 */ 2006 local block_state deflate_huff(s, flush) 2007 deflate_state *s; 2008 int flush; 2009 { 2010 int bflush; /* set if current block must be flushed */ 2011 2012 for (;;) { 2013 /* Make sure that we have a literal to write. */ 2014 if (s->lookahead == 0) { 2015 fill_window(s); 2016 if (s->lookahead == 0) { 2017 if (flush == Z_NO_FLUSH) 2018 return need_more; 2019 break; /* flush the current block */ 2020 } 2021 } 2022 2023 /* Output a literal byte */ 2024 s->match_length = 0; 2025 Tracevv((stderr,"%c", s->window[s->strstart])); 2026 _tr_tally_lit (s, s->window[s->strstart], bflush); 2027 s->lookahead--; 2028 s->strstart++; 2029 if (bflush) FLUSH_BLOCK(s, 0); 2030 } 2031 FLUSH_BLOCK(s, flush == Z_FINISH); 2032 return flush == Z_FINISH ? finish_done : block_done; 2033 } 2034