/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * This file is derived from various .h and .c files from the zlib-1.0.4
 * distribution by Jean-loup Gailly and Mark Adler, with some additions
 * by Paul Mackerras to aid in implementing Deflate compression and
 * decompression for PPP packets. See zlib.h for conditions of
 * distribution and use.
 *
 * Changes that have been made include:
 * - added Z_PACKET_FLUSH (see zlib.h for details)
 * - added inflateIncomp and deflateOutputPending
 * - allow strm->next_out to be NULL, meaning discard the output
 *
 */

/*
 * ==FILEVERSION 971210==
 *
 * This marker is used by the Linux installation script to determine
 * whether an up-to-date version of this file is already installed.
 */

#define NO_DUMMY_DECL
#define NO_ZCFUNCS
#define MY_ZCALLOC

#if defined(__FreeBSD__) && (defined(KERNEL) || defined(_KERNEL))
#define inflate inflate_ppp /* FreeBSD already has an inflate :-( */
#endif


/* +++ zutil.h */
/* zutil.h -- internal interface and configuration of the compression library
 * Copyright (C) 1995-1996 Jean-loup Gailly.
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

/* WARNING: this file should *not* be used by applications. It is
   part of the implementation of the compression library and is
   subject to change. Applications should only use zlib.h.
 */

/* From: zutil.h,v 1.16 1996/07/24 13:41:13 me Exp $ */

#ifndef _Z_UTIL_H
#define _Z_UTIL_H


#include <net/zlib.h>


#if defined(KERNEL)
/* Assume this is a *BSD or SVR4 kernel */
//#include <sys/types.h>
#include <sys/time.h>
//#include <sys/systm.h>
#  define HAVE_MEMCPY
#  define memcpy(d, s, n)       bcopy((s), (d), (n))
#  define memset(d, v, n)       bzero((d), (n))
#  define memcmp                bcmp

#ifdef STDC
//#  include <string.h>
//#  include <stdlib.h>
#endif

#endif /* KERNEL */

#ifndef local
#  define local static
#endif
/* compile with -Dlocal if your debugger can't find static symbols */

typedef unsigned char  uch;
typedef uch FAR uchf;
typedef unsigned short ush;
typedef ush FAR ushf;
typedef unsigned long  ulg;

extern const char *z_errmsg[10]; /* indexed by 2-zlib_error */
/* (size given to avoid silly warnings with Visual C++) */

#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)]

#define ERR_RETURN(strm,err) \
  return (strm->msg = (char*)ERR_MSG(err), (err))
/* To be used only when the state is known to be valid */
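/* Usage sketch (illustrative note, restating the macro above): the comma
 * expression stores the static message and yields the error code in one
 * statement, so call sites can simply write, e.g.:
 *
 *     if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
 */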

/* common constants */

#ifndef DEF_WBITS
#  define DEF_WBITS MAX_WBITS
#endif
/* default windowBits for decompression. MAX_WBITS is for compression only */

#if MAX_MEM_LEVEL >= 8
#  define DEF_MEM_LEVEL 8
#else
#  define DEF_MEM_LEVEL  MAX_MEM_LEVEL
#endif
/* default memLevel */

#define STORED_BLOCK 0
#define STATIC_TREES 1
#define DYN_TREES    2
/* The three kinds of block type */

#define MIN_MATCH  3
#define MAX_MATCH  258
/* The minimum and maximum match lengths */

#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */

/* target dependencies */

#ifdef MSDOS
#  define OS_CODE  0x00
#  ifdef __TURBOC__
#    include <alloc.h>
#  else /* MSC or DJGPP */
#    include <malloc.h>
#  endif
#endif

#ifdef OS2
#  define OS_CODE  0x06
#endif

#ifdef WIN32 /* Windows 95 & Windows NT */
#  define OS_CODE  0x0b
#endif

#if defined(VAXC) || defined(VMS)
#  define OS_CODE  0x02
#  define FOPEN(name, mode) \
     fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512")
#endif

#ifdef AMIGA
#  define OS_CODE  0x01
#endif

#if defined(ATARI) || defined(atarist)
#  define OS_CODE  0x05
#endif

#ifdef MACOS
#  define OS_CODE  0x07
#endif

#ifdef __50SERIES /* Prime/PRIMOS */
#  define OS_CODE  0x0F
#endif

#ifdef TOPS20
#  define OS_CODE  0x0a
#endif

#if defined(_BEOS_) || defined(RISCOS)
#  define fdopen(fd,mode) NULL /* No fdopen() */
#endif

/* Common defaults */

#ifndef OS_CODE
#  define OS_CODE  0x03  /* assume Unix */
#endif

#ifndef FOPEN
#  define FOPEN(name, mode) fopen((name), (mode))
#endif

/* functions */

#ifdef HAVE_STRERROR
   extern char *strerror OF((int));
#  define zstrerror(errnum) strerror(errnum)
#else
#  define zstrerror(errnum) ""
#endif

#if defined(pyr)
#  define NO_MEMCPY
#endif
#if (defined(M_I86SM) || defined(M_I86MM)) && !defined(_MSC_VER)
 /* Use our own functions for small and medium model with MSC <= 5.0.
  * You may have to use the same strategy for Borland C (untested).
  */
#  define NO_MEMCPY
#endif
#if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY)
#  define HAVE_MEMCPY
#endif
#ifdef HAVE_MEMCPY
#  ifdef SMALL_MEDIUM /* MSDOS small or medium model */
#    define zmemcpy _fmemcpy
#    define zmemcmp _fmemcmp
#    define zmemzero(dest, len) _fmemset(dest, 0, len)
#  else
#    define zmemcpy memcpy
#    define zmemcmp memcmp
#    define zmemzero(dest, len) memset(dest, 0, len)
#  endif
#else
   extern void zmemcpy  OF((Bytef* dest, Bytef* source, uInt len));
   extern int  zmemcmp  OF((Bytef* s1,   Bytef* s2, uInt len));
   extern void zmemzero OF((Bytef* dest, uInt len));
#endif

/* Diagnostic functions */
#ifdef DEBUG_ZLIB
#  include <stdio.h>
#  ifndef verbose
#    define verbose 0
#  endif
   extern void z_error    OF((char *m));
#  define Assert(cond,msg) {if(!(cond)) z_error(msg);}
#  define Trace(x) fprintf x
#  define Tracev(x) {if (verbose) fprintf x ;}
#  define Tracevv(x) {if (verbose>1) fprintf x ;}
#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
#else
#  define Assert(cond,msg)
#  define Trace(x)
#  define Tracev(x)
#  define Tracevv(x)
#  define Tracec(c,x)
#  define Tracecv(c,x)
#endif


typedef uLong (*check_func) OF((uLong check, const Bytef *buf, uInt len));

voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size));
void   zcfree  OF((voidpf opaque, voidpf ptr));

#define ZALLOC(strm, items, size) \
           (*((strm)->zalloc))((strm)->opaque, (items), (size))
#define ZFREE(strm, addr)  (*((strm)->zfree))((strm)->opaque, (voidpf)(addr))
#define TRY_FREE(s, p) {if (p) ZFREE(s, p);}

#endif /* _Z_UTIL_H */
/* --- zutil.h */

/* +++ deflate.h */
/* deflate.h -- internal compression state
 * Copyright (C) 1995-1996 Jean-loup Gailly
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

/* WARNING: this file should *not* be used by applications. It is
   part of the implementation of the compression library and is
   subject to change. Applications should only use zlib.h.
 */

/* From: deflate.h,v 1.10 1996/07/02 12:41:00 me Exp $ */

#ifndef _DEFLATE_H
#define _DEFLATE_H

/* #include "zutil.h" */

/* ===========================================================================
 * Internal compression state.
 */

#define LENGTH_CODES 29
/* number of length codes, not counting the special END_BLOCK code */

#define LITERALS  256
/* number of literal bytes 0..255 */

#define L_CODES (LITERALS+1+LENGTH_CODES)
/* number of Literal or Length codes, including the END_BLOCK code */

#define D_CODES   30
/* number of distance codes */

#define BL_CODES  19
/* number of codes used to transfer the bit lengths */

#define HEAP_SIZE (2*L_CODES+1)
/* maximum heap size */

#define MAX_BITS 15
/* All codes must not exceed MAX_BITS bits */

#define INIT_STATE    42
#define BUSY_STATE   113
#define FINISH_STATE 666
/* Stream status */


/* Data structure describing a single value and its code string. */
typedef struct ct_data_s {
    union {
        ush  freq;       /* frequency count */
        ush  code;       /* bit string */
    } fc;
    union {
        ush  dad;        /* father node in Huffman tree */
        ush  len;        /* length of bit string */
    } dl;
} FAR ct_data;

#define Freq fc.freq
#define Code fc.code
#define Dad  dl.dad
#define Len  dl.len

typedef struct static_tree_desc_s  static_tree_desc;

typedef struct tree_desc_s {
    ct_data *dyn_tree;           /* the dynamic tree */
    int     max_code;            /* largest code with non zero frequency */
    static_tree_desc *stat_desc; /* the corresponding static tree */
} FAR tree_desc;

typedef ush Pos;
typedef Pos FAR Posf;
typedef unsigned IPos;

/* A Pos is an index in the character window. We use short instead of int to
 * save space in the various tables. IPos is used only for parameter passing.
 */

typedef struct deflate_state {
    z_streamp strm;      /* pointer back to this zlib stream */
    int   status;        /* as the name implies */
    Bytef *pending_buf;  /* output still pending */
    ulg   pending_buf_size; /* size of pending_buf */
    Bytef *pending_out;  /* next pending byte to output to the stream */
    int   pending;       /* nb of bytes in the pending buffer */
    int   noheader;      /* suppress zlib header and adler32 */
    Byte  data_type;     /* UNKNOWN, BINARY or ASCII */
    Byte  method;        /* STORED (for zip only) or DEFLATED */
    int   last_flush;    /* value of flush param for previous deflate call */

                /* used by deflate.c: */

    uInt  w_size;        /* LZ77 window size (32K by default) */
    uInt  w_bits;        /* log2(w_size)  (8..16) */
    uInt  w_mask;        /* w_size - 1 */

    Bytef *window;
    /* Sliding window. Input bytes are read into the second half of the window,
     * and move to the first half later to keep a dictionary of at least wSize
     * bytes. With this organization, matches are limited to a distance of
     * wSize-MAX_MATCH bytes, but this ensures that IO is always
     * performed with a length multiple of the block size. Also, it limits
     * the window size to 64K, which is quite useful on MSDOS.
     * To do: use the user input buffer as sliding window.
     */

    ulg window_size;
    /* Actual size of window: 2*wSize, except when the user input buffer
     * is directly used as sliding window.
     */

    Posf *prev;
    /* Link to older string with same hash index. To limit the size of this
     * array to 64K, this link is maintained only for the last 32K strings.
     * An index in this array is thus a window index modulo 32K.
     */

    Posf *head; /* Heads of the hash chains or NIL. */

    uInt  ins_h;          /* hash index of string to be inserted */
    uInt  hash_size;      /* number of elements in hash table */
    uInt  hash_bits;      /* log2(hash_size) */
    uInt  hash_mask;      /* hash_size-1 */

    uInt  hash_shift;
    /* Number of bits by which ins_h must be shifted at each input
     * step. It must be such that after MIN_MATCH steps, the oldest
     * byte no longer takes part in the hash key, that is:
     *   hash_shift * MIN_MATCH >= hash_bits
     */

    long block_start;
    /* Window position at the beginning of the current output block. Gets
     * negative when the window is moved backwards.
     */

    uInt match_length;           /* length of best match */
    IPos prev_match;             /* previous match */
    int match_available;         /* set if previous match exists */
    uInt strstart;               /* start of string to insert */
    uInt match_start;            /* start of matching string */
    uInt lookahead;              /* number of valid bytes ahead in window */

    uInt prev_length;
    /* Length of the best match at previous step. Matches not greater than this
     * are discarded. This is used in the lazy match evaluation.
     */

    uInt max_chain_length;
    /* To speed up deflation, hash chains are never searched beyond this
     * length. A higher limit improves compression ratio but degrades the
     * speed.
     */

    uInt max_lazy_match;
    /* Attempt to find a better match only when the current match is strictly
     * smaller than this value. This mechanism is used only for compression
     * levels >= 4.
     */
#   define max_insert_length  max_lazy_match
    /* Insert new strings in the hash table only if the match length is not
     * greater than this length. This saves time but degrades compression.
     * max_insert_length is used only for compression levels <= 3.
     */

    int level;    /* compression level (1..9) */
    int strategy; /* favor or force Huffman coding*/

    uInt good_match;
    /* Use a faster search when the previous match is longer than this */

    int nice_match; /* Stop searching when current match exceeds this */

                /* used by trees.c: */
    /* Didn't use ct_data typedef below to suppress compiler warning */
    struct ct_data_s dyn_ltree[HEAP_SIZE];   /* literal and length tree */
    struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
    struct ct_data_s bl_tree[2*BL_CODES+1];  /* Huffman tree for bit lengths */

    struct tree_desc_s l_desc;               /* desc. for literal tree */
    struct tree_desc_s d_desc;               /* desc. for distance tree */
    struct tree_desc_s bl_desc;              /* desc. for bit length tree */

    ush bl_count[MAX_BITS+1];
    /* number of codes at each bit length for an optimal tree */

    int heap[2*L_CODES+1];      /* heap used to build the Huffman trees */
    int heap_len;               /* number of elements in the heap */
    int heap_max;               /* element of largest frequency */
    /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
     * The same heap array is used to build all trees.
     */

    uch depth[2*L_CODES+1];
    /* Depth of each subtree used as tie breaker for trees of equal frequency
     */

    uchf *l_buf;          /* buffer for literals or lengths */

    uInt  lit_bufsize;
    /* Size of match buffer for literals/lengths. There are 4 reasons for
     * limiting lit_bufsize to 64K:
     *   - frequencies can be kept in 16 bit counters
     *   - if compression is not successful for the first block, all input
     *     data is still in the window so we can still emit a stored block even
     *     when input comes from standard input. (This can also be done for
     *     all blocks if lit_bufsize is not greater than 32K.)
     *   - if compression is not successful for a file smaller than 64K, we can
     *     even emit a stored file instead of a stored block (saving 5 bytes).
     *     This is applicable only for zip (not gzip or zlib).
     *   - creating new Huffman trees less frequently may not provide fast
     *     adaptation to changes in the input data statistics. (Take for
     *     example a binary file with poorly compressible code followed by
     *     a highly compressible string table.) Smaller buffer sizes give
     *     fast adaptation but have of course the overhead of transmitting
     *     trees more frequently.
     *   - I can't count above 4
     */

    uInt last_lit;      /* running index in l_buf */

    ushf *d_buf;
    /* Buffer for distances. To simplify the code, d_buf and l_buf have
     * the same number of elements. To use different lengths, an extra flag
     * array would be necessary.
     */

    ulg opt_len;        /* bit length of current block with optimal trees */
    ulg static_len;     /* bit length of current block with static trees */
    ulg compressed_len; /* total bit length of compressed file */
    uInt matches;       /* number of string matches in current block */
    int last_eob_len;   /* bit length of EOB code for last block */

#ifdef DEBUG_ZLIB
    ulg bits_sent;      /* bit length of the compressed data */
#endif

    ush bi_buf;
    /* Output buffer. bits are inserted starting at the bottom (least
     * significant bits).
     */
    int bi_valid;
    /* Number of valid bits in bi_buf. All bits above the last valid bit
     * are always zero.
     */

} FAR deflate_state;

/* Output a byte on the stream.
 * IN assertion: there is enough room in pending_buf.
 */
#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);}


#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
/* Minimum amount of lookahead, except at the end of the input file.
 * See deflate.c for comments about the MIN_MATCH+1.
 */

#define MAX_DIST(s)  ((s)->w_size-MIN_LOOKAHEAD)
/* In order to simplify the code, particularly on 16 bit machines, match
 * distances are limited to MAX_DIST instead of WSIZE.
 */
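/* Illustrative numbers (assuming the 32K default): with w_size = 32768 and
 * MIN_LOOKAHEAD = 258+3+1 = 262, MAX_DIST(s) is 32506, so no match may reach
 * back further than 32506 bytes.
 */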

        /* in trees.c */
void _tr_init         OF((deflate_state *s));
int  _tr_tally        OF((deflate_state *s, unsigned dist, unsigned lc));
ulg  _tr_flush_block  OF((deflate_state *s, charf *buf, ulg stored_len,
                          int eof));
void _tr_align        OF((deflate_state *s));
void _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len,
                          int eof));
void _tr_stored_type_only OF((deflate_state *));

#endif
/* --- deflate.h */

/* +++ deflate.c */
/* deflate.c -- compress data using the deflation algorithm
 * Copyright (C) 1995-1996 Jean-loup Gailly.
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

/*
 *  ALGORITHM
 *
 *      The "deflation" process depends on being able to identify portions
 *      of the input text which are identical to earlier input (within a
 *      sliding window trailing behind the input currently being processed).
 *
 *      The most straightforward technique turns out to be the fastest for
 *      most input files: try all possible matches and select the longest.
 *      The key feature of this algorithm is that insertions into the string
 *      dictionary are very simple and thus fast, and deletions are avoided
 *      completely. Insertions are performed at each input character, whereas
 *      string matches are performed only when the previous match ends. So it
 *      is preferable to spend more time in matches to allow very fast string
 *      insertions and avoid deletions. The matching algorithm for small
 *      strings is inspired from that of Rabin & Karp. A brute force approach
 *      is used to find longer strings when a small match has been found.
 *      A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
 *      (by Leonid Broukhis).
 *      A previous version of this file used a more sophisticated algorithm
 *      (by Fiala and Greene) which is guaranteed to run in linear amortized
 *      time, but has a larger average cost, uses more memory and is patented.
 *      However the F&G algorithm may be faster for some highly redundant
 *      files if the parameter max_chain_length (described below) is too large.
 *
 *  ACKNOWLEDGEMENTS
 *
 *      The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
 *      I found it in 'freeze' written by Leonid Broukhis.
 *      Thanks to many people for bug reports and testing.
 *
 *  REFERENCES
 *
 *      Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
 *      Available in ftp://ds.internic.net/rfc/rfc1951.txt
 *
 *      A description of the Rabin and Karp algorithm is given in the book
 *      "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
 *
 *      Fiala,E.R., and Greene,D.H.
 *      Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-505
 *
 */

/* From: deflate.c,v 1.15 1996/07/24 13:40:58 me Exp $ */

/* #include "deflate.h" */

char deflate_copyright[] = " deflate 1.0.4 Copyright 1995-1996 Jean-loup Gailly ";
/*
  If you use the zlib library in a product, an acknowledgment is welcome
  in the documentation of your product. If for some reason you cannot
  include such an acknowledgment, I would appreciate that you keep this
  copyright string in the executable of your product.
 */

/* ===========================================================================
 *  Function prototypes.
 */
typedef enum {
    need_more,      /* block not completed, need more input or more output */
    block_done,     /* block flush performed */
    finish_started, /* finish started, need only more output at next deflate */
    finish_done     /* finish done, accept no more input or output */
} block_state;

typedef block_state (*compress_func) OF((deflate_state *s, int flush));
/* Compression function. Returns the block state after the call. */

local void fill_window    OF((deflate_state *s));
local block_state deflate_stored OF((deflate_state *s, int flush));
local block_state deflate_fast   OF((deflate_state *s, int flush));
local block_state deflate_slow   OF((deflate_state *s, int flush));
local void lm_init        OF((deflate_state *s));
local void putShortMSB    OF((deflate_state *s, uInt b));
local void flush_pending  OF((z_streamp strm));
local int read_buf        OF((z_streamp strm, charf *buf, unsigned size));
#ifdef ASMV
      void match_init OF((void)); /* asm code initialization */
      uInt longest_match  OF((deflate_state *s, IPos cur_match));
#else
local uInt longest_match  OF((deflate_state *s, IPos cur_match));
#endif

#ifdef DEBUG_ZLIB
local void check_match OF((deflate_state *s, IPos start, IPos match,
                           int length));
#endif

/* ===========================================================================
 * Local data
 */

#define NIL 0
/* Tail of hash chains */

#ifndef TOO_FAR
#  define TOO_FAR 4096
#endif
/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */

#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
/* Minimum amount of lookahead, except at the end of the input file.
 * See deflate.c for comments about the MIN_MATCH+1.
 */

/* Values for max_lazy_match, good_match and max_chain_length, depending on
 * the desired pack level (0..9). The values given below have been tuned to
 * exclude worst case performance for pathological files. Better values may be
 * found for specific files.
 */
typedef struct config_s {
   ush good_length; /* reduce lazy search above this match length */
   ush max_lazy;    /* do not perform lazy search above this match length */
   ush nice_length; /* quit search above this match length */
   ush max_chain;
   compress_func func;
} config;

local config configuration_table[10] = {
/*      good lazy nice chain */
/* 0 */ {0,    0,   0,    0, deflate_stored},  /* store only */
/* 1 */ {4,    4,   8,    4, deflate_fast}, /* maximum speed, no lazy matches */
/* 2 */ {4,    5,  16,    8, deflate_fast},
/* 3 */ {4,    6,  32,   32, deflate_fast},

/* 4 */ {4,    4,  16,   16, deflate_slow},  /* lazy matches */
/* 5 */ {8,   16,  32,   32, deflate_slow},
/* 6 */ {8,   16, 128,  128, deflate_slow},
/* 7 */ {8,   32, 128,  256, deflate_slow},
/* 8 */ {32, 128, 258, 1024, deflate_slow},
/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */

/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
 * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
 * meaning.
 */
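/* Worked example (reading the table, no new behavior): at the default level 6,
 * deflate_slow() follows a hash chain at most 128 links deep, stops early once
 * a 128-byte match is found, and attempts a lazy match only while the current
 * match is shorter than 16 bytes. At level 1, deflate_fast() instead treats
 * lazy (4) as max_insert_length, per the #define in deflate.h above.
 */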

#define EQUAL 0
/* result of memcmp for equal strings */

#ifndef NO_DUMMY_DECL
struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
#endif

/* ===========================================================================
 * Update a hash value with the given input byte
 * IN  assertion: all calls to UPDATE_HASH are made with consecutive
 *    input characters, so that a running hash key can be computed from the
 *    previous key instead of complete recalculation each time.
 */
#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
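/* Illustrative trace (default memLevel 8, so hash_bits = 15, hash_shift = 5):
 * each update shifts the key left 5 bits and XORs in the new byte, so after
 * MIN_MATCH = 3 updates the oldest byte has been shifted past bit 15 and
 * masked away -- the key depends only on the last 3 input bytes.
 */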


/* ===========================================================================
 * Insert string str in the dictionary and set match_head to the previous head
 * of the hash chain (the most recent string with same hash key). Return
 * the previous length of the hash chain.
 * IN  assertion: all calls to INSERT_STRING are made with consecutive
 *    input characters and the first MIN_MATCH bytes of str are valid
 *    (except for the last MIN_MATCH-1 bytes of the input file).
 */
#define INSERT_STRING(s, str, match_head) \
   (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
    s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \
    s->head[s->ins_h] = (Pos)(str))
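/* Usage sketch (as in deflateSetDictionary() below): the macro both links
 * position str onto the front of its hash chain and hands back the old head,
 * so a caller probing for matches writes, e.g.:
 *
 *     INSERT_STRING(s, s->strstart, hash_head);
 *     if (hash_head != NIL) ...search the chain starting at hash_head...
 */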

/* ===========================================================================
 * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
 * prev[] will be initialized on the fly.
 */
#define CLEAR_HASH(s) \
    s->head[s->hash_size-1] = NIL; \
    zmemzero((charf *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));

/* ========================================================================= */
int deflateInit_(strm, level, version, stream_size)
    z_streamp strm;
    int level;
    const char *version;
    int stream_size;
{
    return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
                         Z_DEFAULT_STRATEGY, version, stream_size);
    /* To do: ignore strm->next_in if we use it as window */
}
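/* Caller's-eye sketch (illustrative only; my_kernel_alloc/my_kernel_free are
 * hypothetical callbacks, not part of this file):
 *
 *     z_stream zs;
 *     zs.zalloc = my_kernel_alloc;
 *     zs.zfree  = my_kernel_free;
 *     zs.opaque = NULL;
 *     if (deflateInit(&zs, Z_DEFAULT_COMPRESSION) != Z_OK)
 *         ...handle the error...
 *
 * deflateInit() is the zlib.h macro that passes ZLIB_VERSION and
 * sizeof(z_stream) through to deflateInit_() for the checks below.
 */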

/* ========================================================================= */
int deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
                  version, stream_size)
    z_streamp strm;
    int  level;
    int  method;
    int  windowBits;
    int  memLevel;
    int  strategy;
    const char *version;
    int stream_size;
{
    deflate_state *s;
    int noheader = 0;
    static char* my_version = ZLIB_VERSION;

    ushf *overlay;
    /* We overlay pending_buf and d_buf+l_buf. This works since the average
     * output size for (length,distance) codes is <= 24 bits.
     */

    if (version == Z_NULL || version[0] != my_version[0] ||
        stream_size != sizeof(z_stream)) {
        return Z_VERSION_ERROR;
    }
    if (strm == Z_NULL) return Z_STREAM_ERROR;

    strm->msg = Z_NULL;
#ifndef NO_ZCFUNCS
    if (strm->zalloc == Z_NULL) {
        strm->zalloc = zcalloc;
        strm->opaque = (voidpf)0;
    }
    if (strm->zfree == Z_NULL) strm->zfree = zcfree;
#endif

    if (level == Z_DEFAULT_COMPRESSION) level = 6;

    if (windowBits < 0) { /* undocumented feature: suppress zlib header */
        noheader = 1;
        windowBits = -windowBits;
    }
    if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
        windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
        strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
        return Z_STREAM_ERROR;
    }
    s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
    if (s == Z_NULL) return Z_MEM_ERROR;
    strm->state = (struct internal_state FAR *)s;
    s->strm = strm;

    s->noheader = noheader;
    s->w_bits = windowBits;
    s->w_size = 1 << s->w_bits;
    s->w_mask = s->w_size - 1;

    s->hash_bits = memLevel + 7;
    s->hash_size = 1 << s->hash_bits;
    s->hash_mask = s->hash_size - 1;
    s->hash_shift =  ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
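    /* Sizing example (just the arithmetic above, no added logic): the
     * defaults windowBits = 15 and memLevel = 8 give w_size = 32768,
     * hash_bits = 15, hash_size = 32768 and hash_shift = (15+2)/3 = 5,
     * satisfying hash_shift * MIN_MATCH >= hash_bits as UPDATE_HASH()
     * requires.
     */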

    s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
    s->prev   = (Posf *)  ZALLOC(strm, s->w_size, sizeof(Pos));
    s->head   = (Posf *)  ZALLOC(strm, s->hash_size, sizeof(Pos));

    s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */

    overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
    s->pending_buf = (uchf *) overlay;
    s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);

    if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
        s->pending_buf == Z_NULL) {
        strm->msg = (char*)ERR_MSG(Z_MEM_ERROR);
        deflateEnd (strm);
        return Z_MEM_ERROR;
    }
    s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
    s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
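    /* Overlay layout (restating the assignments above, with B = lit_bufsize
     * and sizeof(ush) == 2): pending_buf spans 4*B bytes; d_buf starts at
     * byte offset B and holds B distances (one ush each, ending at 3*B);
     * l_buf starts at byte offset 3*B and holds B literal/length bytes.
     * The pending output and the match buffers share this one allocation.
     */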

    s->level = level;
    s->strategy = strategy;
    s->method = (Byte)method;

    return deflateReset(strm);
}

/* ========================================================================= */
int deflateSetDictionary (strm, dictionary, dictLength)
    z_streamp strm;
    const Bytef *dictionary;
    uInt  dictLength;
{
    deflate_state *s;
    uInt length = dictLength;
    uInt n;
    IPos hash_head = 0;

    if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL)
        return Z_STREAM_ERROR;

    s = (deflate_state *) strm->state;
    if (s->status != INIT_STATE) return Z_STREAM_ERROR;

    strm->adler = adler32(strm->adler, dictionary, dictLength);

    if (length < MIN_MATCH) return Z_OK;
    if (length > MAX_DIST(s)) {
        length = MAX_DIST(s);
#ifndef USE_DICT_HEAD
        dictionary += dictLength - length; /* use the tail of the dictionary */
#endif
    }
    zmemcpy((charf *)s->window, dictionary, length);
    s->strstart = length;
    s->block_start = (long)length;

    /* Insert all strings in the hash table (except for the last two bytes).
     * s->lookahead stays null, so s->ins_h will be recomputed at the next
     * call of fill_window.
     */
    s->ins_h = s->window[0];
    UPDATE_HASH(s, s->ins_h, s->window[1]);
    for (n = 0; n <= length - MIN_MATCH; n++) {
        INSERT_STRING(s, n, hash_head);
    }
    if (hash_head) hash_head = 0; /* to make compiler happy */
    return Z_OK;
}

/* ========================================================================= */
int deflateReset (strm)
    z_streamp strm;
{
    deflate_state *s;

    if (strm == Z_NULL || strm->state == Z_NULL ||
        strm->zalloc == Z_NULL || strm->zfree == Z_NULL) return Z_STREAM_ERROR;

    strm->total_in = strm->total_out = 0;
    strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
    strm->data_type = Z_UNKNOWN;

    s = (deflate_state *)strm->state;
    s->pending = 0;
    s->pending_out = s->pending_buf;

    if (s->noheader < 0) {
        s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */
    }
    s->status = s->noheader ? BUSY_STATE : INIT_STATE;
    strm->adler = 1;
    s->last_flush = Z_NO_FLUSH;

    _tr_init(s);
    lm_init(s);

    return Z_OK;
}

/* ========================================================================= */
int deflateParams(strm, level, strategy)
    z_streamp strm;
    int level;
    int strategy;
{
    deflate_state *s;
    compress_func func;
    int err = Z_OK;

    if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
    s = (deflate_state *) strm->state;

    if (level == Z_DEFAULT_COMPRESSION) {
        level = 6;
    }
    if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
        return Z_STREAM_ERROR;
    }
    func = configuration_table[s->level].func;

    if (func != configuration_table[level].func && strm->total_in != 0) {
        /* Flush the last buffer: */
        err = deflate(strm, Z_PARTIAL_FLUSH);
    }
    if (s->level != level) {
        s->level = level;
        s->max_lazy_match   = configuration_table[level].max_lazy;
        s->good_match       = configuration_table[level].good_length;
        s->nice_match       = configuration_table[level].nice_length;
        s->max_chain_length = configuration_table[level].max_chain;
    }
    s->strategy = strategy;
    return err;
}

/* =========================================================================
 * Put a short in the pending buffer. The 16-bit value is put in MSB order.
 * IN assertion: the stream state is correct and there is enough room in
 * pending_buf.
 */
local void putShortMSB (s, b)
    deflate_state *s;
    uInt b;
{
    put_byte(s, (Byte)(b >> 8));
    put_byte(s, (Byte)(b & 0xff));
}
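/* Example (big-endian order, per the comment above): putShortMSB(s, 0x789c)
 * appends the bytes 0x78 then 0x9c -- the byte order required for the zlib
 * header and the adler32 trailer written by deflate() below.
 */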

/* =========================================================================
 * Flush as much pending output as possible. All deflate() output goes
 * through this function so some applications may wish to modify it
 * to avoid allocating a large strm->next_out buffer and copying into it.
 * (See also read_buf()).
 */
local void flush_pending(strm)
    z_streamp strm;
{
    deflate_state *s = (deflate_state *) strm->state;
    unsigned len = s->pending;

    if (len > strm->avail_out) len = strm->avail_out;
    if (len == 0) return;

    if (strm->next_out != Z_NULL) {
        zmemcpy(strm->next_out, s->pending_out, len);
        strm->next_out += len;
    }
    s->pending_out  += len;
    strm->total_out += len;
    strm->avail_out -= len;
    s->pending      -= len;
    if (s->pending == 0) {
        s->pending_out = s->pending_buf;
    }
}

/* ========================================================================= */
int deflate (strm, flush)
    z_streamp strm;
    int flush;
{
    int old_flush; /* value of flush param for previous deflate call */
    deflate_state *s;

    if (strm == Z_NULL || strm->state == Z_NULL ||
        flush > Z_FINISH || flush < 0) {
        return Z_STREAM_ERROR;
    }
    s = (deflate_state *) strm->state;

    if ((strm->next_in == Z_NULL && strm->avail_in != 0) ||
        (s->status == FINISH_STATE && flush != Z_FINISH)) {
        ERR_RETURN(strm, Z_STREAM_ERROR);
    }
    if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);

    s->strm = strm; /* just in case */
    old_flush = s->last_flush;
    s->last_flush = flush;

    /* Write the zlib header */
    if (s->status == INIT_STATE) {

        uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
        uInt level_flags = (s->level-1) >> 1;

        if (level_flags > 3) level_flags = 3;
        header |= (level_flags << 6);
        if (s->strstart != 0) header |= PRESET_DICT;
        header += 31 - (header % 31);
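        /* Worked example (just arithmetic, no new behavior): with the
         * defaults w_bits = 15 and level = 6, header starts as 0x7880;
         * 0x7880 % 31 == 3, so adding 28 gives 0x789c, the familiar
         * two-byte zlib header whose value is divisible by 31 as RFC 1950
         * requires.
         */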

        s->status = BUSY_STATE;
        putShortMSB(s, header);

        /* Save the adler32 of the preset dictionary: */
        if (s->strstart != 0) {
            putShortMSB(s, (uInt)(strm->adler >> 16));
            putShortMSB(s, (uInt)(strm->adler & 0xffff));
        }
        strm->adler = 1L;
    }

    /* Flush as much pending output as possible */
    if (s->pending != 0) {
        flush_pending(strm);
        if (strm->avail_out == 0) {
            /* Since avail_out is 0, deflate will be called again with
             * more output space, but possibly with both pending and
             * avail_in equal to zero. There won't be anything to do,
             * but this is not an error situation so make sure we
             * return OK instead of BUF_ERROR at next call of deflate:
             */
            s->last_flush = -1;
            return Z_OK;
        }

    /* Make sure there is something to do and avoid duplicate consecutive
     * flushes. For repeated and useless calls with Z_FINISH, we keep
     * returning Z_STREAM_END instead of Z_BUF_ERROR.
     */
    } else if (strm->avail_in == 0 && flush <= old_flush &&
               flush != Z_FINISH) {
        ERR_RETURN(strm, Z_BUF_ERROR);
    }

    /* User must not provide more input after the first FINISH: */
    if (s->status == FINISH_STATE && strm->avail_in != 0) {
        ERR_RETURN(strm, Z_BUF_ERROR);
    }

    /* Start a new block or continue the current one.
     */
    if (strm->avail_in != 0 || s->lookahead != 0 ||
        (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
        block_state bstate;

        bstate = (*(configuration_table[s->level].func))(s, flush);

        if (bstate == finish_started || bstate == finish_done) {
            s->status = FINISH_STATE;
        }
        if (bstate == need_more || bstate == finish_started) {
            if (strm->avail_out == 0) {
                s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
            }
            return Z_OK;
            /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
             * of deflate should use the same flush parameter to make sure
             * that the flush is complete. So we don't have to output an
             * empty block here, this will be done at next call. This also
             * ensures that for a very small output buffer, we emit at most
             * one empty block.
             */
        }
        if (bstate == block_done) {
            if (flush == Z_PARTIAL_FLUSH) {
                _tr_align(s);
            } else if (flush == Z_PACKET_FLUSH) {
                /* Output just the 3-bit `stored' block type value,
                   but not a zero length. */
                _tr_stored_type_only(s);
            } else { /* FULL_FLUSH or SYNC_FLUSH */
                _tr_stored_block(s, (char*)0, 0L, 0);
                /* For a full flush, this empty block will be recognized
                 * as a special marker by inflate_sync().
                 */
                if (flush == Z_FULL_FLUSH) {
                    CLEAR_HASH(s);             /* forget history */
                }
            }
            flush_pending(strm);
            if (strm->avail_out == 0) {
                s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
                return Z_OK;
            }
        }
    }
    Assert(strm->avail_out > 0, "bug2");

    if (flush != Z_FINISH) return Z_OK;
    if (s->noheader) return Z_STREAM_END;

    /* Write the zlib trailer (adler32) */
    putShortMSB(s, (uInt)(strm->adler >> 16));
    putShortMSB(s, (uInt)(strm->adler & 0xffff));
    flush_pending(strm);
    /* If avail_out is zero, the application will call deflate again
     * to flush the rest.
     */
    s->noheader = -1; /* write the trailer only once! */
    return s->pending != 0 ? Z_OK : Z_STREAM_END;
}
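/* Illustrative drain loop (hypothetical caller code; outbuf and write_out are
 * made-up names). Because deflate() may return Z_OK with output still pending
 * whenever avail_out fills up, a finishing caller keeps calling until
 * Z_STREAM_END, given a z_stream zs set up as sketched after deflateInit_():
 *
 *     int ret;
 *     do {
 *         zs.next_out  = outbuf;
 *         zs.avail_out = sizeof(outbuf);
 *         ret = deflate(&zs, Z_FINISH);
 *         write_out(outbuf, sizeof(outbuf) - zs.avail_out);
 *     } while (ret == Z_OK);
 *     ...ret should now be Z_STREAM_END on success...
 */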
1114 | ||
1115 | /* ========================================================================= */ | |
1116 | int deflateEnd (strm) | |
1117 | z_streamp strm; | |
1118 | { | |
1119 | int status; | |
1120 | deflate_state *s; | |
1121 | ||
1122 | if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; | |
1123 | s = (deflate_state *) strm->state; | |
1124 | ||
1125 | status = s->status; | |
1126 | if (status != INIT_STATE && status != BUSY_STATE && | |
1127 | status != FINISH_STATE) { | |
1128 | return Z_STREAM_ERROR; | |
1129 | } | |
1130 | ||
1131 | /* Deallocate in reverse order of allocations: */ | |
1132 | TRY_FREE(strm, s->pending_buf); | |
1133 | TRY_FREE(strm, s->head); | |
1134 | TRY_FREE(strm, s->prev); | |
1135 | TRY_FREE(strm, s->window); | |
1136 | ||
1137 | ZFREE(strm, s); | |
1138 | strm->state = Z_NULL; | |
1139 | ||
1140 | return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK; | |
1141 | } | |
1142 | ||
1143 | /* ========================================================================= | |
1144 | * Copy the source state to the destination state. | |
1145 | */ | |
1146 | int deflateCopy (dest, source) | |
1147 | z_streamp dest; | |
1148 | z_streamp source; | |
1149 | { | |
1150 | deflate_state *ds; | |
1151 | deflate_state *ss; | |
1152 | ushf *overlay; | |
1153 | ||
1154 | if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) | |
1155 | return Z_STREAM_ERROR; | |
1156 | ss = (deflate_state *) source->state; | |
1157 | ||
1158 | zmemcpy(dest, source, sizeof(*dest)); | |
1159 | ||
1160 | ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state)); | |
1161 | if (ds == Z_NULL) return Z_MEM_ERROR; | |
1162 | dest->state = (struct internal_state FAR *) ds; | |
1163 | zmemcpy(ds, ss, sizeof(*ds)); | |
1164 | ds->strm = dest; | |
1165 | ||
1166 | ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); | |
1167 | ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); | |
1168 | ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); | |
1169 | overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2); | |
1170 | ds->pending_buf = (uchf *) overlay; | |
1171 | ||
1172 | if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL || | |
1173 | ds->pending_buf == Z_NULL) { | |
1174 | deflateEnd (dest); | |
1175 | return Z_MEM_ERROR; | |
1176 | } | |
1177 | /* ??? following zmemcpy doesn't work for 16-bit MSDOS */ | |
1178 | zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte)); | |
1179 | zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos)); | |
1180 | zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos)); | |
1181 | zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); | |
1182 | ||
1183 | ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); | |
1184 | ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); | |
1185 | ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; | |
1186 | ||
1187 | ds->l_desc.dyn_tree = ds->dyn_ltree; | |
1188 | ds->d_desc.dyn_tree = ds->dyn_dtree; | |
1189 | ds->bl_desc.dyn_tree = ds->bl_tree; | |
1190 | ||
1191 | return Z_OK; | |
1192 | } | |
1193 | ||
1194 | /* =========================================================================== | |
1195 | * Return the number of bytes of output which are immediately available | |
1196 | * for output from the decompressor. | |
1197 | */ | |
1198 | int deflateOutputPending (strm) | |
1199 | z_streamp strm; | |
1200 | { | |
1201 | if (strm == Z_NULL || strm->state == Z_NULL) return 0; | |
1202 | ||
1203 | return ((deflate_state *)(strm->state))->pending; | |
1204 | } | |
1205 | ||
1206 | /* =========================================================================== | |
1207 | * Read a new buffer from the current input stream, update the adler32 | |
1208 | * and total number of bytes read. All deflate() input goes through | |
1209 | * this function so some applications may wish to modify it to avoid | |
1210 | * allocating a large strm->next_in buffer and copying from it. | |
1211 | * (See also flush_pending()). | |
1212 | */ | |
1213 | local int read_buf(strm, buf, size) | |
1214 | z_streamp strm; | |
1215 | charf *buf; | |
1216 | unsigned size; | |
1217 | { | |
1218 | unsigned len = strm->avail_in; | |
1219 | ||
1220 | if (len > size) len = size; | |
1221 | if (len == 0) return 0; | |
1222 | ||
1223 | strm->avail_in -= len; | |
1224 | ||
1225 | if (!((deflate_state *)(strm->state))->noheader) { | |
1226 | strm->adler = adler32(strm->adler, strm->next_in, len); | |
1227 | } | |
1228 | zmemcpy(buf, strm->next_in, len); | |
1229 | strm->next_in += len; | |
1230 | strm->total_in += len; | |
1231 | ||
1232 | return (int)len; | |
1233 | } | |
1234 | ||
1235 | /* =========================================================================== | |
1236 | * Initialize the "longest match" routines for a new zlib stream | |
1237 | */ | |
1238 | local void lm_init (s) | |
1239 | deflate_state *s; | |
1240 | { | |
1241 | s->window_size = (ulg)2L*s->w_size; | |
1242 | ||
1243 | CLEAR_HASH(s); | |
1244 | ||
1245 | /* Set the default configuration parameters: | |
1246 | */ | |
1247 | s->max_lazy_match = configuration_table[s->level].max_lazy; | |
1248 | s->good_match = configuration_table[s->level].good_length; | |
1249 | s->nice_match = configuration_table[s->level].nice_length; | |
1250 | s->max_chain_length = configuration_table[s->level].max_chain; | |
1251 | ||
1252 | s->strstart = 0; | |
1253 | s->block_start = 0L; | |
1254 | s->lookahead = 0; | |
1255 | s->match_length = s->prev_length = MIN_MATCH-1; | |
1256 | s->match_available = 0; | |
1257 | s->ins_h = 0; | |
1258 | #ifdef ASMV | |
1259 | match_init(); /* initialize the asm code */ | |
1260 | #endif | |
1261 | } | |
1262 | ||
1263 | /* =========================================================================== | |
1264 | * Set match_start to the longest match starting at the given string and | |
1265 | * return its length. Matches shorter or equal to prev_length are discarded, | |
1266 | * in which case the result is equal to prev_length and match_start is | |
1267 | * garbage. | |
1268 | * IN assertions: cur_match is the head of the hash chain for the current | |
1269 | * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 | |
1270 | * OUT assertion: the match length is not greater than s->lookahead. | |
1271 | */ | |
1272 | #ifndef ASMV | |
1273 | /* For 80x86 and 680x0, an optimized version will be provided in match.asm or | |
1274 | * match.S. The code will be functionally equivalent. | |
1275 | */ | |
1276 | local uInt longest_match(s, cur_match) | |
1277 | deflate_state *s; | |
1278 | IPos cur_match; /* current match */ | |
1279 | { | |
1280 | unsigned chain_length = s->max_chain_length;/* max hash chain length */ | |
1281 | register Bytef *scan = s->window + s->strstart; /* current string */ | |
1282 | register Bytef *match; /* matched string */ | |
1283 | register int len; /* length of current match */ | |
1284 | int best_len = s->prev_length; /* best match length so far */ | |
1285 | int nice_match = s->nice_match; /* stop if match long enough */ | |
1286 | IPos limit = s->strstart > (IPos)MAX_DIST(s) ? | |
1287 | s->strstart - (IPos)MAX_DIST(s) : NIL; | |
1288 | /* Stop when cur_match becomes <= limit. To simplify the code, | |
1289 | * we prevent matches with the string of window index 0. | |
1290 | */ | |
1291 | Posf *prev = s->prev; | |
1292 | uInt wmask = s->w_mask; | |
1293 | ||
1294 | #ifdef UNALIGNED_OK | |
1295 | /* Compare two bytes at a time. Note: this is not always beneficial. | |
1296 | * Try with and without -DUNALIGNED_OK to check. | |
1297 | */ | |
1298 | register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1; | |
1299 | register ush scan_start = *(ushf*)scan; | |
1300 | register ush scan_end = *(ushf*)(scan+best_len-1); | |
1301 | #else | |
1302 | register Bytef *strend = s->window + s->strstart + MAX_MATCH; | |
1303 | register Byte scan_end1 = scan[best_len-1]; | |
1304 | register Byte scan_end = scan[best_len]; | |
1305 | #endif | |
1306 | ||
1307 | /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. | |
1308 | * It is easy to get rid of this optimization if necessary. | |
1309 | */ | |
1310 | Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); | |
1311 | ||
1312 | /* Do not waste too much time if we already have a good match: */ | |
1313 | if (s->prev_length >= s->good_match) { | |
1314 | chain_length >>= 2; | |
1315 | } | |
1316 | /* Do not look for matches beyond the end of the input. This is necessary | |
1317 | * to make deflate deterministic. | |
1318 | */ | |
1319 | if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; | |
1320 | ||
1321 | Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); | |
1322 | ||
1323 | do { | |
1324 | Assert(cur_match < s->strstart, "no future"); | |
1325 | match = s->window + cur_match; | |
1326 | ||
1327 | /* Skip to next match if the match length cannot increase | |
1328 | * or if the match length is less than 2: | |
1329 | */ | |
1330 | #if (defined(UNALIGNED_OK) && MAX_MATCH == 258) | |
1331 | /* This code assumes sizeof(unsigned short) == 2. Do not use | |
1332 | * UNALIGNED_OK if your compiler uses a different size. | |
1333 | */ | |
1334 | if (*(ushf*)(match+best_len-1) != scan_end || | |
1335 | *(ushf*)match != scan_start) continue; | |
1336 | ||
1337 | /* It is not necessary to compare scan[2] and match[2] since they are | |
1338 | * always equal when the other bytes match, given that the hash keys | |
1339 | * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at | |
1340 | * strstart+3, +5, ... up to strstart+257. We check for insufficient | |
1341 | * lookahead only every 4th comparison; the 128th check will be made | |
1342 | * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is | |
1343 | * necessary to put more guard bytes at the end of the window, or | |
1344 | * to check more often for insufficient lookahead. | |
1345 | */ | |
1346 | Assert(scan[2] == match[2], "scan[2]?"); | |
1347 | scan++, match++; | |
1348 | do { | |
1349 | } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) && | |
1350 | *(ushf*)(scan+=2) == *(ushf*)(match+=2) && | |
1351 | *(ushf*)(scan+=2) == *(ushf*)(match+=2) && | |
1352 | *(ushf*)(scan+=2) == *(ushf*)(match+=2) && | |
1353 | scan < strend); | |
1354 | /* The funny "do {}" generates better code on most compilers */ | |
1355 | ||
1356 | /* Here, scan <= window+strstart+257 */ | |
1357 | Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); | |
1358 | if (*scan == *match) scan++; | |
1359 | ||
1360 | len = (MAX_MATCH - 1) - (int)(strend-scan); | |
1361 | scan = strend - (MAX_MATCH-1); | |
1362 | ||
1363 | #else /* UNALIGNED_OK */ | |
1364 | ||
1365 | if (match[best_len] != scan_end || | |
1366 | match[best_len-1] != scan_end1 || | |
1367 | *match != *scan || | |
1368 | *++match != scan[1]) continue; | |
1369 | ||
1370 | /* The check at best_len-1 can be removed because it will be made | |
1371 | * again later. (This heuristic is not always a win.) | |
1372 | * It is not necessary to compare scan[2] and match[2] since they | |
1373 | * are always equal when the other bytes match, given that | |
1374 | * the hash keys are equal and that HASH_BITS >= 8. | |
1375 | */ | |
1376 | scan += 2, match++; | |
1377 | Assert(*scan == *match, "match[2]?"); | |
1378 | ||
1379 | /* We check for insufficient lookahead only every 8th comparison; | |
1380 | * the 256th check will be made at strstart+258. | |
1381 | */ | |
1382 | do { | |
1383 | } while (*++scan == *++match && *++scan == *++match && | |
1384 | *++scan == *++match && *++scan == *++match && | |
1385 | *++scan == *++match && *++scan == *++match && | |
1386 | *++scan == *++match && *++scan == *++match && | |
1387 | scan < strend); | |
1388 | ||
1389 | Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); | |
1390 | ||
1391 | len = MAX_MATCH - (int)(strend - scan); | |
1392 | scan = strend - MAX_MATCH; | |
1393 | ||
1394 | #endif /* UNALIGNED_OK */ | |
1395 | ||
1396 | if (len > best_len) { | |
1397 | s->match_start = cur_match; | |
1398 | best_len = len; | |
1399 | if (len >= nice_match) break; | |
1400 | #ifdef UNALIGNED_OK | |
1401 | scan_end = *(ushf*)(scan+best_len-1); | |
1402 | #else | |
1403 | scan_end1 = scan[best_len-1]; | |
1404 | scan_end = scan[best_len]; | |
1405 | #endif | |
1406 | } | |
1407 | } while ((cur_match = prev[cur_match & wmask]) > limit | |
1408 | && --chain_length != 0); | |
1409 | ||
1410 | if ((uInt)best_len <= s->lookahead) return best_len; | |
1411 | return s->lookahead; | |
1412 | } | |
1413 | #endif /* ASMV */ | |
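
/* The comparison loop above, stripped of the hash-chain walk and the
 * unaligned-access variant, reduces to "count how many bytes two window
 * positions have in common, capped at MAX_MATCH".  A minimal standalone
 * sketch of that core (match_len and the parameter names are illustrative,
 * not part of this source):
 */
#if 0  /* illustrative sketch -- compile separately */
#include <stddef.h>

#define SK_MAX_MATCH 258

/* Length of the common prefix of window[cur..] and window[scan..],
 * capped at SK_MAX_MATCH and at the end of the buffer. */
static int match_len(const unsigned char *window, size_t wlen,
                     size_t cur, size_t scan)
{
    int len = 0;
    while (len < SK_MAX_MATCH && scan + len < wlen &&
           window[cur + len] == window[scan + len])
        len++;
    return len;
}
#endif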
1414 | ||
1415 | #ifdef DEBUG_ZLIB | |
1416 | /* =========================================================================== | |
1417 | * Check that the match at match_start is indeed a match. | |
1418 | */ | |
1419 | local void check_match(s, start, match, length) | |
1420 | deflate_state *s; | |
1421 | IPos start, match; | |
1422 | int length; | |
1423 | { | |
1424 | /* check that the match is indeed a match */ | |
1425 | if (zmemcmp((charf *)s->window + match, | |
1426 | (charf *)s->window + start, length) != EQUAL) { | |
1427 | fprintf(stderr, " start %u, match %u, length %d\n", | |
1428 | start, match, length); | |
1429 | do { | |
1430 | fprintf(stderr, "%c%c", s->window[match++], s->window[start++]); | |
1431 | } while (--length != 0); | |
1432 | z_error("invalid match"); | |
1433 | } | |
1434 | if (z_verbose > 1) { | |
1435 | fprintf(stderr,"\\[%d,%d]", start-match, length); | |
1436 | do { putc(s->window[start++], stderr); } while (--length != 0); | |
1437 | } | |
1438 | } | |
1439 | #else | |
1440 | # define check_match(s, start, match, length) | |
1441 | #endif | |
1442 | ||
1443 | /* =========================================================================== | |
1444 | * Fill the window when the lookahead becomes insufficient. | |
1445 | * Updates strstart and lookahead. | |
1446 | * | |
1447 | * IN assertion: lookahead < MIN_LOOKAHEAD | |
1448 | * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD | |
1449 | * At least one byte has been read, or avail_in == 0; reads are | |
1450 | * performed for at least two bytes (required for the zip translate_eol | |
1451 | * option -- not supported here). | |
1452 | */ | |
1453 | local void fill_window(s) | |
1454 | deflate_state *s; | |
1455 | { | |
1456 | register unsigned n, m; | |
1457 | register Posf *p; | |
1458 | unsigned more; /* Amount of free space at the end of the window. */ | |
1459 | uInt wsize = s->w_size; | |
1460 | ||
1461 | do { | |
1462 | more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart); | |
1463 | ||
1464 | /* Deal with !@#$% 64K limit: */ | |
1465 | if (more == 0 && s->strstart == 0 && s->lookahead == 0) { | |
1466 | more = wsize; | |
1467 | ||
1468 | } else if (more == (unsigned)(-1)) { | |
1469 | /* Very unlikely, but possible on a 16-bit machine if strstart == 0 | |
1470 | * and lookahead == 1 (input done one byte at a time) | |
1471 | */ | |
1472 | more--; | |
1473 | ||
1474 | /* If the window is almost full and there is insufficient lookahead, | |
1475 | * move the upper half to the lower one to make room in the upper half. | |
1476 | */ | |
1477 | } else if (s->strstart >= wsize+MAX_DIST(s)) { | |
1478 | ||
1479 | zmemcpy((charf *)s->window, (charf *)s->window+wsize, | |
1480 | (unsigned)wsize); | |
1481 | s->match_start -= wsize; | |
1482 | s->strstart -= wsize; /* we now have strstart >= MAX_DIST */ | |
1483 | s->block_start -= (long) wsize; | |
1484 | ||
1485 | /* Slide the hash table (could be avoided with 32 bit values | |
1486 | at the expense of memory usage). We slide even when level == 0 | |
1487 | to keep the hash table consistent if we switch back to level > 0 | |
1488 | later. (Using level 0 permanently is not an optimal usage of | |
1489 | zlib, so we don't care about this pathological case.) | |
1490 | */ | |
1491 | n = s->hash_size; | |
1492 | p = &s->head[n]; | |
1493 | do { | |
1494 | m = *--p; | |
1495 | *p = (Pos)(m >= wsize ? m-wsize : NIL); | |
1496 | } while (--n); | |
1497 | ||
1498 | n = wsize; | |
1499 | p = &s->prev[n]; | |
1500 | do { | |
1501 | m = *--p; | |
1502 | *p = (Pos)(m >= wsize ? m-wsize : NIL); | |
1503 | /* If n is not on any hash chain, prev[n] is garbage but | |
1504 | * its value will never be used. | |
1505 | */ | |
1506 | } while (--n); | |
1507 | more += wsize; | |
1508 | } | |
1509 | if (s->strm->avail_in == 0) return; | |
1510 | ||
1511 | /* If there was no sliding: | |
1512 | * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && | |
1513 | * more == window_size - lookahead - strstart | |
1514 | * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) | |
1515 | * => more >= window_size - 2*WSIZE + 2 | |
1516 | * In the BIG_MEM or MMAP case (not yet supported), | |
1517 | * window_size == input_size + MIN_LOOKAHEAD && | |
1518 | * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. | |
1519 | * Otherwise, window_size == 2*WSIZE so more >= 2. | |
1520 | * If there was sliding, more >= WSIZE. So in all cases, more >= 2. | |
1521 | */ | |
1522 | Assert(more >= 2, "more < 2"); | |
1523 | ||
1524 | n = read_buf(s->strm, (charf *)s->window + s->strstart + s->lookahead, | |
1525 | more); | |
1526 | s->lookahead += n; | |
1527 | ||
1528 | /* Initialize the hash value now that we have some input: */ | |
1529 | if (s->lookahead >= MIN_MATCH) { | |
1530 | s->ins_h = s->window[s->strstart]; | |
1531 | UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); | |
1532 | #if MIN_MATCH != 3 | |
1533 | Call UPDATE_HASH() MIN_MATCH-3 more times | |
1534 | #endif | |
1535 | } | |
1536 | /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, | |
1537 | * but this is not important since only literal bytes will be emitted. | |
1538 | */ | |
1539 | ||
1540 | } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0); | |
1541 | } | |
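
/* The sliding step above in miniature: once strstart reaches the upper
 * half of the double-sized window, the upper half is copied down and
 * every stored position (head[] and prev[] entries) is rebased by wsize;
 * positions that would go negative become NIL ("no match").  A sketch
 * under those assumptions (slide_positions is an illustrative name):
 */
#if 0  /* illustrative sketch -- compile separately */
#include <string.h>

#define SK_NIL 0

static void slide_positions(unsigned short *tab, unsigned n, unsigned wsize)
{
    unsigned i;
    for (i = 0; i < n; i++)
        tab[i] = (unsigned short)(tab[i] >= wsize ? tab[i] - wsize : SK_NIL);
}

/* usage, mirroring the code above:
 *   memcpy(window, window + wsize, wsize);
 *   slide_positions(head, hash_size, wsize);
 *   slide_positions(prev, wsize, wsize);
 *   strstart -= wsize;  match_start -= wsize;  block_start -= (long)wsize;
 */
#endif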
1542 | ||
1543 | /* =========================================================================== | |
1544 | * Flush the current block, with given end-of-file flag. | |
1545 | * IN assertion: strstart is set to the end of the current match. | |
1546 | */ | |
1547 | #define FLUSH_BLOCK_ONLY(s, eof) { \ | |
1548 | _tr_flush_block(s, (s->block_start >= 0L ? \ | |
1549 | (charf *)&s->window[(unsigned)s->block_start] : \ | |
1550 | (charf *)Z_NULL), \ | |
1551 | (ulg)((long)s->strstart - s->block_start), \ | |
1552 | (eof)); \ | |
1553 | s->block_start = s->strstart; \ | |
1554 | flush_pending(s->strm); \ | |
1555 | Tracev((stderr,"[FLUSH]")); \ | |
1556 | } | |
1557 | ||
1558 | /* Same but force premature exit if necessary. */ | |
1559 | #define FLUSH_BLOCK(s, eof) { \ | |
1560 | FLUSH_BLOCK_ONLY(s, eof); \ | |
1561 | if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \ | |
1562 | } | |
1563 | ||
1564 | /* =========================================================================== | |
1565 | * Copy without compression as much as possible from the input stream, return | |
1566 | * the current block state. | |
1567 | * This function does not insert new strings in the dictionary since | |
1568 | * incompressible data is probably not useful. This function is used | |
1569 | * only for the level=0 compression option. | |
1570 | * NOTE: this function should be optimized to avoid extra copying from | |
1571 | * window to pending_buf. | |
1572 | */ | |
1573 | local block_state deflate_stored(s, flush) | |
1574 | deflate_state *s; | |
1575 | int flush; | |
1576 | { | |
1577 | /* Stored blocks are limited to 0xffff bytes, pending_buf is limited | |
1578 | * to pending_buf_size, and each stored block has a 5 byte header: | |
1579 | */ | |
1580 | ulg max_block_size = 0xffff; | |
1581 | ulg max_start; | |
1582 | ||
1583 | if (max_block_size > s->pending_buf_size - 5) { | |
1584 | max_block_size = s->pending_buf_size - 5; | |
1585 | } | |
1586 | ||
1587 | /* Copy as much as possible from input to output: */ | |
1588 | for (;;) { | |
1589 | /* Fill the window as much as possible: */ | |
1590 | if (s->lookahead <= 1) { | |
1591 | ||
1592 | Assert(s->strstart < s->w_size+MAX_DIST(s) || | |
1593 | s->block_start >= (long)s->w_size, "slide too late"); | |
1594 | ||
1595 | fill_window(s); | |
1596 | if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more; | |
1597 | ||
1598 | if (s->lookahead == 0) break; /* flush the current block */ | |
1599 | } | |
1600 | Assert(s->block_start >= 0L, "block gone"); | |
1601 | ||
1602 | s->strstart += s->lookahead; | |
1603 | s->lookahead = 0; | |
1604 | ||
1605 | /* Emit a stored block if pending_buf will be full: */ | |
1606 | max_start = s->block_start + max_block_size; | |
1607 | if (s->strstart == 0 || (ulg)s->strstart >= max_start) { | |
1608 | /* strstart == 0 is possible when wrapping around on a 16-bit machine */ | |
1609 | s->lookahead = (uInt)(s->strstart - max_start); | |
1610 | s->strstart = (uInt)max_start; | |
1611 | FLUSH_BLOCK(s, 0); | |
1612 | } | |
1613 | /* Flush if we may have to slide, otherwise block_start may become | |
1614 | * negative and the data will be gone: | |
1615 | */ | |
1616 | if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) { | |
1617 | FLUSH_BLOCK(s, 0); | |
1618 | } | |
1619 | } | |
1620 | FLUSH_BLOCK(s, flush == Z_FINISH); | |
1621 | return flush == Z_FINISH ? finish_done : block_done; | |
1622 | } | |
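
/* deflate_stored() in outline: a stored block is a 5-byte header (block
 * type padded to a byte boundary, then LEN and its one's complement NLEN
 * as little-endian 16-bit words) followed by at most 0xffff raw bytes, so
 * level-0 output is simply the input cut into such chunks.  A sketch of
 * the framing for a raw, byte-aligned deflate stream (names illustrative):
 */
#if 0  /* illustrative sketch -- compile separately */
#include <stdio.h>

static void stored_blocks(FILE *out, const unsigned char *in, size_t n)
{
    do {
        unsigned len = n > 0xffffu ? 0xffffu : (unsigned)n;
        int last = (len == n);
        fputc(last ? 1 : 0, out);                 /* BFINAL bit, BTYPE=00 */
        fputc(len & 0xff, out);  fputc(len >> 8, out);           /* LEN  */
        fputc(~len & 0xff, out); fputc((~len >> 8) & 0xff, out); /* NLEN */
        fwrite(in, 1, len, out);
        in += len; n -= len;
    } while (n > 0);
}
#endif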
1623 | ||
1624 | /* =========================================================================== | |
1625 | * Compress as much as possible from the input stream, return the current | |
1626 | * block state. | |
1627 | * This function does not perform lazy evaluation of matches and inserts | |
1628 | * new strings in the dictionary only for unmatched strings or for short | |
1629 | * matches. It is used only for the fast compression options. | |
1630 | */ | |
1631 | local block_state deflate_fast(s, flush) | |
1632 | deflate_state *s; | |
1633 | int flush; | |
1634 | { | |
1635 | IPos hash_head = NIL; /* head of the hash chain */ | |
1636 | int bflush; /* set if current block must be flushed */ | |
1637 | ||
1638 | for (;;) { | |
1639 | /* Make sure that we always have enough lookahead, except | |
1640 | * at the end of the input file. We need MAX_MATCH bytes | |
1641 | * for the next match, plus MIN_MATCH bytes to insert the | |
1642 | * string following the next match. | |
1643 | */ | |
1644 | if (s->lookahead < MIN_LOOKAHEAD) { | |
1645 | fill_window(s); | |
1646 | if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { | |
1647 | return need_more; | |
1648 | } | |
1649 | if (s->lookahead == 0) break; /* flush the current block */ | |
1650 | } | |
1651 | ||
1652 | /* Insert the string window[strstart .. strstart+2] in the | |
1653 | * dictionary, and set hash_head to the head of the hash chain: | |
1654 | */ | |
1655 | if (s->lookahead >= MIN_MATCH) { | |
1656 | INSERT_STRING(s, s->strstart, hash_head); | |
1657 | } | |
1658 | ||
1659 | /* Find the longest match, discarding those <= prev_length. | |
1660 | * At this point we have always match_length < MIN_MATCH | |
1661 | */ | |
1662 | if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) { | |
1663 | /* To simplify the code, we prevent matches with the string | |
1664 | * of window index 0 (in particular we have to avoid a match | |
1665 | * of the string with itself at the start of the input file). | |
1666 | */ | |
1667 | if (s->strategy != Z_HUFFMAN_ONLY) { | |
1668 | s->match_length = longest_match (s, hash_head); | |
1669 | } | |
1670 | /* longest_match() sets match_start */ | |
1671 | } | |
1672 | if (s->match_length >= MIN_MATCH) { | |
1673 | check_match(s, s->strstart, s->match_start, s->match_length); | |
1674 | ||
1675 | bflush = _tr_tally(s, s->strstart - s->match_start, | |
1676 | s->match_length - MIN_MATCH); | |
1677 | ||
1678 | s->lookahead -= s->match_length; | |
1679 | ||
1680 | /* Insert new strings in the hash table only if the match length | |
1681 | * is not too large. This saves time but degrades compression. | |
1682 | */ | |
1683 | if (s->match_length <= s->max_insert_length && | |
1684 | s->lookahead >= MIN_MATCH) { | |
1685 | s->match_length--; /* string at strstart already in hash table */ | |
1686 | do { | |
1687 | s->strstart++; | |
1688 | INSERT_STRING(s, s->strstart, hash_head); | |
1689 | /* strstart never exceeds WSIZE-MAX_MATCH, so there are | |
1690 | * always MIN_MATCH bytes ahead. | |
1691 | */ | |
1692 | } while (--s->match_length != 0); | |
1693 | s->strstart++; | |
1694 | } else { | |
1695 | s->strstart += s->match_length; | |
1696 | s->match_length = 0; | |
1697 | s->ins_h = s->window[s->strstart]; | |
1698 | UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); | |
1699 | #if MIN_MATCH != 3 | |
1700 | Call UPDATE_HASH() MIN_MATCH-3 more times | |
1701 | #endif | |
1702 | /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not | |
1703 | * matter since it will be recomputed at next deflate call. | |
1704 | */ | |
1705 | } | |
1706 | } else { | |
1707 | /* No match, output a literal byte */ | |
1708 | Tracevv((stderr,"%c", s->window[s->strstart])); | |
1709 | bflush = _tr_tally (s, 0, s->window[s->strstart]); | |
1710 | s->lookahead--; | |
1711 | s->strstart++; | |
1712 | } | |
1713 | if (bflush) FLUSH_BLOCK(s, 0); | |
1714 | } | |
1715 | FLUSH_BLOCK(s, flush == Z_FINISH); | |
1716 | return flush == Z_FINISH ? finish_done : block_done; | |
1717 | } | |
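
/* The INSERT_STRING()/UPDATE_HASH() pair used above, written out as plain
 * functions: a rolling hash over the MIN_MATCH (3) bytes at a position
 * selects a head[] slot, and prev[] threads all positions sharing that
 * hash into the chain that longest_match() walks.  Sketch only; the
 * HASH_BITS/shift values are the usual zlib defaults (memLevel 8), not
 * taken from this file:
 */
#if 0  /* illustrative sketch -- compile separately */
#define SK_HASH_BITS 15
#define SK_HASH_SIZE (1u << SK_HASH_BITS)
#define SK_HASH_MASK (SK_HASH_SIZE - 1)
#define SK_H_SHIFT   5   /* (HASH_BITS + MIN_MATCH - 1) / MIN_MATCH */

static unsigned sk_update_hash(unsigned h, unsigned char c)
{
    return ((h << SK_H_SHIFT) ^ c) & SK_HASH_MASK;
}

/* Insert position pos (hash covers window[pos..pos+2]) and return the
 * previous head of its chain, i.e. the first match candidate. */
static unsigned sk_insert(unsigned short *head, unsigned short *prev,
                          unsigned wmask, unsigned *ins_h,
                          const unsigned char *window, unsigned pos)
{
    unsigned old;
    *ins_h = sk_update_hash(*ins_h, window[pos + 2]);
    old = head[*ins_h];
    prev[pos & wmask] = (unsigned short)old;
    head[*ins_h] = (unsigned short)pos;
    return old;
}
#endif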
1718 | ||
1719 | /* =========================================================================== | |
1720 | * Same as above, but achieves better compression. We use a lazy | |
1721 | * evaluation for matches: a match is finally adopted only if there is | |
1722 | * no better match at the next window position. | |
1723 | */ | |
1724 | local block_state deflate_slow(s, flush) | |
1725 | deflate_state *s; | |
1726 | int flush; | |
1727 | { | |
1728 | IPos hash_head = NIL; /* head of hash chain */ | |
1729 | int bflush; /* set if current block must be flushed */ | |
1730 | ||
1731 | /* Process the input block. */ | |
1732 | for (;;) { | |
1733 | /* Make sure that we always have enough lookahead, except | |
1734 | * at the end of the input file. We need MAX_MATCH bytes | |
1735 | * for the next match, plus MIN_MATCH bytes to insert the | |
1736 | * string following the next match. | |
1737 | */ | |
1738 | if (s->lookahead < MIN_LOOKAHEAD) { | |
1739 | fill_window(s); | |
1740 | if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { | |
1741 | return need_more; | |
1742 | } | |
1743 | if (s->lookahead == 0) break; /* flush the current block */ | |
1744 | } | |
1745 | ||
1746 | /* Insert the string window[strstart .. strstart+2] in the | |
1747 | * dictionary, and set hash_head to the head of the hash chain: | |
1748 | */ | |
1749 | if (s->lookahead >= MIN_MATCH) { | |
1750 | INSERT_STRING(s, s->strstart, hash_head); | |
1751 | } | |
1752 | ||
1753 | /* Find the longest match, discarding those <= prev_length. | |
1754 | */ | |
1755 | s->prev_length = s->match_length, s->prev_match = s->match_start; | |
1756 | s->match_length = MIN_MATCH-1; | |
1757 | ||
1758 | if (hash_head != NIL && s->prev_length < s->max_lazy_match && | |
1759 | s->strstart - hash_head <= MAX_DIST(s)) { | |
1760 | /* To simplify the code, we prevent matches with the string | |
1761 | * of window index 0 (in particular we have to avoid a match | |
1762 | * of the string with itself at the start of the input file). | |
1763 | */ | |
1764 | if (s->strategy != Z_HUFFMAN_ONLY) { | |
1765 | s->match_length = longest_match (s, hash_head); | |
1766 | } | |
1767 | /* longest_match() sets match_start */ | |
1768 | ||
1769 | if (s->match_length <= 5 && (s->strategy == Z_FILTERED || | |
1770 | (s->match_length == MIN_MATCH && | |
1771 | s->strstart - s->match_start > TOO_FAR))) { | |
1772 | ||
1773 | /* If prev_match is also MIN_MATCH, match_start is garbage | |
1774 | * but we will ignore the current match anyway. | |
1775 | */ | |
1776 | s->match_length = MIN_MATCH-1; | |
1777 | } | |
1778 | } | |
1779 | /* If there was a match at the previous step and the current | |
1780 | * match is not better, output the previous match: | |
1781 | */ | |
1782 | if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) { | |
1783 | uInt max_insert = s->strstart + s->lookahead - MIN_MATCH; | |
1784 | /* Do not insert strings in hash table beyond this. */ | |
1785 | ||
1786 | check_match(s, s->strstart-1, s->prev_match, s->prev_length); | |
1787 | ||
1788 | bflush = _tr_tally(s, s->strstart -1 - s->prev_match, | |
1789 | s->prev_length - MIN_MATCH); | |
1790 | ||
1791 | /* Insert in hash table all strings up to the end of the match. | |
1792 | * strstart-1 and strstart are already inserted. If there is not | |
1793 | * enough lookahead, the last two strings are not inserted in | |
1794 | * the hash table. | |
1795 | */ | |
1796 | s->lookahead -= s->prev_length-1; | |
1797 | s->prev_length -= 2; | |
1798 | do { | |
1799 | if (++s->strstart <= max_insert) { | |
1800 | INSERT_STRING(s, s->strstart, hash_head); | |
1801 | } | |
1802 | } while (--s->prev_length != 0); | |
1803 | s->match_available = 0; | |
1804 | s->match_length = MIN_MATCH-1; | |
1805 | s->strstart++; | |
1806 | ||
1807 | if (bflush) FLUSH_BLOCK(s, 0); | |
1808 | ||
1809 | } else if (s->match_available) { | |
1810 | /* If there was no match at the previous position, output a | |
1811 | * single literal. If there was a match but the current match | |
1812 | * is longer, truncate the previous match to a single literal. | |
1813 | */ | |
1814 | Tracevv((stderr,"%c", s->window[s->strstart-1])); | |
1815 | if (_tr_tally (s, 0, s->window[s->strstart-1])) { | |
1816 | FLUSH_BLOCK_ONLY(s, 0); | |
1817 | } | |
1818 | s->strstart++; | |
1819 | s->lookahead--; | |
1820 | if (s->strm->avail_out == 0) return need_more; | |
1821 | } else { | |
1822 | /* There is no previous match to compare with, wait for | |
1823 | * the next step to decide. | |
1824 | */ | |
1825 | s->match_available = 1; | |
1826 | s->strstart++; | |
1827 | s->lookahead--; | |
1828 | } | |
1829 | } | |
1830 | Assert (flush != Z_NO_FLUSH, "no flush?"); | |
1831 | if (s->match_available) { | |
1832 | Tracevv((stderr,"%c", s->window[s->strstart-1])); | |
1833 | _tr_tally (s, 0, s->window[s->strstart-1]); | |
1834 | s->match_available = 0; | |
1835 | } | |
1836 | FLUSH_BLOCK(s, flush == Z_FINISH); | |
1837 | return flush == Z_FINISH ? finish_done : block_done; | |
1838 | } | |
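
/* The lazy policy above, reduced to its decision rule: remember the match
 * found at the previous byte (prev); if the match starting at the current
 * byte is no longer, emit the previous match, otherwise emit one literal
 * for the previous byte and defer the decision again.  A toy driver over
 * precomputed match lengths (mlen[i] = best match length starting at i;
 * all names illustrative -- the real code also truncates and re-filters):
 */
#if 0  /* illustrative sketch -- compile separately */
#include <stdio.h>

static void lazy_demo(const int *mlen, int n)
{
    int i, prev = 0, avail = 0;   /* avail: byte i-1 still undecided */
    for (i = 0; i < n; i++) {
        int cur = mlen[i];
        if (prev >= 3 && cur <= prev) {       /* MIN_MATCH == 3 */
            printf("match at %d, len %d\n", i - 1, prev);
            i += prev - 2;                    /* resume after the match */
            avail = 0; prev = 0;
            continue;
        }
        if (avail) printf("literal at %d\n", i - 1);
        avail = 1; prev = cur;
    }
    if (avail) printf("literal at %d\n", i - 1);
}
#endif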
1839 | /* --- deflate.c */ | |
1840 | ||
1841 | /* +++ trees.c */ | |
1842 | /* trees.c -- output deflated data using Huffman coding | |
1843 | * Copyright (C) 1995-1996 Jean-loup Gailly | |
1844 | * For conditions of distribution and use, see copyright notice in zlib.h | |
1845 | */ | |
1846 | ||
1847 | /* | |
1848 | * ALGORITHM | |
1849 | * | |
1850 | * The "deflation" process uses several Huffman trees. The more | |
1851 | * common source values are represented by shorter bit sequences. | |
1852 | * | |
1853 | * Each code tree is stored in a compressed form which is itself | |
1854 | * a Huffman encoding of the lengths of all the code strings (in | |
1855 | * ascending order by source values). The actual code strings are | |
1856 | * reconstructed from the lengths in the inflate process, as described | |
1857 | * in the deflate specification. | |
1858 | * | |
1859 | * REFERENCES | |
1860 | * | |
1861 | * Deutsch, L.P.,"'Deflate' Compressed Data Format Specification". | |
1862 | * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc | |
1863 | * | |
1864 | * Storer, James A. | |
1865 | * Data Compression: Methods and Theory, pp. 49-50. | |
1866 | * Computer Science Press, 1988. ISBN 0-7167-8156-5. | |
1867 | * | |
1868 | * Sedgewick, R. | |
1869 | * Algorithms, p290. | |
1870 | * Addison-Wesley, 1983. ISBN 0-201-06672-6. | |
1871 | */ | |
1872 | ||
1873 | /* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */ | |
1874 | ||
1875 | /* #include "deflate.h" */ | |
1876 | ||
1877 | #ifdef DEBUG_ZLIB | |
1878 | # include <ctype.h> | |
1879 | #endif | |
1880 | ||
1881 | /* =========================================================================== | |
1882 | * Constants | |
1883 | */ | |
1884 | ||
1885 | #define MAX_BL_BITS 7 | |
1886 | /* Bit length codes must not exceed MAX_BL_BITS bits */ | |
1887 | ||
1888 | #define END_BLOCK 256 | |
1889 | /* end of block literal code */ | |
1890 | ||
1891 | #define REP_3_6 16 | |
1892 | /* repeat previous bit length 3-6 times (2 bits of repeat count) */ | |
1893 | ||
1894 | #define REPZ_3_10 17 | |
1895 | /* repeat a zero length 3-10 times (3 bits of repeat count) */ | |
1896 | ||
1897 | #define REPZ_11_138 18 | |
1898 | /* repeat a zero length 11-138 times (7 bits of repeat count) */ | |
1899 | ||
1900 | local int extra_lbits[LENGTH_CODES] /* extra bits for each length code */ | |
1901 | = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0}; | |
1902 | ||
1903 | local int extra_dbits[D_CODES] /* extra bits for each distance code */ | |
1904 | = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; | |
1905 | ||
1906 | local int extra_blbits[BL_CODES]/* extra bits for each bit length code */ | |
1907 | = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7}; | |
1908 | ||
1909 | local uch bl_order[BL_CODES] | |
1910 | = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15}; | |
1911 | /* The lengths of the bit length codes are sent in order of decreasing | |
1912 | * probability, to avoid transmitting the lengths for unused bit length codes. | |
1913 | */ | |
1914 | ||
1915 | #define Buf_size (8 * 2*sizeof(char)) | |
1916 | /* Number of bits used within bi_buf. (bi_buf might be implemented on | |
1917 | * more than 16 bits on some systems.) | |
1918 | */ | |
1919 | ||
1920 | /* =========================================================================== | |
1921 | * Local data. These are initialized only once. | |
1922 | */ | |
1923 | ||
1924 | local ct_data static_ltree[L_CODES+2]; | |
1925 | /* The static literal tree. Since the bit lengths are imposed, there is no | |
1926 | * need for the L_CODES extra codes used during heap construction. However, | |
1927 | * the codes 286 and 287 are needed to build a canonical tree (see _tr_init | |
1928 | * below). | |
1929 | */ | |
1930 | ||
1931 | local ct_data static_dtree[D_CODES]; | |
1932 | /* The static distance tree. (Actually a trivial tree since all codes use | |
1933 | * 5 bits.) | |
1934 | */ | |
1935 | ||
1936 | local uch dist_code[512]; | |
1937 | /* distance codes. The first 256 values correspond to the distances | |
1938 | * 3 .. 258, the last 256 values correspond to the top 8 bits of | |
1939 | * the 15 bit distances. | |
1940 | */ | |
1941 | ||
1942 | local uch length_code[MAX_MATCH-MIN_MATCH+1]; | |
1943 | /* length code for each normalized match length (0 == MIN_MATCH) */ | |
1944 | ||
1945 | local int base_length[LENGTH_CODES]; | |
1946 | /* First normalized length for each code (0 = MIN_MATCH) */ | |
1947 | ||
1948 | local int base_dist[D_CODES]; | |
1949 | /* First normalized distance for each code (0 = distance of 1) */ | |
1950 | ||
1951 | struct static_tree_desc_s { | |
1952 | ct_data *static_tree; /* static tree or NULL */ | |
1953 | intf *extra_bits; /* extra bits for each code or NULL */ | |
1954 | int extra_base; /* base index for extra_bits */ | |
1955 | int elems; /* max number of elements in the tree */ | |
1956 | int max_length; /* max bit length for the codes */ | |
1957 | }; | |
1958 | ||
1959 | local static_tree_desc static_l_desc = | |
1960 | {static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS}; | |
1961 | ||
1962 | local static_tree_desc static_d_desc = | |
1963 | {static_dtree, extra_dbits, 0, D_CODES, MAX_BITS}; | |
1964 | ||
1965 | local static_tree_desc static_bl_desc = | |
1966 | {(ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS}; | |
1967 | ||
1968 | /* =========================================================================== | |
1969 | * Local (static) routines in this file. | |
1970 | */ | |
1971 | ||
1972 | local void tr_static_init OF((void)); | |
1973 | local void init_block OF((deflate_state *s)); | |
1974 | local void pqdownheap OF((deflate_state *s, ct_data *tree, int k)); | |
1975 | local void gen_bitlen OF((deflate_state *s, tree_desc *desc)); | |
1976 | local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count)); | |
1977 | local void build_tree OF((deflate_state *s, tree_desc *desc)); | |
1978 | local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code)); | |
1979 | local void send_tree OF((deflate_state *s, ct_data *tree, int max_code)); | |
1980 | local int build_bl_tree OF((deflate_state *s)); | |
1981 | local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes, | |
1982 | int blcodes)); | |
1983 | local void compress_block OF((deflate_state *s, ct_data *ltree, | |
1984 | ct_data *dtree)); | |
1985 | local void set_data_type OF((deflate_state *s)); | |
1986 | local unsigned bi_reverse OF((unsigned value, int length)); | |
1987 | local void bi_windup OF((deflate_state *s)); | |
1988 | local void bi_flush OF((deflate_state *s)); | |
1989 | local void copy_block OF((deflate_state *s, charf *buf, unsigned len, | |
1990 | int header)); | |
1991 | ||
1992 | #ifndef DEBUG_ZLIB | |
1993 | # define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len) | |
1994 | /* Send a code of the given tree. c and tree must not have side effects */ | |
1995 | ||
1996 | #else /* DEBUG_ZLIB */ | |
1997 | # define send_code(s, c, tree) \ | |
1998 | { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \ | |
1999 | send_bits(s, tree[c].Code, tree[c].Len); } | |
2000 | #endif | |
2001 | ||
2002 | #define d_code(dist) \ | |
2003 | ((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)]) | |
2004 | /* Mapping from a distance to a distance code. dist is the distance - 1 and | |
2005 | * must not have side effects. dist_code[256] and dist_code[257] are never | |
2006 | * used. | |
2007 | */ | |
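
/* The same mapping computed arithmetically rather than by table: distance
 * code c covers (1 << extra_dbits[c]) consecutive distances starting at
 * base_dist[c], so walking the ranges recovers the code.  Sketch
 * (dist_to_code is an illustrative name; dist is "distance - 1" exactly
 * as in d_code above):
 */
#if 0  /* illustrative sketch -- compile separately */
static int dist_to_code(unsigned dist)
{
    static const int xbits[30] =
        {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
    unsigned base = 0;
    int code;
    for (code = 0; code < 30; code++) {
        if (dist < base + (1u << xbits[code])) return code;
        base += 1u << xbits[code];
    }
    return -1;   /* real distance > 32768: out of range */
}
/* e.g. dist_to_code(0) == 0, dist_to_code(4) == 4,
 *      dist_to_code(32767) == 29, agreeing with d_code(). */
#endif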
2008 | ||
2009 | /* =========================================================================== | |
2010 | * Output a short LSB first on the stream. | |
2011 | * IN assertion: there is enough room in pendingBuf. | |
2012 | */ | |
2013 | #define put_short(s, w) { \ | |
2014 | put_byte(s, (uch)((w) & 0xff)); \ | |
2015 | put_byte(s, (uch)((ush)(w) >> 8)); \ | |
2016 | } | |
2017 | ||
2018 | /* =========================================================================== | |
2019 | * Send a value on a given number of bits. | |
2020 | * IN assertion: length <= 16 and value fits in length bits. | |
2021 | */ | |
2022 | #ifdef DEBUG_ZLIB | |
2023 | local void send_bits OF((deflate_state *s, int value, int length)); | |
2024 | ||
2025 | local void send_bits(s, value, length) | |
2026 | deflate_state *s; | |
2027 | int value; /* value to send */ | |
2028 | int length; /* number of bits */ | |
2029 | { | |
2030 | Tracevv((stderr," l %2d v %4x ", length, value)); | |
2031 | Assert(length > 0 && length <= 15, "invalid length"); | |
2032 | s->bits_sent += (ulg)length; | |
2033 | ||
2034 | /* If not enough room in bi_buf, use (valid) bits from bi_buf and | |
2035 | * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid)) | |
2036 | * unused bits in value. | |
2037 | */ | |
2038 | if (s->bi_valid > (int)Buf_size - length) { | |
2039 | s->bi_buf |= (value << s->bi_valid); | |
2040 | put_short(s, s->bi_buf); | |
2041 | s->bi_buf = (ush)value >> (Buf_size - s->bi_valid); | |
2042 | s->bi_valid += length - Buf_size; | |
2043 | } else { | |
2044 | s->bi_buf |= value << s->bi_valid; | |
2045 | s->bi_valid += length; | |
2046 | } | |
2047 | } | |
2048 | #else /* !DEBUG_ZLIB */ | |
2049 | ||
2050 | #define send_bits(s, value, length) \ | |
2051 | { int len = length;\ | |
2052 | if (s->bi_valid > (int)Buf_size - len) {\ | |
2053 | int val = value;\ | |
2054 | s->bi_buf |= (val << s->bi_valid);\ | |
2055 | put_short(s, s->bi_buf);\ | |
2056 | s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\ | |
2057 | s->bi_valid += len - Buf_size;\ | |
2058 | } else {\ | |
2059 | s->bi_buf |= (value) << s->bi_valid;\ | |
2060 | s->bi_valid += len;\ | |
2061 | }\ | |
2062 | } | |
2063 | #endif /* DEBUG_ZLIB */ | |
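
/* put_short() and send_bits() together form an LSB-first bit writer: new
 * bits enter bi_buf at offset bi_valid, and a full 16 bits are flushed as
 * a little-endian short.  The same mechanics with a 32-bit accumulator,
 * which avoids the two-piece split needed above for a 16-bit bi_buf
 * (sketch; bitwriter names are illustrative):
 */
#if 0  /* illustrative sketch -- compile separately */
#include <stdio.h>

typedef struct { unsigned buf; int valid; } bitwriter;  /* init to {0, 0} */

static void bw_send(bitwriter *b, unsigned value, int length)
{
    b->buf |= value << b->valid;   /* value must fit in length <= 16 bits */
    b->valid += length;
    while (b->valid >= 16) {       /* flush one LSB-first short */
        putchar(b->buf & 0xff);
        putchar((b->buf >> 8) & 0xff);
        b->buf >>= 16;
        b->valid -= 16;
    }
}
/* A final partial byte must be flushed (zero-padded) at end of stream,
 * as bi_windup() does in this file. */
#endif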
2064 | ||
2065 | ||
2066 | #define MAX(a,b) ((a) >= (b) ? (a) : (b)) | |
2067 | /* the arguments must not have side effects */ | |
2068 | ||
2069 | /* =========================================================================== | |
2070 | * Initialize the various 'constant' tables. In a multi-threaded environment, | |
2071 | * this function may be called by two threads concurrently, but this is | |
2072 | * harmless since both invocations do exactly the same thing. | |
2073 | */ | |
2074 | local void tr_static_init() | |
2075 | { | |
2076 | static int static_init_done = 0; | |
2077 | int n; /* iterates over tree elements */ | |
2078 | int bits; /* bit counter */ | |
2079 | int length; /* length value */ | |
2080 | int code; /* code value */ | |
2081 | int dist; /* distance index */ | |
2082 | ush bl_count[MAX_BITS+1]; | |
2083 | /* number of codes at each bit length for an optimal tree */ | |
2084 | ||
2085 | if (static_init_done) return; | |
2086 | ||
2087 | /* Initialize the mapping length (0..255) -> length code (0..28) */ | |
2088 | length = 0; | |
2089 | for (code = 0; code < LENGTH_CODES-1; code++) { | |
2090 | base_length[code] = length; | |
2091 | for (n = 0; n < (1<<extra_lbits[code]); n++) { | |
2092 | length_code[length++] = (uch)code; | |
2093 | } | |
2094 | } | |
2095 | Assert (length == 256, "tr_static_init: length != 256"); | |
2096 | /* Note that the length 255 (match length 258) can be represented | |
2097 | * in two different ways: code 284 + 5 bits or code 285, so we | |
2098 | * overwrite length_code[255] to use the best encoding: | |
2099 | */ | |
2100 | length_code[length-1] = (uch)code; | |
2101 | ||
2102 | /* Initialize the mapping dist (0..32K) -> dist code (0..29) */ | |
2103 | dist = 0; | |
2104 | for (code = 0 ; code < 16; code++) { | |
2105 | base_dist[code] = dist; | |
2106 | for (n = 0; n < (1<<extra_dbits[code]); n++) { | |
2107 | dist_code[dist++] = (uch)code; | |
2108 | } | |
2109 | } | |
2110 | Assert (dist == 256, "tr_static_init: dist != 256"); | |
2111 | dist >>= 7; /* from now on, all distances are divided by 128 */ | |
2112 | for ( ; code < D_CODES; code++) { | |
2113 | base_dist[code] = dist << 7; | |
2114 | for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) { | |
2115 | dist_code[256 + dist++] = (uch)code; | |
2116 | } | |
2117 | } | |
2118 | Assert (dist == 256, "tr_static_init: 256+dist != 512"); | |
2119 | ||
2120 | /* Construct the codes of the static literal tree */ | |
2121 | for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0; | |
2122 | n = 0; | |
2123 | while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++; | |
2124 | while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++; | |
2125 | while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++; | |
2126 | while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++; | |
2127 | /* Codes 286 and 287 do not exist, but we must include them in the | |
2128 | * tree construction to get a canonical Huffman tree (longest code | |
2129 | * all ones) | |
2130 | */ | |
2131 | gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count); | |
2132 | ||
2133 | /* The static distance tree is trivial: */ | |
2134 | for (n = 0; n < D_CODES; n++) { | |
2135 | static_dtree[n].Len = 5; | |
2136 | static_dtree[n].Code = bi_reverse((unsigned)n, 5); | |
2137 | } | |
2138 | static_init_done = 1; | |
2139 | } | |
2140 | ||
2141 | /* =========================================================================== | |
2142 | * Initialize the tree data structures for a new zlib stream. | |
2143 | */ | |
2144 | void _tr_init(s) | |
2145 | deflate_state *s; | |
2146 | { | |
2147 | tr_static_init(); | |
2148 | ||
2149 | s->compressed_len = 0L; | |
2150 | ||
2151 | s->l_desc.dyn_tree = s->dyn_ltree; | |
2152 | s->l_desc.stat_desc = &static_l_desc; | |
2153 | ||
2154 | s->d_desc.dyn_tree = s->dyn_dtree; | |
2155 | s->d_desc.stat_desc = &static_d_desc; | |
2156 | ||
2157 | s->bl_desc.dyn_tree = s->bl_tree; | |
2158 | s->bl_desc.stat_desc = &static_bl_desc; | |
2159 | ||
2160 | s->bi_buf = 0; | |
2161 | s->bi_valid = 0; | |
2162 | s->last_eob_len = 8; /* enough lookahead for inflate */ | |
2163 | #ifdef DEBUG_ZLIB | |
2164 | s->bits_sent = 0L; | |
2165 | #endif | |
2166 | ||
2167 | /* Initialize the first block of the first file: */ | |
2168 | init_block(s); | |
2169 | } | |
2170 | ||
2171 | /* =========================================================================== | |
2172 | * Initialize a new block. | |
2173 | */ | |
2174 | local void init_block(s) | |
2175 | deflate_state *s; | |
2176 | { | |
2177 | int n; /* iterates over tree elements */ | |
2178 | ||
2179 | /* Initialize the trees. */ | |
2180 | for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0; | |
2181 | for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0; | |
2182 | for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0; | |
2183 | ||
2184 | s->dyn_ltree[END_BLOCK].Freq = 1; | |
2185 | s->opt_len = s->static_len = 0L; | |
2186 | s->last_lit = s->matches = 0; | |
2187 | } | |
2188 | ||
2189 | #define SMALLEST 1 | |
2190 | /* Index within the heap array of least frequent node in the Huffman tree */ | |
2191 | ||
2192 | ||
2193 | /* =========================================================================== | |
2194 | * Remove the smallest element from the heap and recreate the heap with | |
2195 | * one less element. Updates heap and heap_len. | |
2196 | */ | |
2197 | #define pqremove(s, tree, top) \ | |
2198 | {\ | |
2199 | top = s->heap[SMALLEST]; \ | |
2200 | s->heap[SMALLEST] = s->heap[s->heap_len--]; \ | |
2201 | pqdownheap(s, tree, SMALLEST); \ | |
2202 | } | |
2203 | ||
2204 | /* =========================================================================== | |
2205 | * Compares two subtrees, using the tree depth as tie breaker when | |
2206 | * the subtrees have equal frequency. This minimizes the worst case length. | |
2207 | */ | |
2208 | #define smaller(tree, n, m, depth) \ | |
2209 | (tree[n].Freq < tree[m].Freq || \ | |
2210 | (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m])) | |
2211 | ||
2212 | /* =========================================================================== | |
2213 | * Restore the heap property by moving down the tree starting at node k, | |
2214 | * exchanging a node with the smallest of its two sons if necessary, stopping | |
2215 | * when the heap property is re-established (each father smaller than its | |
2216 | * two sons). | |
2217 | */ | |
2218 | local void pqdownheap(s, tree, k) | |
2219 | deflate_state *s; | |
2220 | ct_data *tree; /* the tree to restore */ | |
2221 | int k; /* node to move down */ | |
2222 | { | |
2223 | int v = s->heap[k]; | |
2224 | int j = k << 1; /* left son of k */ | |
2225 | while (j <= s->heap_len) { | |
2226 | /* Set j to the smallest of the two sons: */ | |
2227 | if (j < s->heap_len && | |
2228 | smaller(tree, s->heap[j+1], s->heap[j], s->depth)) { | |
2229 | j++; | |
2230 | } | |
2231 | /* Exit if v is smaller than both sons */ | |
2232 | if (smaller(tree, v, s->heap[j], s->depth)) break; | |
2233 | ||
2234 | /* Exchange v with the smallest son */ | |
2235 | s->heap[k] = s->heap[j]; k = j; | |
2236 | ||
2237 | /* And continue down the tree, setting j to the left son of k */ | |
2238 | j <<= 1; | |
2239 | } | |
2240 | s->heap[k] = v; | |
2241 | } | |
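
/* pqdownheap() is an ordinary binary-heap sift-down, specialized only in
 * its comparison (frequency first, node depth as tie-breaker).  The same
 * move on a plain int min-heap stored from index 1 (sketch):
 */
#if 0  /* illustrative sketch -- compile separately */
static void siftdown(int *heap, int len, int k)
{
    int v = heap[k];
    int j = k << 1;                                  /* left son of k */
    while (j <= len) {
        if (j < len && heap[j + 1] < heap[j]) j++;   /* smaller son */
        if (v <= heap[j]) break;                     /* property holds */
        heap[k] = heap[j]; k = j;                    /* pull son up */
        j <<= 1;
    }
    heap[k] = v;
}
#endif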
2242 | ||
2243 | /* =========================================================================== | |
2244 | * Compute the optimal bit lengths for a tree and update the total bit length | |
2245 | * for the current block. | |
2246 | * IN assertion: the fields freq and dad are set, heap[heap_max] and | |
2247 | * above are the tree nodes sorted by increasing frequency. | |
2248 | * OUT assertions: the field len is set to the optimal bit length, the | |
2249 | * array bl_count contains the frequencies for each bit length. | |
2250 | * The length opt_len is updated; static_len is also updated if stree is | |
2251 | * not null. | |
2252 | */ | |
2253 | local void gen_bitlen(s, desc) | |
2254 | deflate_state *s; | |
2255 | tree_desc *desc; /* the tree descriptor */ | |
2256 | { | |
2257 | ct_data *tree = desc->dyn_tree; | |
2258 | int max_code = desc->max_code; | |
2259 | ct_data *stree = desc->stat_desc->static_tree; | |
2260 | intf *extra = desc->stat_desc->extra_bits; | |
2261 | int base = desc->stat_desc->extra_base; | |
2262 | int max_length = desc->stat_desc->max_length; | |
2263 | int h; /* heap index */ | |
2264 | int n, m; /* iterate over the tree elements */ | |
2265 | int bits; /* bit length */ | |
2266 | int xbits; /* extra bits */ | |
2267 | ush f; /* frequency */ | |
2268 | int overflow = 0; /* number of elements with bit length too large */ | |
2269 | ||
2270 | for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0; | |
2271 | ||
2272 | /* In a first pass, compute the optimal bit lengths (which may | |
2273 | * overflow in the case of the bit length tree). | |
2274 | */ | |
2275 | tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */ | |
2276 | ||
2277 | for (h = s->heap_max+1; h < HEAP_SIZE; h++) { | |
2278 | n = s->heap[h]; | |
2279 | bits = tree[tree[n].Dad].Len + 1; | |
2280 | if (bits > max_length) bits = max_length, overflow++; | |
2281 | tree[n].Len = (ush)bits; | |
2282 | /* We overwrite tree[n].Dad which is no longer needed */ | |
2283 | ||
2284 | if (n > max_code) continue; /* not a leaf node */ | |
2285 | ||
2286 | s->bl_count[bits]++; | |
2287 | xbits = 0; | |
2288 | if (n >= base) xbits = extra[n-base]; | |
2289 | f = tree[n].Freq; | |
2290 | s->opt_len += (ulg)f * (bits + xbits); | |
2291 | if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits); | |
2292 | } | |
2293 | if (overflow == 0) return; | |
2294 | ||
2295 | Trace((stderr,"\nbit length overflow\n")); | |
2296 | /* This happens for example on obj2 and pic of the Calgary corpus */ | |
2297 | ||
2298 | /* Find the first bit length which could increase: */ | |
2299 | do { | |
2300 | bits = max_length-1; | |
2301 | while (s->bl_count[bits] == 0) bits--; | |
2302 | s->bl_count[bits]--; /* move one leaf down the tree */ | |
2303 | s->bl_count[bits+1] += 2; /* move one overflow item as its brother */ | |
2304 | s->bl_count[max_length]--; | |
2305 | /* The brother of the overflow item also moves one step up, | |
2306 | * but this does not affect bl_count[max_length] | |
2307 | */ | |
2308 | overflow -= 2; | |
2309 | } while (overflow > 0); | |
2310 | ||
2311 | /* Now recompute all bit lengths, scanning in increasing frequency. | |
2312 | * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all | |
2313 | * lengths instead of fixing only the wrong ones. This idea is taken | |
2314 | * from 'ar' written by Haruhiko Okumura.) | |
2315 | */ | |
2316 | for (bits = max_length; bits != 0; bits--) { | |
2317 | n = s->bl_count[bits]; | |
2318 | while (n != 0) { | |
2319 | m = s->heap[--h]; | |
2320 | if (m > max_code) continue; | |
2321 | if (tree[m].Len != (unsigned) bits) { | |
2322 | Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); | |
2323 | s->opt_len += ((long)bits - (long)tree[m].Len) | |
2324 | *(long)tree[m].Freq; | |
2325 | tree[m].Len = (ush)bits; | |
2326 | } | |
2327 | n--; | |
2328 | } | |
2329 | } | |
2330 | } | |
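
/* The overflow repair above, isolated: after clamping code lengths to
 * max_length the length histogram violates the Kraft equality, and each
 * pass fixes two over-long codes by demoting one leaf from the deepest
 * non-empty shorter level and pairing it with an overflow item, exactly
 * as in the loop above.  Standalone sketch over a bl_count[] histogram
 * (fix_overflow is an illustrative name):
 */
#if 0  /* illustrative sketch -- compile separately */
static void fix_overflow(unsigned short *bl_count, int max_length,
                         int overflow)
{
    while (overflow > 0) {
        int bits = max_length - 1;
        while (bl_count[bits] == 0) bits--;
        bl_count[bits]--;          /* move one leaf down the tree */
        bl_count[bits + 1] += 2;   /* it and one overflow item pair up */
        bl_count[max_length]--;
        overflow -= 2;
    }
}
#endif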
2331 | ||
2332 | /* =========================================================================== | |
2333 | * Generate the codes for a given tree and bit counts (which need not be | |
2334 | * optimal). | |
2335 | * IN assertion: the array bl_count contains the bit length statistics for | |
2336 | * the given tree and the field len is set for all tree elements. | |
2337 | * OUT assertion: the field code is set for all tree elements of non | |
2338 | * zero code length. | |
2339 | */ | |
2340 | local void gen_codes (tree, max_code, bl_count) | |
2341 | ct_data *tree; /* the tree to decorate */ | |
2342 | int max_code; /* largest code with non zero frequency */ | |
2343 | ushf *bl_count; /* number of codes at each bit length */ | |
2344 | { | |
2345 | ush next_code[MAX_BITS+1]; /* next code value for each bit length */ | |
2346 | ush code = 0; /* running code value */ | |
2347 | int bits; /* bit index */ | |
2348 | int n; /* code index */ | |
2349 | ||
2350 | /* The distribution counts are first used to generate the code values | |
2351 | * without bit reversal. | |
2352 | */ | |
2353 | for (bits = 1; bits <= MAX_BITS; bits++) { | |
2354 | next_code[bits] = code = (code + bl_count[bits-1]) << 1; | |
2355 | } | |
2356 | /* Check that the bit counts in bl_count are consistent. The last code | |
2357 | * must be all ones. | |
2358 | */ | |
2359 | Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1, | |
2360 | "inconsistent bit counts"); | |
2361 | Tracev((stderr,"\ngen_codes: max_code %d ", max_code)); | |
2362 | ||
2363 | for (n = 0; n <= max_code; n++) { | |
2364 | int len = tree[n].Len; | |
2365 | if (len == 0) continue; | |
2366 | /* Now reverse the bits */ | |
2367 | tree[n].Code = bi_reverse(next_code[len]++, len); | |
2368 | ||
2369 | Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ", | |
2370 | n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1)); | |
2371 | } | |
2372 | } | |
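
/* gen_codes() is the canonical-Huffman construction of RFC 1951: codes of
 * a given length are consecutive integers, starting where the previous
 * length left off, doubled.  Worked standalone -- lengths {2,1,3,3} yield
 * codes 10, 0, 110, 111 before the bit reversal (names illustrative):
 */
#if 0  /* illustrative sketch -- compile separately */
#define SK_MAX_BITS 15

static void canonical_codes(const int *len, int n, unsigned *code)
{
    unsigned bl_count[SK_MAX_BITS + 1] = {0};
    unsigned next_code[SK_MAX_BITS + 1];
    unsigned c = 0;
    int bits, i;

    for (i = 0; i < n; i++) bl_count[len[i]]++;
    bl_count[0] = 0;
    for (bits = 1; bits <= SK_MAX_BITS; bits++)
        next_code[bits] = c = (c + bl_count[bits - 1]) << 1;
    for (i = 0; i < n; i++)
        if (len[i] != 0) code[i] = next_code[len[i]]++;
}
/* zlib additionally stores each code bit-reversed (bi_reverse), because
 * send_bits() emits the low-order bit first. */
#endif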
2373 | ||
2374 | /* =========================================================================== | |
2375 | * Construct one Huffman tree and assign the code bit strings and lengths. | |
2376 | * Update the total bit length for the current block. | |
2377 | * IN assertion: the field freq is set for all tree elements. | |
2378 | * OUT assertions: the fields len and code are set to the optimal bit length | |
2379 | * and corresponding code. The length opt_len is updated; static_len is | |
2380 | * also updated if stree is not null. The field max_code is set. | |
2381 | */ | |
2382 | local void build_tree(s, desc) | |
2383 | deflate_state *s; | |
2384 | tree_desc *desc; /* the tree descriptor */ | |
2385 | { | |
2386 | ct_data *tree = desc->dyn_tree; | |
2387 | ct_data *stree = desc->stat_desc->static_tree; | |
2388 | int elems = desc->stat_desc->elems; | |
2389 | int n, m; /* iterate over heap elements */ | |
2390 | int max_code = -1; /* largest code with non zero frequency */ | |
2391 | int node; /* new node being created */ | |
2392 | ||
2393 | /* Construct the initial heap, with least frequent element in | |
2394 | * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. | |
2395 | * heap[0] is not used. | |
2396 | */ | |
2397 | s->heap_len = 0, s->heap_max = HEAP_SIZE; | |
2398 | ||
2399 | for (n = 0; n < elems; n++) { | |
2400 | if (tree[n].Freq != 0) { | |
2401 | s->heap[++(s->heap_len)] = max_code = n; | |
2402 | s->depth[n] = 0; | |
2403 | } else { | |
2404 | tree[n].Len = 0; | |
2405 | } | |
2406 | } | |
2407 | ||
2408 | /* The pkzip format requires that at least one distance code exists, | |
2409 | * and that at least one bit should be sent even if there is only one | |
2410 | * possible code. So to avoid special checks later on we force at least | |
2411 | * two codes of non zero frequency. | |
2412 | */ | |
2413 | while (s->heap_len < 2) { | |
2414 | node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0); | |
2415 | tree[node].Freq = 1; | |
2416 | s->depth[node] = 0; | |
2417 | s->opt_len--; if (stree) s->static_len -= stree[node].Len; | |
2418 | /* node is 0 or 1 so it does not have extra bits */ | |
2419 | } | |
2420 | desc->max_code = max_code; | |
2421 | ||
2422 | /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree; | |
2423 | * establish sub-heaps of increasing lengths: | |
2424 | */ | |
2425 | for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n); | |
2426 | ||
2427 | /* Construct the Huffman tree by repeatedly combining the least two | |
2428 | * frequent nodes. | |
2429 | */ | |
2430 | node = elems; /* next internal node of the tree */ | |
2431 | do { | |
2432 | pqremove(s, tree, n); /* n = node of least frequency */ | |
2433 | m = s->heap[SMALLEST]; /* m = node of next least frequency */ | |
2434 | ||
2435 | s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */ | |
2436 | s->heap[--(s->heap_max)] = m; | |
2437 | ||
2438 | /* Create a new node father of n and m */ | |
2439 | tree[node].Freq = tree[n].Freq + tree[m].Freq; | |
2440 | s->depth[node] = (uch) (MAX(s->depth[n], s->depth[m]) + 1); | |
2441 | tree[n].Dad = tree[m].Dad = (ush)node; | |
2442 | #ifdef DUMP_BL_TREE | |
2443 | if (tree == s->bl_tree) { | |
2444 | fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)", | |
2445 | node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq); | |
2446 | } | |
2447 | #endif | |
2448 | /* and insert the new node in the heap */ | |
2449 | s->heap[SMALLEST] = node++; | |
2450 | pqdownheap(s, tree, SMALLEST); | |
2451 | ||
2452 | } while (s->heap_len >= 2); | |
2453 | ||
2454 | s->heap[--(s->heap_max)] = s->heap[SMALLEST]; | |
2455 | ||
2456 | /* At this point, the fields freq and dad are set. We can now | |
2457 | * generate the bit lengths. | |
2458 | */ | |
2459 | gen_bitlen(s, (tree_desc *)desc); | |
2460 | ||
2461 | /* The field len is now set, we can generate the bit codes */ | |
2462 | gen_codes ((ct_data *)tree, max_code, s->bl_count); | |
2463 | } | |
2464 | ||
2465 | /* =========================================================================== | |
2466 | * Scan a literal or distance tree to determine the frequencies of the codes | |
2467 | * in the bit length tree. | |
2468 | */ | |
2469 | local void scan_tree (s, tree, max_code) | |
2470 | deflate_state *s; | |
2471 | ct_data *tree; /* the tree to be scanned */ | |
2472 | int max_code; /* and its largest code of non zero frequency */ | |
2473 | { | |
2474 | int n; /* iterates over all tree elements */ | |
2475 | int prevlen = -1; /* last emitted length */ | |
2476 | int curlen; /* length of current code */ | |
2477 | int nextlen = tree[0].Len; /* length of next code */ | |
2478 | int count = 0; /* repeat count of the current code */ | |
2479 | int max_count = 7; /* max repeat count */ | |
2480 | int min_count = 4; /* min repeat count */ | |
2481 | ||
2482 | if (nextlen == 0) max_count = 138, min_count = 3; | |
2483 | tree[max_code+1].Len = (ush)0xffff; /* guard */ | |
2484 | ||
2485 | for (n = 0; n <= max_code; n++) { | |
2486 | curlen = nextlen; nextlen = tree[n+1].Len; | |
2487 | if (++count < max_count && curlen == nextlen) { | |
2488 | continue; | |
2489 | } else if (count < min_count) { | |
2490 | s->bl_tree[curlen].Freq += count; | |
2491 | } else if (curlen != 0) { | |
2492 | if (curlen != prevlen) s->bl_tree[curlen].Freq++; | |
2493 | s->bl_tree[REP_3_6].Freq++; | |
2494 | } else if (count <= 10) { | |
2495 | s->bl_tree[REPZ_3_10].Freq++; | |
2496 | } else { | |
2497 | s->bl_tree[REPZ_11_138].Freq++; | |
2498 | } | |
2499 | count = 0; prevlen = curlen; | |
2500 | if (nextlen == 0) { | |
2501 | max_count = 138, min_count = 3; | |
2502 | } else if (curlen == nextlen) { | |
2503 | max_count = 6, min_count = 3; | |
2504 | } else { | |
2505 | max_count = 7, min_count = 4; | |
2506 | } | |
2507 | } | |
2508 | } | |
2509 | ||
2510 | /* =========================================================================== | |
2511 | * Send a literal or distance tree in compressed form, using the codes in | |
2512 | * bl_tree. | |
2513 | */ | |
2514 | local void send_tree (s, tree, max_code) | |
2515 | deflate_state *s; | |
2516 | ct_data *tree; /* the tree to be sent */ | |
2517 | int max_code; /* and its largest code of non zero frequency */ | |
2518 | { | |
2519 | int n; /* iterates over all tree elements */ | |
2520 | int prevlen = -1; /* last emitted length */ | |
2521 | int curlen; /* length of current code */ | |
2522 | int nextlen = tree[0].Len; /* length of next code */ | |
2523 | int count = 0; /* repeat count of the current code */ | |
2524 | int max_count = 7; /* max repeat count */ | |
2525 | int min_count = 4; /* min repeat count */ | |
2526 | ||
2527 | /* tree[max_code+1].Len = -1; */ /* guard already set */ | |
2528 | if (nextlen == 0) max_count = 138, min_count = 3; | |
2529 | ||
2530 | for (n = 0; n <= max_code; n++) { | |
2531 | curlen = nextlen; nextlen = tree[n+1].Len; | |
2532 | if (++count < max_count && curlen == nextlen) { | |
2533 | continue; | |
2534 | } else if (count < min_count) { | |
2535 | do { send_code(s, curlen, s->bl_tree); } while (--count != 0); | |
2536 | ||
2537 | } else if (curlen != 0) { | |
2538 | if (curlen != prevlen) { | |
2539 | send_code(s, curlen, s->bl_tree); count--; | |
2540 | } | |
2541 | Assert(count >= 3 && count <= 6, " 3_6?"); | |
2542 | send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2); | |
2543 | ||
2544 | } else if (count <= 10) { | |
2545 | send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3); | |
2546 | ||
2547 | } else { | |
2548 | send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7); | |
2549 | } | |
2550 | count = 0; prevlen = curlen; | |
2551 | if (nextlen == 0) { | |
2552 | max_count = 138, min_count = 3; | |
2553 | } else if (curlen == nextlen) { | |
2554 | max_count = 6, min_count = 3; | |
2555 | } else { | |
2556 | max_count = 7, min_count = 4; | |
2557 | } | |
2558 | } | |
2559 | } | |
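
/* scan_tree()/send_tree() together implement the RFC 1951 run-length
 * coding of code lengths: a repeated non-zero length becomes code 16
 * (copy previous 3-6 times), runs of zeros become code 17 (3-10 zeros)
 * or 18 (11-138 zeros), and short runs are sent literally.  A simplified
 * one-pass encoder emitting (code, extra-bits) pairs (sketch; emit() is
 * an illustrative callback):
 */
#if 0  /* illustrative sketch -- compile separately */
#include <stdio.h>

static void emit(int code, int extra)
{
    if (extra >= 0) printf("%d+%d ", code, extra);
    else            printf("%d ", code);
}

static void rle_lengths(const int *len, int n)
{
    int i = 0;
    while (i < n) {
        int cur = len[i], run = 1;
        while (i + run < n && len[i + run] == cur) run++;
        i += run;
        if (cur == 0) {
            while (run >= 11) { int r = run > 138 ? 138 : run;
                                emit(18, r - 11); run -= r; }
            if (run >= 3) { emit(17, run - 3); run = 0; }
        } else {
            emit(cur, -1); run--;      /* first occurrence sent as itself */
            while (run >= 3) { int r = run > 6 ? 6 : run;
                               emit(16, r - 3); run -= r; }
        }
        while (run-- > 0) emit(cur, -1);   /* leftovers sent literally */
    }
}
#endif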
2560 | ||
2561 | /* =========================================================================== | |
2562 | * Construct the Huffman tree for the bit lengths and return the index in | |
2563 | * bl_order of the last bit length code to send. | |
2564 | */ | |
2565 | local int build_bl_tree(s) | |
2566 | deflate_state *s; | |
2567 | { | |
2568 | int max_blindex; /* index of last bit length code of non zero freq */ | |
2569 | ||
2570 | /* Determine the bit length frequencies for literal and distance trees */ | |
2571 | scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code); | |
2572 | scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code); | |
2573 | ||
2574 | /* Build the bit length tree: */ | |
2575 | build_tree(s, (tree_desc *)(&(s->bl_desc))); | |
2576 | /* opt_len now includes the length of the tree representations, except | |
2577 | * the lengths of the bit length codes and the 5+5+4 bits for the counts. | |
2578 | */ | |
2579 | ||
2580 | /* Determine the number of bit length codes to send. The pkzip format | |
2581 | * requires that at least 4 bit length codes be sent. (appnote.txt says | |
2582 | * 3 but the actual value used is 4.) | |
2583 | */ | |
2584 | for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) { | |
2585 | if (s->bl_tree[bl_order[max_blindex]].Len != 0) break; | |
2586 | } | |
2587 | /* Update opt_len to include the bit length tree and counts */ | |
2588 | s->opt_len += 3*(max_blindex+1) + 5+5+4; | |
2589 | Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", | |
2590 | s->opt_len, s->static_len)); | |
2591 | ||
2592 | return max_blindex; | |
2593 | } | |
2594 | ||
2595 | /* =========================================================================== | |
2596 | * Send the header for a block using dynamic Huffman trees: the counts, the | |
2597 | * lengths of the bit length codes, the literal tree and the distance tree. | |
2598 | * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. | |
2599 | */ | |
2600 | local void send_all_trees(s, lcodes, dcodes, blcodes) | |
2601 | deflate_state *s; | |
2602 | int lcodes, dcodes, blcodes; /* number of codes for each tree */ | |
2603 | { | |
2604 | int rank; /* index in bl_order */ | |
2605 | ||
2606 | Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); | |
2607 | Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, | |
2608 | "too many codes"); | |
2609 | Tracev((stderr, "\nbl counts: ")); | |
2610 | send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */ | |
2611 | send_bits(s, dcodes-1, 5); | |
2612 | send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */ | |
2613 | for (rank = 0; rank < blcodes; rank++) { | |
2614 | Tracev((stderr, "\nbl code %2d ", bl_order[rank])); | |
2615 | send_bits(s, s->bl_tree[bl_order[rank]].Len, 3); | |
2616 | } | |
2617 | Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); | |
2618 | ||
2619 | send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */ | |
2620 | Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); | |
2621 | ||
2622 | send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */ | |
2623 | Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); | |
2624 | } | |
2625 | ||
2626 | /* =========================================================================== | |
2627 | * Send a stored block | |
2628 | */ | |
2629 | void _tr_stored_block(s, buf, stored_len, eof) | |
2630 | deflate_state *s; | |
2631 | charf *buf; /* input block */ | |
2632 | ulg stored_len; /* length of input block */ | |
2633 | int eof; /* true if this is the last block for a file */ | |
2634 | { | |
2635 | send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */ | |
2636 | s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L; | |
2637 | s->compressed_len += (stored_len + 4) << 3; | |
2638 | ||
2639 | copy_block(s, buf, (unsigned)stored_len, 1); /* with header */ | |
2640 | } | |
2641 | ||
2642 | /* Send just the `stored block' type code without any length bytes or data. | |
2643 | */ | |
2644 | void _tr_stored_type_only(s) | |
2645 | deflate_state *s; | |
2646 | { | |
2647 | send_bits(s, (STORED_BLOCK << 1), 3); | |
2648 | bi_windup(s); | |
2649 | s->compressed_len = (s->compressed_len + 3) & ~7L; | |
2650 | } | |
2651 | ||
2652 | ||
2653 | /* =========================================================================== | |
2654 | * Send one empty static block to give enough lookahead for inflate. | |
2655 | * This takes 10 bits, of which 7 may remain in the bit buffer. | |
2656 | * The current inflate code requires 9 bits of lookahead. If the | |
2657 | * last two codes for the previous block (real code plus EOB) were coded | |
2658 | * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode | |
2659 | * the last real code. In this case we send two empty static blocks instead | |
2660 | * of one. (There are no problems if the previous block is stored or fixed.) | |
2661 | * To simplify the code, we assume the worst case of last real code encoded | |
2662 | * on one bit only. | |
2663 | */ | |
2664 | void _tr_align(s) | |
2665 | deflate_state *s; | |
2666 | { | |
2667 | send_bits(s, STATIC_TREES<<1, 3); | |
2668 | send_code(s, END_BLOCK, static_ltree); | |
2669 | s->compressed_len += 10L; /* 3 for block type, 7 for EOB */ | |
2670 | bi_flush(s); | |
2671 | /* Of the 10 bits for the empty block, we have already sent | |
2672 | * (10 - bi_valid) bits. The lookahead for the last real code (before | |
2673 | * the EOB of the previous block) was thus at least one plus the length | |
2674 | * of the EOB plus what we have just sent of the empty static block. | |
2675 | */ | |
2676 | if (1 + s->last_eob_len + 10 - s->bi_valid < 9) { | |
2677 | send_bits(s, STATIC_TREES<<1, 3); | |
2678 | send_code(s, END_BLOCK, static_ltree); | |
2679 | s->compressed_len += 10L; | |
2680 | bi_flush(s); | |
2681 | } | |
2682 | s->last_eob_len = 7; | |
2683 | } | |
2684 | ||
2685 | /* =========================================================================== | |
2686 | * Determine the best encoding for the current block: dynamic trees, static | |
2687 | * trees or store, and output the encoded block to the zip file. This function | |
2688 | * returns the total compressed length for the file so far. | |
2689 | */ | |
2690 | ulg _tr_flush_block(s, buf, stored_len, eof) | |
2691 | deflate_state *s; | |
2692 | charf *buf; /* input block, or NULL if too old */ | |
2693 | ulg stored_len; /* length of input block */ | |
2694 | int eof; /* true if this is the last block for a file */ | |
2695 | { | |
2696 | ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */ | |
2697 | int max_blindex = 0; /* index of last bit length code of non zero freq */ | |
2698 | ||
2699 | /* Build the Huffman trees unless a stored block is forced */ | |
2700 | if (s->level > 0) { | |
2701 | ||
2702 | /* Check if the file is ASCII or binary */ | |
2703 | if (s->data_type == Z_UNKNOWN) set_data_type(s); | |
2704 | ||
2705 | /* Construct the literal and distance trees */ | |
2706 | build_tree(s, (tree_desc *)(&(s->l_desc))); | |
2707 | Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, | |
2708 | s->static_len)); | |
2709 | ||
2710 | build_tree(s, (tree_desc *)(&(s->d_desc))); | |
2711 | Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, | |
2712 | s->static_len)); | |
2713 | /* At this point, opt_len and static_len are the total bit lengths of | |
2714 | * the compressed block data, excluding the tree representations. | |
2715 | */ | |
2716 | ||
2717 | /* Build the bit length tree for the above two trees, and get the index | |
2718 | * in bl_order of the last bit length code to send. | |
2719 | */ | |
2720 | max_blindex = build_bl_tree(s); | |
2721 | ||
2722 | /* Determine the best encoding. First compute the block length in bytes */ | |
2723 | opt_lenb = (s->opt_len+3+7)>>3; | |
2724 | static_lenb = (s->static_len+3+7)>>3; | |
2725 | ||
2726 | Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", | |
2727 | opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, | |
2728 | s->last_lit)); | |
2729 | ||
2730 | if (static_lenb <= opt_lenb) opt_lenb = static_lenb; | |
2731 | ||
2732 | } else { | |
2733 | Assert(buf != (char*)0, "lost buf"); | |
2734 | opt_lenb = static_lenb = stored_len + 5; /* force a stored block */ | |
2735 | } | |
2736 | ||
2737 | /* If compression failed and this is the first and last block, | |
2738 | * and if the .zip file is seekable (to rewrite the local header), | |
2739 | * the whole file is transformed into a stored file: | |
2740 | */ | |
2741 | #ifdef STORED_FILE_OK | |
2742 | # ifdef FORCE_STORED_FILE | |
2743 | if (eof && s->compressed_len == 0L) { /* force stored file */ | |
2744 | # else | |
2745 | if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) { | |
2746 | # endif | |
2747 | /* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */ | |
2748 | if (buf == (charf*)0) error ("block vanished"); | |
2749 | ||
2750 | copy_block(s, buf, (unsigned)stored_len, 0); /* without header */ | |
2751 | s->compressed_len = stored_len << 3; | |
2752 | s->method = STORED; | |
2753 | } else | |
2754 | #endif /* STORED_FILE_OK */ | |
2755 | ||
2756 | #ifdef FORCE_STORED | |
2757 | if (buf != (char*)0) { /* force stored block */ | |
2758 | #else | |
2759 | if (stored_len+4 <= opt_lenb && buf != (char*)0) { | |
2760 | /* 4: two words for the lengths */ | |
2761 | #endif | |
2762 | /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. | |
2763 | * Otherwise we can't have processed more than WSIZE input bytes since | |
2764 | * the last block flush, because compression would have been | |
2765 | * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to | |
2766 | * transform a block into a stored block. | |
2767 | */ | |
2768 | _tr_stored_block(s, buf, stored_len, eof); | |
2769 | ||
2770 | #ifdef FORCE_STATIC | |
2771 | } else if (static_lenb >= 0) { /* force static trees */ | |
2772 | #else | |
2773 | } else if (static_lenb == opt_lenb) { | |
2774 | #endif | |
2775 | send_bits(s, (STATIC_TREES<<1)+eof, 3); | |
2776 | compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree); | |
2777 | s->compressed_len += 3 + s->static_len; | |
2778 | } else { | |
2779 | send_bits(s, (DYN_TREES<<1)+eof, 3); | |
2780 | send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1, | |
2781 | max_blindex+1); | |
2782 | compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree); | |
2783 | s->compressed_len += 3 + s->opt_len; | |
2784 | } | |
2785 | Assert (s->compressed_len == s->bits_sent, "bad compressed size"); | |
2786 | init_block(s); | |
2787 | ||
2788 | if (eof) { | |
2789 | bi_windup(s); | |
2790 | s->compressed_len += 7; /* align on byte boundary */ | |
2791 | } | |
2792 | Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, | |
2793 | s->compressed_len-7*eof)); | |
2794 | ||
2795 | return s->compressed_len >> 3; | |
2796 | } | |
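#if 0
/* Not compiled: a minimal restatement of the format choice made above, for
 * reference only (the function name is illustrative). Sizes are in bytes;
 * "+3+7" covers the 3-bit block header plus rounding up to a byte boundary,
 * and "+4" is the two 16-bit length words of a stored block. The real code
 * additionally requires the input buffer to still be available before it
 * can fall back to a stored block. */
local int cheapest_format(opt_len, static_len, stored_len)
    ulg opt_len;        /* dynamic-tree cost in bits */
    ulg static_len;     /* static-tree cost in bits */
    ulg stored_len;     /* raw input length in bytes */
{
    ulg opt_lenb = (opt_len+3+7)>>3;
    ulg static_lenb = (static_len+3+7)>>3;

    if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
    if (stored_len+4 <= opt_lenb) return 0;     /* stored block wins */
    if (static_lenb == opt_lenb)  return 1;     /* static trees win */
    return 2;                                   /* dynamic trees win */
}
#endif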
2797 | ||
2798 | /* =========================================================================== | |
2799 | * Save the match info and tally the frequency counts. Return true if | |
2800 | * the current block must be flushed. | |
2801 | */ | |
2802 | int _tr_tally (s, dist, lc) | |
2803 | deflate_state *s; | |
2804 | unsigned dist; /* distance of matched string */ | |
2805 | unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ | |
2806 | { | |
2807 | s->d_buf[s->last_lit] = (ush)dist; | |
2808 | s->l_buf[s->last_lit++] = (uch)lc; | |
2809 | if (dist == 0) { | |
2810 | /* lc is the unmatched char */ | |
2811 | s->dyn_ltree[lc].Freq++; | |
2812 | } else { | |
2813 | s->matches++; | |
2814 | /* Here, lc is the match length - MIN_MATCH */ | |
2815 | dist--; /* dist = match distance - 1 */ | |
2816 | Assert((ush)dist < (ush)MAX_DIST(s) && | |
2817 | (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && | |
2818 | (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match"); | |
2819 | ||
2820 | s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++; | |
2821 | s->dyn_dtree[d_code(dist)].Freq++; | |
2822 | } | |
2823 | ||
2824 | /* Try to guess if it is profitable to stop the current block here */ | |
2825 | if (s->level > 2 && (s->last_lit & 0xfff) == 0) { | |
2826 | /* Compute an upper bound for the compressed length */ | |
2827 | ulg out_length = (ulg)s->last_lit*8L; | |
2828 | ulg in_length = (ulg)((long)s->strstart - s->block_start); | |
2829 | int dcode; | |
2830 | for (dcode = 0; dcode < D_CODES; dcode++) { | |
2831 | out_length += (ulg)s->dyn_dtree[dcode].Freq * | |
2832 | (5L+extra_dbits[dcode]); | |
2833 | } | |
2834 | out_length >>= 3; | |
2835 | Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", | |
2836 | s->last_lit, in_length, out_length, | |
2837 | 100L - out_length*100L/in_length)); | |
2838 | if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1; | |
2839 | } | |
2840 | return (s->last_lit == s->lit_bufsize-1); | |
2841 | /* We avoid equality with lit_bufsize because of wraparound at 64K | |
2842 | * on 16 bit machines and because stored blocks are restricted to | |
2843 | * 64K-1 bytes. | |
2844 | */ | |
2845 | } | |
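/*
 * Illustrative numbers for the heuristic above: after 4096 tallies covering
 * 20000 input bytes with 1000 matches, out_length starts at 4096*8 bits,
 * gains the distance extra bits, and lands near 5K bytes after the >>3.
 * Since 1000 < 4096/2 and 5K < 20000/2, _tr_tally returns 1 and the block
 * is flushed early, letting the trees adapt to the recent data.
 */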
2846 | ||
2847 | /* =========================================================================== | |
2848 | * Send the block data compressed using the given Huffman trees | |
2849 | */ | |
2850 | local void compress_block(s, ltree, dtree) | |
2851 | deflate_state *s; | |
2852 | ct_data *ltree; /* literal tree */ | |
2853 | ct_data *dtree; /* distance tree */ | |
2854 | { | |
2855 | unsigned dist; /* distance of matched string */ | |
2856 | int lc; /* match length or unmatched char (if dist == 0) */ | |
2857 | unsigned lx = 0; /* running index in l_buf */ | |
2858 | unsigned code; /* the code to send */ | |
2859 | int extra; /* number of extra bits to send */ | |
2860 | ||
2861 | if (s->last_lit != 0) do { | |
2862 | dist = s->d_buf[lx]; | |
2863 | lc = s->l_buf[lx++]; | |
2864 | if (dist == 0) { | |
2865 | send_code(s, lc, ltree); /* send a literal byte */ | |
2866 | Tracecv(isgraph(lc), (stderr," '%c' ", lc)); | |
2867 | } else { | |
2868 | /* Here, lc is the match length - MIN_MATCH */ | |
2869 | code = length_code[lc]; | |
2870 | send_code(s, code+LITERALS+1, ltree); /* send the length code */ | |
2871 | extra = extra_lbits[code]; | |
2872 | if (extra != 0) { | |
2873 | lc -= base_length[code]; | |
2874 | send_bits(s, lc, extra); /* send the extra length bits */ | |
2875 | } | |
2876 | dist--; /* dist is now the match distance - 1 */ | |
2877 | code = d_code(dist); | |
2878 | Assert (code < D_CODES, "bad d_code"); | |
2879 | ||
2880 | send_code(s, code, dtree); /* send the distance code */ | |
2881 | extra = extra_dbits[code]; | |
2882 | if (extra != 0) { | |
2883 | dist -= base_dist[code]; | |
2884 | send_bits(s, dist, extra); /* send the extra distance bits */ | |
2885 | } | |
2886 | } /* literal or match pair ? */ | |
2887 | ||
2888 | /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ | |
2889 | Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow"); | |
2890 | ||
2891 | } while (lx < s->last_lit); | |
2892 | ||
2893 | send_code(s, END_BLOCK, ltree); | |
2894 | s->last_eob_len = ltree[END_BLOCK].Len; | |
2895 | } | |
2896 | ||
2897 | /* =========================================================================== | |
2898 | * Set the data type to ASCII or BINARY, using a crude approximation: | |
2899 | * binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise. | |
2900 | * IN assertion: the fields freq of dyn_ltree are set and the total of all | |
2901 | * frequencies does not exceed 64K (to fit in an int on 16 bit machines). | |
2902 | */ | |
2903 | local void set_data_type(s) | |
2904 | deflate_state *s; | |
2905 | { | |
2906 | int n = 0; | |
2907 | unsigned ascii_freq = 0; | |
2908 | unsigned bin_freq = 0; | |
2909 | while (n < 7) bin_freq += s->dyn_ltree[n++].Freq; | |
2910 | while (n < 128) ascii_freq += s->dyn_ltree[n++].Freq; | |
2911 | while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq; | |
2912 | s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII); | |
2913 | } | |
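/*
 * Example: 30 bytes in the ranges 0..6 or 128..255 against 100 bytes in
 * 7..127 gives bin_freq 30 > (100 >> 2), so the block is classified
 * Z_BINARY (30 of 130 bytes is just over the 20% threshold above).
 */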
2914 | ||
2915 | /* =========================================================================== | |
2916 | * Reverse the first len bits of a code, using straightforward code (a faster | |
2917 | * method would use a table) | |
2918 | * IN assertion: 1 <= len <= 15 | |
2919 | */ | |
2920 | local unsigned bi_reverse(code, len) | |
2921 | unsigned code; /* the value to invert */ | |
2922 | int len; /* its bit length */ | |
2923 | { | |
2924 | register unsigned res = 0; | |
2925 | do { | |
2926 | res |= code & 1; | |
2927 | code >>= 1, res <<= 1; | |
2928 | } while (--len > 0); | |
2929 | return res >> 1; | |
2930 | } | |
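/*
 * Example: bi_reverse(0x3, 4) turns 0011 into 1100 (0xc); a palindrome
 * such as 101 (bi_reverse(0x5, 3)) comes back unchanged.
 */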
2931 | ||
2932 | /* =========================================================================== | |
2933 | * Flush the bit buffer, keeping at most 7 bits in it. | |
2934 | */ | |
2935 | local void bi_flush(s) | |
2936 | deflate_state *s; | |
2937 | { | |
2938 | if (s->bi_valid == 16) { | |
2939 | put_short(s, s->bi_buf); | |
2940 | s->bi_buf = 0; | |
2941 | s->bi_valid = 0; | |
2942 | } else if (s->bi_valid >= 8) { | |
2943 | put_byte(s, (Byte)s->bi_buf); | |
2944 | s->bi_buf >>= 8; | |
2945 | s->bi_valid -= 8; | |
2946 | } | |
2947 | } | |
2948 | ||
2949 | /* =========================================================================== | |
2950 | * Flush the bit buffer and align the output on a byte boundary | |
2951 | */ | |
2952 | local void bi_windup(s) | |
2953 | deflate_state *s; | |
2954 | { | |
2955 | if (s->bi_valid > 8) { | |
2956 | put_short(s, s->bi_buf); | |
2957 | } else if (s->bi_valid > 0) { | |
2958 | put_byte(s, (Byte)s->bi_buf); | |
2959 | } | |
2960 | s->bi_buf = 0; | |
2961 | s->bi_valid = 0; | |
2962 | #ifdef DEBUG_ZLIB | |
2963 | s->bits_sent = (s->bits_sent+7) & ~7; | |
2964 | #endif | |
2965 | } | |
2966 | ||
2967 | /* =========================================================================== | |
2968 | * Copy a stored block, storing first the length and its | |
2969 | * one's complement if requested. | |
2970 | */ | |
2971 | local void copy_block(s, buf, len, header) | |
2972 | deflate_state *s; | |
2973 | charf *buf; /* the input data */ | |
2974 | unsigned len; /* its length */ | |
2975 | int header; /* true if block header must be written */ | |
2976 | { | |
2977 | bi_windup(s); /* align on byte boundary */ | |
2978 | s->last_eob_len = 8; /* enough lookahead for inflate */ | |
2979 | ||
2980 | if (header) { | |
2981 | put_short(s, (ush)len); | |
2982 | put_short(s, (ush)~len); | |
2983 | #ifdef DEBUG_ZLIB | |
2984 | s->bits_sent += 2*16; | |
2985 | #endif | |
2986 | } | |
2987 | #ifdef DEBUG_ZLIB | |
2988 | s->bits_sent += (ulg)len<<3; | |
2989 | #endif | |
2990 | /* bundle up the put_byte(s, *buf++) calls */ | |
2991 | zmemcpy(&s->pending_buf[s->pending], buf, len); | |
2992 | s->pending += len; | |
2993 | } | |
2994 | /* --- trees.c */ | |
2995 | ||
2996 | /* +++ inflate.c */ | |
2997 | /* inflate.c -- zlib interface to inflate modules | |
2998 | * Copyright (C) 1995-1996 Mark Adler | |
2999 | * For conditions of distribution and use, see copyright notice in zlib.h | |
3000 | */ | |
3001 | ||
3002 | /* #include "zutil.h" */ | |
3003 | ||
3004 | /* +++ infblock.h */ | |
3005 | /* infblock.h -- header to use infblock.c | |
3006 | * Copyright (C) 1995-1996 Mark Adler | |
3007 | * For conditions of distribution and use, see copyright notice in zlib.h | |
3008 | */ | |
3009 | ||
3010 | /* WARNING: this file should *not* be used by applications. It is | |
3011 | part of the implementation of the compression library and is | |
3012 | subject to change. Applications should only use zlib.h. | |
3013 | */ | |
3014 | ||
3015 | struct inflate_blocks_state; | |
3016 | typedef struct inflate_blocks_state FAR inflate_blocks_statef; | |
3017 | ||
3018 | extern inflate_blocks_statef * inflate_blocks_new OF(( | |
3019 | z_streamp z, | |
3020 | check_func c, /* check function */ | |
3021 | uInt w)); /* window size */ | |
3022 | ||
3023 | extern int inflate_blocks OF(( | |
3024 | inflate_blocks_statef *, | |
3025 | z_streamp , | |
3026 | int)); /* initial return code */ | |
3027 | ||
3028 | extern void inflate_blocks_reset OF(( | |
3029 | inflate_blocks_statef *, | |
3030 | z_streamp , | |
3031 | uLongf *)); /* check value on output */ | |
3032 | ||
3033 | extern int inflate_blocks_free OF(( | |
3034 | inflate_blocks_statef *, | |
3035 | z_streamp , | |
3036 | uLongf *)); /* check value on output */ | |
3037 | ||
3038 | extern void inflate_set_dictionary OF(( | |
3039 | inflate_blocks_statef *s, | |
3040 | const Bytef *d, /* dictionary */ | |
3041 | uInt n)); /* dictionary length */ | |
3042 | ||
3043 | extern int inflate_addhistory OF(( | |
3044 | inflate_blocks_statef *, | |
3045 | z_streamp)); | |
3046 | ||
3047 | extern int inflate_packet_flush OF(( | |
3048 | inflate_blocks_statef *)); | |
3049 | /* --- infblock.h */ | |
3050 | ||
3051 | #ifndef NO_DUMMY_DECL | |
3052 | struct inflate_blocks_state {int dummy;}; /* for buggy compilers */ | |
3053 | #endif | |
3054 | ||
3055 | /* inflate private state */ | |
3056 | struct internal_state { | |
3057 | ||
3058 | /* mode */ | |
3059 | enum { | |
3060 | METHOD, /* waiting for method byte */ | |
3061 | FLAG, /* waiting for flag byte */ | |
3062 | DICT4, /* four dictionary check bytes to go */ | |
3063 | DICT3, /* three dictionary check bytes to go */ | |
3064 | DICT2, /* two dictionary check bytes to go */ | |
3065 | DICT1, /* one dictionary check byte to go */ | |
3066 | DICT0, /* waiting for inflateSetDictionary */ | |
3067 | BLOCKS, /* decompressing blocks */ | |
3068 | CHECK4, /* four check bytes to go */ | |
3069 | CHECK3, /* three check bytes to go */ | |
3070 | CHECK2, /* two check bytes to go */ | |
3071 | CHECK1, /* one check byte to go */ | |
3072 | DONE, /* finished check, done */ | |
3073 | BAD} /* got an error--stay here */ | |
3074 | mode; /* current inflate mode */ | |
3075 | ||
3076 | /* mode dependent information */ | |
3077 | union { | |
3078 | uInt method; /* if FLAGS, method byte */ | |
3079 | struct { | |
3080 | uLong was; /* computed check value */ | |
3081 | uLong need; /* stream check value */ | |
3082 | } check; /* if CHECK, check values to compare */ | |
3083 | uInt marker; /* if BAD, inflateSync's marker bytes count */ | |
3084 | } sub; /* submode */ | |
3085 | ||
3086 | /* mode independent information */ | |
3087 | int nowrap; /* flag for no wrapper */ | |
3088 | uInt wbits; /* log2(window size) (8..15, defaults to 15) */ | |
3089 | inflate_blocks_statef | |
3090 | *blocks; /* current inflate_blocks state */ | |
3091 | ||
3092 | }; | |
3093 | ||
3094 | ||
3095 | int inflateReset(z) | |
3096 | z_streamp z; | |
3097 | { | |
3098 | uLong c; | |
3099 | ||
3100 | if (z == Z_NULL || z->state == Z_NULL) | |
3101 | return Z_STREAM_ERROR; | |
3102 | z->total_in = z->total_out = 0; | |
3103 | z->msg = Z_NULL; | |
3104 | z->state->mode = z->state->nowrap ? BLOCKS : METHOD; | |
3105 | inflate_blocks_reset(z->state->blocks, z, &c); | |
3106 | Trace((stderr, "inflate: reset\n")); | |
3107 | return Z_OK; | |
3108 | } | |
3109 | ||
3110 | ||
3111 | int inflateEnd(z) | |
3112 | z_streamp z; | |
3113 | { | |
3114 | uLong c; | |
3115 | ||
3116 | if (z == Z_NULL || z->state == Z_NULL || z->zfree == Z_NULL) | |
3117 | return Z_STREAM_ERROR; | |
3118 | if (z->state->blocks != Z_NULL) | |
3119 | inflate_blocks_free(z->state->blocks, z, &c); | |
3120 | ZFREE(z, z->state); | |
3121 | z->state = Z_NULL; | |
3122 | Trace((stderr, "inflate: end\n")); | |
3123 | return Z_OK; | |
3124 | } | |
3125 | ||
3126 | ||
3127 | int inflateInit2_(z, w, version, stream_size) | |
3128 | z_streamp z; | |
3129 | int w; | |
3130 | const char *version; | |
3131 | int stream_size; | |
3132 | { | |
3133 | if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || | |
3134 | stream_size != sizeof(z_stream)) | |
3135 | return Z_VERSION_ERROR; | |
3136 | ||
3137 | /* initialize state */ | |
3138 | if (z == Z_NULL) | |
3139 | return Z_STREAM_ERROR; | |
3140 | z->msg = Z_NULL; | |
3141 | #ifndef NO_ZCFUNCS | |
3142 | if (z->zalloc == Z_NULL) | |
3143 | { | |
3144 | z->zalloc = zcalloc; | |
3145 | z->opaque = (voidpf)0; | |
3146 | } | |
3147 | if (z->zfree == Z_NULL) z->zfree = zcfree; | |
3148 | #endif | |
3149 | if ((z->state = (struct internal_state FAR *) | |
3150 | ZALLOC(z,1,sizeof(struct internal_state))) == Z_NULL) | |
3151 | return Z_MEM_ERROR; | |
3152 | z->state->blocks = Z_NULL; | |
3153 | ||
3154 | /* handle undocumented nowrap option (no zlib header or check) */ | |
3155 | z->state->nowrap = 0; | |
3156 | if (w < 0) | |
3157 | { | |
3158 | w = - w; | |
3159 | z->state->nowrap = 1; | |
3160 | } | |
3161 | ||
3162 | /* set window size */ | |
3163 | if (w < 8 || w > 15) | |
3164 | { | |
3165 | inflateEnd(z); | |
3166 | return Z_STREAM_ERROR; | |
3167 | } | |
3168 | z->state->wbits = (uInt)w; | |
3169 | ||
3170 | /* create inflate_blocks state */ | |
3171 | if ((z->state->blocks = | |
3172 | inflate_blocks_new(z, z->state->nowrap ? Z_NULL : adler32, (uInt)1 << w)) | |
3173 | == Z_NULL) | |
3174 | { | |
3175 | inflateEnd(z); | |
3176 | return Z_MEM_ERROR; | |
3177 | } | |
3178 | Trace((stderr, "inflate: allocated\n")); | |
3179 | ||
3180 | /* reset state */ | |
3181 | inflateReset(z); | |
3182 | return Z_OK; | |
3183 | } | |
3184 | ||
3185 | ||
3186 | int inflateInit_(z, version, stream_size) | |
3187 | z_streamp z; | |
3188 | const char *version; | |
3189 | int stream_size; | |
3190 | { | |
3191 | return inflateInit2_(z, DEF_WBITS, version, stream_size); | |
3192 | } | |
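#if 0
/* Not compiled: a usage sketch for this PPP build. windowBits is negative
 * to select the "nowrap" option handled above (raw deflate data, no zlib
 * header or check), and Z_PACKET_FLUSH is the extension added in this
 * file. my_zalloc/my_zfree and the buffers are hypothetical; NO_ZCFUNCS
 * is defined for this build, so the caller must supply allocators. */
local int example_decompress(in, in_len, out, out_len)
    Bytef *in; uInt in_len;
    Bytef *out; uInt out_len;
{
    z_stream z;
    int r;

    z.zalloc = my_zalloc;       /* hypothetical kernel allocator */
    z.zfree  = my_zfree;
    z.opaque = (voidpf)0;
    if (inflateInit2(&z, -15) != Z_OK)  /* raw deflate, 32K window */
        return -1;
    z.next_in  = in;  z.avail_in  = in_len;
    z.next_out = out; z.avail_out = out_len;
    r = inflate(&z, Z_PACKET_FLUSH);    /* decode one whole packet */
    inflateEnd(&z);
    return r == Z_OK ? (int)(out_len - z.avail_out) : -1;
}
#endif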
3193 | ||
3194 | ||
3195 | #define NEEDBYTE {if(z->avail_in==0)goto empty;r=Z_OK;} | |
3196 | #define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++) | |
3197 | ||
3198 | int inflate(z, f) | |
3199 | z_streamp z; | |
3200 | int f; | |
3201 | { | |
3202 | int r; | |
3203 | uInt b; | |
3204 | ||
3205 | if (z == Z_NULL || z->state == Z_NULL || z->next_in == Z_NULL || f < 0) | |
3206 | return Z_STREAM_ERROR; | |
3207 | r = Z_BUF_ERROR; | |
3208 | while (1) switch (z->state->mode) | |
3209 | { | |
3210 | case METHOD: | |
3211 | NEEDBYTE | |
3212 | if (((z->state->sub.method = NEXTBYTE) & 0xf) != Z_DEFLATED) | |
3213 | { | |
3214 | z->state->mode = BAD; | |
3215 | z->msg = (char*)"unknown compression method"; | |
3216 | z->state->sub.marker = 5; /* can't try inflateSync */ | |
3217 | break; | |
3218 | } | |
3219 | if ((z->state->sub.method >> 4) + 8 > z->state->wbits) | |
3220 | { | |
3221 | z->state->mode = BAD; | |
3222 | z->msg = (char*)"invalid window size"; | |
3223 | z->state->sub.marker = 5; /* can't try inflateSync */ | |
3224 | break; | |
3225 | } | |
3226 | z->state->mode = FLAG; | |
3227 | case FLAG: | |
3228 | NEEDBYTE | |
3229 | b = NEXTBYTE; | |
3230 | if (((z->state->sub.method << 8) + b) % 31) | |
3231 | { | |
3232 | z->state->mode = BAD; | |
3233 | z->msg = (char*)"incorrect header check"; | |
3234 | z->state->sub.marker = 5; /* can't try inflateSync */ | |
3235 | break; | |
3236 | } | |
3237 | Trace((stderr, "inflate: zlib header ok\n")); | |
3238 | if (!(b & PRESET_DICT)) | |
3239 | { | |
3240 | z->state->mode = BLOCKS; | |
3241 | break; | |
3242 | } | |
3243 | z->state->mode = DICT4; | |
3244 | case DICT4: | |
3245 | NEEDBYTE | |
3246 | z->state->sub.check.need = (uLong)NEXTBYTE << 24; | |
3247 | z->state->mode = DICT3; | |
3248 | case DICT3: | |
3249 | NEEDBYTE | |
3250 | z->state->sub.check.need += (uLong)NEXTBYTE << 16; | |
3251 | z->state->mode = DICT2; | |
3252 | case DICT2: | |
3253 | NEEDBYTE | |
3254 | z->state->sub.check.need += (uLong)NEXTBYTE << 8; | |
3255 | z->state->mode = DICT1; | |
3256 | case DICT1: | |
3257 | NEEDBYTE | |
3258 | z->state->sub.check.need += (uLong)NEXTBYTE; | |
3259 | z->adler = z->state->sub.check.need; | |
3260 | z->state->mode = DICT0; | |
3261 | return Z_NEED_DICT; | |
3262 | case DICT0: | |
3263 | z->state->mode = BAD; | |
3264 | z->msg = (char*)"need dictionary"; | |
3265 | z->state->sub.marker = 0; /* can try inflateSync */ | |
3266 | return Z_STREAM_ERROR; | |
3267 | case BLOCKS: | |
3268 | r = inflate_blocks(z->state->blocks, z, r); | |
3269 | if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0) | |
3270 | r = inflate_packet_flush(z->state->blocks); | |
3271 | if (r == Z_DATA_ERROR) | |
3272 | { | |
3273 | z->state->mode = BAD; | |
3274 | z->state->sub.marker = 0; /* can try inflateSync */ | |
3275 | break; | |
3276 | } | |
3277 | if (r != Z_STREAM_END) | |
3278 | return r; | |
3279 | r = Z_OK; | |
3280 | inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was); | |
3281 | if (z->state->nowrap) | |
3282 | { | |
3283 | z->state->mode = DONE; | |
3284 | break; | |
3285 | } | |
3286 | z->state->mode = CHECK4; | |
3287 | case CHECK4: | |
3288 | NEEDBYTE | |
3289 | z->state->sub.check.need = (uLong)NEXTBYTE << 24; | |
3290 | z->state->mode = CHECK3; | |
3291 | case CHECK3: | |
3292 | NEEDBYTE | |
3293 | z->state->sub.check.need += (uLong)NEXTBYTE << 16; | |
3294 | z->state->mode = CHECK2; | |
3295 | case CHECK2: | |
3296 | NEEDBYTE | |
3297 | z->state->sub.check.need += (uLong)NEXTBYTE << 8; | |
3298 | z->state->mode = CHECK1; | |
3299 | case CHECK1: | |
3300 | NEEDBYTE | |
3301 | z->state->sub.check.need += (uLong)NEXTBYTE; | |
3302 | ||
3303 | if (z->state->sub.check.was != z->state->sub.check.need) | |
3304 | { | |
3305 | z->state->mode = BAD; | |
3306 | z->msg = (char*)"incorrect data check"; | |
3307 | z->state->sub.marker = 5; /* can't try inflateSync */ | |
3308 | break; | |
3309 | } | |
3310 | Trace((stderr, "inflate: zlib check ok\n")); | |
3311 | z->state->mode = DONE; | |
3312 | case DONE: | |
3313 | return Z_STREAM_END; | |
3314 | case BAD: | |
3315 | return Z_DATA_ERROR; | |
3316 | default: | |
3317 | return Z_STREAM_ERROR; | |
3318 | } | |
3319 | ||
3320 | empty: | |
3321 | if (f != Z_PACKET_FLUSH) | |
3322 | return r; | |
3323 | z->state->mode = BAD; | |
3324 | z->msg = (char *)"need more for packet flush"; | |
3325 | z->state->sub.marker = 0; /* can try inflateSync */ | |
3326 | return Z_DATA_ERROR; | |
3327 | } | |
3328 | ||
3329 | ||
3330 | int inflateSetDictionary(z, dictionary, dictLength) | |
3331 | z_streamp z; | |
3332 | const Bytef *dictionary; | |
3333 | uInt dictLength; | |
3334 | { | |
3335 | uInt length = dictLength; | |
3336 | ||
3337 | if (z == Z_NULL || z->state == Z_NULL || z->state->mode != DICT0) | |
3338 | return Z_STREAM_ERROR; | |
3339 | ||
3340 | if (adler32(1L, dictionary, dictLength) != z->adler) return Z_DATA_ERROR; | |
3341 | z->adler = 1L; | |
3342 | ||
3343 | if (length >= ((uInt)1<<z->state->wbits)) | |
3344 | { | |
3345 | length = (1<<z->state->wbits)-1; | |
3346 | dictionary += dictLength - length; | |
3347 | } | |
3348 | inflate_set_dictionary(z->state->blocks, dictionary, length); | |
3349 | z->state->mode = BLOCKS; | |
3350 | return Z_OK; | |
3351 | } | |
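#if 0
/* Not compiled: the preset-dictionary handshake. z is an initialized
 * stream with input pending; dict/dict_len are the caller's copy of the
 * dictionary the compressor used. inflate() returns Z_NEED_DICT after
 * consuming the DICT4..DICT1 check bytes, leaving the expected Adler-32
 * in z->adler for inflateSetDictionary() to verify. */
local int example_need_dict(z, dict, dict_len)
    z_streamp z;
    const Bytef *dict;
    uInt dict_len;
{
    int r = inflate(z, Z_NO_FLUSH);
    if (r != Z_NEED_DICT)
        return r;
    r = inflateSetDictionary(z, dict, dict_len);
    if (r != Z_OK)
        return r;                       /* wrong dictionary: Z_DATA_ERROR */
    return inflate(z, Z_NO_FLUSH);      /* resumes in BLOCKS mode */
}
#endif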
3352 | ||
3353 | /* | |
3354 | * This subroutine adds the data at next_in/avail_in to the output history | |
3355 | * without performing any output. The output buffer must be "caught up"; | |
3356 | * i.e. no pending output (hence s->read equals s->write), and the state must | |
3357 | * be BLOCKS (i.e. we should be willing to see the start of a series of | |
3358 | * BLOCKS). On exit, the output will also be caught up, and the checksum | |
3359 | * will have been updated if need be. | |
3360 | */ | |
3361 | ||
3362 | int inflateIncomp(z) | |
3363 | z_stream *z; | |
3364 | { | |
3365 | if (z->state->mode != BLOCKS) | |
3366 | return Z_DATA_ERROR; | |
3367 | return inflate_addhistory(z->state->blocks, z); | |
3368 | } | |
3369 | ||
3370 | ||
3371 | int inflateSync(z) | |
3372 | z_streamp z; | |
3373 | { | |
3374 | uInt n; /* number of bytes to look at */ | |
3375 | Bytef *p; /* pointer to bytes */ | |
3376 | uInt m; /* number of marker bytes found in a row */ | |
3377 | uLong r, w; /* temporaries to save total_in and total_out */ | |
3378 | ||
3379 | /* set up */ | |
3380 | if (z == Z_NULL || z->state == Z_NULL) | |
3381 | return Z_STREAM_ERROR; | |
3382 | if (z->state->mode != BAD) | |
3383 | { | |
3384 | z->state->mode = BAD; | |
3385 | z->state->sub.marker = 0; | |
3386 | } | |
3387 | if ((n = z->avail_in) == 0) | |
3388 | return Z_BUF_ERROR; | |
3389 | p = z->next_in; | |
3390 | m = z->state->sub.marker; | |
3391 | ||
3392 | /* search */ | |
3393 | while (n && m < 4) | |
3394 | { | |
3395 | if (*p == (Byte)(m < 2 ? 0 : 0xff)) | |
3396 | m++; | |
3397 | else if (*p) | |
3398 | m = 0; | |
3399 | else | |
3400 | m = 4 - m; | |
3401 | p++, n--; | |
3402 | } | |
3403 | ||
3404 | /* restore */ | |
3405 | z->total_in += p - z->next_in; | |
3406 | z->next_in = p; | |
3407 | z->avail_in = n; | |
3408 | z->state->sub.marker = m; | |
3409 | ||
3410 | /* return no joy or set up to restart on a new block */ | |
3411 | if (m != 4) | |
3412 | return Z_DATA_ERROR; | |
3413 | r = z->total_in; w = z->total_out; | |
3414 | inflateReset(z); | |
3415 | z->total_in = r; z->total_out = w; | |
3416 | z->state->mode = BLOCKS; | |
3417 | return Z_OK; | |
3418 | } | |
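#if 0
/* Not compiled: recovering from Z_DATA_ERROR. inflateSync() hunts for the
 * 00 00 FF FF pattern (the empty stored block a compressor emits at a
 * full flush) and restarts decompression there. refill_input() is a
 * hypothetical reader supplying further compressed bytes. */
local int example_resync(z)
    z_streamp z;
{
    Bytef buf[4096];                    /* illustrative staging buffer */
    int r, got;

    while ((r = inflateSync(z)) == Z_DATA_ERROR || r == Z_BUF_ERROR) {
        got = refill_input(buf, sizeof(buf));
        if (got <= 0)
            return Z_DATA_ERROR;        /* ran out of input */
        z->next_in = buf;
        z->avail_in = (uInt)got;
    }
    return r;                           /* Z_OK: caller may inflate again */
}
#endif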
3419 | ||
3420 | #undef NEEDBYTE | |
3421 | #undef NEXTBYTE | |
3422 | /* --- inflate.c */ | |
3423 | ||
3424 | /* +++ infblock.c */ | |
3425 | /* infblock.c -- interpret and process block types to last block | |
3426 | * Copyright (C) 1995-1996 Mark Adler | |
3427 | * For conditions of distribution and use, see copyright notice in zlib.h | |
3428 | */ | |
3429 | ||
3430 | /* #include "zutil.h" */ | |
3431 | /* #include "infblock.h" */ | |
3432 | ||
3433 | /* +++ inftrees.h */ | |
3434 | /* inftrees.h -- header to use inftrees.c | |
3435 | * Copyright (C) 1995-1996 Mark Adler | |
3436 | * For conditions of distribution and use, see copyright notice in zlib.h | |
3437 | */ | |
3438 | ||
3439 | /* WARNING: this file should *not* be used by applications. It is | |
3440 | part of the implementation of the compression library and is | |
3441 | subject to change. Applications should only use zlib.h. | |
3442 | */ | |
3443 | ||
3444 | /* Huffman code lookup table entry--this entry is four bytes for machines | |
3445 | that have 16-bit pointers (e.g. PCs in the small or medium model). */ | |
3446 | ||
3447 | typedef struct inflate_huft_s FAR inflate_huft; | |
3448 | ||
3449 | struct inflate_huft_s { | |
3450 | union { | |
3451 | struct { | |
3452 | Byte Exop; /* number of extra bits or operation */ | |
3453 | Byte Bits; /* number of bits in this code or subcode */ | |
3454 | } what; | |
3455 | Bytef *pad; /* pad structure to a power of 2 (4 bytes for */ | |
3456 | } word; /* 16-bit, 8 bytes for 32-bit machines) */ | |
3457 | union { | |
3458 | uInt Base; /* literal, length base, or distance base */ | |
3459 | inflate_huft *Next; /* pointer to next level of table */ | |
3460 | } more; | |
3461 | }; | |
3462 | ||
3463 | #ifdef DEBUG_ZLIB | |
3464 | extern uInt inflate_hufts; | |
3465 | #endif | |
3466 | ||
3467 | extern int inflate_trees_bits OF(( | |
3468 | uIntf *, /* 19 code lengths */ | |
3469 | uIntf *, /* bits tree desired/actual depth */ | |
3470 | inflate_huft * FAR *, /* bits tree result */ | |
3471 | z_streamp )); /* for zalloc, zfree functions */ | |
3472 | ||
3473 | extern int inflate_trees_dynamic OF(( | |
3474 | uInt, /* number of literal/length codes */ | |
3475 | uInt, /* number of distance codes */ | |
3476 | uIntf *, /* that many (total) code lengths */ | |
3477 | uIntf *, /* literal desired/actual bit depth */ | |
3478 | uIntf *, /* distance desired/actual bit depth */ | |
3479 | inflate_huft * FAR *, /* literal/length tree result */ | |
3480 | inflate_huft * FAR *, /* distance tree result */ | |
3481 | z_streamp )); /* for zalloc, zfree functions */ | |
3482 | ||
3483 | extern int inflate_trees_fixed OF(( | |
3484 | uIntf *, /* literal desired/actual bit depth */ | |
3485 | uIntf *, /* distance desired/actual bit depth */ | |
3486 | inflate_huft * FAR *, /* literal/length tree result */ | |
3487 | inflate_huft * FAR *)); /* distance tree result */ | |
3488 | ||
3489 | extern int inflate_trees_free OF(( | |
3490 | inflate_huft *, /* tables to free */ | |
3491 | z_streamp )); /* for zfree function */ | |
3492 | ||
3493 | /* --- inftrees.h */ | |
3494 | ||
3495 | /* +++ infcodes.h */ | |
3496 | /* infcodes.h -- header to use infcodes.c | |
3497 | * Copyright (C) 1995-1996 Mark Adler | |
3498 | * For conditions of distribution and use, see copyright notice in zlib.h | |
3499 | */ | |
3500 | ||
3501 | /* WARNING: this file should *not* be used by applications. It is | |
3502 | part of the implementation of the compression library and is | |
3503 | subject to change. Applications should only use zlib.h. | |
3504 | */ | |
3505 | ||
3506 | struct inflate_codes_state; | |
3507 | typedef struct inflate_codes_state FAR inflate_codes_statef; | |
3508 | ||
3509 | extern inflate_codes_statef *inflate_codes_new OF(( | |
3510 | uInt, uInt, | |
3511 | inflate_huft *, inflate_huft *, | |
3512 | z_streamp )); | |
3513 | ||
3514 | extern int inflate_codes OF(( | |
3515 | inflate_blocks_statef *, | |
3516 | z_streamp , | |
3517 | int)); | |
3518 | ||
3519 | extern void inflate_codes_free OF(( | |
3520 | inflate_codes_statef *, | |
3521 | z_streamp )); | |
3522 | ||
3523 | /* --- infcodes.h */ | |
3524 | ||
3525 | /* +++ infutil.h */ | |
3526 | /* infutil.h -- types and macros common to blocks and codes | |
3527 | * Copyright (C) 1995-1996 Mark Adler | |
3528 | * For conditions of distribution and use, see copyright notice in zlib.h | |
3529 | */ | |
3530 | ||
3531 | /* WARNING: this file should *not* be used by applications. It is | |
3532 | part of the implementation of the compression library and is | |
3533 | subject to change. Applications should only use zlib.h. | |
3534 | */ | |
3535 | ||
3536 | #ifndef _INFUTIL_H | |
3537 | #define _INFUTIL_H | |
3538 | ||
3539 | typedef enum { | |
3540 | TYPE, /* get type bits (3, including end bit) */ | |
3541 | LENS, /* get lengths for stored */ | |
3542 | STORED, /* processing stored block */ | |
3543 | TABLE, /* get table lengths */ | |
3544 | BTREE, /* get bit lengths tree for a dynamic block */ | |
3545 | DTREE, /* get length, distance trees for a dynamic block */ | |
3546 | CODES, /* processing fixed or dynamic block */ | |
3547 | DRY, /* output remaining window bytes */ | |
3548 | DONEB, /* finished last block, done */ | |
3549 | BADB} /* got a data error--stuck here */ | |
3550 | inflate_block_mode; | |
3551 | ||
3552 | /* inflate blocks semi-private state */ | |
3553 | struct inflate_blocks_state { | |
3554 | ||
3555 | /* mode */ | |
3556 | inflate_block_mode mode; /* current inflate_block mode */ | |
3557 | ||
3558 | /* mode dependent information */ | |
3559 | union { | |
3560 | uInt left; /* if STORED, bytes left to copy */ | |
3561 | struct { | |
3562 | uInt table; /* table lengths (14 bits) */ | |
3563 | uInt index; /* index into blens (or border) */ | |
3564 | uIntf *blens; /* bit lengths of codes */ | |
3565 | uInt bb; /* bit length tree depth */ | |
3566 | inflate_huft *tb; /* bit length decoding tree */ | |
3567 | } trees; /* if DTREE, decoding info for trees */ | |
3568 | struct { | |
3569 | inflate_huft *tl; | |
3570 | inflate_huft *td; /* trees to free */ | |
3571 | inflate_codes_statef | |
3572 | *codes; | |
3573 | } decode; /* if CODES, current state */ | |
3574 | } sub; /* submode */ | |
3575 | uInt last; /* true if this block is the last block */ | |
3576 | ||
3577 | /* mode independent information */ | |
3578 | uInt bitk; /* bits in bit buffer */ | |
3579 | uLong bitb; /* bit buffer */ | |
3580 | Bytef *window; /* sliding window */ | |
3581 | Bytef *end; /* one byte after sliding window */ | |
3582 | Bytef *read; /* window read pointer */ | |
3583 | Bytef *write; /* window write pointer */ | |
3584 | check_func checkfn; /* check function */ | |
3585 | uLong check; /* check on output */ | |
3586 | ||
3587 | }; | |
3588 | ||
3589 | ||
3590 | /* defines for inflate input/output */ | |
3591 | /* update pointers and return */ | |
3592 | #define UPDBITS {s->bitb=b;s->bitk=k;} | |
3593 | #define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;} | |
3594 | #define UPDOUT {s->write=q;} | |
3595 | #define UPDATE {UPDBITS UPDIN UPDOUT} | |
3596 | #define LEAVE {UPDATE return inflate_flush(s,z,r);} | |
3597 | /* get bytes and bits */ | |
3598 | #define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;} | |
3599 | #define NEEDBYTE {if(n)r=Z_OK;else LEAVE} | |
3600 | #define NEXTBYTE (n--,*p++) | |
3601 | #define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}} | |
3602 | #define DUMPBITS(j) {b>>=(j);k-=(j);} | |
3603 | /* output bytes */ | |
3604 | #define WAVAIL (uInt)(q<s->read?s->read-q-1:s->end-q) | |
3605 | #define LOADOUT {q=s->write;m=(uInt)WAVAIL;} | |
3606 | #define WWRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=(uInt)WAVAIL;}} | |
3607 | #define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT} | |
3608 | #define NEEDOUT {if(m==0){WWRAP if(m==0){FLUSH WWRAP if(m==0) LEAVE}}r=Z_OK;} | |
3609 | #define OUTBYTE(a) {*q++=(Byte)(a);m--;} | |
3610 | /* load local pointers */ | |
3611 | #define LOAD {LOADIN LOADOUT} | |
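/*
 * How these macros compose inside a decode loop (sketch only):
 *
 *   LOAD                        copy stream state into locals: p,n (input),
 *                               b,k (bit buffer), q,m (output window)
 *   NEEDBITS(j)                 pull input bytes into b until it holds at
 *                               least j bits, or LEAVE if input runs dry
 *   t = (uInt)b & inflate_mask[j];   peek at the low j bits
 *   DUMPBITS(j)                 consume them
 *   NEEDOUT OUTBYTE(c)          make room in the window, emit one byte
 *   LEAVE                       store locals back and return through
 *                               inflate_flush()
 */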
3612 | ||
3613 | /* masks for lower bits (size given to avoid silly warnings with Visual C++) */ | |
3614 | extern uInt inflate_mask[17]; | |
3615 | ||
3616 | /* copy as much as possible from the sliding window to the output area */ | |
3617 | extern int inflate_flush OF(( | |
3618 | inflate_blocks_statef *, | |
3619 | z_streamp , | |
3620 | int)); | |
3621 | ||
3622 | #ifndef NO_DUMMY_DECL | |
3623 | struct internal_state {int dummy;}; /* for buggy compilers */ | |
3624 | #endif | |
3625 | ||
3626 | #endif | |
3627 | /* --- infutil.h */ | |
3628 | ||
3629 | #ifndef NO_DUMMY_DECL | |
3630 | struct inflate_codes_state {int dummy;}; /* for buggy compilers */ | |
3631 | #endif | |
3632 | ||
3633 | /* Table for deflate from PKZIP's appnote.txt. */ | |
3634 | local const uInt border[] = { /* Order of the bit length code lengths */ | |
3635 | 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; | |
3636 | ||
3637 | /* | |
3638 | Notes beyond the 1.93a appnote.txt: | |
3639 | ||
3640 | 1. Distance pointers never point before the beginning of the output | |
3641 | stream. | |
3642 | 2. Distance pointers can point back across blocks, up to 32k away. | |
3643 | 3. There is an implied maximum of 7 bits for the bit length table and | |
3644 | 15 bits for the actual data. | |
3645 | 4. If only one code exists, then it is encoded using one bit. (Zero | |
3646 | would be more efficient, but perhaps a little confusing.) If two | |
3647 | codes exist, they are coded using one bit each (0 and 1). | |
3648 | 5. There is no way of sending zero distance codes--a dummy must be | |
3649 | sent if there are none. (History: a pre 2.0 version of PKZIP would | |
3650 | store blocks with no distance codes, but this was discovered to be | |
3651 | too harsh a criterion.) Valid only for 1.93a. 2.04c does allow | |
3652 | zero distance codes, which are sent as one code of zero bits in | |
3653 | length. | |
3654 | 6. There are up to 286 literal/length codes. Code 256 represents the | |
3655 | end-of-block. Note however that the static length tree defines | |
3656 | 288 codes just to fill out the Huffman codes. Codes 286 and 287 | |
3657 | cannot be used though, since there is no length base or extra bits | |
3658 | defined for them. Similarly, there are up to 30 distance codes. | |
3659 | However, static trees define 32 codes (all 5 bits) to fill out the | |
3660 | Huffman codes, but the last two had better not show up in the data. | |
3661 | 7. Unzip can check dynamic Huffman blocks for complete code sets. | |
3662 | The exception is that a single code would not be complete (see #4). | |
3663 | 8. The five bits following the block type are really the number of | |
3664 | literal codes sent minus 257. | |
3665 | 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits | |
3666 | (1+6+6). Therefore, to output three times the length, you output | |
3667 | three codes (1+1+1), whereas to output four times the same length, | |
3668 | you only need two codes (1+3). Hmm. | |
3669 | 10. In the tree reconstruction algorithm, Code = Code + Increment | |
3670 | only if BitLength(i) is not zero. (Pretty obvious.) | |
3671 | 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19) | |
3672 | 12. Note: length code 284 can represent 227-258, but length code 285 | |
3673 | really is 258. The last length deserves its own, short code | |
3674 | since it gets used a lot in very redundant files. The length | |
3675 | 258 is special since 258 - 3 (the min match length) is 255. | |
3676 | 13. The literal/length and distance code bit lengths are read as a | |
3677 | single stream of lengths. It is possible (and advantageous) for | |
3678 | a repeat code (16, 17, or 18) to go across the boundary between | |
3679 | the two sets of lengths. | |
3680 | */ | |
3681 | ||
3682 | ||
3683 | void inflate_blocks_reset(s, z, c) | |
3684 | inflate_blocks_statef *s; | |
3685 | z_streamp z; | |
3686 | uLongf *c; | |
3687 | { | |
3688 | if (s->checkfn != Z_NULL) | |
3689 | *c = s->check; | |
3690 | if (s->mode == BTREE || s->mode == DTREE) | |
3691 | ZFREE(z, s->sub.trees.blens); | |
3692 | if (s->mode == CODES) | |
3693 | { | |
3694 | inflate_codes_free(s->sub.decode.codes, z); | |
3695 | inflate_trees_free(s->sub.decode.td, z); | |
3696 | inflate_trees_free(s->sub.decode.tl, z); | |
3697 | } | |
3698 | s->mode = TYPE; | |
3699 | s->bitk = 0; | |
3700 | s->bitb = 0; | |
3701 | s->read = s->write = s->window; | |
3702 | if (s->checkfn != Z_NULL) | |
3703 | z->adler = s->check = (*s->checkfn)(0L, Z_NULL, 0); | |
3704 | Trace((stderr, "inflate: blocks reset\n")); | |
3705 | } | |
3706 | ||
3707 | ||
3708 | inflate_blocks_statef *inflate_blocks_new(z, c, w) | |
3709 | z_streamp z; | |
3710 | check_func c; | |
3711 | uInt w; | |
3712 | { | |
3713 | inflate_blocks_statef *s; | |
3714 | ||
3715 | if ((s = (inflate_blocks_statef *)ZALLOC | |
3716 | (z,1,sizeof(struct inflate_blocks_state))) == Z_NULL) | |
3717 | return s; | |
3718 | if ((s->window = (Bytef *)ZALLOC(z, 1, w)) == Z_NULL) | |
3719 | { | |
3720 | ZFREE(z, s); | |
3721 | return Z_NULL; | |
3722 | } | |
3723 | s->end = s->window + w; | |
3724 | s->checkfn = c; | |
3725 | s->mode = TYPE; | |
3726 | Trace((stderr, "inflate: blocks allocated\n")); | |
3727 | inflate_blocks_reset(s, z, &s->check); | |
3728 | return s; | |
3729 | } | |
3730 | ||
3731 | ||
3732 | #ifdef DEBUG_ZLIB | |
3733 | extern uInt inflate_hufts; | |
3734 | #endif | |
3735 | int inflate_blocks(s, z, r) | |
3736 | inflate_blocks_statef *s; | |
3737 | z_streamp z; | |
3738 | int r; | |
3739 | { | |
3740 | uInt t; /* temporary storage */ | |
3741 | uLong b; /* bit buffer */ | |
3742 | uInt k; /* bits in bit buffer */ | |
3743 | Bytef *p; /* input data pointer */ | |
3744 | uInt n; /* bytes available there */ | |
3745 | Bytef *q; /* output window write pointer */ | |
3746 | uInt m; /* bytes to end of window or read pointer */ | |
3747 | ||
3748 | /* copy input/output information to locals (UPDATE macro restores) */ | |
3749 | LOAD | |
3750 | ||
3751 | /* process input based on current state */ | |
3752 | while (1) switch (s->mode) | |
3753 | { | |
3754 | case TYPE: | |
3755 | NEEDBITS(3) | |
3756 | t = (uInt)b & 7; | |
3757 | s->last = t & 1; | |
3758 | switch (t >> 1) | |
3759 | { | |
3760 | case 0: /* stored */ | |
3761 | Trace((stderr, "inflate: stored block%s\n", | |
3762 | s->last ? " (last)" : "")); | |
3763 | DUMPBITS(3) | |
3764 | t = k & 7; /* go to byte boundary */ | |
3765 | DUMPBITS(t) | |
3766 | s->mode = LENS; /* get length of stored block */ | |
3767 | break; | |
3768 | case 1: /* fixed */ | |
3769 | Trace((stderr, "inflate: fixed codes block%s\n", | |
3770 | s->last ? " (last)" : "")); | |
3771 | { | |
3772 | uInt bl, bd; | |
3773 | inflate_huft *tl, *td; | |
3774 | ||
3775 | inflate_trees_fixed(&bl, &bd, &tl, &td); | |
3776 | s->sub.decode.codes = inflate_codes_new(bl, bd, tl, td, z); | |
3777 | if (s->sub.decode.codes == Z_NULL) | |
3778 | { | |
3779 | r = Z_MEM_ERROR; | |
3780 | LEAVE | |
3781 | } | |
3782 | s->sub.decode.tl = Z_NULL; /* don't try to free these */ | |
3783 | s->sub.decode.td = Z_NULL; | |
3784 | } | |
3785 | DUMPBITS(3) | |
3786 | s->mode = CODES; | |
3787 | break; | |
3788 | case 2: /* dynamic */ | |
3789 | Trace((stderr, "inflate: dynamic codes block%s\n", | |
3790 | s->last ? " (last)" : "")); | |
3791 | DUMPBITS(3) | |
3792 | s->mode = TABLE; | |
3793 | break; | |
3794 | case 3: /* illegal */ | |
3795 | DUMPBITS(3) | |
3796 | s->mode = BADB; | |
3797 | z->msg = (char*)"invalid block type"; | |
3798 | r = Z_DATA_ERROR; | |
3799 | LEAVE | |
3800 | } | |
3801 | break; | |
3802 | case LENS: | |
3803 | NEEDBITS(32) | |
3804 | if ((((~b) >> 16) & 0xffff) != (b & 0xffff)) | |
3805 | { | |
3806 | s->mode = BADB; | |
3807 | z->msg = (char*)"invalid stored block lengths"; | |
3808 | r = Z_DATA_ERROR; | |
3809 | LEAVE | |
3810 | } | |
3811 | s->sub.left = (uInt)b & 0xffff; | |
3812 | b = k = 0; /* dump bits */ | |
3813 | Tracev((stderr, "inflate: stored length %u\n", s->sub.left)); | |
3814 | s->mode = s->sub.left ? STORED : (s->last ? DRY : TYPE); | |
3815 | break; | |
3816 | case STORED: | |
3817 | if (n == 0) | |
3818 | LEAVE | |
3819 | NEEDOUT | |
3820 | t = s->sub.left; | |
3821 | if (t > n) t = n; | |
3822 | if (t > m) t = m; | |
3823 | zmemcpy(q, p, t); | |
3824 | p += t; n -= t; | |
3825 | q += t; m -= t; | |
3826 | if ((s->sub.left -= t) != 0) | |
3827 | break; | |
3828 | Tracev((stderr, "inflate: stored end, %lu total out\n", | |
3829 | z->total_out + (q >= s->read ? q - s->read : | |
3830 | (s->end - s->read) + (q - s->window)))); | |
3831 | s->mode = s->last ? DRY : TYPE; | |
3832 | break; | |
3833 | case TABLE: | |
3834 | NEEDBITS(14) | |
3835 | s->sub.trees.table = t = (uInt)b & 0x3fff; | |
3836 | #ifndef PKZIP_BUG_WORKAROUND | |
3837 | if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29) | |
3838 | { | |
3839 | s->mode = BADB; | |
3840 | z->msg = (char*)"too many length or distance symbols"; | |
3841 | r = Z_DATA_ERROR; | |
3842 | LEAVE | |
3843 | } | |
3844 | #endif | |
3845 | t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f); | |
3846 | if (t < 19) | |
3847 | t = 19; | |
3848 | if ((s->sub.trees.blens = (uIntf*)ZALLOC(z, t, sizeof(uInt))) == Z_NULL) | |
3849 | { | |
3850 | r = Z_MEM_ERROR; | |
3851 | LEAVE | |
3852 | } | |
3853 | DUMPBITS(14) | |
3854 | s->sub.trees.index = 0; | |
3855 | Tracev((stderr, "inflate: table sizes ok\n")); | |
3856 | s->mode = BTREE; | |
3857 | case BTREE: | |
3858 | while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10)) | |
3859 | { | |
3860 | NEEDBITS(3) | |
3861 | s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7; | |
3862 | DUMPBITS(3) | |
3863 | } | |
3864 | while (s->sub.trees.index < 19) | |
3865 | s->sub.trees.blens[border[s->sub.trees.index++]] = 0; | |
3866 | s->sub.trees.bb = 7; | |
3867 | t = inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb, | |
3868 | &s->sub.trees.tb, z); | |
3869 | if (t != Z_OK) | |
3870 | { | |
3871 | ZFREE(z, s->sub.trees.blens); | |
3872 | r = t; | |
3873 | if (r == Z_DATA_ERROR) | |
3874 | s->mode = BADB; | |
3875 | LEAVE | |
3876 | } | |
3877 | s->sub.trees.index = 0; | |
3878 | Tracev((stderr, "inflate: bits tree ok\n")); | |
3879 | s->mode = DTREE; | |
3880 | case DTREE: | |
3881 | while (t = s->sub.trees.table, | |
3882 | s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f)) | |
3883 | { | |
3884 | inflate_huft *h; | |
3885 | uInt i, j, c; | |
3886 | ||
3887 | t = s->sub.trees.bb; | |
3888 | NEEDBITS(t) | |
3889 | h = s->sub.trees.tb + ((uInt)b & inflate_mask[t]); | |
3890 | t = h->word.what.Bits; | |
3891 | c = h->more.Base; | |
3892 | if (c < 16) | |
3893 | { | |
3894 | DUMPBITS(t) | |
3895 | s->sub.trees.blens[s->sub.trees.index++] = c; | |
3896 | } | |
3897 | else /* c == 16..18 */ | |
3898 | { | |
3899 | i = c == 18 ? 7 : c - 14; | |
3900 | j = c == 18 ? 11 : 3; | |
3901 | NEEDBITS(t + i) | |
3902 | DUMPBITS(t) | |
3903 | j += (uInt)b & inflate_mask[i]; | |
3904 | DUMPBITS(i) | |
3905 | i = s->sub.trees.index; | |
3906 | t = s->sub.trees.table; | |
3907 | if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) || | |
3908 | (c == 16 && i < 1)) | |
3909 | { | |
3910 | inflate_trees_free(s->sub.trees.tb, z); | |
3911 | ZFREE(z, s->sub.trees.blens); | |
3912 | s->mode = BADB; | |
3913 | z->msg = (char*)"invalid bit length repeat"; | |
3914 | r = Z_DATA_ERROR; | |
3915 | LEAVE | |
3916 | } | |
3917 | c = c == 16 ? s->sub.trees.blens[i - 1] : 0; | |
3918 | do { | |
3919 | s->sub.trees.blens[i++] = c; | |
3920 | } while (--j); | |
3921 | s->sub.trees.index = i; | |
3922 | } | |
3923 | } | |
3924 | inflate_trees_free(s->sub.trees.tb, z); | |
3925 | s->sub.trees.tb = Z_NULL; | |
3926 | { | |
3927 | uInt bl, bd; | |
3928 | inflate_huft *tl, *td; | |
3929 | inflate_codes_statef *c; | |
3930 | ||
3931 | bl = 9; /* must be <= 9 for lookahead assumptions */ | |
3932 | bd = 6; /* must be <= 9 for lookahead assumptions */ | |
3933 | t = s->sub.trees.table; | |
3934 | #ifdef DEBUG_ZLIB | |
3935 | inflate_hufts = 0; | |
3936 | #endif | |
3937 | t = inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f), | |
3938 | s->sub.trees.blens, &bl, &bd, &tl, &td, z); | |
3939 | ZFREE(z, s->sub.trees.blens); | |
3940 | if (t != Z_OK) | |
3941 | { | |
3942 | if (t == (uInt)Z_DATA_ERROR) | |
3943 | s->mode = BADB; | |
3944 | r = t; | |
3945 | LEAVE | |
3946 | } | |
3947 | Tracev((stderr, "inflate: trees ok, %d * %d bytes used\n", | |
3948 | inflate_hufts, sizeof(inflate_huft))); | |
3949 | if ((c = inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL) | |
3950 | { | |
3951 | inflate_trees_free(td, z); | |
3952 | inflate_trees_free(tl, z); | |
3953 | r = Z_MEM_ERROR; | |
3954 | LEAVE | |
3955 | } | |
3956 | s->sub.decode.codes = c; | |
3957 | s->sub.decode.tl = tl; | |
3958 | s->sub.decode.td = td; | |
3959 | } | |
3960 | s->mode = CODES; | |
3961 | case CODES: | |
3962 | UPDATE | |
3963 | if ((r = inflate_codes(s, z, r)) != Z_STREAM_END) | |
3964 | return inflate_flush(s, z, r); | |
3965 | r = Z_OK; | |
3966 | inflate_codes_free(s->sub.decode.codes, z); | |
3967 | inflate_trees_free(s->sub.decode.td, z); | |
3968 | inflate_trees_free(s->sub.decode.tl, z); | |
3969 | LOAD | |
3970 | Tracev((stderr, "inflate: codes end, %lu total out\n", | |
3971 | z->total_out + (q >= s->read ? q - s->read : | |
3972 | (s->end - s->read) + (q - s->window)))); | |
3973 | if (!s->last) | |
3974 | { | |
3975 | s->mode = TYPE; | |
3976 | break; | |
3977 | } | |
3978 | if (k > 7) /* return unused byte, if any */ | |
3979 | { | |
3980 | Assert(k < 16, "inflate_codes grabbed too many bytes") | |
3981 | k -= 8; | |
3982 | n++; | |
3983 | p--; /* can always return one */ | |
3984 | } | |
3985 | s->mode = DRY; | |
3986 | case DRY: | |
3987 | FLUSH | |
3988 | if (s->read != s->write) | |
3989 | LEAVE | |
3990 | s->mode = DONEB; | |
3991 | case DONEB: | |
3992 | r = Z_STREAM_END; | |
3993 | LEAVE | |
3994 | case BADB: | |
3995 | r = Z_DATA_ERROR; | |
3996 | LEAVE | |
3997 | default: | |
3998 | r = Z_STREAM_ERROR; | |
3999 | LEAVE | |
4000 | } | |
4001 | } | |
4002 | ||
4003 | ||
4004 | int inflate_blocks_free(s, z, c) | |
4005 | inflate_blocks_statef *s; | |
4006 | z_streamp z; | |
4007 | uLongf *c; | |
4008 | { | |
4009 | inflate_blocks_reset(s, z, c); | |
4010 | ZFREE(z, s->window); | |
4011 | ZFREE(z, s); | |
4012 | Trace((stderr, "inflate: blocks freed\n")); | |
4013 | return Z_OK; | |
4014 | } | |
4015 | ||
4016 | ||
4017 | void inflate_set_dictionary(s, d, n) | |
4018 | inflate_blocks_statef *s; | |
4019 | const Bytef *d; | |
4020 | uInt n; | |
4021 | { | |
4022 | zmemcpy((charf *)s->window, d, n); | |
4023 | s->read = s->write = s->window + n; | |
4024 | } | |
4025 | ||
4026 | /* | |
4027 | * This subroutine adds the data at next_in/avail_in to the output history | |
4028 | * without performing any output. The output buffer must be "caught up"; | |
4029 | * i.e. no pending output (hence s->read equals s->write), and the state must | |
4030 | * be BLOCKS (i.e. we should be willing to see the start of a series of | |
4031 | * BLOCKS). On exit, the output will also be caught up, and the checksum | |
4032 | * will have been updated if need be. | |
4033 | */ | |
4034 | int inflate_addhistory(s, z) | |
4035 | inflate_blocks_statef *s; | |
4036 | z_stream *z; | |
4037 | { | |
4038 | uLong b; /* bit buffer */ /* NOT USED HERE */ | |
4039 | uInt k; /* bits in bit buffer */ /* NOT USED HERE */ | |
4040 | uInt t; /* temporary storage */ | |
4041 | Bytef *p; /* input data pointer */ | |
4042 | uInt n; /* bytes available there */ | |
4043 | Bytef *q; /* output window write pointer */ | |
4044 | uInt m; /* bytes to end of window or read pointer */ | |
4045 | ||
4046 | if (s->read != s->write) | |
4047 | return Z_STREAM_ERROR; | |
4048 | if (s->mode != TYPE) | |
4049 | return Z_DATA_ERROR; | |
4050 | ||
4051 | /* we're ready to rock */ | |
4052 | LOAD | |
4053 | /* while there is input ready, copy to output buffer, moving | |
4054 | * pointers as needed. | |
4055 | */ | |
4056 | while (n) { | |
4057 | t = n; /* how many to do */ | |
4058 | /* is there room until end of buffer? */ | |
4059 | if (t > m) t = m; | |
4060 | /* update check information */ | |
4061 | if (s->checkfn != Z_NULL) | |
4062 | s->check = (*s->checkfn)(s->check, q, t); | |
4063 | zmemcpy(q, p, t); | |
4064 | q += t; | |
4065 | p += t; | |
4066 | n -= t; | |
4067 | z->total_out += t; | |
4068 | s->read = q; /* drag read pointer forward */ | |
4069 | /* WWRAP */ /* expand WWRAP macro by hand to handle s->read */ | |
4070 | if (q == s->end) { | |
4071 | s->read = q = s->window; | |
4072 | m = WAVAIL; | |
4073 | } | |
4074 | } | |
4075 | UPDATE | |
4076 | return Z_OK; | |
4077 | } | |
4078 | ||
4079 | ||
4080 | /* | |
4081 | * At the end of a Deflate-compressed PPP packet, we expect to have seen | |
4082 | * a `stored' block type value but not the (zero) length bytes. | |
4083 | */ | |
4084 | int inflate_packet_flush(s) | |
4085 | inflate_blocks_statef *s; | |
4086 | { | |
4087 | if (s->mode != LENS) | |
4088 | return Z_DATA_ERROR; | |
4089 | s->mode = TYPE; | |
4090 | return Z_OK; | |
4091 | } | |
4092 | /* --- infblock.c */ | |
4093 | ||
4094 | /* +++ inftrees.c */ | |
4095 | /* inftrees.c -- generate Huffman trees for efficient decoding | |
4096 | * Copyright (C) 1995-1996 Mark Adler | |
4097 | * For conditions of distribution and use, see copyright notice in zlib.h | |
4098 | */ | |
4099 | ||
4100 | /* #include "zutil.h" */ | |
4101 | /* #include "inftrees.h" */ | |
4102 | ||
4103 | char inflate_copyright[] = " inflate 1.0.4 Copyright 1995-1996 Mark Adler "; | |
4104 | /* | |
4105 | If you use the zlib library in a product, an acknowledgment is welcome | |
4106 | in the documentation of your product. If for some reason you cannot | |
4107 | include such an acknowledgment, I would appreciate that you keep this | |
4108 | copyright string in the executable of your product. | |
4109 | */ | |
4110 | ||
4111 | #ifndef NO_DUMMY_DECL | |
4112 | struct internal_state {int dummy;}; /* for buggy compilers */ | |
4113 | #endif | |
4114 | ||
4115 | /* simplify the use of the inflate_huft type with some defines */ | |
4116 | #define base more.Base | |
4117 | #define next more.Next | |
4118 | #define exop word.what.Exop | |
4119 | #define bits word.what.Bits | |
4120 | ||
4121 | ||
4122 | local int huft_build OF(( | |
4123 | uIntf *, /* code lengths in bits */ | |
4124 | uInt, /* number of codes */ | |
4125 | uInt, /* number of "simple" codes */ | |
4126 | const uIntf *, /* list of base values for non-simple codes */ | |
4127 | const uIntf *, /* list of extra bits for non-simple codes */ | |
4128 | inflate_huft * FAR*,/* result: starting table */ | |
4129 | uIntf *, /* maximum lookup bits (returns actual) */ | |
4130 | z_streamp )); /* for zalloc function */ | |
4131 | ||
4132 | local voidpf falloc OF(( | |
4133 | voidpf, /* opaque pointer (not used) */ | |
4134 | uInt, /* number of items */ | |
4135 | uInt)); /* size of item */ | |
4136 | ||
4137 | /* Tables for deflate from PKZIP's appnote.txt. */ | |
4138 | local const uInt cplens[31] = { /* Copy lengths for literal codes 257..285 */ | |
4139 | 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, | |
4140 | 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; | |
4141 | /* see note #13 above about 258 */ | |
4142 | local const uInt cplext[31] = { /* Extra bits for literal codes 257..285 */ | |
4143 | 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, | |
4144 | 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112}; /* 112==invalid */ | |
4145 | local const uInt cpdist[30] = { /* Copy offsets for distance codes 0..29 */ | |
4146 | 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, | |
4147 | 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, | |
4148 | 8193, 12289, 16385, 24577}; | |
4149 | local const uInt cpdext[30] = { /* Extra bits for distance codes */ | |
4150 | 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, | |
4151 | 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, | |
4152 | 12, 12, 13, 13}; | |
4153 | ||
4154 | /* | |
4155 | Huffman code decoding is performed using a multi-level table lookup. | |
4156 | The fastest way to decode is to simply build a lookup table whose | |
4157 | size is determined by the longest code. However, the time it takes | |
4158 | to build this table can also be a factor if the data being decoded | |
4159 | is not very long. The most common codes are necessarily the | |
4160 | shortest codes, so those codes dominate the decoding time, and hence | |
4161 | the speed. The idea is that you can have a shorter table that decodes the | |
4162 | shorter, more probable codes, and then point to subsidiary tables for | |
4163 | the longer codes. The time it costs to decode the longer codes is | |
4164 | then traded against the time it takes to make longer tables. | |
4165 | ||
4166 | The results of this trade-off are in the variables lbits and dbits | |
4167 | below. lbits is the number of bits the first level table for literal/ | |
4168 | length codes can decode in one step, and dbits is the same thing for | |
4169 | the distance codes. Subsequent tables are also less than or equal to | |
4170 | those sizes. These values may be adjusted either when all of the | |
4171 | codes are shorter than that, in which case the longest code length in | |
4172 | bits is used, or when the shortest code is *longer* than the requested | |
4173 | table size, in which case the length of the shortest code in bits is | |
4174 | used. | |
4175 | ||
4176 | There are two different values for the two tables, since they code a | |
4177 | different number of possibilities each. The literal/length table | |
4178 | codes 286 possible values, or in a flat code, a little over eight | |
4179 | bits. The distance table codes 30 possible values, or a little less | |
4180 | than five bits, flat. The optimum values for speed end up being | |
4181 | about one bit more than those, so lbits is 8+1 and dbits is 5+1. | |
4182 | The optimum values may differ though from machine to machine, and | |
4183 | possibly even between compilers. Your mileage may vary. | |
4184 | */ | |
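/*
 * Worked example: with lbits == 9, any literal/length code of 9 bits or
 * fewer resolves with a single root-table lookup. A 12-bit code first
 * indexes the root table with its initial 9 bits; that entry's Exop marks
 * an indirection and its Next pointer selects a 3-bit subtable indexed by
 * the remaining bits.
 */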
4185 | ||
4186 | ||
4187 | /* If BMAX needs to be larger than 16, then h and x[] should be uLong. */ | |
4188 | #define BMAX 15 /* maximum bit length of any code */ | |
4189 | #define N_MAX 288 /* maximum number of codes in any set */ | |
4190 | ||
4191 | #ifdef DEBUG_ZLIB | |
4192 | uInt inflate_hufts; | |
4193 | #endif | |
4194 | ||
4195 | local int huft_build(b, n, s, d, e, t, m, zs) | |
4196 | uIntf *b; /* code lengths in bits (all assumed <= BMAX) */ | |
4197 | uInt n; /* number of codes (assumed <= N_MAX) */ | |
4198 | uInt s; /* number of simple-valued codes (0..s-1) */ | |
4199 | const uIntf *d; /* list of base values for non-simple codes */ | |
4200 | const uIntf *e; /* list of extra bits for non-simple codes */ | |
4201 | inflate_huft * FAR *t; /* result: starting table */ | |
4202 | uIntf *m; /* maximum lookup bits, returns actual */ | |
4203 | z_streamp zs; /* for zalloc function */ | |
4204 | /* Given a list of code lengths and a maximum table size, make a set of | |
4205 | tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR | |
4206 | if the given code set is incomplete (the tables are still built in this | |
4207 | case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of | |
4208 | lengths), or Z_MEM_ERROR if not enough memory. */ | |
4209 | { | |
4210 | ||
4211 | uInt a; /* counter for codes of length k */ | |
4212 | uInt c[BMAX+1]; /* bit length count table */ | |
4213 | uInt f; /* i repeats in table every f entries */ | |
4214 | int g; /* maximum code length */ | |
4215 | int h; /* table level */ | |
4216 | register uInt i; /* counter, current code */ | |
4217 | register uInt j; /* counter */ | |
4218 | register int k; /* number of bits in current code */ | |
4219 | int l; /* bits per table (returned in m) */ | |
4220 | register uIntf *p; /* pointer into c[], b[], or v[] */ | |
4221 | inflate_huft *q; /* points to current table */ | |
4222 | struct inflate_huft_s r; /* table entry for structure assignment */ | |
4223 | inflate_huft *u[BMAX]; /* table stack */ | |
4224 | uInt v[N_MAX]; /* values in order of bit length */ | |
4225 | register int w; /* bits before this table == (l * h) */ | |
4226 | uInt x[BMAX+1]; /* bit offsets, then code stack */ | |
4227 | uIntf *xp; /* pointer into x */ | |
4228 | int y; /* number of dummy codes added */ | |
4229 | uInt z; /* number of entries in current table */ | |
4230 | ||
4231 | ||
4232 | /* Generate counts for each bit length */ | |
4233 | p = c; | |
4234 | #define C0 *p++ = 0; | |
4235 | #define C2 C0 C0 C0 C0 | |
4236 | #define C4 C2 C2 C2 C2 | |
4237 | C4 /* clear c[]--assume BMAX+1 is 16 */ | |
4238 | p = b; i = n; | |
4239 | do { | |
4240 | c[*p++]++; /* assume all entries <= BMAX */ | |
4241 | } while (--i); | |
4242 | if (c[0] == n) /* null input--all zero length codes */ | |
4243 | { | |
4244 | *t = (inflate_huft *)Z_NULL; | |
4245 | *m = 0; | |
4246 | return Z_OK; | |
4247 | } | |
4248 | ||
4249 | ||
4250 | /* Find minimum and maximum length, bound *m by those */ | |
4251 | l = *m; | |
4252 | for (j = 1; j <= BMAX; j++) | |
4253 | if (c[j]) | |
4254 | break; | |
4255 | k = j; /* minimum code length */ | |
4256 | if ((uInt)l < j) | |
4257 | l = j; | |
4258 | for (i = BMAX; i; i--) | |
4259 | if (c[i]) | |
4260 | break; | |
4261 | g = i; /* maximum code length */ | |
4262 | if ((uInt)l > i) | |
4263 | l = i; | |
4264 | *m = l; | |
4265 | ||
4266 | ||
4267 | /* Adjust last length count to fill out codes, if needed */ | |
4268 | for (y = 1 << j; j < i; j++, y <<= 1) | |
4269 | if ((y -= c[j]) < 0) | |
4270 | return Z_DATA_ERROR; | |
4271 | if ((y -= c[i]) < 0) | |
4272 | return Z_DATA_ERROR; | |
4273 | c[i] += y; | |
4274 | ||
4275 | ||
4276 | /* Generate starting offsets into the value table for each length */ | |
4277 | x[1] = j = 0; | |
4278 | p = c + 1; xp = x + 2; | |
4279 | while (--i) { /* note that i == g from above */ | |
4280 | *xp++ = (j += *p++); | |
4281 | } | |
4282 | ||
4283 | ||
4284 | /* Make a table of values in order of bit lengths */ | |
4285 | p = b; i = 0; | |
4286 | do { | |
4287 | if ((j = *p++) != 0) | |
4288 | v[x[j]++] = i; | |
4289 | } while (++i < n); | |
4290 | n = x[g]; /* set n to length of v */ | |
4291 | ||
4292 | ||
4293 | /* Generate the Huffman codes and for each, make the table entries */ | |
4294 | x[0] = i = 0; /* first Huffman code is zero */ | |
4295 | p = v; /* grab values in bit order */ | |
4296 | h = -1; /* no tables yet--level -1 */ | |
4297 | w = -l; /* bits decoded == (l * h) */ | |
4298 | u[0] = (inflate_huft *)Z_NULL; /* just to keep compilers happy */ | |
4299 | q = (inflate_huft *)Z_NULL; /* ditto */ | |
4300 | z = 0; /* ditto */ | |
4301 | ||
4302 | /* go through the bit lengths (k already is bits in shortest code) */ | |
4303 | for (; k <= g; k++) | |
4304 | { | |
4305 | a = c[k]; | |
4306 | while (a--) | |
4307 | { | |
4308 | /* here i is the Huffman code of length k bits for value *p */ | |
4309 | /* make tables up to required level */ | |
4310 | while (k > w + l) | |
4311 | { | |
4312 | h++; | |
4313 | w += l; /* previous table always l bits */ | |
4314 | ||
4315 | /* compute minimum size table less than or equal to l bits */ | |
4316 | z = g - w; | |
4317 | z = z > (uInt)l ? l : z; /* table size upper limit */ | |
4318 | if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */ | |
4319 | { /* too few codes for k-w bit table */ | |
4320 | f -= a + 1; /* deduct codes from patterns left */ | |
4321 | xp = c + k; | |
4322 | if (j < z) | |
4323 | while (++j < z) /* try smaller tables up to z bits */ | |
4324 | { | |
4325 | if ((f <<= 1) <= *++xp) | |
4326 | break; /* enough codes to use up j bits */ | |
4327 | f -= *xp; /* else deduct codes from patterns */ | |
4328 | } | |
4329 | } | |
4330 | z = 1 << j; /* table entries for j-bit table */ | |
4331 | ||
4332 | /* allocate and link in new table */ | |
4333 | if ((q = (inflate_huft *)ZALLOC | |
4334 | (zs,z + 1,sizeof(inflate_huft))) == Z_NULL) | |
4335 | { | |
4336 | if (h) | |
4337 | inflate_trees_free(u[0], zs); | |
4338 | return Z_MEM_ERROR; /* not enough memory */ | |
4339 | } | |
4340 | #ifdef DEBUG_ZLIB | |
4341 | inflate_hufts += z + 1; | |
4342 | #endif | |
4343 | *t = q + 1; /* link to list for huft_free() */ | |
4344 | *(t = &(q->next)) = Z_NULL; | |
4345 | u[h] = ++q; /* table starts after link */ | |
4346 | ||
4347 | /* connect to last table, if there is one */ | |
4348 | if (h) | |
4349 | { | |
4350 | x[h] = i; /* save pattern for backing up */ | |
4351 | r.bits = (Byte)l; /* bits to dump before this table */ | |
4352 | r.exop = (Byte)j; /* bits in this table */ | |
4353 | r.next = q; /* pointer to this table */ | |
4354 | j = i >> (w - l); /* (get around Turbo C bug) */ | |
4355 | u[h-1][j] = r; /* connect to last table */ | |
4356 | } | |
4357 | } | |
4358 | ||
4359 | /* set up table entry in r */ | |
4360 | r.bits = (Byte)(k - w); | |
4361 | if (p >= v + n) | |
4362 | r.exop = 128 + 64; /* out of values--invalid code */ | |
4363 | else if (*p < s) | |
4364 | { | |
4365 | r.exop = (Byte)(*p < 256 ? 0 : 32 + 64); /* 256 is end-of-block */ | |
4366 | r.base = *p++; /* simple code is just the value */ | |
4367 | } | |
4368 | else | |
4369 | { | |
4370 | r.exop = (Byte)(e[*p - s] + 16 + 64); /* non-simple--look up in lists */ | |
4371 | r.base = d[*p++ - s]; | |
4372 | } | |
4373 | ||
4374 | /* fill code-like entries with r */ | |
4375 | f = 1 << (k - w); | |
4376 | for (j = i >> w; j < z; j += f) | |
4377 | q[j] = r; | |
4378 | ||
4379 | /* backwards increment the k-bit code i */ | |
4380 | for (j = 1 << (k - 1); i & j; j >>= 1) | |
4381 | i ^= j; | |
4382 | i ^= j; | |
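| /* (i holds the code with its first-received bit in the low position, | |
| so the increment carries from the top bit downward: for k == 3 the | |
| codes enumerate as 000, 100, 010, 110, 001, 101, 011, 111.) */ | |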
4383 | ||
4384 | /* backup over finished tables */ | |
4385 | while ((i & ((1 << w) - 1)) != x[h]) | |
4386 | { | |
4387 | h--; /* don't need to update q */ | |
4388 | w -= l; | |
4389 | } | |
4390 | } | |
4391 | } | |
4392 | ||
4393 | ||
4394 | /* Return Z_BUF_ERROR if we were given an incomplete table */ | |
4395 | return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK; | |
4396 | } | |
4397 | ||
4398 | ||
4399 | int inflate_trees_bits(c, bb, tb, z) | |
4400 | uIntf *c; /* 19 code lengths */ | |
4401 | uIntf *bb; /* bits tree desired/actual depth */ | |
4402 | inflate_huft * FAR *tb; /* bits tree result */ | |
4403 | z_streamp z; /* for zfree function */ | |
4404 | { | |
4405 | int r; | |
4406 | ||
4407 | r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL, tb, bb, z); | |
4408 | if (r == Z_DATA_ERROR) | |
4409 | z->msg = (char*)"oversubscribed dynamic bit lengths tree"; | |
4410 | else if (r == Z_BUF_ERROR || *bb == 0) | |
4411 | { | |
4412 | inflate_trees_free(*tb, z); | |
4413 | z->msg = (char*)"incomplete dynamic bit lengths tree"; | |
4414 | r = Z_DATA_ERROR; | |
4415 | } | |
4416 | return r; | |
4417 | } | |
4418 | ||
4419 | ||
4420 | int inflate_trees_dynamic(nl, nd, c, bl, bd, tl, td, z) | |
4421 | uInt nl; /* number of literal/length codes */ | |
4422 | uInt nd; /* number of distance codes */ | |
4423 | uIntf *c; /* that many (total) code lengths */ | |
4424 | uIntf *bl; /* literal desired/actual bit depth */ | |
4425 | uIntf *bd; /* distance desired/actual bit depth */ | |
4426 | inflate_huft * FAR *tl; /* literal/length tree result */ | |
4427 | inflate_huft * FAR *td; /* distance tree result */ | |
4428 | z_streamp z; /* for zfree function */ | |
4429 | { | |
4430 | int r; | |
4431 | ||
4432 | /* build literal/length tree */ | |
4433 | r = huft_build(c, nl, 257, cplens, cplext, tl, bl, z); | |
4434 | if (r != Z_OK || *bl == 0) | |
4435 | { | |
4436 | if (r == Z_DATA_ERROR) | |
4437 | z->msg = (char*)"oversubscribed literal/length tree"; | |
4438 | else if (r != Z_MEM_ERROR) | |
4439 | { | |
4440 | inflate_trees_free(*tl, z); | |
4441 | z->msg = (char*)"incomplete literal/length tree"; | |
4442 | r = Z_DATA_ERROR; | |
4443 | } | |
4444 | return r; | |
4445 | } | |
4446 | ||
4447 | /* build distance tree */ | |
4448 | r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, z); | |
4449 | if (r != Z_OK || (*bd == 0 && nl > 257)) | |
4450 | { | |
4451 | if (r == Z_DATA_ERROR) | |
4452 | z->msg = (char*)"oversubscribed distance tree"; | |
4453 | else if (r == Z_BUF_ERROR) { | |
4454 | #ifdef PKZIP_BUG_WORKAROUND | |
4455 | r = Z_OK; | |
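| /* tolerate the incomplete distance trees emitted by some early | |
| versions of PKZIP */ | |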
4456 | } | |
4457 | #else | |
4458 | inflate_trees_free(*td, z); | |
4459 | z->msg = (char*)"incomplete distance tree"; | |
4460 | r = Z_DATA_ERROR; | |
4461 | } | |
4462 | else if (r != Z_MEM_ERROR) | |
4463 | { | |
4464 | z->msg = (char*)"empty distance tree with lengths"; | |
4465 | r = Z_DATA_ERROR; | |
4466 | } | |
4467 | inflate_trees_free(*tl, z); | |
4468 | return r; | |
4469 | #endif | |
4470 | } | |
4471 | ||
4472 | /* done */ | |
4473 | return Z_OK; | |
4474 | } | |
4475 | ||
4476 | ||
4477 | /* build fixed tables only once--keep them here */ | |
4478 | local int fixed_built = 0; | |
4479 | #define FIXEDH 530 /* number of hufts used by fixed tables */ | |
4480 | local inflate_huft fixed_mem[FIXEDH]; | |
4481 | local uInt fixed_bl; | |
4482 | local uInt fixed_bd; | |
4483 | local inflate_huft *fixed_tl; | |
4484 | local inflate_huft *fixed_td; | |
4485 | ||
4486 | ||
4487 | local voidpf falloc(q, n, s) | |
4488 | voidpf q; /* opaque pointer */ | |
4489 | uInt n; /* number of items */ | |
4490 | uInt s; /* size of item */ | |
4491 | { | |
4492 | Assert(s == sizeof(inflate_huft) && n <= *(intf *)q, | |
4493 | "inflate_trees falloc overflow"); | |
4494 | *(intf *)q -= n+s-s; /* s-s to avoid warning */ | |
4495 | return (voidpf)(fixed_mem + *(intf *)q); | |
4496 | } | |
4497 | ||
4498 | ||
4499 | int inflate_trees_fixed(bl, bd, tl, td) | |
4500 | uIntf *bl; /* literal desired/actual bit depth */ | |
4501 | uIntf *bd; /* distance desired/actual bit depth */ | |
4502 | inflate_huft * FAR *tl; /* literal/length tree result */ | |
4503 | inflate_huft * FAR *td; /* distance tree result */ | |
4504 | { | |
4505 | /* build fixed tables if not already (multiple overlapped executions ok) */ | |
4506 | if (!fixed_built) | |
4507 | { | |
4508 | int k; /* temporary variable */ | |
4509 | unsigned c[288]; /* length list for huft_build */ | |
4510 | z_stream z; /* for falloc function */ | |
4511 | int f = FIXEDH; /* number of hufts left in fixed_mem */ | |
4512 | ||
4513 | /* set up fake z_stream for memory routines */ | |
4514 | z.zalloc = falloc; | |
4515 | z.zfree = Z_NULL; | |
4516 | z.opaque = (voidpf)&f; | |
4517 | ||
4518 | /* literal table */ | |
4519 | for (k = 0; k < 144; k++) | |
4520 | c[k] = 8; | |
4521 | for (; k < 256; k++) | |
4522 | c[k] = 9; | |
4523 | for (; k < 280; k++) | |
4524 | c[k] = 7; | |
4525 | for (; k < 288; k++) | |
4526 | c[k] = 8; | |
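| /* (These are the fixed literal/length code lengths of the deflate | |
| format, RFC 1951 section 3.2.6: 8 bits for values 0-143 and | |
| 280-287, 9 bits for 144-255, 7 bits for 256-279.) */ | |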
4527 | fixed_bl = 7; | |
4528 | huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl, &z); | |
4529 | ||
4530 | /* distance table */ | |
4531 | for (k = 0; k < 30; k++) | |
4532 | c[k] = 5; | |
4533 | fixed_bd = 5; | |
4534 | huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd, &z); | |
4535 | ||
4536 | /* done */ | |
4537 | Assert(f == 0, "invalid build of fixed tables"); | |
4538 | fixed_built = 1; | |
4539 | } | |
4540 | *bl = fixed_bl; | |
4541 | *bd = fixed_bd; | |
4542 | *tl = fixed_tl; | |
4543 | *td = fixed_td; | |
4544 | return Z_OK; | |
4545 | } | |
4546 | ||
4547 | ||
4548 | int inflate_trees_free(t, z) | |
4549 | inflate_huft *t; /* table to free */ | |
4550 | z_streamp z; /* for zfree function */ | |
4551 | /* Free the malloc'ed tables built by huft_build(), which makes a linked | |
4552 | list of the tables it made, with the links in a dummy first entry of | |
4553 | each table. */ | |
4554 | { | |
4555 | register inflate_huft *p, *q, *r; | |
4556 | ||
4557 | /* Reverse linked list */ | |
4558 | p = Z_NULL; | |
4559 | q = t; | |
4560 | while (q != Z_NULL) | |
4561 | { | |
4562 | r = (q - 1)->next; | |
4563 | (q - 1)->next = p; | |
4564 | p = q; | |
4565 | q = r; | |
4566 | } | |
4567 | /* Go through linked list, freeing from the malloced (t[-1]) address. */ | |
4568 | while (p != Z_NULL) | |
4569 | { | |
4570 | q = (--p)->next; | |
4571 | ZFREE(z,p); | |
4572 | p = q; | |
4573 | } | |
4574 | return Z_OK; | |
4575 | } | |
4576 | /* --- inftrees.c */ | |
4577 | ||
4578 | /* +++ infcodes.c */ | |
4579 | /* infcodes.c -- process literals and length/distance pairs | |
4580 | * Copyright (C) 1995-1996 Mark Adler | |
4581 | * For conditions of distribution and use, see copyright notice in zlib.h | |
4582 | */ | |
4583 | ||
4584 | /* #include "zutil.h" */ | |
4585 | /* #include "inftrees.h" */ | |
4586 | /* #include "infblock.h" */ | |
4587 | /* #include "infcodes.h" */ | |
4588 | /* #include "infutil.h" */ | |
4589 | ||
4590 | /* +++ inffast.h */ | |
4591 | /* inffast.h -- header to use inffast.c | |
4592 | * Copyright (C) 1995-1996 Mark Adler | |
4593 | * For conditions of distribution and use, see copyright notice in zlib.h | |
4594 | */ | |
4595 | ||
4596 | /* WARNING: this file should *not* be used by applications. It is | |
4597 | part of the implementation of the compression library and is | |
4598 | subject to change. Applications should only use zlib.h. | |
4599 | */ | |
4600 | ||
4601 | extern int inflate_fast OF(( | |
4602 | uInt, | |
4603 | uInt, | |
4604 | inflate_huft *, | |
4605 | inflate_huft *, | |
4606 | inflate_blocks_statef *, | |
4607 | z_streamp )); | |
4608 | /* --- inffast.h */ | |
4609 | ||
4610 | /* simplify the use of the inflate_huft type with some defines */ | |
4611 | #define base more.Base | |
4612 | #define next more.Next | |
4613 | #define exop word.what.Exop | |
4614 | #define bits word.what.Bits | |
4615 | ||
4616 | /* inflate codes private state */ | |
4617 | struct inflate_codes_state { | |
4618 | ||
4619 | /* mode */ | |
4620 | enum { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */ | |
4621 | START, /* x: set up for LEN */ | |
4622 | LEN, /* i: get length/literal/eob next */ | |
4623 | LENEXT, /* i: getting length extra (have base) */ | |
4624 | DIST, /* i: get distance next */ | |
4625 | DISTEXT, /* i: getting distance extra */ | |
4626 | COPY, /* o: copying bytes in window, waiting for space */ | |
4627 | LIT, /* o: got literal, waiting for output space */ | |
4628 | WASH, /* o: got eob, possibly still output waiting */ | |
4629 | END, /* x: got eob and all data flushed */ | |
4630 | BADCODE} /* x: got error */ | |
4631 | mode; /* current inflate_codes mode */ | |
4632 | ||
4633 | /* mode dependent information */ | |
4634 | uInt len; | |
4635 | union { | |
4636 | struct { | |
4637 | inflate_huft *tree; /* pointer into tree */ | |
4638 | uInt need; /* bits needed */ | |
4639 | } code; /* if LEN or DIST, where in tree */ | |
4640 | uInt lit; /* if LIT, literal */ | |
4641 | struct { | |
4642 | uInt get; /* bits to get for extra */ | |
4643 | uInt dist; /* distance back to copy from */ | |
4644 | } copy; /* if EXT or COPY, where and how much */ | |
4645 | } sub; /* submode */ | |
4646 | ||
4647 | /* mode independent information */ | |
4648 | Byte lbits; /* ltree bits decoded per branch */ | |
4649 | Byte dbits; /* dtree bits decoded per branch */ | |
4650 | inflate_huft *ltree; /* literal/length/eob tree */ | |
4651 | inflate_huft *dtree; /* distance tree */ | |
4652 | ||
4653 | }; | |
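| /* Typical mode transitions, as driven by inflate_codes() below: | |
| START -> LEN -> LIT -> START (literal) | |
| START -> LEN -> LENEXT -> DIST -> DISTEXT -> COPY -> START (match) | |
| START -> LEN -> WASH -> END (end of block) | |
| with any decoding error entering BADCODE. */ | |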
4654 | ||
4655 | ||
4656 | inflate_codes_statef *inflate_codes_new(bl, bd, tl, td, z) | |
4657 | uInt bl, bd; | |
4658 | inflate_huft *tl; | |
4659 | inflate_huft *td; /* need separate declaration for Borland C++ */ | |
4660 | z_streamp z; | |
4661 | { | |
4662 | inflate_codes_statef *c; | |
4663 | ||
4664 | if ((c = (inflate_codes_statef *) | |
4665 | ZALLOC(z,1,sizeof(struct inflate_codes_state))) != Z_NULL) | |
4666 | { | |
4667 | c->mode = START; | |
4668 | c->lbits = (Byte)bl; | |
4669 | c->dbits = (Byte)bd; | |
4670 | c->ltree = tl; | |
4671 | c->dtree = td; | |
4672 | Tracev((stderr, "inflate: codes new\n")); | |
4673 | } | |
4674 | return c; | |
4675 | } | |
4676 | ||
4677 | ||
4678 | int inflate_codes(s, z, r) | |
4679 | inflate_blocks_statef *s; | |
4680 | z_streamp z; | |
4681 | int r; | |
4682 | { | |
4683 | uInt j; /* temporary storage */ | |
4684 | inflate_huft *t; /* temporary pointer */ | |
4685 | uInt e; /* extra bits or operation */ | |
4686 | uLong b; /* bit buffer */ | |
4687 | uInt k; /* bits in bit buffer */ | |
4688 | Bytef *p; /* input data pointer */ | |
4689 | uInt n; /* bytes available there */ | |
4690 | Bytef *q; /* output window write pointer */ | |
4691 | uInt m; /* bytes to end of window or read pointer */ | |
4692 | Bytef *f; /* pointer to copy strings from */ | |
4693 | inflate_codes_statef *c = s->sub.decode.codes; /* codes state */ | |
4694 | ||
4695 | /* copy input/output information to locals (UPDATE macro restores) */ | |
4696 | LOAD | |
4697 | ||
4698 | /* process input and output based on current state */ | |
4699 | while (1) switch (c->mode) | |
4700 | { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */ | |
4701 | case START: /* x: set up for LEN */ | |
4702 | #ifndef SLOW | |
4703 | if (m >= 258 && n >= 10) | |
4704 | { | |
4705 | UPDATE | |
4706 | r = inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z); | |
4707 | LOAD | |
4708 | if (r != Z_OK) | |
4709 | { | |
4710 | c->mode = r == Z_STREAM_END ? WASH : BADCODE; | |
4711 | break; | |
4712 | } | |
4713 | } | |
4714 | #endif /* !SLOW */ | |
4715 | c->sub.code.need = c->lbits; | |
4716 | c->sub.code.tree = c->ltree; | |
4717 | c->mode = LEN; | |
4718 | case LEN: /* i: get length/literal/eob next */ | |
4719 | j = c->sub.code.need; | |
4720 | NEEDBITS(j) | |
4721 | t = c->sub.code.tree + ((uInt)b & inflate_mask[j]); | |
4722 | DUMPBITS(t->bits) | |
4723 | e = (uInt)(t->exop); | |
4724 | if (e == 0) /* literal */ | |
4725 | { | |
4726 | c->sub.lit = t->base; | |
4727 | Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ? | |
4728 | "inflate: literal '%c'\n" : | |
4729 | "inflate: literal 0x%02x\n", t->base)); | |
4730 | c->mode = LIT; | |
4731 | break; | |
4732 | } | |
4733 | if (e & 16) /* length */ | |
4734 | { | |
4735 | c->sub.copy.get = e & 15; | |
4736 | c->len = t->base; | |
4737 | c->mode = LENEXT; | |
4738 | break; | |
4739 | } | |
4740 | if ((e & 64) == 0) /* next table */ | |
4741 | { | |
4742 | c->sub.code.need = e; | |
4743 | c->sub.code.tree = t->next; | |
4744 | break; | |
4745 | } | |
4746 | if (e & 32) /* end of block */ | |
4747 | { | |
4748 | Tracevv((stderr, "inflate: end of block\n")); | |
4749 | c->mode = WASH; | |
4750 | break; | |
4751 | } | |
4752 | c->mode = BADCODE; /* invalid code */ | |
4753 | z->msg = (char*)"invalid literal/length code"; | |
4754 | r = Z_DATA_ERROR; | |
4755 | LEAVE | |
4756 | case LENEXT: /* i: getting length extra (have base) */ | |
4757 | j = c->sub.copy.get; | |
4758 | NEEDBITS(j) | |
4759 | c->len += (uInt)b & inflate_mask[j]; | |
4760 | DUMPBITS(j) | |
4761 | c->sub.code.need = c->dbits; | |
4762 | c->sub.code.tree = c->dtree; | |
4763 | Tracevv((stderr, "inflate: length %u\n", c->len)); | |
4764 | c->mode = DIST; | |
4765 | case DIST: /* i: get distance next */ | |
4766 | j = c->sub.code.need; | |
4767 | NEEDBITS(j) | |
4768 | t = c->sub.code.tree + ((uInt)b & inflate_mask[j]); | |
4769 | DUMPBITS(t->bits) | |
4770 | e = (uInt)(t->exop); | |
4771 | if (e & 16) /* distance */ | |
4772 | { | |
4773 | c->sub.copy.get = e & 15; | |
4774 | c->sub.copy.dist = t->base; | |
4775 | c->mode = DISTEXT; | |
4776 | break; | |
4777 | } | |
4778 | if ((e & 64) == 0) /* next table */ | |
4779 | { | |
4780 | c->sub.code.need = e; | |
4781 | c->sub.code.tree = t->next; | |
4782 | break; | |
4783 | } | |
4784 | c->mode = BADCODE; /* invalid code */ | |
4785 | z->msg = (char*)"invalid distance code"; | |
4786 | r = Z_DATA_ERROR; | |
4787 | LEAVE | |
4788 | case DISTEXT: /* i: getting distance extra */ | |
4789 | j = c->sub.copy.get; | |
4790 | NEEDBITS(j) | |
4791 | c->sub.copy.dist += (uInt)b & inflate_mask[j]; | |
4792 | DUMPBITS(j) | |
4793 | Tracevv((stderr, "inflate: distance %u\n", c->sub.copy.dist)); | |
4794 | c->mode = COPY; | |
4795 | case COPY: /* o: copying bytes in window, waiting for space */ | |
4796 | #ifndef __TURBOC__ /* Turbo C bug for following expression */ | |
4797 | f = (uInt)(q - s->window) < c->sub.copy.dist ? | |
4798 | s->end - (c->sub.copy.dist - (q - s->window)) : | |
4799 | q - c->sub.copy.dist; | |
4800 | #else | |
4801 | f = q - c->sub.copy.dist; | |
4802 | if ((uInt)(q - s->window) < c->sub.copy.dist) | |
4803 | f = s->end - (c->sub.copy.dist - (uInt)(q - s->window)); | |
4804 | #endif | |
4805 | while (c->len) | |
4806 | { | |
4807 | NEEDOUT | |
4808 | OUTBYTE(*f++) | |
4809 | if (f == s->end) | |
4810 | f = s->window; | |
4811 | c->len--; | |
4812 | } | |
4813 | c->mode = START; | |
4814 | break; | |
4815 | case LIT: /* o: got literal, waiting for output space */ | |
4816 | NEEDOUT | |
4817 | OUTBYTE(c->sub.lit) | |
4818 | c->mode = START; | |
4819 | break; | |
4820 | case WASH: /* o: got eob, possibly more output */ | |
4821 | FLUSH | |
4822 | if (s->read != s->write) | |
4823 | LEAVE | |
4824 | c->mode = END; | |
4825 | case END: | |
4826 | r = Z_STREAM_END; | |
4827 | LEAVE | |
4828 | case BADCODE: /* x: got error */ | |
4829 | r = Z_DATA_ERROR; | |
4830 | LEAVE | |
4831 | default: | |
4832 | r = Z_STREAM_ERROR; | |
4833 | LEAVE | |
4834 | } | |
4835 | } | |
4836 | ||
4837 | ||
4838 | void inflate_codes_free(c, z) | |
4839 | inflate_codes_statef *c; | |
4840 | z_streamp z; | |
4841 | { | |
4842 | ZFREE(z, c); | |
4843 | Tracev((stderr, "inflate: codes free\n")); | |
4844 | } | |
4845 | /* --- infcodes.c */ | |
4846 | ||
4847 | /* +++ infutil.c */ | |
4848 | /* inflate_util.c -- data and routines common to blocks and codes | |
4849 | * Copyright (C) 1995-1996 Mark Adler | |
4850 | * For conditions of distribution and use, see copyright notice in zlib.h | |
4851 | */ | |
4852 | ||
4853 | /* #include "zutil.h" */ | |
4854 | /* #include "infblock.h" */ | |
4855 | /* #include "inftrees.h" */ | |
4856 | /* #include "infcodes.h" */ | |
4857 | /* #include "infutil.h" */ | |
4858 | ||
4859 | #ifndef NO_DUMMY_DECL | |
4860 | struct inflate_codes_state {int dummy;}; /* for buggy compilers */ | |
4861 | #endif | |
4862 | ||
4863 | /* And'ing with mask[n] masks the lower n bits */ | |
4864 | uInt inflate_mask[17] = { | |
4865 | 0x0000, | |
4866 | 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff, | |
4867 | 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff | |
4868 | }; | |
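| /* e.g. inflate_mask[3] == 0x0007, so ((uInt)b & inflate_mask[3]) | |
| extracts the low three bits of the bit buffer b. */ | |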
4869 | ||
4870 | ||
4871 | /* copy as much as possible from the sliding window to the output area */ | |
4872 | int inflate_flush(s, z, r) | |
4873 | inflate_blocks_statef *s; | |
4874 | z_streamp z; | |
4875 | int r; | |
4876 | { | |
4877 | uInt n; | |
4878 | Bytef *p; | |
4879 | Bytef *q; | |
4880 | ||
4881 | /* local copies of source and destination pointers */ | |
4882 | p = z->next_out; | |
4883 | q = s->read; | |
4884 | ||
4885 | /* compute number of bytes to copy as far as end of window */ | |
4886 | n = (uInt)((q <= s->write ? s->write : s->end) - q); | |
4887 | if (n > z->avail_out) n = z->avail_out; | |
4888 | if (n && r == Z_BUF_ERROR) r = Z_OK; | |
4889 | ||
4890 | /* update counters */ | |
4891 | z->avail_out -= n; | |
4892 | z->total_out += n; | |
4893 | ||
4894 | /* update check information */ | |
4895 | if (s->checkfn != Z_NULL) | |
4896 | z->adler = s->check = (*s->checkfn)(s->check, q, n); | |
4897 | ||
4898 | /* copy as far as end of window */ | |
4899 | if (p != Z_NULL) { | |
4900 | zmemcpy(p, q, n); | |
4901 | p += n; | |
4902 | } | |
4903 | q += n; | |
4904 | ||
4905 | /* see if more to copy at beginning of window */ | |
4906 | if (q == s->end) | |
4907 | { | |
4908 | /* wrap pointers */ | |
4909 | q = s->window; | |
4910 | if (s->write == s->end) | |
4911 | s->write = s->window; | |
4912 | ||
4913 | /* compute bytes to copy */ | |
4914 | n = (uInt)(s->write - q); | |
4915 | if (n > z->avail_out) n = z->avail_out; | |
4916 | if (n && r == Z_BUF_ERROR) r = Z_OK; | |
4917 | ||
4918 | /* update counters */ | |
4919 | z->avail_out -= n; | |
4920 | z->total_out += n; | |
4921 | ||
4922 | /* update check information */ | |
4923 | if (s->checkfn != Z_NULL) | |
4924 | z->adler = s->check = (*s->checkfn)(s->check, q, n); | |
4925 | ||
4926 | /* copy */ | |
4927 | if (p != Z_NULL) { | |
4928 | zmemcpy(p, q, n); | |
4929 | p += n; | |
4930 | } | |
4931 | q += n; | |
4932 | } | |
4933 | ||
4934 | /* update pointers */ | |
4935 | z->next_out = p; | |
4936 | s->read = q; | |
4937 | ||
4938 | /* done */ | |
4939 | return r; | |
4940 | } | |
4941 | /* --- infutil.c */ | |
4942 | ||
4943 | /* +++ inffast.c */ | |
4944 | /* inffast.c -- process literals and length/distance pairs fast | |
4945 | * Copyright (C) 1995-1996 Mark Adler | |
4946 | * For conditions of distribution and use, see copyright notice in zlib.h | |
4947 | */ | |
4948 | ||
4949 | /* #include "zutil.h" */ | |
4950 | /* #include "inftrees.h" */ | |
4951 | /* #include "infblock.h" */ | |
4952 | /* #include "infcodes.h" */ | |
4953 | /* #include "infutil.h" */ | |
4954 | /* #include "inffast.h" */ | |
4955 | ||
4956 | #ifndef NO_DUMMY_DECL | |
4957 | struct inflate_codes_state {int dummy;}; /* for buggy compilers */ | |
4958 | #endif | |
4959 | ||
4960 | /* simplify the use of the inflate_huft type with some defines */ | |
4961 | #define base more.Base | |
4962 | #define next more.Next | |
4963 | #define exop word.what.Exop | |
4964 | #define bits word.what.Bits | |
4965 | ||
4966 | /* macros for bit input with no checking and for returning unused bytes */ | |
4967 | #define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}} | |
4968 | #define UNGRAB {n+=(c=k>>3);p-=c;k&=7;} | |
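| /* GRABBITS(j) tops up the bit buffer b until it holds at least j bits; | |
| UNGRAB gives the k>>3 whole unused bytes back to the input, keeping | |
| only the 0..7 leftover bits in the buffer. */ | |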
4969 | ||
4970 | /* Called with number of bytes left to write in window at least 258 | |
4971 | (the maximum string length) and number of input bytes available | |
4972 | at least ten. The ten bytes are six bytes for the longest length/ | |
4973 | distance pair plus four bytes for overloading the bit buffer. */ | |
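| /* (The six bytes cover the worst case of 20 bits grabbed for the | |
| length code plus its extra bits, 15 for the distance code and 13 | |
| for the distance extra bits -- 48 bits in all, matching the | |
| GRABBITS calls below.) */ | |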
4974 | ||
4975 | int inflate_fast(bl, bd, tl, td, s, z) | |
4976 | uInt bl, bd; | |
4977 | inflate_huft *tl; | |
4978 | inflate_huft *td; /* need separate declaration for Borland C++ */ | |
4979 | inflate_blocks_statef *s; | |
4980 | z_streamp z; | |
4981 | { | |
4982 | inflate_huft *t; /* temporary pointer */ | |
4983 | uInt e; /* extra bits or operation */ | |
4984 | uLong b; /* bit buffer */ | |
4985 | uInt k; /* bits in bit buffer */ | |
4986 | Bytef *p; /* input data pointer */ | |
4987 | uInt n; /* bytes available there */ | |
4988 | Bytef *q; /* output window write pointer */ | |
4989 | uInt m; /* bytes to end of window or read pointer */ | |
4990 | uInt ml; /* mask for literal/length tree */ | |
4991 | uInt md; /* mask for distance tree */ | |
4992 | uInt c; /* bytes to copy */ | |
4993 | uInt d; /* distance back to copy from */ | |
4994 | Bytef *r; /* copy source pointer */ | |
4995 | ||
4996 | /* load input, output, bit values */ | |
4997 | LOAD | |
4998 | ||
4999 | /* initialize masks */ | |
5000 | ml = inflate_mask[bl]; | |
5001 | md = inflate_mask[bd]; | |
5002 | ||
5003 | /* do until not enough input or output space for fast loop */ | |
5004 | do { /* assume called with m >= 258 && n >= 10 */ | |
5005 | /* get literal/length code */ | |
5006 | GRABBITS(20) /* max bits for literal/length code */ | |
5007 | if ((e = (t = tl + ((uInt)b & ml))->exop) == 0) | |
5008 | { | |
5009 | DUMPBITS(t->bits) | |
5010 | Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ? | |
5011 | "inflate: * literal '%c'\n" : | |
5012 | "inflate: * literal 0x%02x\n", t->base)); | |
5013 | *q++ = (Byte)t->base; | |
5014 | m--; | |
5015 | continue; | |
5016 | } | |
5017 | do { | |
5018 | DUMPBITS(t->bits) | |
5019 | if (e & 16) | |
5020 | { | |
5021 | /* get extra bits for length */ | |
5022 | e &= 15; | |
5023 | c = t->base + ((uInt)b & inflate_mask[e]); | |
5024 | DUMPBITS(e) | |
5025 | Tracevv((stderr, "inflate: * length %u\n", c)); | |
5026 | ||
5027 | /* decode distance base of block to copy */ | |
5028 | GRABBITS(15); /* max bits for distance code */ | |
5029 | e = (t = td + ((uInt)b & md))->exop; | |
5030 | do { | |
5031 | DUMPBITS(t->bits) | |
5032 | if (e & 16) | |
5033 | { | |
5034 | /* get extra bits to add to distance base */ | |
5035 | e &= 15; | |
5036 | GRABBITS(e) /* get extra bits (up to 13) */ | |
5037 | d = t->base + ((uInt)b & inflate_mask[e]); | |
5038 | DUMPBITS(e) | |
5039 | Tracevv((stderr, "inflate: * distance %u\n", d)); | |
5040 | ||
5041 | /* do the copy */ | |
5042 | m -= c; | |
5043 | if ((uInt)(q - s->window) >= d) /* offset before dest */ | |
5044 | { /* just copy */ | |
5045 | r = q - d; | |
5046 | *q++ = *r++; c--; /* minimum count is three, */ | |
5047 | *q++ = *r++; c--; /* so unroll loop a little */ | |
5048 | } | |
5049 | else /* else offset after destination */ | |
5050 | { | |
5051 | e = d - (uInt)(q - s->window); /* bytes from offset to end */ | |
5052 | r = s->end - e; /* pointer to offset */ | |
5053 | if (c > e) /* if source crosses, */ | |
5054 | { | |
5055 | c -= e; /* copy to end of window */ | |
5056 | do { | |
5057 | *q++ = *r++; | |
5058 | } while (--e); | |
5059 | r = s->window; /* copy rest from start of window */ | |
5060 | } | |
5061 | } | |
5062 | do { /* copy all or what's left */ | |
5063 | *q++ = *r++; | |
5064 | } while (--c); | |
5065 | break; | |
5066 | } | |
5067 | else if ((e & 64) == 0) | |
5068 | e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop; | |
5069 | else | |
5070 | { | |
5071 | z->msg = (char*)"invalid distance code"; | |
5072 | UNGRAB | |
5073 | UPDATE | |
5074 | return Z_DATA_ERROR; | |
5075 | } | |
5076 | } while (1); | |
5077 | break; | |
5078 | } | |
5079 | if ((e & 64) == 0) | |
5080 | { | |
5081 | if ((e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop) == 0) | |
5082 | { | |
5083 | DUMPBITS(t->bits) | |
5084 | Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ? | |
5085 | "inflate: * literal '%c'\n" : | |
5086 | "inflate: * literal 0x%02x\n", t->base)); | |
5087 | *q++ = (Byte)t->base; | |
5088 | m--; | |
5089 | break; | |
5090 | } | |
5091 | } | |
5092 | else if (e & 32) | |
5093 | { | |
5094 | Tracevv((stderr, "inflate: * end of block\n")); | |
5095 | UNGRAB | |
5096 | UPDATE | |
5097 | return Z_STREAM_END; | |
5098 | } | |
5099 | else | |
5100 | { | |
5101 | z->msg = (char*)"invalid literal/length code"; | |
5102 | UNGRAB | |
5103 | UPDATE | |
5104 | return Z_DATA_ERROR; | |
5105 | } | |
5106 | } while (1); | |
5107 | } while (m >= 258 && n >= 10); | |
5108 | ||
5109 | /* not enough input or output--restore pointers and return */ | |
5110 | UNGRAB | |
5111 | UPDATE | |
5112 | return Z_OK; | |
5113 | } | |
5114 | /* --- inffast.c */ | |
5115 | ||
5116 | /* +++ zutil.c */ | |
5117 | /* zutil.c -- target dependent utility functions for the compression library | |
5118 | * Copyright (C) 1995-1996 Jean-loup Gailly. | |
5119 | * For conditions of distribution and use, see copyright notice in zlib.h | |
5120 | */ | |
5121 | ||
5122 | /* From: zutil.c,v 1.17 1996/07/24 13:41:12 me Exp $ */ | |
5123 | ||
5124 | #ifdef DEBUG_ZLIB | |
5125 | #include <stdio.h> | |
5126 | #endif | |
5127 | ||
5128 | /* #include "zutil.h" */ | |
5129 | ||
5130 | #ifndef NO_DUMMY_DECL | |
5131 | struct internal_state {int dummy;}; /* for buggy compilers */ | |
5132 | #endif | |
5133 | ||
5134 | #ifndef STDC | |
5135 | extern void exit OF((int)); | |
5136 | #endif | |
5137 | ||
5138 | static const char *z_errmsg[10] = { | |
5139 | "need dictionary", /* Z_NEED_DICT 2 */ | |
5140 | "stream end", /* Z_STREAM_END 1 */ | |
5141 | "", /* Z_OK 0 */ | |
5142 | "file error", /* Z_ERRNO (-1) */ | |
5143 | "stream error", /* Z_STREAM_ERROR (-2) */ | |
5144 | "data error", /* Z_DATA_ERROR (-3) */ | |
5145 | "insufficient memory", /* Z_MEM_ERROR (-4) */ | |
5146 | "buffer error", /* Z_BUF_ERROR (-5) */ | |
5147 | "incompatible version",/* Z_VERSION_ERROR (-6) */ | |
5148 | ""}; | |
5149 | ||
5150 | ||
5151 | const char *zlibVersion() | |
5152 | { | |
5153 | return ZLIB_VERSION; | |
5154 | } | |
5155 | ||
5156 | #ifdef DEBUG_ZLIB | |
5157 | void z_error (m) | |
5158 | char *m; | |
5159 | { | |
5160 | fprintf(stderr, "%s\n", m); | |
5161 | exit(1); | |
5162 | } | |
5163 | #endif | |
5164 | ||
5165 | #ifndef HAVE_MEMCPY | |
5166 | ||
5167 | void zmemcpy(dest, source, len) | |
5168 | Bytef* dest; | |
5169 | Bytef* source; | |
5170 | uInt len; | |
5171 | { | |
5172 | if (len == 0) return; | |
5173 | do { | |
5174 | *dest++ = *source++; /* ??? to be unrolled */ | |
5175 | } while (--len != 0); | |
5176 | } | |
5177 | ||
5178 | int zmemcmp(s1, s2, len) | |
5179 | Bytef* s1; | |
5180 | Bytef* s2; | |
5181 | uInt len; | |
5182 | { | |
5183 | uInt j; | |
5184 | ||
5185 | for (j = 0; j < len; j++) { | |
5186 | if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1; | |
5187 | } | |
5188 | return 0; | |
5189 | } | |
5190 | ||
5191 | void zmemzero(dest, len) | |
5192 | Bytef* dest; | |
5193 | uInt len; | |
5194 | { | |
5195 | if (len == 0) return; | |
5196 | do { | |
5197 | *dest++ = 0; /* ??? to be unrolled */ | |
5198 | } while (--len != 0); | |
5199 | } | |
5200 | #endif | |
5201 | ||
5202 | #ifdef __TURBOC__ | |
5203 | #if (defined( __BORLANDC__) || !defined(SMALL_MEDIUM)) && !defined(__32BIT__) | |
4204 | /* Small and medium models in Turbo C are for now limited to near allocation | |
5205 | * with reduced MAX_WBITS and MAX_MEM_LEVEL | |
5206 | */ | |
5207 | # define MY_ZCALLOC | |
5208 | ||
5209 | /* Turbo C malloc() does not allow dynamic allocation of 64K bytes | |
5210 | * and farmalloc(64K) returns a pointer with an offset of 8, so we | |
5211 | * must fix the pointer. Warning: the pointer must be put back to its | |
4212 | * original form in order to free it; use zcfree(). | |
5213 | */ | |
5214 | ||
5215 | #define MAX_PTR 10 | |
5216 | /* 10*64K = 640K */ | |
5217 | ||
5218 | local int next_ptr = 0; | |
5219 | ||
5220 | typedef struct ptr_table_s { | |
5221 | voidpf org_ptr; | |
5222 | voidpf new_ptr; | |
5223 | } ptr_table; | |
5224 | ||
5225 | local ptr_table table[MAX_PTR]; | |
5226 | /* This table is used to remember the original form of pointers | |
5227 | * to large buffers (64K). Such pointers are normalized with a zero offset. | |
5228 | * Since MSDOS is not a preemptive multitasking OS, this table is not | |
5229 | * protected from concurrent access. This hack doesn't work anyway on | |
5230 | * a protected system like OS/2. Use Microsoft C instead. | |
5231 | */ | |
5232 | ||
5233 | voidpf zcalloc (voidpf opaque, unsigned items, unsigned size) | |
5234 | { | |
5235 | voidpf buf = opaque; /* just to make some compilers happy */ | |
5236 | ulg bsize = (ulg)items*size; | |
5237 | ||
5238 | /* If we allocate less than 65520 bytes, we assume that farmalloc | |
5239 | * will return a usable pointer which doesn't have to be normalized. | |
5240 | */ | |
5241 | if (bsize < 65520L) { | |
5242 | buf = farmalloc(bsize); | |
5243 | if (*(ush*)&buf != 0) return buf; | |
5244 | } else { | |
5245 | buf = farmalloc(bsize + 16L); | |
5246 | } | |
5247 | if (buf == NULL || next_ptr >= MAX_PTR) return NULL; | |
5248 | table[next_ptr].org_ptr = buf; | |
5249 | ||
5250 | /* Normalize the pointer to seg:0 */ | |
5251 | *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4; | |
5252 | *(ush*)&buf = 0; | |
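| /* (A real-mode far pointer addresses seg*16 + off, so adding | |
| (off + 15) >> 4 paragraphs to the segment word and zeroing the | |
| offset rounds the pointer up to an equivalent seg:0 form.) */ | |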
5253 | table[next_ptr++].new_ptr = buf; | |
5254 | return buf; | |
5255 | } | |
5256 | ||
5257 | void zcfree (voidpf opaque, voidpf ptr) | |
5258 | { | |
5259 | int n; | |
5260 | if (*(ush*)&ptr != 0) { /* object < 64K */ | |
5261 | farfree(ptr); | |
5262 | return; | |
5263 | } | |
5264 | /* Find the original pointer */ | |
5265 | for (n = 0; n < next_ptr; n++) { | |
5266 | if (ptr != table[n].new_ptr) continue; | |
5267 | ||
5268 | farfree(table[n].org_ptr); | |
5269 | while (++n < next_ptr) { | |
5270 | table[n-1] = table[n]; | |
5271 | } | |
5272 | next_ptr--; | |
5273 | return; | |
5274 | } | |
5275 | ptr = opaque; /* just to make some compilers happy */ | |
5276 | Assert(0, "zcfree: ptr not found"); | |
5277 | } | |
5278 | #endif | |
5279 | #endif /* __TURBOC__ */ | |
5280 | ||
5281 | ||
5282 | #if defined(M_I86) && !defined(__32BIT__) | |
5283 | /* Microsoft C in 16-bit mode */ | |
5284 | ||
5285 | # define MY_ZCALLOC | |
5286 | ||
5287 | #if (!defined(_MSC_VER) || (_MSC_VER < 600)) | |
5288 | # define _halloc halloc | |
5289 | # define _hfree hfree | |
5290 | #endif | |
5291 | ||
5292 | voidpf zcalloc (voidpf opaque, unsigned items, unsigned size) | |
5293 | { | |
5294 | if (opaque) opaque = 0; /* to make compiler happy */ | |
5295 | return _halloc((long)items, size); | |
5296 | } | |
5297 | ||
5298 | void zcfree (voidpf opaque, voidpf ptr) | |
5299 | { | |
5300 | if (opaque) opaque = 0; /* to make compiler happy */ | |
5301 | _hfree(ptr); | |
5302 | } | |
5303 | ||
5304 | #endif /* MSC */ | |
5305 | ||
5306 | ||
5307 | #ifndef MY_ZCALLOC /* Any system without a special alloc function */ | |
5308 | ||
5309 | #ifndef STDC | |
5310 | extern voidp calloc OF((uInt items, uInt size)); | |
5311 | extern void free OF((voidpf ptr)); | |
5312 | #endif | |
5313 | ||
5314 | voidpf zcalloc (opaque, items, size) | |
5315 | voidpf opaque; | |
5316 | unsigned items; | |
5317 | unsigned size; | |
5318 | { | |
5319 | if (opaque) items += size - size; /* make compiler happy */ | |
5320 | return (voidpf)calloc(items, size); | |
5321 | } | |
5322 | ||
5323 | void zcfree (opaque, ptr) | |
5324 | voidpf opaque; | |
5325 | voidpf ptr; | |
5326 | { | |
5327 | _FREE(ptr); | |
5328 | if (opaque) return; /* make compiler happy */ | |
5329 | } | |
5330 | ||
5331 | #endif /* MY_ZCALLOC */ | |
5332 | /* --- zutil.c */ | |
5333 | ||
5334 | /* +++ adler32.c */ | |
5335 | /* adler32.c -- compute the Adler-32 checksum of a data stream | |
5336 | * Copyright (C) 1995-1996 Mark Adler | |
5337 | * For conditions of distribution and use, see copyright notice in zlib.h | |
5338 | */ | |
5339 | ||
5340 | /* From: adler32.c,v 1.10 1996/05/22 11:52:18 me Exp $ */ | |
5341 | ||
5342 | /* #include "zlib.h" */ | |
5343 | ||
5344 | #define BASE 65521L /* largest prime smaller than 65536 */ | |
5345 | #define NMAX 5552 | |
5346 | /* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */ | |
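| /* (Check: 255*5552*5553/2 + 5553*65520 = 4294690200 <= 4294967295, | |
| while n == 5553 would already exceed 2^32-1.) */ | |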
5347 | ||
5348 | #define DO1(buf,i) {s1 += buf[i]; s2 += s1;} | |
5349 | #define DO2(buf,i) DO1(buf,i); DO1(buf,i+1); | |
5350 | #define DO4(buf,i) DO2(buf,i); DO2(buf,i+2); | |
5351 | #define DO8(buf,i) DO4(buf,i); DO4(buf,i+4); | |
5352 | #define DO16(buf) DO8(buf,0); DO8(buf,8); | |
5353 | ||
5354 | /* ========================================================================= */ | |
5355 | uLong adler32(adler, buf, len) | |
5356 | uLong adler; | |
5357 | const Bytef *buf; | |
5358 | uInt len; | |
5359 | { | |
5360 | unsigned long s1 = adler & 0xffff; | |
5361 | unsigned long s2 = (adler >> 16) & 0xffff; | |
5362 | int k; | |
5363 | ||
5364 | if (buf == Z_NULL) return 1L; | |
5365 | ||
5366 | while (len > 0) { | |
5367 | k = len < NMAX ? len : NMAX; | |
5368 | len -= k; | |
5369 | while (k >= 16) { | |
5370 | DO16(buf); | |
5371 | buf += 16; | |
5372 | k -= 16; | |
5373 | } | |
5374 | if (k != 0) do { | |
5375 | s1 += *buf++; | |
5376 | s2 += s1; | |
5377 | } while (--k); | |
5378 | s1 %= BASE; | |
5379 | s2 %= BASE; | |
5380 | } | |
5381 | return (s2 << 16) | s1; | |
5382 | } | |
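| ||
| /* A minimal usage sketch of the standard zlib convention -- start from | |
| the initial value 1L, obtained with a Z_NULL buffer, then feed the | |
| data in pieces (buf1/len1 and buf2/len2 are hypothetical): */ | |
| #if 0 | |
| uLong check = adler32(0L, Z_NULL, 0); /* initial value, == 1L */ | |
| check = adler32(check, buf1, len1); /* running checksum */ | |
| check = adler32(check, buf2, len2); | |
| #endif | |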
5383 | /* --- adler32.c */ |