]> git.saurik.com Git - wxWidgets.git/blob - utils/Install/sfxzip/inflate.c
This is how wxPlotWindow would look like with the
[wxWidgets.git] / utils / Install / sfxzip / inflate.c
1 /* inflate.c -- put in the public domain by Mark Adler
2 version c16b, 29 March 1998 */
3
4
5 /* You can do whatever you like with this source file, though I would
6 prefer that if you modify it and redistribute it that you include
7 comments to that effect with your name and the date. Thank you.
8
9 History:
10 vers date who what
11 ---- --------- -------------- ------------------------------------
12 a ~~ Feb 92 M. Adler used full (large, one-step) lookup table
13 b1 21 Mar 92 M. Adler first version with partial lookup tables
14 b2 21 Mar 92 M. Adler fixed bug in fixed-code blocks
15 b3 22 Mar 92 M. Adler sped up match copies, cleaned up some
16 b4 25 Mar 92 M. Adler added prototypes; removed window[] (now
17 is the responsibility of unzip.h--also
18 changed name to slide[]), so needs diffs
19 for unzip.c and unzip.h (this allows
20 compiling in the small model on MSDOS);
21 fixed cast of q in huft_build();
22 b5 26 Mar 92 M. Adler got rid of unintended macro recursion.
23 b6 27 Mar 92 M. Adler got rid of nextbyte() routine. fixed
24 bug in inflate_fixed().
25 c1 30 Mar 92 M. Adler removed lbits, dbits environment variables.
26 changed BMAX to 16 for explode. Removed
27 OUTB usage, and replaced it with flush()--
28 this was a 20% speed improvement! Added
29 an explode.c (to replace unimplod.c) that
30 uses the huft routines here. Removed
31 register union.
32 c2 4 Apr 92 M. Adler fixed bug for file sizes a multiple of 32k.
33 c3 10 Apr 92 M. Adler reduced memory of code tables made by
34 huft_build significantly (factor of two to
35 three).
36 c4 15 Apr 92 M. Adler added NOMEMCPY do kill use of memcpy().
37 worked around a Turbo C optimization bug.
38 c5 21 Apr 92 M. Adler added the WSIZE #define to allow reducing
39 the 32K window size for specialized
40 applications.
41 c6 31 May 92 M. Adler added some typecasts to eliminate warnings
42 c7 27 Jun 92 G. Roelofs added some more typecasts (444: MSC bug).
43 c8 5 Oct 92 J-l. Gailly added ifdef'd code to deal with PKZIP bug.
44 c9 9 Oct 92 M. Adler removed a memory error message (~line 416).
45 c10 17 Oct 92 G. Roelofs changed ULONG/UWORD/byte to ulg/ush/uch,
46 removed old inflate, renamed inflate_entry
47 to inflate, added Mark's fix to a comment.
48 c10.5 14 Dec 92 M. Adler fix up error messages for incomplete trees.
49 c11 2 Jan 93 M. Adler fixed bug in detection of incomplete
50 tables, and removed assumption that EOB is
51 the longest code (bad assumption).
52 c12 3 Jan 93 M. Adler make tables for fixed blocks only once.
53 c13 5 Jan 93 M. Adler allow all zero length codes (pkzip 2.04c
54 outputs one zero length code for an empty
55 distance tree).
56 c14 12 Mar 93 M. Adler made inflate.c standalone with the
57 introduction of inflate.h.
58 c14b 16 Jul 93 G. Roelofs added (unsigned) typecast to w at 470.
59 c14c 19 Jul 93 J. Bush changed v[N_MAX], l[288], ll[28x+3x] arrays
60 to static for Amiga.
61 c14d 13 Aug 93 J-l. Gailly de-complicatified Mark's c[*p++]++ thing.
62 c14e 8 Oct 93 G. Roelofs changed memset() to memzero().
63 c14f 22 Oct 93 G. Roelofs renamed quietflg to qflag; made Trace()
64 conditional; added inflate_free().
65 c14g 28 Oct 93 G. Roelofs changed l/(lx+1) macro to pointer (Cray bug)
66 c14h 7 Dec 93 C. Ghisler huft_build() optimizations.
67 c14i 9 Jan 94 A. Verheijen set fixed_t{d,l} to NULL after freeing;
68 G. Roelofs check NEXTBYTE macro for EOF.
69 c14j 23 Jan 94 G. Roelofs removed Ghisler "optimizations"; ifdef'd
70 EOF check.
71 c14k 27 Feb 94 G. Roelofs added some typecasts to avoid warnings.
72 c14l 9 Apr 94 G. Roelofs fixed split comments on preprocessor lines
73 to avoid bug in Encore compiler.
74 c14m 7 Jul 94 P. Kienitz modified to allow assembler version of
75 inflate_codes() (define ASM_INFLATECODES)
76 c14n 22 Jul 94 G. Roelofs changed fprintf to macro for DLL versions
77 c14o 23 Aug 94 C. Spieler added a newline to a debug statement;
78 G. Roelofs added another typecast to avoid MSC warning
79 c14p 4 Oct 94 G. Roelofs added (voidp *) cast to free() argument
80 c14q 30 Oct 94 G. Roelofs changed fprintf macro to MESSAGE()
81 c14r 1 Nov 94 G. Roelofs fixed possible redefinition of CHECK_EOF
82 c14s 7 May 95 S. Maxwell OS/2 DLL globals stuff incorporated;
83 P. Kienitz "fixed" ASM_INFLATECODES macro/prototype
84 c14t 18 Aug 95 G. Roelofs added UZinflate() to use zlib functions;
85 changed voidp to zvoid; moved huft_build()
86 and huft_free() to end of file
87 c14u 1 Oct 95 G. Roelofs moved G into definition of MESSAGE macro
88 c14v 8 Nov 95 P. Kienitz changed ASM_INFLATECODES to use a regular
89 call with __G__ instead of a macro
90 c15 3 Aug 96 M. Adler fixed bomb-bug on random input data (Adobe)
91 c15b 24 Aug 96 M. Adler more fixes for random input data
92 c15c 28 Mar 97 G. Roelofs changed USE_ZLIB fatal exit code from
93 PK_MEM2 to PK_MEM3
94 c16 20 Apr 97 J. Altman added memzero(v[]) in huft_build()
95 c16b 29 Mar 98 C. Spieler modified DLL code for slide redirection
96 */
97
98
99 /*
100 Inflate deflated (PKZIP's method 8 compressed) data. The compression
101 method searches for as much of the current string of bytes (up to a
102 length of 258) in the previous 32K bytes. If it doesn't find any
103 matches (of at least length 3), it codes the next byte. Otherwise, it
104 codes the length of the matched string and its distance backwards from
105 the current position. There is a single Huffman code that codes both
106 single bytes (called "literals") and match lengths. A second Huffman
107 code codes the distance information, which follows a length code. Each
108 length or distance code actually represents a base value and a number
109 of "extra" (sometimes zero) bits to get to add to the base value. At
110 the end of each deflated block is a special end-of-block (EOB) literal/
111 length code. The decoding process is basically: get a literal/length
112 code; if EOB then done; if a literal, emit the decoded byte; if a
113 length then get the distance and emit the referred-to bytes from the
114 sliding window of previously emitted data.
115
116 There are (currently) three kinds of inflate blocks: stored, fixed, and
117 dynamic. The compressor outputs a chunk of data at a time and decides
118 which method to use on a chunk-by-chunk basis. A chunk might typically
119 be 32K to 64K, uncompressed. If the chunk is uncompressible, then the
120 "stored" method is used. In this case, the bytes are simply stored as
121 is, eight bits per byte, with none of the above coding. The bytes are
122 preceded by a count, since there is no longer an EOB code.
123
124 If the data are compressible, then either the fixed or dynamic methods
125 are used. In the dynamic method, the compressed data are preceded by
126 an encoding of the literal/length and distance Huffman codes that are
127 to be used to decode this block. The representation is itself Huffman
128 coded, and so is preceded by a description of that code. These code
129 descriptions take up a little space, and so for small blocks, there is
130 a predefined set of codes, called the fixed codes. The fixed method is
131 used if the block ends up smaller that way (usually for quite small
132 chunks); otherwise the dynamic method is used. In the latter case, the
133 codes are customized to the probabilities in the current block and so
134 can code it much better than the pre-determined fixed codes can.
135
136 The Huffman codes themselves are decoded using a multi-level table
137 lookup, in order to maximize the speed of decoding plus the speed of
138 building the decoding tables. See the comments below that precede the
139 lbits and dbits tuning parameters.
140
141 GRR: return values(?)
142 0 OK
143 1 incomplete table
144 2 bad input
145 3 not enough memory
146 */
147
148
149 /*
150 Notes beyond the 1.93a appnote.txt:
151
152 1. Distance pointers never point before the beginning of the output
153 stream.
154 2. Distance pointers can point back across blocks, up to 32k away.
155 3. There is an implied maximum of 7 bits for the bit length table and
156 15 bits for the actual data.
157 4. If only one code exists, then it is encoded using one bit. (Zero
158 would be more efficient, but perhaps a little confusing.) If two
159 codes exist, they are coded using one bit each (0 and 1).
160 5. There is no way of sending zero distance codes--a dummy must be
161 sent if there are none. (History: a pre 2.0 version of PKZIP would
162 store blocks with no distance codes, but this was discovered to be
163 too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
164 zero distance codes, which is sent as one code of zero bits in
165 length.
166 6. There are up to 286 literal/length codes. Code 256 represents the
167 end-of-block. Note however that the static length tree defines
168 288 codes just to fill out the Huffman codes. Codes 286 and 287
169 cannot be used though, since there is no length base or extra bits
 170      defined for them.  Similarly, there are up to 30 distance codes.
171 However, static trees define 32 codes (all 5 bits) to fill out the
172 Huffman codes, but the last two had better not show up in the data.
173 7. Unzip can check dynamic Huffman blocks for complete code sets.
174 The exception is that a single code would not be complete (see #4).
175 8. The five bits following the block type is really the number of
176 literal codes sent minus 257.
177 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
178 (1+6+6). Therefore, to output three times the length, you output
179 three codes (1+1+1), whereas to output four times the same length,
180 you only need two codes (1+3). Hmm.
181 10. In the tree reconstruction algorithm, Code = Code + Increment
182 only if BitLength(i) is not zero. (Pretty obvious.)
183 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
184 12. Note: length code 284 can represent 227-258, but length code 285
185 really is 258. The last length deserves its own, short code
186 since it gets used a lot in very redundant files. The length
187 258 is special since 258 - 3 (the min match length) is 255.
188 13. The literal/length and distance code bit lengths are read as a
189 single stream of lengths. It is possible (and advantageous) for
190 a repeat code (16, 17, or 18) to go across the boundary between
191 the two sets of lengths.
192 */
193
194
195 #define PKZIP_BUG_WORKAROUND /* PKZIP 1.93a problem--live with it */
196
197 /*
198 inflate.h must supply the uch slide[WSIZE] array, the zvoid typedef
199 (void if (void *) is accepted, else char) and the NEXTBYTE,
200 FLUSH() and memzero macros. If the window size is not 32K, it
201 should also define WSIZE. If INFMOD is defined, it can include
202 compiled functions to support the NEXTBYTE and/or FLUSH() macros.
203 There are defaults for NEXTBYTE and FLUSH() below for use as
204 examples of what those functions need to do. Normally, you would
205 also want FLUSH() to compute a crc on the data. inflate.h also
206 needs to provide these typedefs:
207
208 typedef unsigned char uch;
209 typedef unsigned short ush;
210 typedef unsigned long ulg;
211
212 This module uses the external functions malloc() and free() (and
213 probably memset() or bzero() in the memzero() macro). Their
214 prototypes are normally found in <string.h> and <stdlib.h>.
215 */
216
217 /* #define DEBUG */
218 #define INFMOD /* tell inflate.h to include code to be compiled */
219 #include "inflate.h"
220
221
222 #ifndef WSIZE /* default is 32K */
223 # define WSIZE 0x8000 /* window size--must be a power of two, and at least */
224 #endif /* 32K for zip's deflate method */
225
226 #if (defined(DLL) && !defined(NO_SLIDE_REDIR))
227 # define wsize G._wsize /* wsize is a variable */
228 #else
229 # define wsize WSIZE /* wsize is a constant */
230 #endif
231
232
233 #ifndef NEXTBYTE /* default is to simply get a byte from stdin */
234 # define NEXTBYTE getchar()
235 #endif
236
237 #ifndef MESSAGE /* only used twice, for fixed strings--NOT general-purpose */
238 # define MESSAGE(str,len,flag) pipeit((char *)(str))
239 #endif
240
241 #ifndef FLUSH /* default is to simply write the buffer to stdout */
242 # define FLUSH(n) fwrite(redirSlide, 1, n, stdout) /* return value not used */
243 #endif
244 /* Warning: the fwrite above might not work on 16-bit compilers, since
245 0x8000 might be interpreted as -32,768 by the library function. */
246
247 #ifndef Trace
248 # ifdef DEBUG
249 # define Trace(x) fprintf x
250 # else
251 # define Trace(x)
252 # endif
253 #endif
254
255
256 /*---------------------------------------------------------------------------*/
257 #ifdef USE_ZLIB
258
259
260 /*
261 GRR: return values for both original inflate() and UZinflate()
262 0 OK
263 1 incomplete table(?)
264 2 bad input
265 3 not enough memory
266 */
267
268 /**************************/
269 /* Function UZinflate() */
270 /**************************/
271
272 int UZinflate(__G) /* decompress an inflated entry using the zlib routines */
273 __GDEF
274 {
275 int err=Z_OK;
276
277 #if (defined(DLL) && !defined(NO_SLIDE_REDIR))
278 if (G.redirect_slide)
279 wsize = G.redirect_size, redirSlide = G.redirect_buffer;
280 else
281 wsize = WSIZE, redirSlide = slide;
282 #endif
283
284 G.dstrm.next_out = redirSlide;
285 G.dstrm.avail_out = wsize;
286
287 G.dstrm.next_in = G.inptr;
288 G.dstrm.avail_in = G.incnt;
289
290 if (!G.inflInit) {
291 unsigned i;
292 int windowBits;
293
294 /* only need to test this stuff once */
295 if (zlib_version[0] != ZLIB_VERSION[0]) {
296 Info(slide, 0x21, ((char *)slide,
297 "error: incompatible zlib version (expected %s, found %s)\n",
298 ZLIB_VERSION, zlib_version));
299 return 3;
300 } else if (strcmp(zlib_version, ZLIB_VERSION) != 0)
301 Info(slide, 0x21, ((char *)slide,
302 "warning: different zlib version (expected %s, using %s)\n",
303 ZLIB_VERSION, zlib_version));
304
305 /* windowBits = log2(wsize) */
306 for (i = ((unsigned)wsize * 2 - 1), windowBits = 0;
307 !(i & 1); i >>= 1, ++windowBits);
308 if ((unsigned)windowBits > (unsigned)15)
309 windowBits = 15;
310 else if (windowBits < 8)
311 windowBits = 8;
312
313 G.dstrm.zalloc = (alloc_func)Z_NULL;
314 G.dstrm.zfree = (free_func)Z_NULL;
315
316 Trace((stderr, "initializing inflate()\n"));
317 err = inflateInit2(&G.dstrm, -windowBits);
318
319 if (err == Z_MEM_ERROR)
320 return 3;
321 else if (err != Z_OK)
322 Trace((stderr, "oops! (inflateInit2() err = %d)\n", err));
323 G.inflInit = 1;
324 }
325
326 #ifdef FUNZIP
327 while (err != Z_STREAM_END) {
328 #else /* !FUNZIP */
329 while (G.csize > 0) {
330 Trace((stderr, "first loop: G.csize = %ld\n", G.csize));
331 #endif /* ?FUNZIP */
332 while (G.dstrm.avail_out > 0) {
333 err = inflate(&G.dstrm, Z_PARTIAL_FLUSH);
334
335 if (err == Z_DATA_ERROR)
336 return 2;
337 else if (err == Z_MEM_ERROR)
338 return 3;
339 else if (err != Z_OK && err != Z_STREAM_END)
340 Trace((stderr, "oops! (inflate(first loop) err = %d)\n", err));
341
342 #ifdef FUNZIP
343 if (err == Z_STREAM_END) /* "END-of-entry-condition" ? */
344 #else /* !FUNZIP */
345 if (G.csize <= 0L) /* "END-of-entry-condition" ? */
346 #endif /* ?FUNZIP */
347 break;
348
349 if (G.dstrm.avail_in <= 0) {
350 if (fillinbuf(__G) == 0)
351 return 2; /* no "END-condition" yet, but no more data */
352
353 G.dstrm.next_in = G.inptr;
354 G.dstrm.avail_in = G.incnt;
355 }
356 Trace((stderr, " avail_in = %d\n", G.dstrm.avail_in));
357 }
358 FLUSH(wsize - G.dstrm.avail_out); /* flush slide[] */
359 Trace((stderr, "inside loop: flushing %ld bytes (ptr diff = %ld)\n",
360 (long)(wsize - G.dstrm.avail_out),
361 (long)(G.dstrm.next_out-(Bytef *)redirSlide)));
362 G.dstrm.next_out = redirSlide;
363 G.dstrm.avail_out = wsize;
364 }
365
366 /* no more input, so loop until we have all output */
367 Trace((stderr, "beginning final loop: err = %d\n", err));
368 while (err != Z_STREAM_END) {
369 err = inflate(&G.dstrm, Z_PARTIAL_FLUSH);
370 if (err == Z_DATA_ERROR)
371 return 2;
372 else if (err == Z_MEM_ERROR)
373 return 3;
374 else if (err == Z_BUF_ERROR) { /* DEBUG */
375 Trace((stderr, "zlib inflate() did not detect stream end (%s, %s)\n"
376 , G.zipfn, G.filename));
377 break;
378 } else if (err != Z_OK && err != Z_STREAM_END) {
379 Trace((stderr, "oops! (inflate(final loop) err = %d)\n", err));
380 DESTROYGLOBALS()
381 EXIT(PK_MEM3);
382 }
383 FLUSH(wsize - G.dstrm.avail_out); /* final flush of slide[] */
384 Trace((stderr, "final loop: flushing %ld bytes (ptr diff = %ld)\n",
385 (long)(wsize - G.dstrm.avail_out),
386 (long)(G.dstrm.next_out-(Bytef *)redirSlide)));
387 G.dstrm.next_out = redirSlide;
388 G.dstrm.avail_out = wsize;
389 }
390 Trace((stderr, "total in = %ld, total out = %ld\n", G.dstrm.total_in,
391 G.dstrm.total_out));
392
393 G.inptr = (uch *)G.dstrm.next_in;
394 G.incnt = (G.inbuf + INBUFSIZ) - G.inptr; /* reset for other routines */
395
396 err = inflateReset(&G.dstrm);
397 if (err != Z_OK)
398 Trace((stderr, "oops! (inflateReset() err = %d)\n", err));
399
400 return 0;
401 }
402
403
404 /*---------------------------------------------------------------------------*/
405 #else /* !USE_ZLIB */
406
407
408 /* Function prototypes */
409 #ifndef OF
410 # ifdef __STDC__
411 # define OF(a) a
412 # else
413 # define OF(a) ()
414 # endif
415 #endif /* !OF */
416 int inflate_codes OF((__GPRO__ struct huft *tl, struct huft *td,
417 int bl, int bd));
418 static int inflate_stored OF((__GPRO));
419 static int inflate_fixed OF((__GPRO));
420 static int inflate_dynamic OF((__GPRO));
421 static int inflate_block OF((__GPRO__ int *e));
422
423
424 /* The inflate algorithm uses a sliding 32K byte window on the uncompressed
425 stream to find repeated byte strings. This is implemented here as a
426 circular buffer. The index is updated simply by incrementing and then
427 and'ing with 0x7fff (32K-1). */
428 /* It is left to other modules to supply the 32K area. It is assumed
429 to be usable as if it were declared "uch slide[32768];" or as just
430 "uch *slide;" and then malloc'ed in the latter case. The definition
431 must be in unzip.h, included above. */
432
433
434 /* unsigned wp; moved to globals.h */ /* current position in slide */
435
436
437 /* Tables for deflate from PKZIP's appnote.txt. */
438 static ZCONST unsigned border[] = { /* Order of the bit length code lengths */
439 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
440 static ZCONST ush cplens[] = { /* Copy lengths for literal codes 257..285 */
441 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
442 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
443 /* note: see note #13 above about the 258 in this list. */
444 static ZCONST ush cplext[] = { /* Extra bits for literal codes 257..285 */
445 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
446 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */
447 static ZCONST ush cpdist[] = { /* Copy offsets for distance codes 0..29 */
448 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
449 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
450 8193, 12289, 16385, 24577};
451 static ZCONST ush cpdext[] = { /* Extra bits for distance codes */
452 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
453 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
454 12, 12, 13, 13};
455
456
457 /* moved to consts.h (included in unzip.c), resp. funzip.c */
458 #if 0
459 /* And'ing with mask_bits[n] masks the lower n bits */
460 ZCONST ush near mask_bits[] = {
461 0x0000,
462 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
463 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
464 };
465 #endif /* 0 */
466
467
468 /* Macros for inflate() bit peeking and grabbing.
469 The usage is:
470
471 NEEDBITS(j)
472 x = b & mask_bits[j];
473 DUMPBITS(j)
474
475 where NEEDBITS makes sure that b has at least j bits in it, and
476 DUMPBITS removes the bits from b. The macros use the variable k
477 for the number of bits in b. Normally, b and k are register
 478      variables for speed and are initialized at the beginning of a
479 routine that uses these macros from a global bit buffer and count.
480
481 In order to not ask for more bits than there are in the compressed
482 stream, the Huffman tables are constructed to only ask for just
483 enough bits to make up the end-of-block code (value 256). Then no
484 bytes need to be "returned" to the buffer at the end of the last
485 block. See the huft_build() routine.
486 */
487
488 /* These have been moved to globals.h */
489 #if 0
490 ulg bb; /* bit buffer */
491 unsigned bk; /* bits in bit buffer */
492 #endif
493
494 #ifndef CHECK_EOF
495 # define CHECK_EOF /* default as of 5.13/5.2 */
496 #endif
497
498 #ifndef CHECK_EOF
499 # define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE)<<k;k+=8;}}
500 #else
501 # define NEEDBITS(n) {while(k<(n)){int c=NEXTBYTE;if(c==EOF)return 1;\
502 b|=((ulg)c)<<k;k+=8;}}
503 #endif /* Piet Plomp: change "return 1" to "break" */
504
505 #define DUMPBITS(n) {b>>=(n);k-=(n);}
506
507
508 /*
509 Huffman code decoding is performed using a multi-level table lookup.
510 The fastest way to decode is to simply build a lookup table whose
511 size is determined by the longest code. However, the time it takes
512 to build this table can also be a factor if the data being decoded
513 are not very long. The most common codes are necessarily the
514 shortest codes, so those codes dominate the decoding time, and hence
515 the speed. The idea is you can have a shorter table that decodes the
516 shorter, more probable codes, and then point to subsidiary tables for
517 the longer codes. The time it costs to decode the longer codes is
518 then traded against the time it takes to make longer tables.
519
520 This results of this trade are in the variables lbits and dbits
521 below. lbits is the number of bits the first level table for literal/
522 length codes can decode in one step, and dbits is the same thing for
523 the distance codes. Subsequent tables are also less than or equal to
524 those sizes. These values may be adjusted either when all of the
525 codes are shorter than that, in which case the longest code length in
526 bits is used, or when the shortest code is *longer* than the requested
527 table size, in which case the length of the shortest code in bits is
528 used.
529
530 There are two different values for the two tables, since they code a
531 different number of possibilities each. The literal/length table
532 codes 286 possible values, or in a flat code, a little over eight
533 bits. The distance table codes 30 possible values, or a little less
534 than five bits, flat. The optimum values for speed end up being
535 about one bit more than those, so lbits is 8+1 and dbits is 5+1.
536 The optimum values may differ though from machine to machine, and
537 possibly even between compilers. Your mileage may vary.
538 */
539
540
541 static ZCONST int lbits = 9; /* bits in base literal/length lookup table */
542 static ZCONST int dbits = 6; /* bits in base distance lookup table */
543
544
545 #ifndef ASM_INFLATECODES
546
547 int inflate_codes(__G__ tl, td, bl, bd)
548 __GDEF
549 struct huft *tl, *td; /* literal/length and distance decoder tables */
550 int bl, bd; /* number of bits decoded by tl[] and td[] */
551 /* inflate (decompress) the codes in a deflated (compressed) block.
552 Return an error code or zero if it all goes ok. */
553 {
554 register unsigned e; /* table entry flag/number of extra bits */
555 unsigned n, d; /* length and index for copy */
556 unsigned w; /* current window position */
557 struct huft *t; /* pointer to table entry */
558 unsigned ml, md; /* masks for bl and bd bits */
559 register ulg b; /* bit buffer */
560 register unsigned k; /* number of bits in bit buffer */
561
562
563 /* make local copies of globals */
564 b = G.bb; /* initialize bit buffer */
565 k = G.bk;
566 w = G.wp; /* initialize window position */
567
568
569 /* inflate the coded data */
570 ml = mask_bits[bl]; /* precompute masks for speed */
571 md = mask_bits[bd];
572 while (1) /* do until end of block */
573 {
574 NEEDBITS((unsigned)bl)
575 if ((e = (t = tl + ((unsigned)b & ml))->e) > 16)
576 do {
577 if (e == 99)
578 return 1;
579 DUMPBITS(t->b)
580 e -= 16;
581 NEEDBITS(e)
582 } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
583 DUMPBITS(t->b)
584 if (e == 16) /* then it's a literal */
585 {
586 redirSlide[w++] = (uch)t->v.n;
587 if (w == wsize)
588 {
589 FLUSH(w);
590 w = 0;
591 }
592 }
593 else /* it's an EOB or a length */
594 {
595 /* exit if end of block */
596 if (e == 15)
597 break;
598
599 /* get length of block to copy */
600 NEEDBITS(e)
601 n = t->v.n + ((unsigned)b & mask_bits[e]);
602 DUMPBITS(e);
603
604 /* decode distance of block to copy */
605 NEEDBITS((unsigned)bd)
606 if ((e = (t = td + ((unsigned)b & md))->e) > 16)
607 do {
608 if (e == 99)
609 return 1;
610 DUMPBITS(t->b)
611 e -= 16;
612 NEEDBITS(e)
613 } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
614 DUMPBITS(t->b)
615 NEEDBITS(e)
616 d = w - t->v.n - ((unsigned)b & mask_bits[e]);
617 DUMPBITS(e)
618
619 /* do the copy */
620 do {
621 #if (defined(DLL) && !defined(NO_SLIDE_REDIR))
622 if (G.redirect_slide) {/* &= w/ wsize unnecessary & wrong if redirect */
623 if (d >= wsize)
624 return 1; /* invalid compressed data */
625 n -= (e = (e = wsize - (d > w ? d : w)) > n ? n : e);
626 }
627 else
628 #endif
629 n -= (e = (e = wsize - ((d &= wsize-1) > w ? d : w)) > n ? n : e);
630 #ifndef NOMEMCPY
631 if (w - d >= e) /* (this test assumes unsigned comparison) */
632 {
633 memcpy(redirSlide + w, redirSlide + d, e);
634 w += e;
635 d += e;
636 }
637 else /* do it slowly to avoid memcpy() overlap */
638 #endif /* !NOMEMCPY */
639 do {
640 redirSlide[w++] = redirSlide[d++];
641 } while (--e);
642 if (w == wsize)
643 {
644 FLUSH(w);
645 w = 0;
646 }
647 } while (n);
648 }
649 }
650
651
652 /* restore the globals from the locals */
653 G.wp = w; /* restore global window pointer */
654 G.bb = b; /* restore global bit buffer */
655 G.bk = k;
656
657
658 /* done */
659 return 0;
660 }
661
662 #endif /* ASM_INFLATECODES */
663
664
665
666 static int inflate_stored(__G)
667 __GDEF
668 /* "decompress" an inflated type 0 (stored) block. */
669 {
670 unsigned n; /* number of bytes in block */
671 unsigned w; /* current window position */
672 register ulg b; /* bit buffer */
673 register unsigned k; /* number of bits in bit buffer */
674
675
676 /* make local copies of globals */
677 Trace((stderr, "\nstored block"));
678 b = G.bb; /* initialize bit buffer */
679 k = G.bk;
680 w = G.wp; /* initialize window position */
681
682
683 /* go to byte boundary */
684 n = k & 7;
685 DUMPBITS(n);
686
687
688 /* get the length and its complement */
689 NEEDBITS(16)
690 n = ((unsigned)b & 0xffff);
691 DUMPBITS(16)
692 NEEDBITS(16)
693 if (n != (unsigned)((~b) & 0xffff))
694 return 1; /* error in compressed data */
695 DUMPBITS(16)
696
697
698 /* read and output the compressed data */
699 while (n--)
700 {
701 NEEDBITS(8)
702 redirSlide[w++] = (uch)b;
703 if (w == wsize)
704 {
705 FLUSH(w);
706 w = 0;
707 }
708 DUMPBITS(8)
709 }
710
711
712 /* restore the globals from the locals */
713 G.wp = w; /* restore global window pointer */
714 G.bb = b; /* restore global bit buffer */
715 G.bk = k;
716 return 0;
717 }
718
719
720 /* Globals for literal tables (built once) */
721 /* Moved to globals.h */
722 #if 0
723 struct huft *fixed_tl = (struct huft *)NULL;
724 struct huft *fixed_td;
725 int fixed_bl, fixed_bd;
726 #endif
727
728 static int inflate_fixed(__G)
729 __GDEF
730 /* decompress an inflated type 1 (fixed Huffman codes) block. We should
731 either replace this with a custom decoder, or at least precompute the
732 Huffman tables. */
733 {
734 /* if first time, set up tables for fixed blocks */
735 Trace((stderr, "\nliteral block"));
736 if (G.fixed_tl == (struct huft *)NULL)
737 {
738 int i; /* temporary variable */
739 unsigned l[288]; /* length list for huft_build */
740
741 /* literal table */
742 for (i = 0; i < 144; i++)
743 l[i] = 8;
744 for (; i < 256; i++)
745 l[i] = 9;
746 for (; i < 280; i++)
747 l[i] = 7;
748 for (; i < 288; i++) /* make a complete, but wrong code set */
749 l[i] = 8;
750 G.fixed_bl = 7;
751 if ((i = huft_build(__G__ l, 288, 257, cplens, cplext,
752 &G.fixed_tl, &G.fixed_bl)) != 0)
753 {
754 G.fixed_tl = (struct huft *)NULL;
755 return i;
756 }
757
758 /* distance table */
759 for (i = 0; i < 30; i++) /* make an incomplete code set */
760 l[i] = 5;
761 G.fixed_bd = 5;
762 if ((i = huft_build(__G__ l, 30, 0, cpdist, cpdext,
763 &G.fixed_td, &G.fixed_bd)) > 1)
764 {
765 huft_free(G.fixed_tl);
766 G.fixed_tl = (struct huft *)NULL;
767 return i;
768 }
769 }
770
771 /* decompress until an end-of-block code */
772 return inflate_codes(__G__ G.fixed_tl, G.fixed_td,
773 G.fixed_bl, G.fixed_bd) != 0;
774 }
775
776
777
778 static int inflate_dynamic(__G)
779 __GDEF
780 /* decompress an inflated type 2 (dynamic Huffman codes) block. */
781 {
782 int i; /* temporary variables */
783 unsigned j;
784 unsigned l; /* last length */
785 unsigned m; /* mask for bit lengths table */
786 unsigned n; /* number of lengths to get */
787 struct huft *tl; /* literal/length code table */
788 struct huft *td; /* distance code table */
789 int bl; /* lookup bits for tl */
790 int bd; /* lookup bits for td */
791 unsigned nb; /* number of bit length codes */
792 unsigned nl; /* number of literal/length codes */
793 unsigned nd; /* number of distance codes */
794 #ifdef PKZIP_BUG_WORKAROUND
795 unsigned ll[288+32]; /* literal/length and distance code lengths */
796 #else
797 unsigned ll[286+30]; /* literal/length and distance code lengths */
798 #endif
799 register ulg b; /* bit buffer */
800 register unsigned k; /* number of bits in bit buffer */
801
802
803 /* make local bit buffer */
804 Trace((stderr, "\ndynamic block"));
805 b = G.bb;
806 k = G.bk;
807
808
809 /* read in table lengths */
810 NEEDBITS(5)
811 nl = 257 + ((unsigned)b & 0x1f); /* number of literal/length codes */
812 DUMPBITS(5)
813 NEEDBITS(5)
814 nd = 1 + ((unsigned)b & 0x1f); /* number of distance codes */
815 DUMPBITS(5)
816 NEEDBITS(4)
817 nb = 4 + ((unsigned)b & 0xf); /* number of bit length codes */
818 DUMPBITS(4)
819 #ifdef PKZIP_BUG_WORKAROUND
820 if (nl > 288 || nd > 32)
821 #else
822 if (nl > 286 || nd > 30)
823 #endif
824 return 1; /* bad lengths */
825
826
827 /* read in bit-length-code lengths */
828 for (j = 0; j < nb; j++)
829 {
830 NEEDBITS(3)
831 ll[border[j]] = (unsigned)b & 7;
832 DUMPBITS(3)
833 }
834 for (; j < 19; j++)
835 ll[border[j]] = 0;
836
837
838 /* build decoding table for trees--single level, 7 bit lookup */
839 bl = 7;
840 i = huft_build(__G__ ll, 19, 19, NULL, NULL, &tl, &bl);
841 if (bl == 0) /* no bit lengths */
842 i = 1;
843 if (i)
844 {
845 if (i == 1)
846 huft_free(tl);
847 return i; /* incomplete code set */
848 }
849
850
851 /* read in literal and distance code lengths */
852 n = nl + nd;
853 m = mask_bits[bl];
854 i = l = 0;
855 while ((unsigned)i < n)
856 {
857 NEEDBITS((unsigned)bl)
858 j = (td = tl + ((unsigned)b & m))->b;
859 DUMPBITS(j)
860 j = td->v.n;
861 if (j < 16) /* length of code in bits (0..15) */
862 ll[i++] = l = j; /* save last length in l */
863 else if (j == 16) /* repeat last length 3 to 6 times */
864 {
865 NEEDBITS(2)
866 j = 3 + ((unsigned)b & 3);
867 DUMPBITS(2)
868 if ((unsigned)i + j > n)
869 return 1;
870 while (j--)
871 ll[i++] = l;
872 }
873 else if (j == 17) /* 3 to 10 zero length codes */
874 {
875 NEEDBITS(3)
876 j = 3 + ((unsigned)b & 7);
877 DUMPBITS(3)
878 if ((unsigned)i + j > n)
879 return 1;
880 while (j--)
881 ll[i++] = 0;
882 l = 0;
883 }
884 else /* j == 18: 11 to 138 zero length codes */
885 {
886 NEEDBITS(7)
887 j = 11 + ((unsigned)b & 0x7f);
888 DUMPBITS(7)
889 if ((unsigned)i + j > n)
890 return 1;
891 while (j--)
892 ll[i++] = 0;
893 l = 0;
894 }
895 }
896
897
898 /* free decoding table for trees */
899 huft_free(tl);
900
901
902 /* restore the global bit buffer */
903 G.bb = b;
904 G.bk = k;
905
906
907 /* build the decoding tables for literal/length and distance codes */
908 bl = lbits;
909 i = huft_build(__G__ ll, nl, 257, cplens, cplext, &tl, &bl);
910 if (bl == 0) /* no literals or lengths */
911 i = 1;
912 if (i)
913 {
914 if (i == 1) {
915 if (!uO.qflag)
916 MESSAGE((uch *)"(incomplete l-tree) ", 21L, 1);
917 huft_free(tl);
918 }
919 return i; /* incomplete code set */
920 }
921 bd = dbits;
922 i = huft_build(__G__ ll + nl, nd, 0, cpdist, cpdext, &td, &bd);
923 if (bd == 0 && nl > 257) /* lengths but no distances */
924 {
925 if (!uO.qflag)
926 MESSAGE((uch *)"(incomplete d-tree) ", 21L, 1);
927 huft_free(tl);
928 return 1;
929 }
930 if (i == 1) {
931 #ifdef PKZIP_BUG_WORKAROUND
932 i = 0;
933 #else
934 if (!uO.qflag)
935 MESSAGE((uch *)"(incomplete d-tree) ", 21L, 1);
936 huft_free(td);
937 #endif
938 }
939 if (i)
940 {
941 huft_free(tl);
942 return i;
943 }
944
945
946 /* decompress until an end-of-block code */
947 if (inflate_codes(__G__ tl, td, bl, bd))
948 return 1;
949
950
951 /* free the decoding tables, return */
952 huft_free(tl);
953 huft_free(td);
954 return 0;
955 }
956
957
958
959 static int inflate_block(__G__ e)
960 __GDEF
961 int *e; /* last block flag */
962 /* decompress an inflated block */
963 {
964 unsigned t; /* block type */
965 register ulg b; /* bit buffer */
966 register unsigned k; /* number of bits in bit buffer */
967
968
969 /* make local bit buffer */
970 b = G.bb;
971 k = G.bk;
972
973
974 /* read in last block bit */
975 NEEDBITS(1)
976 *e = (int)b & 1;
977 DUMPBITS(1)
978
979
980 /* read in block type */
981 NEEDBITS(2)
982 t = (unsigned)b & 3;
983 DUMPBITS(2)
984
985
986 /* restore the global bit buffer */
987 G.bb = b;
988 G.bk = k;
989
990
991 /* inflate that block type */
992 if (t == 2)
993 return inflate_dynamic(__G);
994 if (t == 0)
995 return inflate_stored(__G);
996 if (t == 1)
997 return inflate_fixed(__G);
998
999
1000 /* bad block type */
1001 return 2;
1002 }
1003
1004
1005
1006 int inflate(__G)
1007 __GDEF
1008 /* decompress an inflated entry */
1009 {
1010 int e; /* last block flag */
1011 int r; /* result code */
1012 #ifdef DEBUG
1013 unsigned h = 0; /* maximum struct huft's malloc'ed */
1014 #endif
1015
1016 #if (defined(DLL) && !defined(NO_SLIDE_REDIR))
1017 if (G.redirect_slide)
1018 wsize = G.redirect_size, redirSlide = G.redirect_buffer;
1019 else
1020 wsize = WSIZE, redirSlide = slide; /* how they're #defined if !DLL */
1021 #endif
1022
1023 /* initialize window, bit buffer */
1024 G.wp = 0;
1025 G.bk = 0;
1026 G.bb = 0;
1027
1028
1029 /* decompress until the last block */
1030 do {
1031 #ifdef DEBUG
1032 G.hufts = 0;
1033 #endif
1034 if ((r = inflate_block(__G__ &e)) != 0)
1035 return r;
1036 #ifdef DEBUG
1037 if (G.hufts > h)
1038 h = G.hufts;
1039 #endif
1040 } while (!e);
1041
1042
1043 /* flush out redirSlide */
1044 FLUSH(G.wp);
1045
1046
1047 /* return success */
1048 Trace((stderr, "\n%u bytes in Huffman tables (%d/entry)\n",
1049 h * sizeof(struct huft), sizeof(struct huft)));
1050 return 0;
1051 }
1052
1053
1054
1055 int inflate_free(__G)
1056 __GDEF
1057 {
1058 if (G.fixed_tl != (struct huft *)NULL)
1059 {
1060 huft_free(G.fixed_td);
1061 huft_free(G.fixed_tl);
1062 G.fixed_td = G.fixed_tl = (struct huft *)NULL;
1063 }
1064 return 0;
1065 }
1066
1067 #endif /* ?USE_ZLIB */
1068
1069
1070 /*
1071 * GRR: moved huft_build() and huft_free() down here; used by explode()
1072 * and fUnZip regardless of whether USE_ZLIB defined or not
1073 */
1074
1075
1076 /* If BMAX needs to be larger than 16, then h and x[] should be ulg. */
1077 #define BMAX 16 /* maximum bit length of any code (16 for explode) */
1078 #define N_MAX 288 /* maximum number of codes in any set */
1079
1080
int huft_build(__G__ b, n, s, d, e, t, m)
     __GDEF
     ZCONST unsigned *b;  /* code lengths in bits (all assumed <= BMAX) */
     unsigned n;          /* number of codes (assumed <= N_MAX) */
     unsigned s;          /* number of simple-valued codes (0..s-1) */
     ZCONST ush *d;       /* list of base values for non-simple codes */
     ZCONST ush *e;       /* list of extra bits for non-simple codes */
     struct huft **t;     /* result: starting table */
     int *m;              /* maximum lookup bits, returns actual */
/* Given a list of code lengths and a maximum table size, make a set of
   tables to decode that set of codes.  Return zero on success, one if
   the given code set is incomplete (the tables are still built in this
   case), two if the input is invalid (all zero length codes or an
   oversubscribed set of lengths), and three if not enough memory.
   The code with value 256 is special, and the tables are constructed
   so that no bits beyond that code are fetched when that code is
   decoded. */
{
  unsigned a;                   /* counter for codes of length k */
  unsigned c[BMAX+1];           /* bit length count table */
  unsigned el;                  /* length of EOB code (value 256) */
  unsigned f;                   /* i repeats in table every f entries */
  int g;                        /* maximum code length */
  int h;                        /* table level */
  register unsigned i;          /* counter, current code */
  register unsigned j;          /* counter */
  register int k;               /* number of bits in current code */
  int lx[BMAX+1];               /* memory for l[-1..BMAX-1] */
  int *l = lx+1;                /* stack of bits per table; l[-1] is valid */
  register unsigned *p;         /* pointer into c[], b[], or v[] */
  register struct huft *q;      /* points to current table */
  struct huft r;                /* table entry for structure assignment */
  struct huft *u[BMAX];         /* table stack */
  unsigned v[N_MAX];            /* values in order of bit length */
  register int w;               /* bits before this table == (l * h) */
  unsigned x[BMAX+1];           /* bit offsets, then code stack */
  unsigned *xp;                 /* pointer into x */
  int y;                        /* number of dummy codes added */
  unsigned z;                   /* number of entries in current table */


  /* Generate counts for each bit length */
  el = n > 256 ? b[256] : BMAX; /* set length of EOB code, if any */
  memzero((char *)c, sizeof(c));
  p = (unsigned *)b;  i = n;
  do {
    c[*p]++; p++;               /* assume all entries <= BMAX */
  } while (--i);
  if (c[0] == n)                /* null input--all zero length codes */
  {
    *t = (struct huft *)NULL;
    *m = 0;
    return 0;
  }


  /* Find minimum and maximum length, bound *m by those */
  for (j = 1; j <= BMAX; j++)
    if (c[j])
      break;
  k = j;                        /* minimum code length */
  if ((unsigned)*m < j)
    *m = j;
  for (i = BMAX; i; i--)
    if (c[i])
      break;
  g = i;                        /* maximum code length */
  if ((unsigned)*m > i)
    *m = i;


  /* Adjust last length count to fill out codes, if needed.  An
     oversubscribed set of lengths (y goes negative) is invalid input;
     y left positive here marks an incomplete set, which decides the
     return value at the bottom. */
  for (y = 1 << j; j < i; j++, y <<= 1)
    if ((y -= c[j]) < 0)
      return 2;                 /* bad input: more codes than bits */
  if ((y -= c[i]) < 0)
    return 2;
  c[i] += y;


  /* Generate starting offsets into the value table for each length */
  x[1] = j = 0;
  p = c + 1;  xp = x + 2;
  while (--i) {                 /* note that i == g from above */
    *xp++ = (j += *p++);
  }


  /* Make a table of values in order of bit lengths */
  memzero((char *)v, sizeof(v));
  p = (unsigned *)b;  i = 0;
  do {
    if ((j = *p++) != 0)
      v[x[j]++] = i;
  } while (++i < n);
  n = x[g];                     /* set n to length of v */


  /* Generate the Huffman codes and for each, make the table entries */
  x[0] = i = 0;                 /* first Huffman code is zero */
  p = v;                        /* grab values in bit order */
  h = -1;                       /* no tables yet--level -1 */
  w = l[-1] = 0;                /* no bits decoded yet */
  u[0] = (struct huft *)NULL;   /* just to keep compilers happy */
  q = (struct huft *)NULL;      /* ditto */
  z = 0;                        /* ditto */

  /* go through the bit lengths (k already is bits in shortest code) */
  for (; k <= g; k++)
  {
    a = c[k];
    while (a--)
    {
      /* here i is the Huffman code of length k bits for value *p */
      /* make tables up to required level */
      while (k > w + l[h])
      {
        w += l[h++];            /* add bits already decoded */

        /* compute minimum size table less than or equal to *m bits */
        z = (z = g - w) > (unsigned)*m ? *m : z;        /* upper limit */
        if ((f = 1 << (j = k - w)) > a + 1)     /* try a k-w bit table */
        {                       /* too few codes for k-w bit table */
          f -= a + 1;           /* deduct codes from patterns left */
          xp = c + k;
          while (++j < z)       /* try smaller tables up to z bits */
          {
            if ((f <<= 1) <= *++xp)
              break;            /* enough codes to use up j bits */
            f -= *xp;           /* else deduct codes from patterns */
          }
        }
        if ((unsigned)w + j > el && (unsigned)w < el)
          j = el - w;           /* make EOB code end at table */
        z = 1 << j;             /* table entries for j-bit table */
        l[h] = j;               /* set table size in stack */

        /* allocate and link in new table; entry 0 of the allocation is
           a hidden link slot used by huft_free(), so z + 1 entries */
        if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) ==
            (struct huft *)NULL)
        {
          if (h)
            huft_free(u[0]);
          return 3;             /* not enough memory */
        }
#ifdef DEBUG
        G.hufts += z + 1;       /* track memory usage */
#endif
        *t = q + 1;             /* link to list for huft_free() */
        *(t = &(q->v.t)) = (struct huft *)NULL;  /* t now aims at this
                                   table's link slot for the next one */
        u[h] = ++q;             /* table starts after link */

        /* connect to last table, if there is one */
        if (h)
        {
          x[h] = i;             /* save pattern for backing up */
          r.b = (uch)l[h-1];    /* bits to dump before this table */
          r.e = (uch)(16 + j);  /* bits in this table */
          r.v.t = q;            /* pointer to this table */
          j = (i & ((1 << w) - 1)) >> (w - l[h-1]);
          u[h-1][j] = r;        /* connect to last table */
        }
      }

      /* set up table entry in r */
      r.b = (uch)(k - w);
      if (p >= v + n)
        r.e = 99;               /* out of values--invalid code */
      else if (*p < s)
      {
        r.e = (uch)(*p < 256 ? 16 : 15);    /* 256 is end-of-block code */
        r.v.n = (ush)*p++;      /* simple code is just the value */
      }
      else
      {
        r.e = (uch)e[*p - s];   /* non-simple--look up in lists */
        r.v.n = d[*p++ - s];
      }

      /* fill code-like entries with r: the code occupies the low k-w
         bits of the index, so it repeats every f entries */
      f = 1 << (k - w);
      for (j = i >> w; j < z; j += f)
        q[j] = r;

      /* backwards increment the k-bit code i (codes are indexed with
         their bits reversed, so carry propagates from the top bit down) */
      for (j = 1 << (k - 1); i & j; j >>= 1)
        i ^= j;
      i ^= j;

      /* backup over finished tables */
      while ((i & ((1 << w) - 1)) != x[h])
        w -= l[--h];            /* don't need to update q */
    }
  }


  /* return actual size of base table */
  *m = l[0];


  /* Return true (1) if we were given an incomplete table */
  return y != 0 && g != 1;
}
1284
1285
1286
1287 int huft_free(t)
1288 struct huft *t; /* table to free */
1289 /* Free the malloc'ed tables built by huft_build(), which makes a linked
1290 list of the tables it made, with the links in a dummy first entry of
1291 each table. */
1292 {
1293 register struct huft *p, *q;
1294
1295
1296 /* Go through linked list, freeing from the malloced (t[-1]) address. */
1297 p = t;
1298 while (p != (struct huft *)NULL)
1299 {
1300 q = (--p)->v.t;
1301 free((zvoid *)p);
1302 p = q;
1303 }
1304 return 0;
1305 }