/* explode.c -- put in the public domain by Mark Adler
   version c15, 6 July 1996 */


/* You can do whatever you like with this source file, though I would
   prefer that if you modify it and redistribute it that you include
   comments to that effect with your name and the date.  Thank you.

   History:
   vers    date          who           what
   ----  ---------  --------------  ------------------------------------
    c1   30 Mar 92  M. Adler        explode that uses huft_build from inflate
                                    (this gives over a 70% speed improvement
                                    over the original unimplode.c, which
                                    decoded a bit at a time)
    c2    4 Apr 92  M. Adler        fixed bug for file sizes a multiple of 32k.
    c3   10 Apr 92  M. Adler        added a little memory tracking if DEBUG
    c4   11 Apr 92  M. Adler        added NOMEMCPY to kill use of memcpy()
    c5   21 Apr 92  M. Adler        added the WSIZE #define to allow reducing
                                    the 32K window size for specialized
                                    applications.
    c6   31 May 92  M. Adler        added typecasts to eliminate some warnings
    c7   27 Jun 92  G. Roelofs      added more typecasts.
    c8   17 Oct 92  G. Roelofs      changed ULONG/UWORD/byte to ulg/ush/uch.
    c9   19 Jul 93  J. Bush         added more typecasts (to return values);
                                    made l[256] array static for Amiga.
    c10   8 Oct 93  G. Roelofs      added used_csize for diagnostics; added
                                    buf and unshrink arguments to flush();
                                    undef'd various macros at end for Turbo C;
                                    removed NEXTBYTE macro (now in unzip.h)
                                    and bytebuf variable (not used); changed
                                    memset() to memzero().
    c11   9 Jan 94  M. Adler        fixed incorrect used_csize calculation.
    c12   9 Apr 94  G. Roelofs      fixed split comments on preprocessor lines
                                    to avoid bug in Encore compiler.
    c13  25 Aug 94  M. Adler        fixed distance-length comment (orig c9 fix)
    c14  22 Nov 95  S. Maxwell      removed unnecessary "static" on auto array
    c15   6 Jul 96  W. Haidinger    added ulg typecasts to flush() calls.
    c16   8 Feb 98  C. Spieler      added ZCONST modifiers to const tables
                                    and #ifdef DEBUG around debugging code.
    c16b 25 Mar 98  C. Spieler      modified DLL code for slide redirection.
 */


/*
   Explode imploded (PKZIP method 6 compressed) data.  This compression
   method searches for as much of the current string of bytes (up to a length
   of ~320) in the previous 4K or 8K bytes.  If it doesn't find any matches
   (of at least length 2 or 3), it codes the next byte.  Otherwise, it codes
   the length of the matched string and its distance backwards from the
   current position.  Single bytes ("literals") are preceded by a one (a
   single bit) and are either uncoded (the eight bits go directly into the
   compressed stream for a total of nine bits) or Huffman coded with a
   supplied literal code tree.  If literals are coded, then the minimum match
   length is three, otherwise it is two.

   There are therefore four kinds of imploded streams: 8K search with coded
   literals (min match = 3), 4K search with coded literals (min match = 3),
   8K with uncoded literals (min match = 2), and 4K with uncoded literals
   (min match = 2).  The kind of stream is identified in two bits of a
   general purpose bit flag that is outside of the compressed stream.

   Distance-length pairs for matched strings are preceded by a zero bit (to
   distinguish them from literals) and are always coded.  The distance comes
   first and is either the low six (4K) or low seven (8K) bits of the
   distance (uncoded), followed by the high six bits of the distance coded.
   Then the length is six bits coded (0..63 + min match length), and if the
   maximum such length is coded, then it's followed by another eight bits
   (uncoded) to be added to the coded length.  This gives a match length
   range of 2..320 or 3..321 bytes.

   The literal, length, and distance codes are all represented in a slightly
   compressed form themselves.  What is sent are the lengths of the codes for
   each value, which is sufficient to construct the codes.  Each byte of the
   code representation is the code length (the low four bits representing
   1..16), and the number of values sequentially with that length (the high
   four bits also representing 1..16).  There are 256 literal code values (if
   literals are coded), 64 length code values, and 64 distance code values,
   in that order at the beginning of the compressed stream.  Each set of code
   values is preceded (redundantly) with a byte indicating how many bytes are
   in the code description that follows, in the range 1..256.

   The codes themselves are decoded using tables made by huft_build() from
   the bit lengths.  That routine and its comments are in the inflate.c
   module.
 */
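
/* For example: with coded literals (min match 3), the largest coded length
   value (63) selects base length 66 from the cplen3[] table below and is
   followed by eight extra bits, so a match can reach 66 + 255 = 321 bytes;
   with uncoded literals the base is 65 (cplen2[]), giving the 2..320 range
   noted above. */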

#define UNZIP_INTERNAL
#include "unzip.h"      /* must supply slide[] (uch) array and NEXTBYTE macro */

#ifndef WSIZE
#  define WSIZE 0x8000  /* window size--must be a power of two, and */
#endif                  /* at least 8K for zip's implode method */

#if (defined(DLL) && !defined(NO_SLIDE_REDIR))
#  define wsize G._wsize
#else
#  define wsize WSIZE
#endif

/* routines here */
static int get_tree OF((__GPRO__ unsigned *l, unsigned n));
static int explode_lit8 OF((__GPRO__ struct huft *tb, struct huft *tl,
                            struct huft *td, int bb, int bl, int bd));
static int explode_lit4 OF((__GPRO__ struct huft *tb, struct huft *tl,
                            struct huft *td, int bb, int bl, int bd));
static int explode_nolit8 OF((__GPRO__ struct huft *tl, struct huft *td,
                              int bl, int bd));
static int explode_nolit4 OF((__GPRO__ struct huft *tl, struct huft *td,
                              int bl, int bd));
int explode OF((__GPRO));


/* The implode algorithm uses a sliding 4K or 8K byte window on the
   uncompressed stream to find repeated byte strings.  This is implemented
   here as a circular buffer.  The index is updated simply by incrementing
   and then and'ing with 0x0fff (4K-1) or 0x1fff (8K-1).  Here, the 32K
   buffer of inflate is used, and it works just as well to always have
   a 32K circular buffer, so the index is anded with 0x7fff.  This is
   done to allow the window to also be used as the output buffer. */
/* This must be supplied in an external module useable like "uch slide[8192];"
   or "uch *slide;", where the latter would be malloc'ed.  In unzip, slide[]
   is actually a 32K area for use by inflate, which uses a 32K sliding window.
 */
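
/* Note on indexing here: the write index w is not masked on every increment;
   instead the window is flushed (and w reset to zero) whenever w reaches
   wsize, while the copy-source index d is wrapped explicitly with
   "d &= wsize-1" in the copy loops below. */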


/* Tables for length and distance */
static ZCONST ush cplen2[] =
        {2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
        18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
        35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
        52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65};
static ZCONST ush cplen3[] =
        {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
        19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
        36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
        53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66};
static ZCONST ush extra[] =
        {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        8};
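/* The extra[] table above gives the number of extra, uncoded bits for each
   length code: zero for every code except the last (index 63), which is
   followed by eight more bits as described in the format comment at the top
   of this file. */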
static ZCONST ush cpdist4[] =
        {1, 65, 129, 193, 257, 321, 385, 449, 513, 577, 641, 705,
        769, 833, 897, 961, 1025, 1089, 1153, 1217, 1281, 1345, 1409, 1473,
        1537, 1601, 1665, 1729, 1793, 1857, 1921, 1985, 2049, 2113, 2177,
        2241, 2305, 2369, 2433, 2497, 2561, 2625, 2689, 2753, 2817, 2881,
        2945, 3009, 3073, 3137, 3201, 3265, 3329, 3393, 3457, 3521, 3585,
        3649, 3713, 3777, 3841, 3905, 3969, 4033};
static ZCONST ush cpdist8[] =
        {1, 129, 257, 385, 513, 641, 769, 897, 1025, 1153, 1281,
        1409, 1537, 1665, 1793, 1921, 2049, 2177, 2305, 2433, 2561, 2689,
        2817, 2945, 3073, 3201, 3329, 3457, 3585, 3713, 3841, 3969, 4097,
        4225, 4353, 4481, 4609, 4737, 4865, 4993, 5121, 5249, 5377, 5505,
        5633, 5761, 5889, 6017, 6145, 6273, 6401, 6529, 6657, 6785, 6913,
        7041, 7169, 7297, 7425, 7553, 7681, 7809, 7937, 8065};


/* Macros for inflate() bit peeking and grabbing.
   The usage is:

        NEEDBITS(j)
        x = b & mask_bits[j];
        DUMPBITS(j)

   where NEEDBITS makes sure that b has at least j bits in it, and
   DUMPBITS removes the bits from b.  The macros use the variable k
   for the number of bits in b.  Normally, b and k are register
   variables for speed.
 */

#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE)<<k;k+=8;}}
#define DUMPBITS(n) {b>>=(n);k-=(n);}
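
/* For instance, the seven uncoded low distance bits of an 8K-window stream
   are read in the routines below as

        NEEDBITS(7)
        d = (unsigned)b & 0x7f;
        DUMPBITS(7)

   where 0x7f is just mask_bits[7] written as a constant. */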



static int get_tree(__G__ l, n)
    __GDEF
    unsigned *l;            /* bit lengths */
    unsigned n;             /* number expected */
/* Get the bit lengths for a code representation from the compressed
   stream.  If get_tree() returns 4, then there is an error in the data.
   Otherwise zero is returned. */
{
    unsigned i;           /* bytes remaining in list */
    unsigned k;           /* lengths entered */
    unsigned j;           /* number of codes */
    unsigned b;           /* bit length for those codes */


    /* get bit lengths */
    i = NEXTBYTE + 1;                     /* length/count pairs to read */
    k = 0;                                /* next code */
    do {
        b = ((j = NEXTBYTE) & 0xf) + 1;   /* bits in code (1..16) */
        j = ((j & 0xf0) >> 4) + 1;        /* codes with those bits (1..16) */
        if (k + j > n)
            return 4;                     /* don't overflow l[] */
        do {
            l[k++] = b;
        } while (--j);
    } while (--i);
    return k != n ? 4 : 0;                /* should have read n of them */
}
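
/* Worked example (hypothetical input): the bytes 0x01 0x23 0x45 describe
   0x01+1 = 2 length/count pairs; 0x23 yields three 4-bit codes and 0x45
   yields five 6-bit codes, so for n == 8, get_tree() fills l[] with
   {4, 4, 4, 6, 6, 6, 6, 6} and returns 0 (any other n would return 4). */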



static int explode_lit8(__G__ tb, tl, td, bb, bl, bd)
    __GDEF
    struct huft *tb, *tl, *td;  /* literal, length, and distance tables */
    int bb, bl, bd;             /* number of bits decoded by those */
/* Decompress the imploded data using coded literals and an 8K sliding
   window. */
{
    long s;               /* bytes to decompress */
    register unsigned e;  /* table entry flag/number of extra bits */
    unsigned n, d;        /* length and index for copy */
    unsigned w;           /* current window position */
    struct huft *t;       /* pointer to table entry */
    unsigned mb, ml, md;  /* masks for bb, bl, and bd bits */
    register ulg b;       /* bit buffer */
    register unsigned k;  /* number of bits in bit buffer */
    unsigned u;           /* true if unflushed */


    /* explode the coded data */
    b = k = w = 0;                /* initialize bit buffer, window */
    u = 1;                        /* buffer unflushed */
    mb = mask_bits[bb];           /* precompute masks for speed */
    ml = mask_bits[bl];
    md = mask_bits[bd];
    s = G.ucsize;
    while (s > 0)                 /* do until ucsize bytes uncompressed */
    {
        NEEDBITS(1)
        if (b & 1)                /* then literal--decode it */
        {
            DUMPBITS(1)
            s--;
            NEEDBITS((unsigned)bb)    /* get coded literal */
            if ((e = (t = tb + ((~(unsigned)b) & mb))->e) > 16)
                do {
                    if (e == 99)
                        return 1;
                    DUMPBITS(t->b)
                    e -= 16;
                    NEEDBITS(e)
                } while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
            DUMPBITS(t->b)
            redirSlide[w++] = (uch)t->v.n;
            if (w == wsize)
            {
                flush(__G__ redirSlide, (ulg)w, 0);
                w = u = 0;
            }
        }
        else                      /* else distance/length */
        {
            DUMPBITS(1)
            NEEDBITS(7)               /* get distance low bits */
            d = (unsigned)b & 0x7f;
            DUMPBITS(7)
            NEEDBITS((unsigned)bd)    /* get coded distance high bits */
            if ((e = (t = td + ((~(unsigned)b) & md))->e) > 16)
                do {
                    if (e == 99)
                        return 1;
                    DUMPBITS(t->b)
                    e -= 16;
                    NEEDBITS(e)
                } while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
            DUMPBITS(t->b)
            d = w - d - t->v.n;       /* construct offset */
            NEEDBITS((unsigned)bl)    /* get coded length */
            if ((e = (t = tl + ((~(unsigned)b) & ml))->e) > 16)
                do {
                    if (e == 99)
                        return 1;
                    DUMPBITS(t->b)
                    e -= 16;
                    NEEDBITS(e)
                } while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
            DUMPBITS(t->b)
            n = t->v.n;
            if (e)                    /* get length extra bits */
            {
                NEEDBITS(8)
                n += (unsigned)b & 0xff;
                DUMPBITS(8)
            }

            /* do the copy */
            s -= n;
            do {
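                /* Copy in chunks: each pass moves e = min(n, wsize - max(d, w))
                   bytes, stopping where the source or destination index first
                   reaches the end of the window.  The same logic is repeated
                   in the three sibling routines below. */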
#if (defined(DLL) && !defined(NO_SLIDE_REDIR))
                if (G.redirect_slide) {
                    /* &= w/ wsize not needed and wrong if redirect */
                    if (d >= wsize)
                        return 1;
                    n -= (e = (e = wsize - (d > w ? d : w)) > n ? n : e);
                } else
#endif
                n -= (e = (e = wsize - ((d &= wsize-1) > w ? d : w)) > n ? n : e);
                if (u && w <= d)
                {
                    memzero(redirSlide + w, e);
                    w += e;
                    d += e;
                }
                else
#ifndef NOMEMCPY
                    if (w - d >= e)   /* (this test assumes unsigned comparison) */
                    {
                        memcpy(redirSlide + w, redirSlide + d, e);
                        w += e;
                        d += e;
                    }
                    else              /* do it slow to avoid memcpy() overlap */
#endif /* !NOMEMCPY */
                        do {
                            redirSlide[w++] = redirSlide[d++];
                        } while (--e);
                if (w == wsize)
                {
                    flush(__G__ redirSlide, (ulg)w, 0);
                    w = u = 0;
                }
            } while (n);
        }
    }

    /* flush out redirSlide */
    flush(__G__ redirSlide, (ulg)w, 0);
    if (G.csize + G.incnt + (k >> 3))   /* should have read csize bytes, but */
    {                       /* sometimes read one too many: k>>3 compensates */
        G.used_csize = G.lrec.csize - G.csize - G.incnt - (k >> 3);
        return 5;
    }
    return 0;
}



static int explode_lit4(__G__ tb, tl, td, bb, bl, bd)
    __GDEF
    struct huft *tb, *tl, *td;  /* literal, length, and distance tables */
    int bb, bl, bd;             /* number of bits decoded by those */
/* Decompress the imploded data using coded literals and a 4K sliding
   window. */
{
    long s;               /* bytes to decompress */
    register unsigned e;  /* table entry flag/number of extra bits */
    unsigned n, d;        /* length and index for copy */
    unsigned w;           /* current window position */
    struct huft *t;       /* pointer to table entry */
    unsigned mb, ml, md;  /* masks for bb, bl, and bd bits */
    register ulg b;       /* bit buffer */
    register unsigned k;  /* number of bits in bit buffer */
    unsigned u;           /* true if unflushed */


    /* explode the coded data */
    b = k = w = 0;                /* initialize bit buffer, window */
    u = 1;                        /* buffer unflushed */
    mb = mask_bits[bb];           /* precompute masks for speed */
    ml = mask_bits[bl];
    md = mask_bits[bd];
    s = G.ucsize;
    while (s > 0)                 /* do until ucsize bytes uncompressed */
    {
        NEEDBITS(1)
        if (b & 1)                /* then literal--decode it */
        {
            DUMPBITS(1)
            s--;
            NEEDBITS((unsigned)bb)    /* get coded literal */
            if ((e = (t = tb + ((~(unsigned)b) & mb))->e) > 16)
                do {
                    if (e == 99)
                        return 1;
                    DUMPBITS(t->b)
                    e -= 16;
                    NEEDBITS(e)
                } while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
            DUMPBITS(t->b)
            redirSlide[w++] = (uch)t->v.n;
            if (w == wsize)
            {
                flush(__G__ redirSlide, (ulg)w, 0);
                w = u = 0;
            }
        }
        else                      /* else distance/length */
        {
            DUMPBITS(1)
            NEEDBITS(6)               /* get distance low bits */
            d = (unsigned)b & 0x3f;
            DUMPBITS(6)
            NEEDBITS((unsigned)bd)    /* get coded distance high bits */
            if ((e = (t = td + ((~(unsigned)b) & md))->e) > 16)
                do {
                    if (e == 99)
                        return 1;
                    DUMPBITS(t->b)
                    e -= 16;
                    NEEDBITS(e)
                } while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
            DUMPBITS(t->b)
            d = w - d - t->v.n;       /* construct offset */
            NEEDBITS((unsigned)bl)    /* get coded length */
            if ((e = (t = tl + ((~(unsigned)b) & ml))->e) > 16)
                do {
                    if (e == 99)
                        return 1;
                    DUMPBITS(t->b)
                    e -= 16;
                    NEEDBITS(e)
                } while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
            DUMPBITS(t->b)
            n = t->v.n;
            if (e)                    /* get length extra bits */
            {
                NEEDBITS(8)
                n += (unsigned)b & 0xff;
                DUMPBITS(8)
            }

            /* do the copy */
            s -= n;
            do {
#if (defined(DLL) && !defined(NO_SLIDE_REDIR))
                if (G.redirect_slide) {
                    /* &= w/ wsize not needed and wrong if redirect */
                    if (d >= wsize)
                        return 1;
                    n -= (e = (e = wsize - (d > w ? d : w)) > n ? n : e);
                } else
#endif
                n -= (e = (e = wsize - ((d &= wsize-1) > w ? d : w)) > n ? n : e);
                if (u && w <= d)
                {
                    memzero(redirSlide + w, e);
                    w += e;
                    d += e;
                }
                else
#ifndef NOMEMCPY
                    if (w - d >= e)   /* (this test assumes unsigned comparison) */
                    {
                        memcpy(redirSlide + w, redirSlide + d, e);
                        w += e;
                        d += e;
                    }
                    else              /* do it slow to avoid memcpy() overlap */
#endif /* !NOMEMCPY */
                        do {
                            redirSlide[w++] = redirSlide[d++];
                        } while (--e);
                if (w == wsize)
                {
                    flush(__G__ redirSlide, (ulg)w, 0);
                    w = u = 0;
                }
            } while (n);
        }
    }

    /* flush out redirSlide */
    flush(__G__ redirSlide, (ulg)w, 0);
    if (G.csize + G.incnt + (k >> 3))   /* should have read csize bytes, but */
    {                       /* sometimes read one too many: k>>3 compensates */
        G.used_csize = G.lrec.csize - G.csize - G.incnt - (k >> 3);
        return 5;
    }
    return 0;
}



static int explode_nolit8(__G__ tl, td, bl, bd)
    __GDEF
    struct huft *tl, *td;   /* length and distance decoder tables */
    int bl, bd;             /* number of bits decoded by tl[] and td[] */
/* Decompress the imploded data using uncoded literals and an 8K sliding
   window. */
{
    long s;               /* bytes to decompress */
    register unsigned e;  /* table entry flag/number of extra bits */
    unsigned n, d;        /* length and index for copy */
    unsigned w;           /* current window position */
    struct huft *t;       /* pointer to table entry */
    unsigned ml, md;      /* masks for bl and bd bits */
    register ulg b;       /* bit buffer */
    register unsigned k;  /* number of bits in bit buffer */
    unsigned u;           /* true if unflushed */


    /* explode the coded data */
    b = k = w = 0;                /* initialize bit buffer, window */
    u = 1;                        /* buffer unflushed */
    ml = mask_bits[bl];           /* precompute masks for speed */
    md = mask_bits[bd];
    s = G.ucsize;
    while (s > 0)                 /* do until ucsize bytes uncompressed */
    {
        NEEDBITS(1)
        if (b & 1)                /* then literal--get eight bits */
        {
            DUMPBITS(1)
            s--;
            NEEDBITS(8)
            redirSlide[w++] = (uch)b;
            if (w == wsize)
            {
                flush(__G__ redirSlide, (ulg)w, 0);
                w = u = 0;
            }
            DUMPBITS(8)
        }
        else                      /* else distance/length */
        {
            DUMPBITS(1)
            NEEDBITS(7)               /* get distance low bits */
            d = (unsigned)b & 0x7f;
            DUMPBITS(7)
            NEEDBITS((unsigned)bd)    /* get coded distance high bits */
            if ((e = (t = td + ((~(unsigned)b) & md))->e) > 16)
                do {
                    if (e == 99)
                        return 1;
                    DUMPBITS(t->b)
                    e -= 16;
                    NEEDBITS(e)
                } while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
            DUMPBITS(t->b)
            d = w - d - t->v.n;       /* construct offset */
            NEEDBITS((unsigned)bl)    /* get coded length */
            if ((e = (t = tl + ((~(unsigned)b) & ml))->e) > 16)
                do {
                    if (e == 99)
                        return 1;
                    DUMPBITS(t->b)
                    e -= 16;
                    NEEDBITS(e)
                } while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
            DUMPBITS(t->b)
            n = t->v.n;
            if (e)                    /* get length extra bits */
            {
                NEEDBITS(8)
                n += (unsigned)b & 0xff;
                DUMPBITS(8)
            }

            /* do the copy */
            s -= n;
            do {
#if (defined(DLL) && !defined(NO_SLIDE_REDIR))
                if (G.redirect_slide) {
                    /* &= w/ wsize not needed and wrong if redirect */
                    if (d >= wsize)
                        return 1;
                    n -= (e = (e = wsize - (d > w ? d : w)) > n ? n : e);
                } else
#endif
                n -= (e = (e = wsize - ((d &= wsize-1) > w ? d : w)) > n ? n : e);
                if (u && w <= d)
                {
                    memzero(redirSlide + w, e);
                    w += e;
                    d += e;
                }
                else
#ifndef NOMEMCPY
                    if (w - d >= e)   /* (this test assumes unsigned comparison) */
                    {
                        memcpy(redirSlide + w, redirSlide + d, e);
                        w += e;
                        d += e;
                    }
                    else              /* do it slow to avoid memcpy() overlap */
#endif /* !NOMEMCPY */
                        do {
                            redirSlide[w++] = redirSlide[d++];
                        } while (--e);
                if (w == wsize)
                {
                    flush(__G__ redirSlide, (ulg)w, 0);
                    w = u = 0;
                }
            } while (n);
        }
    }

    /* flush out redirSlide */
    flush(__G__ redirSlide, (ulg)w, 0);
    if (G.csize + G.incnt + (k >> 3))   /* should have read csize bytes, but */
    {                       /* sometimes read one too many: k>>3 compensates */
        G.used_csize = G.lrec.csize - G.csize - G.incnt - (k >> 3);
        return 5;
    }
    return 0;
}



static int explode_nolit4(__G__ tl, td, bl, bd)
    __GDEF
    struct huft *tl, *td;   /* length and distance decoder tables */
    int bl, bd;             /* number of bits decoded by tl[] and td[] */
/* Decompress the imploded data using uncoded literals and a 4K sliding
   window. */
{
    long s;               /* bytes to decompress */
    register unsigned e;  /* table entry flag/number of extra bits */
    unsigned n, d;        /* length and index for copy */
    unsigned w;           /* current window position */
    struct huft *t;       /* pointer to table entry */
    unsigned ml, md;      /* masks for bl and bd bits */
    register ulg b;       /* bit buffer */
    register unsigned k;  /* number of bits in bit buffer */
    unsigned u;           /* true if unflushed */


    /* explode the coded data */
    b = k = w = 0;                /* initialize bit buffer, window */
    u = 1;                        /* buffer unflushed */
    ml = mask_bits[bl];           /* precompute masks for speed */
    md = mask_bits[bd];
    s = G.ucsize;
    while (s > 0)                 /* do until ucsize bytes uncompressed */
    {
        NEEDBITS(1)
        if (b & 1)                /* then literal--get eight bits */
        {
            DUMPBITS(1)
            s--;
            NEEDBITS(8)
            redirSlide[w++] = (uch)b;
            if (w == wsize)
            {
                flush(__G__ redirSlide, (ulg)w, 0);
                w = u = 0;
            }
            DUMPBITS(8)
        }
        else                      /* else distance/length */
        {
            DUMPBITS(1)
            NEEDBITS(6)               /* get distance low bits */
            d = (unsigned)b & 0x3f;
            DUMPBITS(6)
            NEEDBITS((unsigned)bd)    /* get coded distance high bits */
            if ((e = (t = td + ((~(unsigned)b) & md))->e) > 16)
                do {
                    if (e == 99)
                        return 1;
                    DUMPBITS(t->b)
                    e -= 16;
                    NEEDBITS(e)
                } while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
            DUMPBITS(t->b)
            d = w - d - t->v.n;       /* construct offset */
            NEEDBITS((unsigned)bl)    /* get coded length */
            if ((e = (t = tl + ((~(unsigned)b) & ml))->e) > 16)
                do {
                    if (e == 99)
                        return 1;
                    DUMPBITS(t->b)
                    e -= 16;
                    NEEDBITS(e)
                } while ((e = (t = t->v.t + ((~(unsigned)b) & mask_bits[e]))->e) > 16);
            DUMPBITS(t->b)
            n = t->v.n;
            if (e)                    /* get length extra bits */
            {
                NEEDBITS(8)
                n += (unsigned)b & 0xff;
                DUMPBITS(8)
            }

            /* do the copy */
            s -= n;
            do {
#if (defined(DLL) && !defined(NO_SLIDE_REDIR))
                if (G.redirect_slide) {
                    /* &= w/ wsize not needed and wrong if redirect */
                    if (d >= wsize)
                        return 1;
                    n -= (e = (e = wsize - (d > w ? d : w)) > n ? n : e);
                } else
#endif
                n -= (e = (e = wsize - ((d &= wsize-1) > w ? d : w)) > n ? n : e);
                if (u && w <= d)
                {
                    memzero(redirSlide + w, e);
                    w += e;
                    d += e;
                }
                else
#ifndef NOMEMCPY
                    if (w - d >= e)   /* (this test assumes unsigned comparison) */
                    {
                        memcpy(redirSlide + w, redirSlide + d, e);
                        w += e;
                        d += e;
                    }
                    else              /* do it slow to avoid memcpy() overlap */
#endif /* !NOMEMCPY */
                        do {
                            redirSlide[w++] = redirSlide[d++];
                        } while (--e);
                if (w == wsize)
                {
                    flush(__G__ redirSlide, (ulg)w, 0);
                    w = u = 0;
                }
            } while (n);
        }
    }

    /* flush out redirSlide */
    flush(__G__ redirSlide, (ulg)w, 0);
    if (G.csize + G.incnt + (k >> 3))   /* should have read csize bytes, but */
    {                       /* sometimes read one too many: k>>3 compensates */
        G.used_csize = G.lrec.csize - G.csize - G.incnt - (k >> 3);
        return 5;
    }
    return 0;
}



int explode(__G)
    __GDEF
/* Explode an imploded compressed stream.  Based on the general purpose
   bit flag, decide on coded or uncoded literals, and an 8K or 4K sliding
   window.  Construct the literal (if any), length, and distance codes and
   the tables needed to decode them (using huft_build() from inflate.c),
   and call the appropriate routine for the type of data in the remainder
   of the stream.  The four routines are nearly identical, differing only
   in whether the literal is decoded or simply read in, and in how many
   bits are read in, uncoded, for the low distance bits. */
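/* General purpose flag bits used below: bit 1 (0x02) set selects the 8K
   window, bit 2 (0x04) set selects Huffman-coded literals. */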
{
    unsigned r;           /* return codes */
    struct huft *tb;      /* literal code table */
    struct huft *tl;      /* length code table */
    struct huft *td;      /* distance code table */
    int bb;               /* bits for tb */
    int bl;               /* bits for tl */
    int bd;               /* bits for td */
    unsigned l[256];      /* bit lengths for codes */

#if (defined(DLL) && !defined(NO_SLIDE_REDIR))
    if (G.redirect_slide)
        wsize = G.redirect_size, redirSlide = G.redirect_buffer;
    else
        wsize = WSIZE, redirSlide = slide;
#endif

    /* Tune base table sizes.  Note: I thought that to truly optimize speed,
       I would have to select different bl, bd, and bb values for different
       compressed file sizes.  I was surprised to find out that the values of
       7, 7, and 9 worked best over a very wide range of sizes, except that
       bd = 8 worked marginally better for large compressed sizes. */
    bl = 7;
    bd = (G.csize + G.incnt) > 200000L ? 8 : 7;


    /* With literal tree--minimum match length is 3 */
#ifdef DEBUG
    G.hufts = 0;    /* initialize huft's malloc'ed */
#endif
    if (G.lrec.general_purpose_bit_flag & 4)
    {
        bb = 9;                     /* base table size for literals */
        if ((r = get_tree(__G__ l, 256)) != 0)
            return (int)r;
        if ((r = huft_build(__G__ l, 256, 256, NULL, NULL, &tb, &bb)) != 0)
        {
            if (r == 1)
                huft_free(tb);
            return (int)r;
        }
        if ((r = get_tree(__G__ l, 64)) != 0)
            return (int)r;
        if ((r = huft_build(__G__ l, 64, 0, cplen3, extra, &tl, &bl)) != 0)
        {
            if (r == 1)
                huft_free(tl);
            huft_free(tb);
            return (int)r;
        }
        if ((r = get_tree(__G__ l, 64)) != 0)
            return (int)r;
        if (G.lrec.general_purpose_bit_flag & 2)      /* true if 8K */
        {
            if ((r = huft_build(__G__ l, 64, 0, cpdist8, extra, &td, &bd)) != 0)
            {
                if (r == 1)
                    huft_free(td);
                huft_free(tl);
                huft_free(tb);
                return (int)r;
            }
            r = explode_lit8(__G__ tb, tl, td, bb, bl, bd);
        }
        else                                          /* else 4K */
        {
            if ((r = huft_build(__G__ l, 64, 0, cpdist4, extra, &td, &bd)) != 0)
            {
                if (r == 1)
                    huft_free(td);
                huft_free(tl);
                huft_free(tb);
                return (int)r;
            }
            r = explode_lit4(__G__ tb, tl, td, bb, bl, bd);
        }
        huft_free(td);
        huft_free(tl);
        huft_free(tb);
    }
    else


    /* No literal tree--minimum match length is 2 */
    {
        if ((r = get_tree(__G__ l, 64)) != 0)
            return (int)r;
        if ((r = huft_build(__G__ l, 64, 0, cplen2, extra, &tl, &bl)) != 0)
        {
            if (r == 1)
                huft_free(tl);
            return (int)r;
        }
        if ((r = get_tree(__G__ l, 64)) != 0)
            return (int)r;
        if (G.lrec.general_purpose_bit_flag & 2)      /* true if 8K */
        {
            if ((r = huft_build(__G__ l, 64, 0, cpdist8, extra, &td, &bd)) != 0)
            {
                if (r == 1)
                    huft_free(td);
                huft_free(tl);
                return (int)r;
            }
            r = explode_nolit8(__G__ tl, td, bl, bd);
        }
        else                                          /* else 4K */
        {
            if ((r = huft_build(__G__ l, 64, 0, cpdist4, extra, &td, &bd)) != 0)
            {
                if (r == 1)
                    huft_free(td);
                huft_free(tl);
                return (int)r;
            }
            r = explode_nolit4(__G__ tl, td, bl, bd);
        }
        huft_free(td);
        huft_free(tl);
    }
    Trace((stderr, "<%u > ", G.hufts));
    return (int)r;
}

/* so explode.c and inflate.c can be compiled together into one object: */
#undef NEXTBYTE
#undef NEEDBITS
#undef DUMPBITS