/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* CFSortFunctions.c
   Copyright 1999-2002, Apple, Inc. All rights reserved.
   Responsibility: Christopher Kane
*/

/* This file contains code copied from the Libc project's files
   qsort.c, merge.c, and heapsort.c, modified to suit the needs
   of CF, which requires the comparison callback to take an
   additional context parameter. The code is collected into this
   one file so that the individual BSD sort routines can remain
   private and static. Eventually the BSD functions may not be
   kept separate.
*/

#include <CoreFoundation/CFBase.h>
#if defined(__MACH__) || defined(__LINUX__) || defined(__FREEBSD__)
#include <sys/types.h>
#else
#define EINVAL 22
#endif
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include "CFInternal.h"

static int bsd_mergesort(void *base, int nmemb, int size, CFComparatorFunction cmp, void *context);

/* Comparator is passed the address of the values. */
void CFMergeSortArray(void *list, CFIndex count, CFIndex elementSize, CFComparatorFunction comparator, void *context) {
    bsd_mergesort(list, count, elementSize, comparator, context);
}
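
/*
 * Illustrative usage (added note, not part of the original source): the
 * comparator handed to CFMergeSortArray above, and to CFQSortArray below,
 * receives the *addresses* of two elements plus the caller-supplied context
 * pointer.  A hypothetical caller sorting a plain C array of ints might look
 * like this (compareInts and values are made-up names for illustration):
 *
 *     static CFComparisonResult compareInts(const void *a, const void *b, void *context) {
 *         int x = *(const int *)a, y = *(const int *)b;
 *         if (x < y) return kCFCompareLessThan;
 *         if (x > y) return kCFCompareGreaterThan;
 *         return kCFCompareEqualTo;
 *     }
 *
 *     int values[] = {3, 1, 2};
 *     CFMergeSortArray(values, 3, sizeof(int), compareInts, NULL);
 */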

#if 0
// non-recursing qsort with side index list which is the actual thing sorted
// XXX kept here for possible later reference
void cjk_big_isort(char *list, int size, int *idx_list, int (*comp)(const void *, const void *, void *), void *context, int lo, int hi) {
    int t, idx, idx2;
    char *v1;
    for (idx = lo + 1; idx < hi + 1; idx++) {
        t = idx_list[idx];
        v1 = list + t * size;
        for (idx2 = idx; lo < idx2 && (comp(v1, list + idx_list[idx2 - 1] * size, context) < 0); idx2--)
            idx_list[idx2] = idx_list[idx2 - 1];
        idx_list[idx2] = t;
    }
}

void cjk_big_qsort(char *list, int size, int *idx_list, int (*comp)(const void *, const void *, void *), void *context, int lo, int hi, int *recurse) {
    int p, idx, mid, g1, g2, g3, r1, r2;
    char *v1, *v2, *v3;
    g1 = lo;
    g2 = (hi + lo) / 2;  //random() % (hi - lo + 1) + lo;
    g3 = hi;
    v1 = list + idx_list[g1] * size;
    v2 = list + idx_list[g2] * size;
    v3 = list + idx_list[g3] * size;
    if (comp(v1, v2, context) < 0) {
        p = comp(v2, v3, context) < 0 ? g2 : (comp(v1, v3, context) < 0 ? g3 : g1);
    } else {
        p = comp(v2, v3, context) > 0 ? g2 : (comp(v1, v3, context) < 0 ? g1 : g3);
    }
    if (lo != p) {int t = idx_list[lo]; idx_list[lo] = idx_list[p]; idx_list[p] = t;}
    r2 = 1;
    v2 = list + idx_list[lo] * size;
    for (mid = lo, idx = lo + 1; idx < hi + 1; idx++) {
        v1 = list + idx_list[idx] * size;
        r1 = comp(v1, v2, context);
        r2 = r2 && (0 == r1);
        if (r1 < 0) {
            mid++;
            if (idx != mid) {int t = idx_list[idx]; idx_list[idx] = idx_list[mid]; idx_list[mid] = t;}
        }
    }
    if (lo != mid) {int t = idx_list[lo]; idx_list[lo] = idx_list[mid]; idx_list[mid] = t;}
    *recurse = !r2 ? mid : -1;
}

void cjk_big_sort(char *list, int n, int size, int (*comp)(const void *, const void *, void *), void *context) {
    int len = 0, los[40], his[40];
    // 40 is big enough for 2^40 elements, in theory; in practice, much
    // lower but still sufficient; should recurse if the 40 limit is hit
    int *idx_list = malloc(sizeof(int) * n);
    char *tmp_list = malloc(n * size);
    int idx;
    for (idx = 0; idx < n; idx++) {
        idx_list[idx] = idx;
    }
    los[len] = 0;
    his[len++] = n - 1;
    while (0 < len) {
        int lo, hi;
        len--;
        lo = los[len];
        hi = his[len];
        if (5 < (hi - lo)) {
            int mid;
            cjk_big_qsort(list, size, idx_list, comp, context, lo, hi, &mid);
            if (0 < mid) {
                los[len] = lo;
                his[len++] = mid - 1;
                los[len] = mid + 1;
                his[len++] = hi;
            }
        } else {
            cjk_big_isort(list, size, idx_list, comp, context, lo, hi);
        }
    }
    for (idx = 0; idx < n; idx++)
        memmove(tmp_list + idx * size, list + idx_list[idx] * size, size);
    memmove(list, tmp_list, n * size);
    free(tmp_list);
    free(idx_list);
}
#endif


/* stdlib.subproj/qsort.c ============================================== */

/*
 * Copyright (c) 1992, 1993
 *    The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if !defined(min)
#define min(a, b) (a) < (b) ? a : b
#endif

/*
 * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
 */
#define swapcode(TYPE, parmi, parmj, n) { \
    long i = (n) / sizeof (TYPE); \
    register TYPE *pi = (TYPE *) (parmi); \
    register TYPE *pj = (TYPE *) (parmj); \
    do { \
        register TYPE t = *pi; \
        *pi++ = *pj; \
        *pj++ = t; \
    } while (--i > 0); \
}

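/*
 * Added explanatory note: SWAPINIT below chooses a swap strategy for the
 * quicksort.  swaptype is 2 (swap byte by byte) when the array is not
 * long-aligned or the element size is not a multiple of sizeof(long),
 * 0 (swap with a single long assignment) when the element is exactly one
 * long, and 1 (swap a long at a time) otherwise.
 */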
#define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
    es % sizeof(long) ? 2 : es == sizeof(long) ? 0 : 1;

CF_INLINE void
swapfunc(char *a, char *b, int n, int swaptype) {
    if (swaptype <= 1)
        swapcode(long, a, b, n)
    else
        swapcode(char, a, b, n)
}

#define swap(a, b) \
    if (swaptype == 0) { \
        long t = *(long *)(a); \
        *(long *)(a) = *(long *)(b); \
        *(long *)(b) = t; \
    } else \
        swapfunc(a, b, es, swaptype)

#define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype)

CF_INLINE char *
med3(char *a, char *b, char *c, int (*cmp)(const void *, const void *, void *), void *context) {
    return cmp(a, b, context) < 0 ?
        (cmp(b, c, context) < 0 ? b : (cmp(a, c, context) < 0 ? c : a))
        : (cmp(b, c, context) > 0 ? b : (cmp(a, c, context) < 0 ? a : c));
}

/* Comparator is passed the address of the values. */
void CFQSortArray(void *aVoidStar, CFIndex n, CFIndex es, CFComparatorFunction cmp, void *context)
{
    char *a = (char *)aVoidStar;
    char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
    int d, r, swaptype, swap_cnt;

loop:   SWAPINIT(a, es);
    swap_cnt = 0;
    if (n < 7) {
        for (pm = a + es; pm < (char *)a + n * es; pm += es)
            for (pl = pm; pl > (char *)a && cmp(pl - es, pl, context) > 0; pl -= es)
                swap(pl, pl - es);
        return;
    }
    pm = a + (n / 2) * es;
    if (n > 7) {
        pl = a;
        pn = a + (n - 1) * es;
        if (n > 40) {
            d = (n / 8) * es;
            pl = med3(pl, pl + d, pl + 2 * d, cmp, context);
            pm = med3(pm - d, pm, pm + d, cmp, context);
            pn = med3(pn - 2 * d, pn - d, pn, cmp, context);
        }
        pm = med3(pl, pm, pn, cmp, context);
    }
    swap(a, pm);
    pa = pb = a + es;

    pc = pd = a + (n - 1) * es;
    for (;;) {
        while (pb <= pc && (r = cmp(pb, a, context)) <= 0) {
            if (r == 0) {
                swap_cnt = 1;
                swap(pa, pb);
                pa += es;
            }
            pb += es;
        }
        while (pb <= pc && (r = cmp(pc, a, context)) >= 0) {
            if (r == 0) {
                swap_cnt = 1;
                swap(pc, pd);
                pd -= es;
            }
            pc -= es;
        }
        if (pb > pc)
            break;
        swap(pb, pc);
        swap_cnt = 1;
        pb += es;
        pc -= es;
    }
    if (swap_cnt == 0) {  /* Switch to insertion sort */
        for (pm = a + es; pm < (char *)a + n * es; pm += es)
            for (pl = pm; pl > (char *)a && cmp(pl - es, pl, context) > 0; pl -= es)
                swap(pl, pl - es);
        return;
    }

    pn = a + n * es;
    r = min(pa - (char *)a, pb - pa);
    vecswap(a, pb - r, r);
    r = min(pd - pc, pn - pd - es);
    vecswap(pb, pn - r, r);
    if ((r = pb - pa) > es)
        CFQSortArray(a, r / es, es, cmp, context);
    if ((r = pd - pc) > es) {
        /* Iterate rather than recurse to save stack space */
        a = pn - r;
        n = r / es;
        goto loop;
    }
    /* CFQSortArray(pn - r, r / es, es, cmp, context); */
}
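
/*
 * Added note: CFQSortArray takes the same arguments as CFMergeSortArray and
 * follows the same comparator convention sketched above.  Being a quicksort,
 * it does not guarantee that equal elements keep their relative order; the
 * mergesort path above is the one documented as stable (see BSD mergesort(3)).
 */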

#undef min
#undef swapcode
#undef SWAPINIT
#undef swap
#undef vecswap

/* stdlib.subproj/mergesort.c ========================================== */

/*
 * Copyright (c) 1992, 1993
 *    The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Peter McIlroy.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Hybrid exponential search/linear search merge sort with hybrid
 * natural/pairwise first pass.  Requires about .3% more comparisons
 * for random data than LSMS with pairwise first pass alone.
 * It works for objects as small as two bytes.
 */

#define NATURAL
#define THRESHOLD 16    /* Best choice for natural merge cut-off. */

/* #define NATURAL to get hybrid natural merge.
 * (The default is pairwise merging.)
 */

#define ISIZE sizeof(int)
#define PSIZE sizeof(uint8_t *)
#define ICOPY_LIST(src, dst, last) \
    do \
        *(int *)dst = *(int *)src, src += ISIZE, dst += ISIZE; \
    while (src < last)
#define ICOPY_ELT(src, dst, i) \
    do \
        *(int *)dst = *(int *)src, src += ISIZE, dst += ISIZE; \
    while (i -= ISIZE)

#define CCOPY_LIST(src, dst, last) \
    do \
        *dst++ = *src++; \
    while (src < last)
#define CCOPY_ELT(src, dst, i) \
    do \
        *dst++ = *src++; \
    while (i -= 1)

/*
 * Find the next possible pointer head.  (Trickery for forcing an array
 * to do double duty as a linked list when objects do not align with word
 * boundaries.)
 */
/* Assumption: PSIZE is a power of 2. */
#define EVAL(p) (uint8_t **) \
    ((uint8_t *)0 + \
    (((uint8_t *)p + PSIZE - 1 - (uint8_t *)0) & ~(PSIZE - 1)))
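
/*
 * Added explanatory note: EVAL(p) rounds p up to the next PSIZE-aligned
 * address and treats it as a pointer slot.  This lets the scratch buffer
 * list2 double as a linked list of run boundaries (one stored pointer per
 * run) even when the element size does not keep those slots naturally
 * aligned; for example, with 8-byte pointers an address ending in ...3 is
 * rounded up to the next multiple of 8.
 */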

#define swap(a, b) { \
    s = b; \
    i = size; \
    do { \
        tmp = *a; *a++ = *s; *s++ = tmp; \
    } while (--i); \
    a -= size; \
}
#define reverse(bot, top) { \
    s = top; \
    do { \
        i = size; \
        do { \
            tmp = *bot; *bot++ = *s; *s++ = tmp; \
        } while (--i); \
        s -= size2; \
    } while (bot < s); \
}

/*
 * This is to avoid out-of-bounds addresses in sorting the
 * last 4 elements.
 */
static void
insertionsort(char *a, int n, int size, int (*cmp)(const void *, const void *, void *), void *context) {
    char *ai, *s, *t, *u, tmp;
    int i;

    for (ai = a + size; --n >= 1; ai += size)
        for (t = ai; t > a; t -= size) {
            u = t - size;
            if (cmp(u, t, context) <= 0)
                break;
            swap(u, t);
        }
}

/*
 * Optional hybrid natural/pairwise first pass.  Eats up list1 in runs of
 * increasing order, list2 in a corresponding linked list.  Checks for runs
 * when THRESHOLD/2 pairs compare with same sense.  (Only used when NATURAL
 * is defined.  Otherwise simple pairwise merging is used.)
 */
static void
setup(char *list1, char *list2, int n, int size, int (*cmp)(const void *, const void *, void *), void *context) {
    int i, length, size2, tmp, sense;
    char *f1, *f2, *s, *l2, *last, *p2;

    size2 = size * 2;
    if (n <= 5) {
        insertionsort(list1, n, size, cmp, context);
        *EVAL(list2) = (uint8_t *)list2 + n * size;
        return;
    }
    /*
     * Avoid running pointers out of bounds; limit n to evens
     * for simplicity.
     */
    i = 4 + (n & 1);
    insertionsort(list1 + (n - i) * size, i, size, cmp, context);
    last = list1 + size * (n - i);
    *EVAL(list2 + (last - list1)) = list2 + n * size;

#ifdef NATURAL
    p2 = list2;
    f1 = list1;
    sense = (cmp(f1, f1 + size, context) > 0);
    for (; f1 < last; sense = !sense) {
        length = 2;
        /* Find pairs with same sense. */
        for (f2 = f1 + size2; f2 < last; f2 += size2) {
            if ((cmp(f2, f2 + size, context) > 0) != sense)
                break;
            length += 2;
        }
        if (length < THRESHOLD) {        /* Pairwise merge */
            do {
                p2 = *EVAL(p2) = f1 + size2 - list1 + list2;
                if (sense > 0)
                    swap(f1, f1 + size);
            } while ((f1 += size2) < f2);
        } else {                         /* Natural merge */
            l2 = f2;
            for (f2 = f1 + size2; f2 < l2; f2 += size2) {
                if ((cmp(f2 - size, f2, context) > 0) != sense) {
                    p2 = *EVAL(p2) = f2 - list1 + list2;
                    if (sense > 0)
                        reverse(f1, f2 - size);
                    f1 = f2;
                }
            }
            if (sense > 0)
                reverse(f1, f2 - size);
            f1 = f2;
            if (f2 < last || cmp(f2 - size, f2, context) > 0)
                p2 = *EVAL(p2) = f2 - list1 + list2;
            else
                p2 = *EVAL(p2) = list2 + n * size;
        }
    }
#else /* pairwise merge only. */
    for (f1 = list1, p2 = list2; f1 < last; f1 += size2) {
        p2 = *EVAL(p2) = p2 + size2;
        if (cmp(f1, f1 + size, context) > 0)
            swap(f1, f1 + size);
    }
#endif /* NATURAL */
}

/*
 * Arguments are as for qsort.
 */
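/*
 * Added note: bsd_mergesort allocates nmemb * size + PSIZE bytes of scratch
 * space and requires size >= PSIZE / 2 so that a stored run pointer fits
 * within two elements of the scratch list.  It returns 0 on success, -1 with
 * errno set to EINVAL when the element size is too small, or -1 when the
 * scratch allocation fails.
 */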
static int
bsd_mergesort(void *base, int nmemb, int size, CFComparatorFunction cmp, void *context) {
    register int i, sense;
    int big, iflag;
    register uint8_t *f1, *f2, *t, *b, *tp2, *q, *l1, *l2;
    uint8_t *list2, *list1, *p2, *p, *last, **p1;

    if (size < (int)PSIZE / 2) {    /* Pointers must fit into 2 * size. */
        errno = EINVAL;
        return (-1);
    }

    /*
     * XXX
     * Stupid subtraction for the Cray.
     */
    iflag = 0;
    if (!(size % ISIZE) && !(((char *)base - (char *)0) % ISIZE))
        iflag = 1;

    if ((list2 = malloc(nmemb * size + PSIZE)) == NULL)
        return (-1);

    list1 = base;
    setup(list1, list2, nmemb, size, cmp, context);
    last = list2 + nmemb * size;
    i = big = 0;
    while (*EVAL(list2) != last) {
        l2 = list1;
        p1 = EVAL(list1);
        for (tp2 = p2 = list2; p2 != last; p1 = EVAL(l2)) {
            p2 = *EVAL(p2);
            f1 = l2;
            f2 = l1 = list1 + (p2 - list2);
            if (p2 != last)
                p2 = *EVAL(p2);
            l2 = list1 + (p2 - list2);
            while (f1 < l1 && f2 < l2) {
                if ((*cmp)(f1, f2, context) <= 0) {
                    q = f2;
                    b = f1, t = l1;
                    sense = -1;
                } else {
                    q = f1;
                    b = f2, t = l2;
                    sense = 0;
                }
                if (!big) {    /* here i = 0 */
/* LINEAR: */       while ((b += size) < t && cmp(q, b, context) > sense)
                        if (++i == 6) {
                            big = 1;
                            goto EXPONENTIAL;
                        }
                } else {
EXPONENTIAL:        for (i = size; ; i <<= 1)
                        if ((p = (b + i)) >= t) {
                            if ((p = t - size) > b &&
                                (*cmp)(q, p, context) <= sense)
                                t = p;
                            else
                                b = p;
                            break;
                        } else if ((*cmp)(q, p, context) <= sense) {
                            t = p;
                            if (i == size)
                                big = 0;
                            goto FASTCASE;
                        } else
                            b = p;
/* SLOWCASE: */     while (t > b + size) {
                        i = (((t - b) / size) >> 1) * size;
                        if ((*cmp)(q, p = b + i, context) <= sense)
                            t = p;
                        else
                            b = p;
                    }
                    goto COPY;
FASTCASE:           while (i > size)
                        if ((*cmp)(q, p = b + (i >>= 1), context) <= sense)
                            t = p;
                        else
                            b = p;
COPY:               b = t;
                }
                i = size;
                if (q == f1) {
                    if (iflag) {
                        ICOPY_LIST(f2, tp2, b);
                        ICOPY_ELT(f1, tp2, i);
                    } else {
                        CCOPY_LIST(f2, tp2, b);
                        CCOPY_ELT(f1, tp2, i);
                    }
                } else {
                    if (iflag) {
                        ICOPY_LIST(f1, tp2, b);
                        ICOPY_ELT(f2, tp2, i);
                    } else {
                        CCOPY_LIST(f1, tp2, b);
                        CCOPY_ELT(f2, tp2, i);
                    }
                }
            }
            if (f2 < l2) {
                if (iflag)
                    ICOPY_LIST(f2, tp2, l2);
                else
                    CCOPY_LIST(f2, tp2, l2);
            } else if (f1 < l1) {
                if (iflag)
                    ICOPY_LIST(f1, tp2, l1);
                else
                    CCOPY_LIST(f1, tp2, l1);
            }
            *p1 = l2;
        }
        tp2 = list1;    /* swap list1, list2 */
        list1 = list2;
        list2 = tp2;
        last = list2 + nmemb * size;
    }
    if (base == list2) {
        memmove(list2, list1, nmemb * size);
        list2 = list1;
    }
    free(list2);
    return (0);
}

#undef NATURAL
#undef THRESHOLD
#undef ISIZE
#undef PSIZE
#undef ICOPY_LIST
#undef ICOPY_ELT
#undef CCOPY_LIST
#undef CCOPY_ELT
#undef EVAL
#undef swap
#undef reverse

#if 0
/* stdlib.subproj/heapsort.c =========================================== */

/*
 * Copyright (c) 1991, 1993
 *    The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ronnie Kon at Mindcraft Inc., Kevin Lew and Elmer Yglesias.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Swap two areas of size number of bytes.  Although qsort(3) permits random
 * blocks of memory to be sorted, sorting pointers is almost certainly the
 * common case (and, were it not, could easily be made so).  Regardless, it
 * isn't worth optimizing; the SWAP's get sped up by the cache, and pointer
 * arithmetic gets lost in the time required for comparison function calls.
 */
#define SWAP(a, b, count, size, tmp) { \
    count = size; \
    do { \
        tmp = *a; \
        *a++ = *b; \
        *b++ = tmp; \
    } while (--count); \
}

/* Copy one block of size size to another. */
#define COPY(a, b, count, size, tmp1, tmp2) { \
    count = size; \
    tmp1 = a; \
    tmp2 = b; \
    do { \
        *tmp1++ = *tmp2++; \
    } while (--count); \
}

/*
 * Build the list into a heap, where a heap is defined such that for
 * the records K1 ... KN, Kj/2 >= Kj for 1 <= j/2 <= j <= N.
 *
 * There are two cases.  If j == nmemb, select largest of Ki and Kj.  If
 * j < nmemb, select largest of Ki, Kj and Kj+1.
 */
#define CREATE(initval, nmemb, par_i, child_i, par, child, size, count, tmp) { \
    for (par_i = initval; (child_i = par_i * 2) <= nmemb; par_i = child_i) { \
        child = base + child_i * size; \
        if (child_i < nmemb && compar(child, child + size, context) < 0) { \
            child += size; \
            ++child_i; \
        } \
        par = base + par_i * size; \
        if (compar(child, par, context) <= 0) \
            break; \
        SWAP(par, child, count, size, tmp); \
    } \
}

/*
 * Select the top of the heap and 'heapify'.  Since by far the most expensive
 * action is the call to the compar function, a considerable optimization
 * in the average case can be achieved due to the fact that k, the displaced
 * element, is usually quite small, so it would be preferable to first
 * heapify, always maintaining the invariant that the larger child is copied
 * over its parent's record.
 *
 * Then, starting from the *bottom* of the heap, finding k's correct place,
 * again maintaining the invariant.  As a result of the invariant no element
 * is 'lost' when k is assigned its correct place in the heap.
 *
 * The time savings from this optimization are on the order of 15-20% for the
 * average case.  See Knuth, Vol. 3, page 158, problem 18.
 *
 * XXX Don't break the #define SELECT line, below.  Reiser cpp gets upset.
 */
#define SELECT(par_i, child_i, nmemb, par, child, size, k, count, tmp1, tmp2) { \
    for (par_i = 1; (child_i = par_i * 2) <= nmemb; par_i = child_i) { \
        child = base + child_i * size; \
        if (child_i < nmemb && compar(child, child + size, context) < 0) { \
            child += size; \
            ++child_i; \
        } \
        par = base + par_i * size; \
        COPY(par, child, count, size, tmp1, tmp2); \
    } \
    for (;;) { \
        child_i = par_i; \
        par_i = child_i / 2; \
        child = base + child_i * size; \
        par = base + par_i * size; \
        if (child_i == 1 || compar(k, par, context) < 0) { \
            COPY(child, k, count, size, tmp1, tmp2); \
            break; \
        } \
        COPY(child, par, count, size, tmp1, tmp2); \
    } \
}

/*
 * Heapsort -- Knuth, Vol. 3, page 145.  Runs in O (N lg N), both average
 * and worst.  While heapsort is faster than the worst case of quicksort,
 * the BSD quicksort does median selection so that the chance of finding
 * a data set that will trigger the worst case is nonexistent.  Heapsort's
 * only advantage over quicksort is that it requires little additional memory.
 */
static int
bsd_heapsort(void *vbase, int nmemb, int size, int (*compar)(const void *, const void *, void *), void *context) {
    register int cnt, i, j, l;
    register char tmp, *tmp1, *tmp2;
    char *base, *k, *p, *t;

    if (nmemb <= 1)
        return (0);

    if (!size) {
        errno = EINVAL;
        return (-1);
    }

    if ((k = malloc(size)) == NULL)
        return (-1);

    /*
     * Items are numbered from 1 to nmemb, so offset from size bytes
     * below the starting address.
     */
    base = (char *)vbase - size;

    for (l = nmemb / 2 + 1; --l;)
        CREATE(l, nmemb, i, j, t, p, size, cnt, tmp);

    /*
     * For each element of the heap, save the largest element into its
     * final slot, save the displaced element (k), then recreate the
     * heap.
     */
    while (nmemb > 1) {
        COPY(k, base + nmemb * size, cnt, size, tmp1, tmp2);
        COPY(base + nmemb * size, base + size, cnt, size, tmp1, tmp2);
        --nmemb;
        SELECT(i, j, nmemb, t, p, size, k, cnt, tmp1, tmp2);
    }
    free(k);
    return (0);
}

#undef SWAP
#undef COPY
#undef CREATE
#undef SELECT

#endif

/* ===================================================================== */

#undef EINVAL