Base.subproj/CFSortFunctions.c (apple/cf, CF-368.18)
1 /*
2 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /* CFSortFunctions.c
24 Copyright 1999-2002, Apple, Inc. All rights reserved.
25 Responsibility: Christopher Kane
26 */
27
28 /* This file contains code copied from the Libc project's files
29 qsort.c, merge.c, and heapsort.c, and modified to suit the
30 needs of CF, which needs the comparison callback to have an
31 additional parameter. The code is collected into this one
32 file so that the individual BSD sort routines can remain
33 private and static. The BSD functions may eventually be
34 merged rather than kept separate.
35 */
36
37 #include <CoreFoundation/CFBase.h>
38 #if defined(__MACH__) || defined(__LINUX__) || defined(__FREEBSD__)
39 #include <sys/types.h>
40 #else
41 #define EINVAL 22
42 #endif
43 #include <stdlib.h>
44 #include <errno.h>
45 #include <string.h>
46 #include <stddef.h>
47 #include "CFInternal.h"
48
49 static int bsd_mergesort(void *base, int nmemb, int size, CFComparatorFunction cmp, void *context);
50
51 /* Comparator is passed the address of the values. */
52 void CFMergeSortArray(void *list, CFIndex count, CFIndex elementSize, CFComparatorFunction comparator, void *context) {
53 bsd_mergesort(list, count, elementSize, comparator, context);
54 }
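/* Usage sketch, kept under #if 0 like the reference code below: one way a
   caller might drive these sorts on a flat C array of CFIndex values.  The
   comparator is handed the addresses of two elements plus the caller's
   context pointer.  The names __exampleCompareIndexes, __exampleSortIndexes
   and the descending flag are illustrative only, not part of any CF API. */
#if 0
static CFComparisonResult __exampleCompareIndexes(const void *val1, const void *val2, void *context) {
    CFIndex a = *(const CFIndex *)val1;
    CFIndex b = *(const CFIndex *)val2;
    Boolean descending = (context != NULL) && *(Boolean *)context;
    if (a == b) return kCFCompareEqualTo;
    if (descending) return (a > b) ? kCFCompareLessThan : kCFCompareGreaterThan;
    return (a < b) ? kCFCompareLessThan : kCFCompareGreaterThan;
}

static void __exampleSortIndexes(void) {
    CFIndex values[] = {5, 1, 4, 2, 3};
    Boolean descending = false;
    /* Merge sort; allocates a temporary buffer of roughly count * elementSize bytes. */
    CFMergeSortArray(values, 5, sizeof(CFIndex), __exampleCompareIndexes, &descending);
    /* Bentley-McIlroy quicksort (CFQSortArray, defined later in this file); sorts in place. */
    CFQSortArray(values, 5, sizeof(CFIndex), __exampleCompareIndexes, &descending);
}
#endif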
55
56 #if 0
57 // non-recursing qsort with side index list which is the actual thing sorted
58 // XXX kept here for possible later reference
59 void cjk_big_isort(char *list, int size, int *idx_list, int (*comp)(const void *, const void *, void *), void *context, int lo, int hi) {
60 int t, idx, idx2;
61 char *v1;
62 for (idx = lo + 1; idx < hi + 1; idx++) {
63 t = idx_list[idx];
64 v1 = list + t * size;
65 for (idx2 = idx; lo < idx2 && (comp(v1, list + idx_list[idx2 - 1] * size, context) < 0); idx2--)
66 idx_list[idx2] = idx_list[idx2 - 1];
67 idx_list[idx2] = t;
68 }
69 }
70
71 void cjk_big_qsort(char *list, int size, int *idx_list, int (*comp)(const void *, const void *, void *), void *context, int lo, int hi, int *recurse) {
72 int p, idx, mid, g1, g2, g3, r1, r2;
73 char *v1, *v2, *v3;
74 g1 = lo;
75 g2 = (hi + lo) / 2; //random() % (hi - lo + 1) + lo;
76 g3 = hi;
77 v1 = list + idx_list[g1] * size;
78 v2 = list + idx_list[g2] * size;
79 v3 = list + idx_list[g3] * size;
80 if (comp(v1, v2, context) < 0) {
81 p = comp(v2, v3, context) < 0 ? g2 : (comp(v1, v3, context) < 0 ? g3 : g1);
82 } else {
83 p = comp(v2, v3, context) > 0 ? g2 : (comp(v1, v3, context) < 0 ? g1 : g3);
84 }
85 if (lo != p) {int t = idx_list[lo]; idx_list[lo] = idx_list[p]; idx_list[p] = t;}
86 r2 = 1;
87 v2 = list + idx_list[lo] * size;
88 for (mid = lo, idx = lo + 1; idx < hi + 1; idx++) {
89 v1 = list + idx_list[idx] * size;
90 r1 = comp(v1, v2, context);
91 r2 = r2 && (0 == r1);
92 if (r1 < 0) {
93 mid++;
94 if (idx != mid) {int t = idx_list[idx]; idx_list[idx] = idx_list[mid]; idx_list[mid] = t;}
95 }
96 }
97 if (lo != mid) {int t = idx_list[lo]; idx_list[lo] = idx_list[mid]; idx_list[mid] = t;}
98 *recurse = !r2 ? mid : -1;
99 }
100
101 void cjk_big_sort(char *list, int n, int size, int (*comp)(const void *, const void *, void *), void *context) {
102 int len = 0, los[40], his[40];
103 // 40 is big enough for 2^40 elements, in theory; in practice, much
104 // lower but still sufficient; should recurse if the 40 limit is hit
105 int *idx_list = malloc(sizeof(int) * n);
106 char *tmp_list = malloc(n * size);
107 int idx;
108 for (idx = 0; idx < n; idx++) {
109 idx_list[idx] = idx;
110 }
111 los[len] = 0;
112 his[len++] = n - 1;
113 while (0 < len) {
114 int lo, hi;
115 len--;
116 lo = los[len];
117 hi = his[len];
118 if (5 < (hi - lo)) {
119 int mid;
120 cjk_big_qsort(list, size, idx_list, comp, context, lo, hi, &mid);
121 if (0 < mid) {
122 los[len] = lo;
123 his[len++] = mid - 1;
124 los[len] = mid + 1;
125 his[len++] = hi;
126 }
127 } else {
128 cjk_big_isort(list, size, idx_list, comp, context, lo, hi);
129 }
130 }
131 for (idx = 0; idx < n; idx++)
132 memmove(tmp_list + idx * size, list + idx_list[idx] * size, size);
133 memmove(list, tmp_list, n * size);
134 free(tmp_list);
135 free(idx_list);
136 }
137 #endif
138
139
140 /* stdlib.subproj/qsort.c ============================================== */
141
142 /*
143 * Copyright (c) 1992, 1993
144 * The Regents of the University of California. All rights reserved.
145 *
146 * Redistribution and use in source and binary forms, with or without
147 * modification, are permitted provided that the following conditions
148 * are met:
149 * 1. Redistributions of source code must retain the above copyright
150 * notice, this list of conditions and the following disclaimer.
151 * 2. Redistributions in binary form must reproduce the above copyright
152 * notice, this list of conditions and the following disclaimer in the
153 * documentation and/or other materials provided with the distribution.
154 * 3. All advertising materials mentioning features or use of this software
155 * must display the following acknowledgement:
156 * This product includes software developed by the University of
157 * California, Berkeley and its contributors.
158 * 4. Neither the name of the University nor the names of its contributors
159 * may be used to endorse or promote products derived from this software
160 * without specific prior written permission.
161 *
162 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
163 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
164 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
165 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
166 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
167 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
168 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
169 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
170 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
171 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
172 * SUCH DAMAGE.
173 */
174
175 #if !defined(min)
176 #define min(a, b) ((a) < (b) ? (a) : (b))
177 #endif
178
179 /*
180 * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
181 */
182 #define swapcode(TYPE, parmi, parmj, n) { \
183 long i = (n) / sizeof (TYPE); \
184 register TYPE *pi = (TYPE *) (parmi); \
185 register TYPE *pj = (TYPE *) (parmj); \
186 do { \
187 register TYPE t = *pi; \
188 *pi++ = *pj; \
189 *pj++ = t; \
190 } while (--i > 0); \
191 }
192
193 #define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
194 es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1;
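/* swaptype classifies how elements can be swapped: 0 when the base pointer is
   long-aligned and the element size is exactly sizeof(long), so swap() below
   moves a single long; 1 when elements are a long-aligned multiple of
   sizeof(long), so swapfunc() copies long-at-a-time; 2 otherwise, forcing a
   byte-at-a-time copy. */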
195
196 CF_INLINE void
197 swapfunc(char *a, char *b, int n, int swaptype) {
198 if(swaptype <= 1)
199 swapcode(long, a, b, n)
200 else
201 swapcode(char, a, b, n)
202 }
203
204 #define swap(a, b) \
205 if (swaptype == 0) { \
206 long t = *(long *)(a); \
207 *(long *)(a) = *(long *)(b); \
208 *(long *)(b) = t; \
209 } else \
210 swapfunc(a, b, es, swaptype)
211
212 #define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype)
213
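/* Return whichever of a, b, c is the median under cmp; used below to pick the
   pivot (median of three, or a median of three medians for large n). */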
214 CF_INLINE char *
215 med3(char *a, char *b, char *c, int (*cmp)(const void *, const void *, void *), void *context) {
216 return cmp(a, b, context) < 0 ?
217 (cmp(b, c, context) < 0 ? b : (cmp(a, c, context) < 0 ? c : a ))
218 :(cmp(b, c, context) > 0 ? b : (cmp(a, c, context) < 0 ? a : c ));
219 }
220
221 /* Comparator is passed the address of the values. */
222 void CFQSortArray(void *aVoidStar, CFIndex n, CFIndex es, CFComparatorFunction cmp, void *context)
223 {
224 char *a = (char *)aVoidStar;
225 char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
226 int d, r, swaptype, swap_cnt;
227
228 loop: SWAPINIT(a, es);
229 swap_cnt = 0;
230 if (n < 7) {
231 for (pm = a + es; pm < (char *) a + n * es; pm += es)
232 for (pl = pm; pl > (char *) a && cmp(pl - es, pl, context) > 0;
233 pl -= es)
234 swap(pl, pl - es);
235 return;
236 }
237 pm = a + (n / 2) * es;
238 if (n > 7) {
239 pl = a;
240 pn = a + (n - 1) * es;
241 if (n > 40) {
242 d = (n / 8) * es;
243 pl = med3(pl, pl + d, pl + 2 * d, cmp, context);
244 pm = med3(pm - d, pm, pm + d, cmp, context);
245 pn = med3(pn - 2 * d, pn - d, pn, cmp, context);
246 }
247 pm = med3(pl, pm, pn, cmp, context);
248 }
249 swap(a, pm);
250 pa = pb = a + es;
251
252 pc = pd = a + (n - 1) * es;
253 for (;;) {
254 while (pb <= pc && (r = cmp(pb, a, context)) <= 0) {
255 if (r == 0) {
256 swap_cnt = 1;
257 swap(pa, pb);
258 pa += es;
259 }
260 pb += es;
261 }
262 while (pb <= pc && (r = cmp(pc, a, context)) >= 0) {
263 if (r == 0) {
264 swap_cnt = 1;
265 swap(pc, pd);
266 pd -= es;
267 }
268 pc -= es;
269 }
270 if (pb > pc)
271 break;
272 swap(pb, pc);
273 swap_cnt = 1;
274 pb += es;
275 pc -= es;
276 }
277 if (swap_cnt == 0) { /* Switch to insertion sort */
278 for (pm = a + es; pm < (char *) a + n * es; pm += es)
279 for (pl = pm; pl > (char *) a && cmp(pl - es, pl, context) > 0;
280 pl -= es)
281 swap(pl, pl - es);
282 return;
283 }
284
285 pn = a + n * es;
286 r = min(pa - (char *)a, pb - pa);
287 vecswap(a, pb - r, r);
288 r = min(pd - pc, pn - pd - es);
289 vecswap(pb, pn - r, r);
290 if ((r = pb - pa) > es)
291 CFQSortArray(a, r / es, es, cmp, context);
292 if ((r = pd - pc) > es) {
293 /* Iterate rather than recurse to save stack space */
294 a = pn - r;
295 n = r / es;
296 goto loop;
297 }
298 /* CFQSortArray(pn - r, r / es, es, cmp, context);*/
299 }
300
301 #undef min
302 #undef swapcode
303 #undef SWAPINIT
304 #undef swap
305 #undef vecswap
306
307 /* stdlib.subproj/mergesort.c ========================================== */
308
309 /*
310 * Copyright (c) 1992, 1993
311 * The Regents of the University of California. All rights reserved.
312 *
313 * This code is derived from software contributed to Berkeley by
314 * Peter McIlroy.
315 *
316 * Redistribution and use in source and binary forms, with or without
317 * modification, are permitted provided that the following conditions
318 * are met:
319 * 1. Redistributions of source code must retain the above copyright
320 * notice, this list of conditions and the following disclaimer.
321 * 2. Redistributions in binary form must reproduce the above copyright
322 * notice, this list of conditions and the following disclaimer in the
323 * documentation and/or other materials provided with the distribution.
324 * 3. All advertising materials mentioning features or use of this software
325 * must display the following acknowledgement:
326 * This product includes software developed by the University of
327 * California, Berkeley and its contributors.
328 * 4. Neither the name of the University nor the names of its contributors
329 * may be used to endorse or promote products derived from this software
330 * without specific prior written permission.
331 *
332 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
333 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
334 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
335 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
336 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
337 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
338 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
339 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
340 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
341 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
342 * SUCH DAMAGE.
343 */
344
345
346 /*
347 * Hybrid exponential search/linear search merge sort with hybrid
348 * natural/pairwise first pass. Requires about .3% more comparisons
349 * for random data than LSMS with pairwise first pass alone.
350 * It works for objects as small as two bytes.
351 */
352
353 #define NATURAL
354 #define THRESHOLD 16 /* Best choice for natural merge cut-off. */
355
356 /* #define NATURAL to get hybrid natural merge.
357 * (The default is pairwise merging.)
358 */
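/* With NATURAL defined above, setup() below scans for stretches of adjacent
   pairs that compare in the same direction; stretches of at least THRESHOLD
   elements are treated as natural runs (descending runs are reversed in
   place) and their boundaries are recorded in the linked list threaded
   through list2, so largely presorted input merges in fewer passes. */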
359
360
361 #define ISIZE sizeof(int)
362 #define PSIZE sizeof(uint8_t *)
363 #define ICOPY_LIST(src, dst, last) \
364 do \
365 *(int*)dst = *(int*)src, src += ISIZE, dst += ISIZE; \
366 while(src < last)
367 #define ICOPY_ELT(src, dst, i) \
368 do \
369 *(int*) dst = *(int*) src, src += ISIZE, dst += ISIZE; \
370 while (i -= ISIZE)
371
372 #define CCOPY_LIST(src, dst, last) \
373 do \
374 *dst++ = *src++; \
375 while (src < last)
376 #define CCOPY_ELT(src, dst, i) \
377 do \
378 *dst++ = *src++; \
379 while (i -= 1)
380
381 /*
382 * Find the next possible pointer head. (Trickery for forcing an array
383 * to do double duty as a linked list when objects do not align with word
384 * boundaries.)
385 */
386 /* Assumption: PSIZE is a power of 2. */
387 #define EVAL(p) (uint8_t **) \
388 ((uint8_t *)0 + \
389 (((uint8_t *)p + PSIZE - 1 - (uint8_t *) 0) & ~(PSIZE - 1)))
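/* EVAL(p) rounds p up to the next PSIZE-aligned address so that a pointer can
   be stored there; for example, with PSIZE == 8 an address ending in ...3
   rounds up to the next address ending in ...8, while an already-aligned
   address is returned unchanged. */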
390
391
392 #define swap(a, b) { \
393 s = b; \
394 i = size; \
395 do { \
396 tmp = *a; *a++ = *s; *s++ = tmp; \
397 } while (--i); \
398 a -= size; \
399 }
400 #define reverse(bot, top) { \
401 s = top; \
402 do { \
403 i = size; \
404 do { \
405 tmp = *bot; *bot++ = *s; *s++ = tmp; \
406 } while (--i); \
407 s -= size2; \
408 } while(bot < s); \
409 }
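/* swap() exchanges the size-byte elements at a and b (leaving a pointing at
   its original element); reverse() flips the run of elements between bot and
   top in place.  Both rely on the locals s, i, tmp and the size/size2 values
   declared by their callers below. */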
410
411 /*
412 * This is to avoid out-of-bounds addresses in sorting the
413 * last 4 elements.
414 */
415 static void
416 insertionsort(char *a, int n, int size, int (*cmp)(const void *, const void *, void *), void *context) {
417 char *ai, *s, *t, *u, tmp;
418 int i;
419
420 for (ai = a+size; --n >= 1; ai += size)
421 for (t = ai; t > a; t -= size) {
422 u = t - size;
423 if (cmp(u, t, context) <= 0)
424 break;
425 swap(u, t);
426 }
427 }
428
429 /*
430 * Optional hybrid natural/pairwise first pass. Eats up list1 in runs of
431 * increasing order, list2 in a corresponding linked list. Checks for runs
432 * when THRESHOLD/2 pairs compare with same sense. (Only used when NATURAL
433 * is defined. Otherwise simple pairwise merging is used.)
434 */
435 static void
436 setup(char *list1, char *list2, int n, int size, int (*cmp)(const void *, const void *, void *), void *context) {
437 int i, length, size2, tmp, sense;
438 char *f1, *f2, *s, *l2, *last, *p2;
439
440 size2 = size*2;
441 if (n <= 5) {
442 insertionsort(list1, n, size, cmp, context);
443 *EVAL(list2) = (uint8_t*) list2 + n*size;
444 return;
445 }
446 /*
447 * Avoid running pointers out of bounds; limit n to evens
448 * for simplicity.
449 */
450 i = 4 + (n & 1);
451 insertionsort(list1 + (n - i) * size, i, size, cmp, context);
452 last = list1 + size * (n - i);
453 *EVAL(list2 + (last - list1)) = list2 + n * size;
454
455 #ifdef NATURAL
456 p2 = list2;
457 f1 = list1;
458 sense = (cmp(f1, f1 + size, context) > 0);
459 for (; f1 < last; sense = !sense) {
460 length = 2;
461 /* Find pairs with same sense. */
462 for (f2 = f1 + size2; f2 < last; f2 += size2) {
463 if ((cmp(f2, f2+ size, context) > 0) != sense)
464 break;
465 length += 2;
466 }
467 if (length < THRESHOLD) { /* Pairwise merge */
468 do {
469 p2 = *EVAL(p2) = f1 + size2 - list1 + list2;
470 if (sense > 0)
471 swap (f1, f1 + size);
472 } while ((f1 += size2) < f2);
473 } else { /* Natural merge */
474 l2 = f2;
475 for (f2 = f1 + size2; f2 < l2; f2 += size2) {
476 if ((cmp(f2-size, f2, context) > 0) != sense) {
477 p2 = *EVAL(p2) = f2 - list1 + list2;
478 if (sense > 0)
479 reverse(f1, f2-size);
480 f1 = f2;
481 }
482 }
483 if (sense > 0)
484 reverse (f1, f2-size);
485 f1 = f2;
486 if (f2 < last || cmp(f2 - size, f2, context) > 0)
487 p2 = *EVAL(p2) = f2 - list1 + list2;
488 else
489 p2 = *EVAL(p2) = list2 + n*size;
490 }
491 }
492 #else /* pairwise merge only. */
493 for (f1 = list1, p2 = list2; f1 < last; f1 += size2) {
494 p2 = *EVAL(p2) = p2 + size2;
495 if (cmp (f1, f1 + size, context) > 0)
496 swap(f1, f1 + size);
497 }
498 #endif /* NATURAL */
499 }
500
501 /*
502 * Arguments are as for qsort.
503 */
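/* Returns 0 on success; returns -1 with errno set to EINVAL when the element
   size is too small to hold the list pointers (size < PSIZE / 2), or -1 when
   the temporary buffer cannot be allocated. */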
504 static int
505 bsd_mergesort(void *base, int nmemb, int size, CFComparatorFunction cmp, void *context) {
506 register int i, sense;
507 int big, iflag;
508 register uint8_t *f1, *f2, *t, *b, *tp2, *q, *l1, *l2;
509 uint8_t *list2, *list1, *p2, *p, *last, **p1;
510
511 if (size < (int)PSIZE / 2) { /* Pointers must fit into 2 * size. */
512 errno = EINVAL;
513 return (-1);
514 }
515
516 /*
517 * XXX
518 * Stupid subtraction for the Cray.
519 */
520 iflag = 0;
521 if (!(size % ISIZE) && !(((char *)base - (char *)0) % ISIZE))
522 iflag = 1;
523
524 if ((list2 = malloc(nmemb * size + PSIZE)) == NULL)
525 return (-1);
526
527 list1 = base;
528 setup(list1, list2, nmemb, size, cmp, context);
529 last = list2 + nmemb * size;
530 i = big = 0;
531 while (*EVAL(list2) != last) {
532 l2 = list1;
533 p1 = EVAL(list1);
534 for (tp2 = p2 = list2; p2 != last; p1 = EVAL(l2)) {
535 p2 = *EVAL(p2);
536 f1 = l2;
537 f2 = l1 = list1 + (p2 - list2);
538 if (p2 != last)
539 p2 = *EVAL(p2);
540 l2 = list1 + (p2 - list2);
541 while (f1 < l1 && f2 < l2) {
542 if ((*cmp)(f1, f2, context) <= 0) {
543 q = f2;
544 b = f1, t = l1;
545 sense = -1;
546 } else {
547 q = f1;
548 b = f2, t = l2;
549 sense = 0;
550 }
551 if (!big) { /* here i = 0 */
552 /* LINEAR: */ while ((b += size) < t && cmp(q, b, context) > sense)
553 if (++i == 6) {
554 big = 1;
555 goto EXPONENTIAL;
556 }
557 } else {
558 EXPONENTIAL: for (i = size; ; i <<= 1)
559 if ((p = (b + i)) >= t) {
560 if ((p = t - size) > b &&
561 (*cmp)(q, p, context) <= sense)
562 t = p;
563 else
564 b = p;
565 break;
566 } else if ((*cmp)(q, p, context) <= sense) {
567 t = p;
568 if (i == size)
569 big = 0;
570 goto FASTCASE;
571 } else
572 b = p;
573 /* SLOWCASE: */ while (t > b+size) {
574 i = (((t - b) / size) >> 1) * size;
575 if ((*cmp)(q, p = b + i, context) <= sense)
576 t = p;
577 else
578 b = p;
579 }
580 goto COPY;
581 FASTCASE: while (i > size)
582 if ((*cmp)(q,
583 p = b + (i >>= 1), context) <= sense)
584 t = p;
585 else
586 b = p;
587 COPY: b = t;
588 }
589 i = size;
590 if (q == f1) {
591 if (iflag) {
592 ICOPY_LIST(f2, tp2, b);
593 ICOPY_ELT(f1, tp2, i);
594 } else {
595 CCOPY_LIST(f2, tp2, b);
596 CCOPY_ELT(f1, tp2, i);
597 }
598 } else {
599 if (iflag) {
600 ICOPY_LIST(f1, tp2, b);
601 ICOPY_ELT(f2, tp2, i);
602 } else {
603 CCOPY_LIST(f1, tp2, b);
604 CCOPY_ELT(f2, tp2, i);
605 }
606 }
607 }
608 if (f2 < l2) {
609 if (iflag)
610 ICOPY_LIST(f2, tp2, l2);
611 else
612 CCOPY_LIST(f2, tp2, l2);
613 } else if (f1 < l1) {
614 if (iflag)
615 ICOPY_LIST(f1, tp2, l1);
616 else
617 CCOPY_LIST(f1, tp2, l1);
618 }
619 *p1 = l2;
620 }
621 tp2 = list1; /* swap list1, list2 */
622 list1 = list2;
623 list2 = tp2;
624 last = list2 + nmemb*size;
625 }
626 if (base == list2) {
627 memmove(list2, list1, nmemb*size);
628 list2 = list1;
629 }
630 free(list2);
631 return (0);
632 }
633
634 #undef NATURAL
635 #undef THRESHOLD
636 #undef ISIZE
637 #undef PSIZE
638 #undef ICOPY_LIST
639 #undef ICOPY_ELT
640 #undef CCOPY_LIST
641 #undef CCOPY_ELT
642 #undef EVAL
643 #undef swap
644 #undef reverse
645
646 #if 0
647 /* stdlib.subproj/heapsort.c =========================================== */
648
649 /*
650 * Copyright (c) 1991, 1993
651 * The Regents of the University of California. All rights reserved.
652 *
653 * This code is derived from software contributed to Berkeley by
654 * Ronnie Kon at Mindcraft Inc., Kevin Lew and Elmer Yglesias.
655 *
656 * Redistribution and use in source and binary forms, with or without
657 * modification, are permitted provided that the following conditions
658 * are met:
659 * 1. Redistributions of source code must retain the above copyright
660 * notice, this list of conditions and the following disclaimer.
661 * 2. Redistributions in binary form must reproduce the above copyright
662 * notice, this list of conditions and the following disclaimer in the
663 * documentation and/or other materials provided with the distribution.
664 * 3. All advertising materials mentioning features or use of this software
665 * must display the following acknowledgement:
666 * This product includes software developed by the University of
667 * California, Berkeley and its contributors.
668 * 4. Neither the name of the University nor the names of its contributors
669 * may be used to endorse or promote products derived from this software
670 * without specific prior written permission.
671 *
672 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
673 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
674 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
675 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
676 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
677 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
678 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
679 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
680 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
681 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
682 * SUCH DAMAGE.
683 */
684
685 /*
686 * Swap two areas of size number of bytes. Although qsort(3) permits random
687 * blocks of memory to be sorted, sorting pointers is almost certainly the
688 * common case (and, were it not, could easily be made so). Regardless, it
689 * isn't worth optimizing; the SWAP's get sped up by the cache, and pointer
690 * arithmetic gets lost in the time required for comparison function calls.
691 */
692 #define SWAP(a, b, count, size, tmp) { \
693 count = size; \
694 do { \
695 tmp = *a; \
696 *a++ = *b; \
697 *b++ = tmp; \
698 } while (--count); \
699 }
700
701 /* Copy one block of size size to another. */
702 #define COPY(a, b, count, size, tmp1, tmp2) { \
703 count = size; \
704 tmp1 = a; \
705 tmp2 = b; \
706 do { \
707 *tmp1++ = *tmp2++; \
708 } while (--count); \
709 }
710
711 /*
712 * Build the list into a heap, where a heap is defined such that for
713 * the records K1 ... KN, Kj/2 >= Kj for 1 <= j/2 <= j <= N.
714 *
715 * There are two cases. If j == nmemb, select largest of Ki and Kj. If
716 * j < nmemb, select largest of Ki, Kj and Kj+1.
717 */
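/* Elements are treated as a 1-based array: the children of element i live at
   indexes 2*i and 2*i + 1, which is why bsd_heapsort() offsets base one
   element size below vbase. */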
718 #define CREATE(initval, nmemb, par_i, child_i, par, child, size, count, tmp) { \
719 for (par_i = initval; (child_i = par_i * 2) <= nmemb; \
720 par_i = child_i) { \
721 child = base + child_i * size; \
722 if (child_i < nmemb && compar(child, child + size, context) < 0) { \
723 child += size; \
724 ++child_i; \
725 } \
726 par = base + par_i * size; \
727 if (compar(child, par, context) <= 0) \
728 break; \
729 SWAP(par, child, count, size, tmp); \
730 } \
731 }
732
733 /*
734 * Select the top of the heap and 'heapify'. Since by far the most expensive
735 * action is the call to the compar function, a considerable optimization
736 * in the average case can be achieved due to the fact that k, the displaced
737 * element, is usually quite small, so it would be preferable to first
738 * heapify, always maintaining the invariant that the larger child is copied
739 * over its parent's record.
740 *
741 * Then, starting from the *bottom* of the heap, finding k's correct place,
742 * again maintaining the invariant. As a result of the invariant no element
743 * is 'lost' when k is assigned its correct place in the heap.
744 *
745 * The time savings from this optimization are on the order of 15-20% for the
746 * average case. See Knuth, Vol. 3, page 158, problem 18.
747 *
748 * XXX Don't break the #define SELECT line, below. Reiser cpp gets upset.
749 */
750 #define SELECT(par_i, child_i, nmemb, par, child, size, k, count, tmp1, tmp2) { \
751 for (par_i = 1; (child_i = par_i * 2) <= nmemb; par_i = child_i) { \
752 child = base + child_i * size; \
753 if (child_i < nmemb && compar(child, child + size, context) < 0) { \
754 child += size; \
755 ++child_i; \
756 } \
757 par = base + par_i * size; \
758 COPY(par, child, count, size, tmp1, tmp2); \
759 } \
760 for (;;) { \
761 child_i = par_i; \
762 par_i = child_i / 2; \
763 child = base + child_i * size; \
764 par = base + par_i * size; \
765 if (child_i == 1 || compar(k, par, context) < 0) { \
766 COPY(child, k, count, size, tmp1, tmp2); \
767 break; \
768 } \
769 COPY(child, par, count, size, tmp1, tmp2); \
770 } \
771 }
772
773 /*
774 * Heapsort -- Knuth, Vol. 3, page 145. Runs in O (N lg N), both average
775 * and worst. While heapsort is faster than the worst case of quicksort,
776 * the BSD quicksort does median selection so that the chance of finding
777 * a data set that will trigger the worst case is nonexistent. Heapsort's
778 * only advantage over quicksort is that it requires little additional memory.
779 */
780 static int
781 bsd_heapsort(void *vbase, int nmemb, int size, int (*compar)(const void *, const void *, void *), void *context) {
782 register int cnt, i, j, l;
783 register char tmp, *tmp1, *tmp2;
784 char *base, *k, *p, *t;
785
786 if (nmemb <= 1)
787 return (0);
788
789 if (!size) {
790 errno = EINVAL;
791 return (-1);
792 }
793
794 if ((k = malloc(size)) == NULL)
795 return (-1);
796
797 /*
798 * Items are numbered from 1 to nmemb, so offset from size bytes
799 * below the starting address.
800 */
801 base = (char *)vbase - size;
802
803 for (l = nmemb / 2 + 1; --l;)
804 CREATE(l, nmemb, i, j, t, p, size, cnt, tmp);
805
806 /*
807 * For each element of the heap, save the largest element into its
808 * final slot, save the displaced element (k), then recreate the
809 * heap.
810 */
811 while (nmemb > 1) {
812 COPY(k, base + nmemb * size, cnt, size, tmp1, tmp2);
813 COPY(base + nmemb * size, base + size, cnt, size, tmp1, tmp2);
814 --nmemb;
815 SELECT(i, j, nmemb, t, p, size, k, cnt, tmp1, tmp2);
816 }
817 free(k);
818 return (0);
819 }
820
821 #undef SWAP
822 #undef COPY
823 #undef CREATE
824 #undef SELECT
825
826 #endif
827
828 /* ===================================================================== */
829
830 #undef EINVAL
831