2 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 Copyright 1999-2002, Apple, Inc. All rights reserved.
25 Responsibility: Christopher Kane
28 /* This file contains code copied from the Libc project's files
29 qsort.c, merge.c, and heapsort.c, and modified to suit the
30 needs of CF, which needs the comparison callback to have an
31 additional parameter. The code is collected into this one
32 file so that the individual BSD sort routines can remain
33 private and static. We may not keep the BSD functions separate in the future. */
#include <CoreFoundation/CFBase.h>
#include "CFInternal.h"
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#if defined(__MACH__) || defined(__LINUX__) || defined(__FREEBSD__)
#include <sys/types.h>
#endif
49 static int bsd_mergesort(void *base
, int nmemb
, int size
, CFComparatorFunction cmp
, void *context
);
51 /* Comparator is passed the address of the values. */
52 void CFMergeSortArray(void *list
, CFIndex count
, CFIndex elementSize
, CFComparatorFunction comparator
, void *context
) {
53 bsd_mergesort(list
, count
, elementSize
, comparator
, context
);
// non-recursing qsort with side index list which is the actual thing sorted
// XXX kept here for possible later reference
//
// Insertion sort over idx_list[lo..hi]: the elements of `list` (each
// `size` bytes) are never moved; only the indices are permuted so that
// list[idx_list[lo]..idx_list[hi]] is ordered according to `comp`.
// NOTE(review): several interior lines of the original were missing;
// the declarations and the final re-insertion were reconstructed from
// the surviving fragments -- verify against upstream CFSortFunctions.c.
void cjk_big_isort(char *list, int size, int *idx_list, int (*comp)(const void *, const void *, void *), void *context, int lo, int hi) {
    int idx, idx2;  /* scan cursor and insertion cursor */
    int v;          /* index value currently being inserted */
    void *v1;       /* element addressed by v */
    for (idx = lo + 1; idx < hi + 1; idx++) {
        v = idx_list[idx];
        v1 = list + v * size;
        /* Shift larger entries one slot right until v1's place is found. */
        for (idx2 = idx; lo < idx2 && (comp(v1, list + idx_list[idx2 - 1] * size, context) < 0); idx2--)
            idx_list[idx2] = idx_list[idx2 - 1];
        idx_list[idx2] = v;
    }
}
// One quicksort partition step over the side index list idx_list[lo..hi].
// Picks a median-of-three pivot, swaps its index to slot lo, partitions the
// remaining indices around it, and reports via *recurse either the final
// pivot position (mid) or -1 when every element compared equal to the
// pivot (nothing left to recurse into).
// NOTE(review): the original text dropped the pivot-index setup and the
// partition-loop interior (orig lines 73-76, 91-96); those lines were
// reconstructed from the surviving fragments and must be verified against
// upstream CFSortFunctions.c before this dead-code path is revived.
void cjk_big_qsort(char *list, int size, int *idx_list, int (*comp)(const void *, const void *, void *), void *context, int lo, int hi, int *recurse) {
    int p, idx, mid, g1, g2, g3, r1, r2;
    char *v1, *v2, *v3;                 /* candidate pivot elements */
    g1 = lo;
    g2 = (hi + lo) / 2; //random() % (hi - lo + 1) + lo;
    g3 = hi;
    v1 = list + idx_list[g1] * size;
    v2 = list + idx_list[g2] * size;
    v3 = list + idx_list[g3] * size;
    /* Median-of-three: p gets the index whose element is the median. */
    if (comp(v1, v2, context) < 0) {
        p = comp(v2, v3, context) < 0 ? g2 : (comp(v1, v3, context) < 0 ? g3 : g1);
    } else {
        p = comp(v2, v3, context) > 0 ? g2 : (comp(v1, v3, context) < 0 ? g1 : g3);
    }
    /* Move the pivot's index into slot lo. */
    if (lo != p) {int t = idx_list[lo]; idx_list[lo] = idx_list[p]; idx_list[p] = t;}
    r2 = 1;                             /* assume all-equal until a difference is seen (reconstructed) */
    v2 = list + idx_list[lo] * size;    /* pivot element */
    for (mid = lo, idx = lo + 1; idx < hi + 1; idx++) {
        v1 = list + idx_list[idx] * size;
        r1 = comp(v1, v2, context);
        r2 = r2 && (0 == r1);           /* reconstructed: track all-equal run */
        if (r1 < 0) {                   /* reconstructed: element belongs left of pivot */
            mid++;
            if (idx != mid) {int t = idx_list[idx]; idx_list[idx] = idx_list[mid]; idx_list[mid] = t;}
        }
    }
    /* Put the pivot's index into its final slot. */
    if (lo != mid) {int t = idx_list[lo]; idx_list[lo] = idx_list[mid]; idx_list[mid] = t;}
    *recurse = !r2 ? mid : -1;
}
// Driver for the index-list quicksort above: sorts n elements of `list`
// (each `size` bytes) by permuting a side array of indices with an
// explicit [lo,hi] work stack (no recursion), finishing small ranges
// with the insertion sort, then applying the final permutation with one
// pass of memmoves through a temporary buffer.
// NOTE(review): the original text dropped the index initialization and
// the work-stack loop (orig lines 107-119, 121-130); those lines,
// including the small-range cutoff, were reconstructed and must be
// verified against upstream CFSortFunctions.c.
void cjk_big_sort(char *list, int n, int size, int (*comp)(const void *, const void *, void *), void *context) {
    int idx, lo, hi, mid;
    int len = 0, los[40], his[40];
    // 40 is big enough for 2^40 elements, in theory; in practice, much
    // lower but still sufficient; should recurse if the 40 limit is hit
    int *idx_list = malloc(sizeof(int) * n);
    char *tmp_list = malloc(n * size);
    if (!idx_list || !tmp_list) {       /* allocation failed; leave input untouched */
        free(idx_list);
        free(tmp_list);
        return;
    }
    /* Identity permutation to start. */
    for (idx = 0; idx < n; idx++) {
        idx_list[idx] = idx;
    }
    /* Seed the work stack with the whole range. */
    los[len] = 0;
    his[len++] = n - 1;
    while (0 < len) {
        lo = los[--len];
        hi = his[len];
        if (lo + 16 < hi) {             /* NOTE(review): cutoff reconstructed -- confirm */
            cjk_big_qsort(list, size, idx_list, comp, context, lo, hi, &mid);
            if (0 <= mid) {             /* -1 means the range was all-equal */
                los[len] = lo;
                his[len++] = mid - 1;
                los[len] = mid + 1;
                his[len++] = hi;
            }
        } else {
            cjk_big_isort(list, size, idx_list, comp, context, lo, hi);
        }
    }
    /* Apply the permutation: gather into tmp_list, then copy back. */
    for (idx = 0; idx < n; idx++)
        memmove(tmp_list + idx * size, list + idx_list[idx] * size, size);
    memmove(list, tmp_list, n * size);
    free(idx_list);
    free(tmp_list);
}
140 /* stdlib.subproj/qsort.c ============================================== */
143 * Copyright (c) 1992, 1993
144 * The Regents of the University of California. All rights reserved.
146 * Redistribution and use in source and binary forms, with or without
147 * modification, are permitted provided that the following conditions
149 * 1. Redistributions of source code must retain the above copyright
150 * notice, this list of conditions and the following disclaimer.
151 * 2. Redistributions in binary form must reproduce the above copyright
152 * notice, this list of conditions and the following disclaimer in the
153 * documentation and/or other materials provided with the distribution.
154 * 3. All advertising materials mentioning features or use of this software
155 * must display the following acknowledgement:
156 * This product includes software developed by the University of
157 * California, Berkeley and its contributors.
158 * 4. Neither the name of the University nor the names of its contributors
159 * may be used to endorse or promote products derived from this software
160 * without specific prior written permission.
162 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
163 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
164 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
165 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
166 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
167 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
168 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
169 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
170 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
171 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#define min(a, b)	(a) < (b) ? a : b

/*
 * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
 */
/* Swap n bytes between parmi and parmj, TYPE-sized units at a time. */
#define swapcode(TYPE, parmi, parmj, n) { 		\
	long i = (n) / sizeof (TYPE); 			\
	register TYPE *pi = (TYPE *) (parmi); 		\
	register TYPE *pj = (TYPE *) (parmj); 		\
	do { 						\
		register TYPE	t = *pi;		\
		*pi++ = *pj;				\
		*pj++ = t;				\
	} while (--i > 0);				\
}

/* swaptype: 0 = exactly one long, 1 = long-aligned multiple of longs,
 * 2 = fall back to byte-wise swapping. */
#define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
	es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1;
197 swapfunc(char *a
, char *b
, int n
, int swaptype
) {
199 swapcode(long, a
, b
, n
)
201 swapcode(char, a
, b
, n
)
205 if (swaptype == 0) { \
206 long t = *(long *)(a); \
207 *(long *)(a) = *(long *)(b); \
210 swapfunc(a, b, es, swaptype)
212 #define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype)
/* Return the address of the median of the three elements a, b, c under
 * cmp (which receives the extra context pointer). */
static char *
med3(char *a, char *b, char *c, int (*cmp)(const void *, const void *, void *), void *context)
{
	return cmp(a, b, context) < 0 ?
	       (cmp(b, c, context) < 0 ? b : (cmp(a, c, context) < 0 ? c : a ))
	      :(cmp(b, c, context) > 0 ? b : (cmp(a, c, context) < 0 ? a : c ));
}
221 /* Comparator is passed the address of the values. */
222 void CFQSortArray(void *aVoidStar
, CFIndex n
, CFIndex es
, CFComparatorFunction cmp
, void *context
)
224 char *a
= (char *)aVoidStar
;
225 char *pa
, *pb
, *pc
, *pd
, *pl
, *pm
, *pn
;
226 int d
, r
, swaptype
, swap_cnt
;
228 loop
: SWAPINIT(a
, es
);
231 for (pm
= a
+ es
; pm
< (char *) a
+ n
* es
; pm
+= es
)
232 for (pl
= pm
; pl
> (char *) a
&& cmp(pl
- es
, pl
, context
) > 0;
237 pm
= a
+ (n
/ 2) * es
;
240 pn
= a
+ (n
- 1) * es
;
243 pl
= med3(pl
, pl
+ d
, pl
+ 2 * d
, cmp
, context
);
244 pm
= med3(pm
- d
, pm
, pm
+ d
, cmp
, context
);
245 pn
= med3(pn
- 2 * d
, pn
- d
, pn
, cmp
, context
);
247 pm
= med3(pl
, pm
, pn
, cmp
, context
);
252 pc
= pd
= a
+ (n
- 1) * es
;
254 while (pb
<= pc
&& (r
= cmp(pb
, a
, context
)) <= 0) {
262 while (pb
<= pc
&& (r
= cmp(pc
, a
, context
)) >= 0) {
277 if (swap_cnt
== 0) { /* Switch to insertion sort */
278 for (pm
= a
+ es
; pm
< (char *) a
+ n
* es
; pm
+= es
)
279 for (pl
= pm
; pl
> (char *) a
&& cmp(pl
- es
, pl
, context
) > 0;
286 r
= min(pa
- (char *)a
, pb
- pa
);
287 vecswap(a
, pb
- r
, r
);
288 r
= min(pd
- pc
, pn
- pd
- es
);
289 vecswap(pb
, pn
- r
, r
);
290 if ((r
= pb
- pa
) > es
)
291 CFQSortArray(a
, r
/ es
, es
, cmp
, context
);
292 if ((r
= pd
- pc
) > es
) {
293 /* Iterate rather than recurse to save stack space */
298 /* CFQSortArray(pn - r, r / es, es, cmp, context);*/
307 /* stdlib.subproj/mergesort.c ========================================== */
310 * Copyright (c) 1992, 1993
311 * The Regents of the University of California. All rights reserved.
313 * This code is derived from software contributed to Berkeley by
316 * Redistribution and use in source and binary forms, with or without
317 * modification, are permitted provided that the following conditions
319 * 1. Redistributions of source code must retain the above copyright
320 * notice, this list of conditions and the following disclaimer.
321 * 2. Redistributions in binary form must reproduce the above copyright
322 * notice, this list of conditions and the following disclaimer in the
323 * documentation and/or other materials provided with the distribution.
324 * 3. All advertising materials mentioning features or use of this software
325 * must display the following acknowledgement:
326 * This product includes software developed by the University of
327 * California, Berkeley and its contributors.
328 * 4. Neither the name of the University nor the names of its contributors
329 * may be used to endorse or promote products derived from this software
330 * without specific prior written permission.
332 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
333 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
334 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
335 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
336 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
337 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
338 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
339 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
340 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
341 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
/*
 * Hybrid exponential search/linear search merge sort with hybrid
 * natural/pairwise first pass.  Requires about .3% more comparisons
 * for random data than LSMS with pairwise first pass alone.
 * It works for objects as small as two bytes.
 */

#define NATURAL
#define THRESHOLD 16	/* Best choice for natural merge cut-off. */

/* #define NATURAL to get hybrid natural merge.
 * (The default is pairwise merging.)
 */

#define ISIZE sizeof(int)
#define PSIZE sizeof(uint8_t *)
/* Copy int-aligned records from src to dst until src reaches last. */
#define ICOPY_LIST(src, dst, last)				\
	do							\
	*(int*)dst = *(int*)src, src += ISIZE, dst += ISIZE;	\
	while(src < last)
/* Copy i bytes of one int-aligned record. */
#define ICOPY_ELT(src, dst, i)					\
	do							\
	*(int*) dst = *(int*) src, src += ISIZE, dst += ISIZE;	\
	while (i -= ISIZE)

/* Byte-wise fallbacks for unaligned records. */
#define CCOPY_LIST(src, dst, last)		\
	do					\
		*dst++ = *src++;		\
	while (src < last)
#define CCOPY_ELT(src, dst, i)			\
	do					\
		*dst++ = *src++;		\
	while (i -= 1)

/*
 * Find the next possible pointer head.  (Trickery for forcing an array
 * to do double duty as a linked list when objects do not align with word
 * boundaries.)
 */
/* Assumption: PSIZE is a power of 2. */
#define EVAL(p) (uint8_t **)						\
	((uint8_t *)0 +							\
	    (((uint8_t *)p + PSIZE - 1 - (uint8_t *) 0) & ~(PSIZE - 1)))

/* Element swap / run reversal for the first merge pass.  Both rely on
 * `s`, `i`, `tmp`, `size` (and `size2` for reverse) locals of the
 * enclosing function. */
#undef swap
#define swap(a, b) {					\
		s = b;					\
		i = size;				\
		do {					\
			tmp = *a; *a++ = *s; *s++ = tmp; \
		} while (--i);				\
		a -= size;				\
	}
#define reverse(bot, top) {				\
	s = top;					\
	while (bot < s) {				\
		i = size;				\
		do {					\
			tmp = *bot; *bot++ = *s; *s++ = tmp; \
		} while (--i);				\
		s -= size2;				\
	}						\
}
/*
 * This is to avoid out-of-bounds addresses in sorting the
 * last few elements of the input.
 */
/* Straight insertion sort of n records of `size` bytes starting at a,
 * ordered by cmp (with context).  Uses the byte-wise swap macro above. */
static void
insertionsort(char *a, int n, int size, int (*cmp)(const void *, const void *, void *), void *context)
{
	char *ai, *s, *t, *u, tmp;
	int i;

	for (ai = a+size; --n >= 1; ai += size)
		for (t = ai; t > a; t -= size) {
			u = t - size;
			if (cmp(u, t, context) <= 0)
				break;
			swap(u, t);
		}
}
430 * Optional hybrid natural/pairwise first pass. Eats up list1 in runs of
431 * increasing order, list2 in a corresponding linked list. Checks for runs
432 * when THRESHOLD/2 pairs compare with same sense. (Only used when NATURAL
433 * is defined. Otherwise simple pairwise merging is used.)
436 setup(char *list1
, char *list2
, int n
, int size
, int (*cmp
)(const void *, const void *, void *), void *context
) {
437 int i
, length
, size2
, tmp
, sense
;
438 char *f1
, *f2
, *s
, *l2
, *last
, *p2
;
442 insertionsort(list1
, n
, size
, cmp
, context
);
443 *EVAL(list2
) = (uint8_t*) list2
+ n
*size
;
447 * Avoid running pointers out of bounds; limit n to evens
451 insertionsort(list1
+ (n
- i
) * size
, i
, size
, cmp
, context
);
452 last
= list1
+ size
* (n
- i
);
453 *EVAL(list2
+ (last
- list1
)) = list2
+ n
* size
;
458 sense
= (cmp(f1
, f1
+ size
, context
) > 0);
459 for (; f1
< last
; sense
= !sense
) {
461 /* Find pairs with same sense. */
462 for (f2
= f1
+ size2
; f2
< last
; f2
+= size2
) {
463 if ((cmp(f2
, f2
+ size
, context
) > 0) != sense
)
467 if (length
< THRESHOLD
) { /* Pairwise merge */
469 p2
= *EVAL(p2
) = f1
+ size2
- list1
+ list2
;
471 swap (f1
, f1
+ size
);
472 } while ((f1
+= size2
) < f2
);
473 } else { /* Natural merge */
475 for (f2
= f1
+ size2
; f2
< l2
; f2
+= size2
) {
476 if ((cmp(f2
-size
, f2
, context
) > 0) != sense
) {
477 p2
= *EVAL(p2
) = f2
- list1
+ list2
;
479 reverse(f1
, f2
-size
);
484 reverse (f1
, f2
-size
);
486 if (f2
< last
|| cmp(f2
- size
, f2
, context
) > 0)
487 p2
= *EVAL(p2
) = f2
- list1
+ list2
;
489 p2
= *EVAL(p2
) = list2
+ n
*size
;
492 #else /* pairwise merge only. */
493 for (f1
= list1
, p2
= list2
; f1
< last
; f1
+= size2
) {
494 p2
= *EVAL(p2
) = p2
+ size2
;
495 if (cmp (f1
, f1
+ size
, context
) > 0)
502 * Arguments are as for qsort.
505 bsd_mergesort(void *base
, int nmemb
, int size
, CFComparatorFunction cmp
, void *context
) {
506 register int i
, sense
;
508 register uint8_t *f1
, *f2
, *t
, *b
, *tp2
, *q
, *l1
, *l2
;
509 uint8_t *list2
, *list1
, *p2
, *p
, *last
, **p1
;
511 if (size
< (int)PSIZE
/ 2) { /* Pointers must fit into 2 * size. */
518 * Stupid subtraction for the Cray.
521 if (!(size
% ISIZE
) && !(((char *)base
- (char *)0) % ISIZE
))
524 if ((list2
= malloc(nmemb
* size
+ PSIZE
)) == NULL
)
528 setup(list1
, list2
, nmemb
, size
, cmp
, context
);
529 last
= list2
+ nmemb
* size
;
531 while (*EVAL(list2
) != last
) {
534 for (tp2
= p2
= list2
; p2
!= last
; p1
= EVAL(l2
)) {
537 f2
= l1
= list1
+ (p2
- list2
);
540 l2
= list1
+ (p2
- list2
);
541 while (f1
< l1
&& f2
< l2
) {
542 if ((*cmp
)(f1
, f2
, context
) <= 0) {
551 if (!big
) { /* here i = 0 */
552 /* LINEAR: */ while ((b
+= size
) < t
&& cmp(q
, b
, context
) >sense
)
558 EXPONENTIAL
: for (i
= size
; ; i
<<= 1)
559 if ((p
= (b
+ i
)) >= t
) {
560 if ((p
= t
- size
) > b
&&
561 (*cmp
)(q
, p
, context
) <= sense
)
566 } else if ((*cmp
)(q
, p
, context
) <= sense
) {
573 /* SLOWCASE: */ while (t
> b
+size
) {
574 i
= (((t
- b
) / size
) >> 1) * size
;
575 if ((*cmp
)(q
, p
= b
+ i
, context
) <= sense
)
581 FASTCASE
: while (i
> size
)
583 p
= b
+ (i
>>= 1), context
) <= sense
)
592 ICOPY_LIST(f2
, tp2
, b
);
593 ICOPY_ELT(f1
, tp2
, i
);
595 CCOPY_LIST(f2
, tp2
, b
);
596 CCOPY_ELT(f1
, tp2
, i
);
600 ICOPY_LIST(f1
, tp2
, b
);
601 ICOPY_ELT(f2
, tp2
, i
);
603 CCOPY_LIST(f1
, tp2
, b
);
604 CCOPY_ELT(f2
, tp2
, i
);
610 ICOPY_LIST(f2
, tp2
, l2
);
612 CCOPY_LIST(f2
, tp2
, l2
);
613 } else if (f1
< l1
) {
615 ICOPY_LIST(f1
, tp2
, l1
);
617 CCOPY_LIST(f1
, tp2
, l1
);
621 tp2
= list1
; /* swap list1, list2 */
624 last
= list2
+ nmemb
*size
;
627 memmove(list2
, list1
, nmemb
*size
);
647 /* stdlib.subproj/heapsort.c =========================================== */
650 * Copyright (c) 1991, 1993
651 * The Regents of the University of California. All rights reserved.
653 * This code is derived from software contributed to Berkeley by
654 * Ronnie Kon at Mindcraft Inc., Kevin Lew and Elmer Yglesias.
656 * Redistribution and use in source and binary forms, with or without
657 * modification, are permitted provided that the following conditions
659 * 1. Redistributions of source code must retain the above copyright
660 * notice, this list of conditions and the following disclaimer.
661 * 2. Redistributions in binary form must reproduce the above copyright
662 * notice, this list of conditions and the following disclaimer in the
663 * documentation and/or other materials provided with the distribution.
664 * 3. All advertising materials mentioning features or use of this software
665 * must display the following acknowledgement:
666 * This product includes software developed by the University of
667 * California, Berkeley and its contributors.
668 * 4. Neither the name of the University nor the names of its contributors
669 * may be used to endorse or promote products derived from this software
670 * without specific prior written permission.
672 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
673 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
674 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
675 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
676 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
677 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
678 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
679 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
680 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
681 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
/*
 * Swap two areas of size number of bytes.  Although qsort(3) permits random
 * blocks of memory to be sorted, sorting pointers is almost certainly the
 * common case (and, were it not, could easily be made so).  Regardless, it
 * isn't worth optimizing; the SWAP's get sped up by the cache, and pointer
 * arithmetic gets lost in the time required for comparison function calls.
 */
#define SWAP(a, b, count, size, tmp) { \
	count = size; \
	do { \
		tmp = *a; \
		*a++ = *b; \
		*b++ = tmp; \
	} while (--count); \
}

/* Copy one block of size size to another. */
#define COPY(a, b, count, size, tmp1, tmp2) { \
	count = size; \
	tmp1 = a; \
	tmp2 = b; \
	do { \
		*tmp1++ = *tmp2++; \
	} while (--count); \
}

/*
 * Build the list into a heap, where a heap is defined such that for
 * the records K1 ... KN, Kj/2 >= Kj for 1 <= j/2 <= j <= N.
 *
 * There are two cases.  If j == nmemb, select largest of Ki and Kj.  If
 * j < nmemb, select largest of Ki, Kj and Kj+1.
 *
 * Relies on `base`, `compar`, `context` from the enclosing function.
 */
#define CREATE(initval, nmemb, par_i, child_i, par, child, size, count, tmp) { \
	for (par_i = initval; (child_i = par_i * 2) <= nmemb; \
	    par_i = child_i) { \
		child = base + child_i * size; \
		if (child_i < nmemb && compar(child, child + size, context) < 0) { \
			child += size; \
			++child_i; \
		} \
		par = base + par_i * size; \
		if (compar(child, par, context) <= 0) \
			break; \
		SWAP(par, child, count, size, tmp); \
	} \
}

/*
 * Select the top of the heap and 'heapify'.  Since by far the most expensive
 * action is the call to the compar function, a considerable optimization
 * in the average case can be achieved due to the fact that k, the displaced
 * element, is usually quite small, so it would be preferable to first
 * heapify, always maintaining the invariant that the larger child is copied
 * over its parent's record.
 *
 * Then, starting from the *bottom* of the heap, finding k's correct place,
 * again maintaining the invariant.  As a result of the invariant no element
 * is 'lost' when k is assigned its correct place in the heap.
 *
 * The time savings from this optimization are on the order of 15-20% for the
 * average case.  See Knuth, Vol. 3, page 158, problem 18.
 *
 * XXX Don't break the #define SELECT line, below.  Reiser cpp gets upset.
 */
#define SELECT(par_i, child_i, nmemb, par, child, size, k, count, tmp1, tmp2) { \
	for (par_i = 1; (child_i = par_i * 2) <= nmemb; par_i = child_i) { \
		child = base + child_i * size; \
		if (child_i < nmemb && compar(child, child + size, context) < 0) { \
			child += size; \
			++child_i; \
		} \
		par = base + par_i * size; \
		COPY(par, child, count, size, tmp1, tmp2); \
	} \
	for (;;) { \
		child_i = par_i; \
		par_i = child_i / 2; \
		child = base + child_i * size; \
		par = base + par_i * size; \
		if (child_i == 1 || compar(k, par, context) < 0) { \
			COPY(child, k, count, size, tmp1, tmp2); \
			break; \
		} \
		COPY(child, par, count, size, tmp1, tmp2); \
	} \
}
/*
 * Heapsort -- Knuth, Vol. 3, page 145.  Runs in O (N lg N), both average
 * and worst.  While heapsort is faster than the worst case of quicksort,
 * the BSD quicksort does median selection so that the chance of finding
 * a data set that will trigger the worst case is nonexistent.  Heapsort's
 * only advantage over quicksort is that it requires little additional memory.
 *
 * Returns 0 on success, -1 (with errno set) on failure.
 * NOTE(review): restored from the garbled original; the surviving
 * fragments match 4.4BSD heapsort.c with the context argument added.
 */
static int
bsd_heapsort(void *vbase, int nmemb, int size, int (*compar)(const void *, const void *, void *), void *context)
{
	register int cnt, i, j, l;
	register char tmp, *tmp1, *tmp2;
	char *base, *k, *p, *t;

	if (nmemb <= 1)
		return (0);

	if (!size) {
		errno = EINVAL;
		return (-1);
	}

	if ((k = malloc(size)) == NULL)
		return (-1);

	/*
	 * Items are numbered from 1 to nmemb, so offset from size bytes
	 * below the starting address.
	 */
	base = (char *)vbase - size;

	for (l = nmemb / 2 + 1; --l;)
		CREATE(l, nmemb, i, j, t, p, size, cnt, tmp);

	/*
	 * For each element of the heap, save the largest element into its
	 * final slot, save the displaced element (k), then recreate the
	 * heap.
	 */
	while (nmemb > 1) {
		COPY(k, base + nmemb * size, cnt, size, tmp1, tmp2);
		COPY(base + nmemb * size, base + size, cnt, size, tmp1, tmp2);
		--nmemb;
		SELECT(i, j, nmemb, t, p, size, k, cnt, tmp1, tmp2);
	}
	free(k);
	return (0);
}
828 /* ===================================================================== */