/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
	Copyright 1999-2002, Apple, Inc. All rights reserved.
	Responsibility: Christopher Kane
*/
/* This file contains code copied from the Libc project's files
   qsort.c, merge.c, and heapsort.c, and modified to suit the
   needs of CF, which needs the comparison callback to have an
   additional parameter.  The code is collected into this one
   file so that the individual BSD sort routines can remain
   private and static.  We may not keep the bsd functions ... */
#include <CoreFoundation/CFBase.h>
#if defined(__MACH__) || defined(__LINUX__) || defined(__FREEBSD__)
#include <sys/types.h>
#endif
#include <stdlib.h>	/* malloc, free */
#include <string.h>	/* memmove */
#include "CFInternal.h"
static int bsd_mergesort(void *base, int nmemb, int size, CFComparatorFunction cmp, void *context);
/* Comparator is passed the address of the values. */
void CFMergeSortArray(void *list, CFIndex count, CFIndex elementSize, CFComparatorFunction comparator, void *context) {
    bsd_mergesort(list, count, elementSize, comparator, context);
}
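/*
 * Illustrative example (not part of the original file): calling
 * CFMergeSortArray.  The comparator receives the addresses of two elements
 * plus the caller's context pointer; the names below
 * (compareDoublesDescending, values) are hypothetical.
 *
 *     static CFComparisonResult compareDoublesDescending(const void *a, const void *b, void *context) {
 *         double x = *(const double *)a, y = *(const double *)b;
 *         if (x == y) return kCFCompareEqualTo;
 *         return (x > y) ? kCFCompareLessThan : kCFCompareGreaterThan;   // larger values sort first
 *     }
 *
 *     double values[4] = {2.5, 0.5, 3.5, 1.5};
 *     CFMergeSortArray(values, 4, sizeof(double), compareDoublesDescending, NULL);
 *     // values is now {3.5, 2.5, 1.5, 0.5}
 */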
// non-recursing qsort with side index list which is the actual thing sorted
// XXX kept here for possible later reference
void cjk_big_isort(char *list, int size, int *idx_list, int (*comp)(const void *, const void *, void *), void *context, int lo, int hi) {
    int idx, idx2;
    char *v1;
    for (idx = lo + 1; idx < hi + 1; idx++) {
        int t = idx_list[idx];
        v1 = list + t * size;
        for (idx2 = idx; lo < idx2 && (comp(v1, list + idx_list[idx2 - 1] * size, context) < 0); idx2--)
            idx_list[idx2] = idx_list[idx2 - 1];
        idx_list[idx2] = t;
    }
}
void cjk_big_qsort(char *list, int size, int *idx_list, int (*comp)(const void *, const void *, void *), void *context, int lo, int hi, int *recurse) {
    int p, idx, mid, g1, g2, g3, r1, r2;
    char *v1, *v2, *v3;
    // median-of-three pivot selection over the index range [lo, hi]
    g1 = lo;
    g2 = (hi + lo) / 2; //random() % (hi - lo + 1) + lo;
    g3 = hi;
    v1 = list + idx_list[g1] * size;
    v2 = list + idx_list[g2] * size;
    v3 = list + idx_list[g3] * size;
    if (comp(v1, v2, context) < 0) {
        p = comp(v2, v3, context) < 0 ? g2 : (comp(v1, v3, context) < 0 ? g3 : g1);
    } else {
        p = comp(v2, v3, context) > 0 ? g2 : (comp(v1, v3, context) < 0 ? g1 : g3);
    }
    // move the pivot's index to the front, then partition the index list
    if (lo != p) {int t = idx_list[lo]; idx_list[lo] = idx_list[p]; idx_list[p] = t;}
    /* ... */
    v2 = list + idx_list[lo] * size;
    for (mid = lo, idx = lo + 1; idx < hi + 1; idx++) {
        v1 = list + idx_list[idx] * size;
        r1 = comp(v1, v2, context);
        /* ... */
            if (idx != mid) {int t = idx_list[idx]; idx_list[idx] = idx_list[mid]; idx_list[mid] = t;}
        /* ... */
    }
    if (lo != mid) {int t = idx_list[lo]; idx_list[lo] = idx_list[mid]; idx_list[mid] = t;}
    *recurse = !r2 ? mid : -1;
}
void cjk_big_sort(char *list, int n, int size, int (*comp)(const void *, const void *, void *), void *context) {
    int len = 0, los[40], his[40];
    // 40 is big enough for 2^40 elements, in theory; in practice, much
    // lower but still sufficient; should recurse if the 40 limit is hit
    int *idx_list = malloc(sizeof(int) * n);
    char *tmp_list = malloc(n * size);
    int idx, lo, hi, mid;
    for (idx = 0; idx < n; idx++) {
        idx_list[idx] = idx;
    }
    /* ... */
            cjk_big_qsort(list, size, idx_list, comp, context, lo, hi, &mid);
            /* ... */
                his[len++] = mid - 1;
            /* ... */
            cjk_big_isort(list, size, idx_list, comp, context, lo, hi);
    /* ... */
    // permute the elements once, using the sorted index list
    for (idx = 0; idx < n; idx++)
        memmove(tmp_list + idx * size, list + idx_list[idx] * size, size);
    memmove(list, tmp_list, n * size);
    free(idx_list);
    free(tmp_list);
}
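/*
 * Illustrative sketch (not part of the original file): the "side index list"
 * idea above sorts an array of element indices and only permutes the
 * (possibly large) elements once at the end.  A minimal standalone version
 * of the index-sorting step; the names (widget, index_sort) are hypothetical.
 *
 *     typedef struct { int key; char payload[64]; } widget;   // hypothetical element type
 *
 *     // Sort idx[0..n-1] so that items[idx[0]].key <= items[idx[1]].key <= ...
 *     static void index_sort(const widget *items, int *idx, int n) {
 *         for (int i = 1; i < n; i++) {
 *             int t = idx[i], j;
 *             for (j = i; j > 0 && items[idx[j - 1]].key > items[t].key; j--)
 *                 idx[j] = idx[j - 1];
 *             idx[j] = t;
 *         }
 *     }
 *
 * The caller initializes idx to 0..n-1, sorts it, then moves each element to
 * its final slot in one pass (as cjk_big_sort does with tmp_list/memmove).
 */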
/* stdlib.subproj/qsort.c ============================================== */
/*
 * Copyright (c) 1992, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#define min(a, b)       (a) < (b) ? a : b

/*
 * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
 */
#define swapcode(TYPE, parmi, parmj, n) {               \
        long i = (n) / sizeof (TYPE);                   \
        register TYPE *pi = (TYPE *) (parmi);           \
        register TYPE *pj = (TYPE *) (parmj);           \
        do {                                            \
                register TYPE   t = *pi;                \
                *pi++ = *pj;                            \
                *pj++ = t;                              \
        } while (--i > 0);                              \
}

#define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
        es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1;

static void
swapfunc(char *a, char *b, int n, int swaptype) {
        if (swaptype <= 1)
                swapcode(long, a, b, n)
        else
                swapcode(char, a, b, n)
}

#define swap(a, b)                                      \
        if (swaptype == 0) {                            \
                long t = *(long *)(a);                  \
                *(long *)(a) = *(long *)(b);            \
                *(long *)(b) = t;                       \
        } else                                          \
                swapfunc(a, b, es, swaptype)

#define vecswap(a, b, n)        if ((n) > 0) swapfunc(a, b, n, swaptype)
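/*
 * Worked example (added for clarity, not in the original): SWAPINIT picks a
 * swap strategy from the element size and the base pointer's alignment.  On
 * an LP64 system where sizeof(long) == 8:
 *
 *     long a[10];      es == 8, base 8-byte aligned  -> swaptype 0 (single in-place long swap)
 *     long b[10][2];   es == 16, base aligned        -> swaptype 1 (swap as a run of longs via swapfunc)
 *     char c[10][3];   es == 3                       -> swaptype 2 (byte-at-a-time swapcode)
 *
 * Only swaptype 0 lets the swap macro exchange a single long directly;
 * anything else falls through to swapfunc/swapcode.
 */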
static char *
med3(char *a, char *b, char *c, int (*cmp)(const void *, const void *, void *), void *context) {
        return cmp(a, b, context) < 0 ?
               (cmp(b, c, context) < 0 ? b : (cmp(a, c, context) < 0 ? c : a ))
              :(cmp(b, c, context) > 0 ? b : (cmp(a, c, context) < 0 ? a : c ));
}
/* Comparator is passed the address of the values. */
void CFQSortArray(void *aVoidStar, CFIndex n, CFIndex es, CFComparatorFunction cmp, void *context)
{
        char *a = (char *)aVoidStar;
        char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
        int d, r, swaptype, swap_cnt;

loop:   SWAPINIT(a, es);
        swap_cnt = 0;
        if (n < 7) {            /* small partitions: plain insertion sort */
                for (pm = a + es; pm < (char *) a + n * es; pm += es)
                        for (pl = pm; pl > (char *) a && cmp(pl - es, pl, context) > 0;
                             pl -= es)
                                swap(pl, pl - es);
                return;
        }
        pm = a + (n / 2) * es;
        if (n > 7) {
                pl = a;
                pn = a + (n - 1) * es;
                if (n > 40) {   /* big partitions: pseudo-median of 9 */
                        d = (n / 8) * es;
                        pl = med3(pl, pl + d, pl + 2 * d, cmp, context);
                        pm = med3(pm - d, pm, pm + d, cmp, context);
                        pn = med3(pn - 2 * d, pn - d, pn, cmp, context);
                }
                pm = med3(pl, pm, pn, cmp, context);
        }
        swap(a, pm);
        pa = pb = a + es;

        pc = pd = a + (n - 1) * es;
        for (;;) {
                while (pb <= pc && (r = cmp(pb, a, context)) <= 0) {
                        if (r == 0) {
                                swap_cnt = 1;
                                swap(pa, pb);
                                pa += es;
                        }
                        pb += es;
                }
                while (pb <= pc && (r = cmp(pc, a, context)) >= 0) {
                        if (r == 0) {
                                swap_cnt = 1;
                                swap(pc, pd);
                                pd -= es;
                        }
                        pc -= es;
                }
                if (pb > pc)
                        break;
                swap(pb, pc);
                swap_cnt = 1;
                pb += es;
                pc -= es;
        }
        if (swap_cnt == 0) {  /* Switch to insertion sort */
                for (pm = a + es; pm < (char *) a + n * es; pm += es)
                        for (pl = pm; pl > (char *) a && cmp(pl - es, pl, context) > 0;
                             pl -= es)
                                swap(pl, pl - es);
                return;
        }

        pn = a + n * es;
        r = min(pa - (char *)a, pb - pa);
        vecswap(a, pb - r, r);
        r = min(pd - pc, pn - pd - es);
        vecswap(pb, pn - r, r);
        if ((r = pb - pa) > es)
                CFQSortArray(a, r / es, es, cmp, context);
        if ((r = pd - pc) > es) {
                /* Iterate rather than recurse to save stack space */
                a = pn - r;
                n = r / es;
                goto loop;
        }
/*              CFQSortArray(pn - r, r / es, es, cmp, context);*/
}
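/*
 * Illustrative example (not part of the original file): sorting an array of
 * structs with CFQSortArray, using the context pointer to select the sort
 * key.  The names below (Person, compareByField, kSortByAge) are hypothetical.
 *
 *     typedef struct { const char *name; int age; } Person;
 *     enum { kSortByName, kSortByAge };
 *
 *     static CFComparisonResult compareByField(const void *a, const void *b, void *context) {
 *         const Person *p1 = a, *p2 = b;
 *         if ((intptr_t)context == kSortByAge) {
 *             if (p1->age == p2->age) return kCFCompareEqualTo;
 *             return p1->age < p2->age ? kCFCompareLessThan : kCFCompareGreaterThan;
 *         }
 *         int c = strcmp(p1->name, p2->name);
 *         return c < 0 ? kCFCompareLessThan : c > 0 ? kCFCompareGreaterThan : kCFCompareEqualTo;
 *     }
 *
 *     Person people[3] = {{"Ann", 41}, {"Bo", 29}, {"Cy", 35}};
 *     CFQSortArray(people, 3, sizeof(Person), compareByField, (void *)(intptr_t)kSortByAge);
 */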
/* stdlib.subproj/mergesort.c ========================================== */
/*
 * Copyright (c) 1992, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Peter McIlroy.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Hybrid exponential search/linear search merge sort with hybrid
 * natural/pairwise first pass.  Requires about .3% more comparisons
 * for random data than LSMS with pairwise first pass alone.
 * It works for objects as small as two bytes.
 */

#define THRESHOLD 16    /* Best choice for natural merge cut-off. */
/* #define NATURAL to get hybrid natural merge.
 * (The default is pairwise merging.)
 */
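/*
 * Added illustration (not in the original): a "natural" first pass exploits
 * runs that are already in order instead of always merging fixed pairs.  A
 * minimal sketch of run detection over an int array; the name scan_run is
 * hypothetical.
 *
 *     // Returns the index one past the end of the ascending run starting at i.
 *     static int scan_run(const int *a, int i, int n) {
 *         while (i + 1 < n && a[i] <= a[i + 1])
 *             i++;
 *         return i + 1;
 *     }
 *
 * With input {1, 2, 3, 9, 4, 5}, scan_run(a, 0, 6) returns 4: the run
 * {1, 2, 3, 9} can be treated as already merged, so the first pass has less
 * work to do.
 */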
#define ISIZE sizeof(int)
#define PSIZE sizeof(uint8_t *)
#define ICOPY_LIST(src, dst, last)                              \
        do                                                      \
        *(int*)dst = *(int*)src, src += ISIZE, dst += ISIZE;    \
        while(src < last)
#define ICOPY_ELT(src, dst, i)                                  \
        do                                                      \
        *(int*) dst = *(int*) src, src += ISIZE, dst += ISIZE;  \
        while (i -= ISIZE)

#define CCOPY_LIST(src, dst, last)              \
        do                                      \
                *dst++ = *src++;                \
        while (src < last)
#define CCOPY_ELT(src, dst, i)                  \
        do                                      \
                *dst++ = *src++;                \
        while (i -= 1)
/*
 * Find the next possible pointer head.  (Trickery for forcing an array
 * to do double duty as a linked list when objects do not align with word
 * boundaries.
 */
/* Assumption: PSIZE is a power of 2. */
#define EVAL(p) (uint8_t **)                                            \
        ((uint8_t *)0 +                                                 \
            (((uint8_t *)p + PSIZE - 1 - (uint8_t *) 0) & ~(PSIZE - 1)))
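/*
 * Worked example (added for clarity, not in the original): EVAL rounds an
 * arbitrary element address up to the next pointer-aligned address so a
 * uint8_t ** "link" can be stored there.  With 8-byte pointers (PSIZE == 8)
 * and an 8-byte-aligned base:
 *
 *     p = base + 0x13  ->  EVAL(p) points at base + 0x18
 *     p = base + 0x18  ->  EVAL(p) points at base + 0x18 (already aligned)
 *
 * because ((p + 7) & ~7) is the usual round-up-to-a-multiple-of-8 idiom.
 */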
#undef swap
#define swap(a, b) {                                    \
                s = b;                                  \
                i = size;                               \
                do {                                    \
                        tmp = *a; *a++ = *s; *s++ = tmp; \
                } while (--i);                          \
                a -= size;                              \
        }
#define reverse(bot, top) {                             \
        s = top;                                        \
        i = size;                                       \
        while(bot < s) {                                \
                do {                                    \
                        tmp = *bot; *bot++ = *s; *s++ = tmp; \
                } while (--i);                          \
                s -= size2;                             \
        }                                               \
}
/*
 * This is to avoid out-of-bounds addresses in sorting the
 * last 4 elements.
 */
static void
insertionsort(char *a, int n, int size, int (*cmp)(const void *, const void *, void *), void *context) {
        char *ai, *s, *t, *u, tmp;
        int i;

        for (ai = a+size; --n >= 1; ai += size)
                for (t = ai; t > a; t -= size) {
                        u = t - size;
                        if (cmp(u, t, context) <= 0)
                                break;
                        swap(u, t);
                }
}
/*
 * Optional hybrid natural/pairwise first pass.  Eats up list1 in runs of
 * increasing order, list2 in a corresponding linked list.  Checks for runs
 * when THRESHOLD/2 pairs compare with same sense.  (Only used when NATURAL
 * is defined.  Otherwise simple pairwise merging is used.)
 */
static void
setup(char *list1, char *list2, int n, int size, int (*cmp)(const void *, const void *, void *), void *context) {
        int i, length, size2, tmp, sense;
        char *f1, *f2, *s, *l2, *last, *p2;
        size2 = size * 2;
        if (n <= 5) {
                insertionsort(list1, n, size, cmp, context);
                *EVAL(list2) = (uint8_t*) list2 + n*size;
                return;
        }
        /*
         * Avoid running pointers out of bounds; limit n to evens
         * for simplicity.
         */
        i = 4 + (n & 1);
        insertionsort(list1 + (n - i) * size, i, size, cmp, context);
        last = list1 + size * (n - i);
        *EVAL(list2 + (last - list1)) = list2 + n * size;

#ifdef NATURAL
        p2 = list2;
        f1 = list1;
        sense = (cmp(f1, f1 + size, context) > 0);
        for (; f1 < last; sense = !sense) {
                length = 2;
                                        /* Find pairs with same sense. */
                for (f2 = f1 + size2; f2 < last; f2 += size2) {
                        if ((cmp(f2, f2+ size, context) > 0) != sense)
                                break;
                        length += 2;
                }
                if (length < THRESHOLD) {               /* Pairwise merge */
                        do {
                                p2 = *EVAL(p2) = f1 + size2 - list1 + list2;
                                if (sense > 0)
                                        swap (f1, f1 + size);
                        } while ((f1 += size2) < f2);
                } else {                                /* Natural merge */
                        l2 = f2;
                        for (f2 = f1 + size2; f2 < l2; f2 += size2) {
                                if ((cmp(f2-size, f2, context) > 0) != sense) {
                                        p2 = *EVAL(p2) = f2 - list1 + list2;
                                        if (sense > 0)
                                                reverse(f1, f2-size);
                                        f1 = f2;
                                }
                        }
                        if (sense > 0)
                                reverse (f1, f2-size);
                        f1 = f2;
                        if (f2 < last || cmp(f2 - size, f2, context) > 0)
                                p2 = *EVAL(p2) = f2 - list1 + list2;
                        else
                                p2 = *EVAL(p2) = list2 + n*size;
                }
        }
#else           /* pairwise merge only. */
        for (f1 = list1, p2 = list2; f1 < last; f1 += size2) {
                p2 = *EVAL(p2) = p2 + size2;
                if (cmp (f1, f1 + size, context) > 0)
                        swap(f1, f1 + size);
        }
#endif /* NATURAL */
}
/*
 * Arguments are as for qsort.
 */
static int
bsd_mergesort(void *base, int nmemb, int size, CFComparatorFunction cmp, void *context) {
        register int i, sense;
        int big, iflag;
        register uint8_t *f1, *f2, *t, *b, *tp2, *q, *l1, *l2;
        uint8_t *list2, *list1, *p2, *p, *last, **p1;
        if (size < (int)PSIZE / 2) {            /* Pointers must fit into 2 * size. */
                /* ... */
        }
        /*
         * XXX
         * Stupid subtraction for the Cray.
         */
        iflag = 0;
        if (!(size % ISIZE) && !(((char *)base - (char *)0) % ISIZE))
                iflag = 1;

        if ((list2 = malloc(nmemb * size + PSIZE)) == NULL)
                return (-1);

        list1 = base;
        setup(list1, list2, nmemb, size, cmp, context);
        last = list2 + nmemb * size;
        i = big = 0;
        while (*EVAL(list2) != last) {
            l2 = list1;
            p1 = EVAL(list1);
            for (tp2 = p2 = list2; p2 != last; p1 = EVAL(l2)) {
                p2 = *EVAL(p2);
                f1 = l2;
                f2 = l1 = list1 + (p2 - list2);
                if (p2 != last)
                        p2 = *EVAL(p2);
                l2 = list1 + (p2 - list2);
                while (f1 < l1 && f2 < l2) {
                        if ((*cmp)(f1, f2, context) <= 0) {
                                q = f2;
                                b = f1, t = l1;
                                sense = -1;
                        } else {
                                q = f1;
                                b = f2, t = l2;
                                sense = 0;
                        }
                        if (!big) {     /* here i = 0 */
/* LINEAR: */                   while ((b += size) < t && cmp(q, b, context) >sense)
                                        if (++i == 7) {
                                                big = 1;
                                                goto EXPONENTIAL;
                                        }
                        } else {
EXPONENTIAL:                    for (i = size; ; i <<= 1)
                                        if ((p = (b + i)) >= t) {
                                                if ((p = t - size) > b &&
                                                    (*cmp)(q, p, context) <= sense)
                                                        t = p;
                                                else
                                                        b = p;
                                                break;
                                        } else if ((*cmp)(q, p, context) <= sense) {
                                                t = p;
                                                if (i == size)
                                                        big = 0;
                                                goto FASTCASE;
                                        } else
                                                b = p;
/* SLOWCASE: */                 while (t > b+size) {
                                        i = (((t - b) / size) >> 1) * size;
                                        if ((*cmp)(q, p = b + i, context) <= sense)
                                                t = p;
                                        else
                                                b = p;
                                }
                                goto COPY;
FASTCASE:                       while (i > size)
                                        if ((*cmp)(q,
                                                p = b + (i >>= 1), context) <= sense)
                                                t = p;
                                        else
                                                b = p;
COPY:                           b = t;
                        }
                        i = size;
                        if (q == f1) {
                                if (iflag) {
                                        ICOPY_LIST(f2, tp2, b);
                                        ICOPY_ELT(f1, tp2, i);
                                } else {
                                        CCOPY_LIST(f2, tp2, b);
                                        CCOPY_ELT(f1, tp2, i);
                                }
                        } else {
                                if (iflag) {
                                        ICOPY_LIST(f1, tp2, b);
                                        ICOPY_ELT(f2, tp2, i);
                                } else {
                                        CCOPY_LIST(f1, tp2, b);
                                        CCOPY_ELT(f2, tp2, i);
                                }
                        }
                }
                if (f2 < l2) {
                        if (iflag)
                                ICOPY_LIST(f2, tp2, l2);
                        else
                                CCOPY_LIST(f2, tp2, l2);
                } else if (f1 < l1) {
                        if (iflag)
                                ICOPY_LIST(f1, tp2, l1);
                        else
                                CCOPY_LIST(f1, tp2, l1);
                }
                *p1 = l2;
            }
            tp2 = list1;        /* swap list1, list2 */
            list1 = list2;
            list2 = tp2;
            last = list2 + nmemb*size;
        }
        if (base == list2) {
                memmove(list2, list1, nmemb*size);
                list2 = list1;
        }
        free(list2);
        return (0);
}
/* stdlib.subproj/heapsort.c =========================================== */
/*
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ronnie Kon at Mindcraft Inc., Kevin Lew and Elmer Yglesias.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Swap two areas of size number of bytes.  Although qsort(3) permits random
 * blocks of memory to be sorted, sorting pointers is almost certainly the
 * common case (and, were it not, could easily be made so).  Regardless, it
 * isn't worth optimizing; the SWAP's get sped up by the cache, and pointer
 * arithmetic gets lost in the time required for comparison function calls.
 */
#define SWAP(a, b, count, size, tmp) { \
        count = size; \
        do { \
                tmp = *a; \
                *a++ = *b; \
                *b++ = tmp; \
        } while (--count); \
}

/* Copy one block of size size to another. */
#define COPY(a, b, count, size, tmp1, tmp2) { \
        count = size; \
        tmp1 = a; \
        tmp2 = b; \
        do { \
                *tmp1++ = *tmp2++; \
        } while (--count); \
}
/*
 * Build the list into a heap, where a heap is defined such that for
 * the records K1 ... KN, Kj/2 >= Kj for 1 <= j/2 <= j <= N.
 *
 * There are two cases.  If j == nmemb, select largest of Ki and Kj.  If
 * j < nmemb, select largest of Ki, Kj and Kj+1.
 */
#define CREATE(initval, nmemb, par_i, child_i, par, child, size, count, tmp) { \
        for (par_i = initval; (child_i = par_i * 2) <= nmemb; \
            par_i = child_i) { \
                child = base + child_i * size; \
                if (child_i < nmemb && compar(child, child + size, context) < 0) { \
                        child += size; \
                        ++child_i; \
                } \
                par = base + par_i * size; \
                if (compar(child, par, context) <= 0) \
                        break; \
                SWAP(par, child, count, size, tmp); \
        } \
}
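/*
 * Worked example (added for clarity, not in the original): with 1-based
 * indexing the heap property K[j/2] >= K[j] means every parent is at least
 * as large as its children.  The array
 *
 *     index: 1  2  3  4  5  6
 *     value: 9  7  8  3  5  4
 *
 * is a valid max-heap: K[1]=9 >= K[2]=7 and K[3]=8; K[2]=7 >= K[4]=3 and
 * K[5]=5; K[3]=8 >= K[6]=4.  CREATE builds this property bottom-up by
 * sifting each of the first nmemb/2 records down past its larger child.
 */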
/*
 * Select the top of the heap and 'heapify'.  Since by far the most expensive
 * action is the call to the compar function, a considerable optimization
 * in the average case can be achieved due to the fact that k, the displaced
 * element, is usually quite small, so it would be preferable to first
 * heapify, always maintaining the invariant that the larger child is copied
 * over its parent's record.
 *
 * Then, starting from the *bottom* of the heap, finding k's correct place,
 * again maintaining the invariant.  As a result of the invariant no element
 * is 'lost' when k is assigned its correct place in the heap.
 *
 * The time savings from this optimization are on the order of 15-20% for the
 * average case. See Knuth, Vol. 3, page 158, problem 18.
 *
 * XXX Don't break the #define SELECT line, below.  Reiser cpp gets upset.
 */
#define SELECT(par_i, child_i, nmemb, par, child, size, k, count, tmp1, tmp2) { \
        for (par_i = 1; (child_i = par_i * 2) <= nmemb; par_i = child_i) { \
                child = base + child_i * size; \
                if (child_i < nmemb && compar(child, child + size, context) < 0) { \
                        child += size; \
                        ++child_i; \
                } \
                par = base + par_i * size; \
                COPY(par, child, count, size, tmp1, tmp2); \
        } \
        for (;;) { \
                child_i = par_i; \
                par_i = child_i / 2; \
                child = base + child_i * size; \
                par = base + par_i * size; \
                if (child_i == 1 || compar(k, par, context) < 0) { \
                        COPY(child, k, count, size, tmp1, tmp2); \
                        break; \
                } \
                COPY(child, par, count, size, tmp1, tmp2); \
        } \
}
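/*
 * Added note (not in the original): the payoff of this strategy is in
 * comparison counts.  A classic sift-down of the displaced element k costs
 * about 2 comparisons per level (pick the larger child, then compare k with
 * it).  SELECT walks all the way down choosing only the larger child
 * (roughly 1 comparison per level), then walks back up a few levels to place
 * k, which is cheap because k came from the bottom of the heap and usually
 * belongs near the bottom.  For a heap of 2^20 elements that is roughly 20
 * down-phase comparisons per extraction instead of about 40, consistent with
 * the 15-20% overall saving cited above once copy costs are included.
 */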
/*
 * Heapsort -- Knuth, Vol. 3, page 145.  Runs in O (N lg N), both average
 * and worst.  While heapsort is faster than the worst case of quicksort,
 * the BSD quicksort does median selection so that the chance of finding
 * a data set that will trigger the worst case is nonexistent.  Heapsort's
 * only advantage over quicksort is that it requires little additional memory.
 */
static int
bsd_heapsort(void *vbase, int nmemb, int size, int (*compar)(const void *, const void *, void *), void *context) {
        register int cnt, i, j, l;
        register char tmp, *tmp1, *tmp2;
        char *base, *k, *p, *t;

        if (nmemb <= 1)
                return (0);
        /* ... */

        if ((k = malloc(size)) == NULL)
                return (-1);

        /*
         * Items are numbered from 1 to nmemb, so offset from size bytes
         * below the starting address.
         */
        base = (char *)vbase - size;

        for (l = nmemb / 2 + 1; --l;)
                CREATE(l, nmemb, i, j, t, p, size, cnt, tmp);

        /*
         * For each element of the heap, save the largest element into its
         * final slot, save the displaced element (k), then recreate the
         * heap.
         */
        while (nmemb > 1) {
                COPY(k, base + nmemb * size, cnt, size, tmp1, tmp2);
                COPY(base + nmemb * size, base + size, cnt, size, tmp1, tmp2);
                --nmemb;
                SELECT(i, j, nmemb, t, p, size, k, cnt, tmp1, tmp2);
        }
        free(k);
        return (0);
}
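/*
 * Illustrative example (not part of the original file): bsd_heapsort has the
 * same calling convention as bsd_mergesort, an element count, an element
 * size, and a context-taking comparator.  The names below (compareLongs,
 * data) are hypothetical.
 *
 *     static int compareLongs(const void *a, const void *b, void *context) {
 *         long x = *(const long *)a, y = *(const long *)b;
 *         return (x < y) ? -1 : (x > y) ? 1 : 0;
 *     }
 *
 *     long data[5] = {30, 10, 50, 20, 40};
 *     bsd_heapsort(data, 5, sizeof(long), compareLongs, NULL);
 *     // data is now {10, 20, 30, 40, 50}
 */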
/* ===================================================================== */