/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
	Copyright 1999-2007, Apple, Inc. All rights reserved.
	Responsibility: Christopher Kane
*/
/* This file contains code copied from the Libc project's files
   qsort.c and merge.c, and modified to suit the
   needs of CF, which needs the comparison callback to have an
   additional parameter. The code is collected into this one
   file so that the individual BSD sort routines can remain
   private and static. */
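/* Editorial sketch (not part of the original sources): the BSD routines take
   a libc-style comparator,

       int compare(const void *a, const void *b);

   while the CF copies below expect an extra context pointer,

       CFComparisonResult compare(const void *a, const void *b, void *context);

   which is what the cmp_t typedef and the CMP() macro later in this file
   encode. */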
#include <CoreFoundation/CFBase.h>
#if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_LINUX || DEPLOYMENT_TARGET_FREEBSD
#include <sys/types.h>
#endif
#include "CFInternal.h"
/* stdlib.subproj/qsort.c ============================================== */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)qsort.c	8.1 (Berkeley) 6/4/93";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/stdlib/qsort.c,v 1.12 2002/09/10 02:04:49 wollman Exp $");
#if defined(I_AM_QSORT_R)
typedef int cmp_t(void *, const void *, const void *);
#else
typedef CFComparisonResult cmp_t(const void *, const void *, void *);
#endif

static inline char *med3(char *, char *, char *, cmp_t *, void *);
static inline void swapfunc(char *, char *, long, long);

#define min(a, b)	(a) < (b) ? a : b
/*
 * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
 */
#define swapcode(TYPE, parmi, parmj, n) {	\
    long i = (n) / sizeof (TYPE);		\
    TYPE *pi = (TYPE *) (parmi);		\
    TYPE *pj = (TYPE *) (parmj);		\

#define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
    es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1;
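/* Editorial note (not in the BSD sources): SWAPINIT picks the cheapest safe
   swap strategy.  swaptype == 0 means the elements are exactly one long and
   long-aligned (single-word swap), 1 means long-aligned multiples of a long
   (word-at-a-time swapfunc), and 2 means arbitrary bytes (byte-at-a-time
   swapfunc).  For example, on an LP64 system an aligned array of 8-byte
   pointers gets swaptype 0, while an array of 6-byte structs falls back to
   swaptype 2. */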
static inline void
swapfunc(char *a, char *b, long n, long swaptype)
{
    if (swaptype <= 1)
        swapcode(long, a, b, n)
    else
        swapcode(char, a, b, n)
}

#define swap(a, b)				\
    if (swaptype == 0) {			\
        long t = *(long *)(a);			\
        *(long *)(a) = *(long *)(b);		\
        *(long *)(b) = t;			\
    } else					\
        swapfunc(a, b, es, swaptype)

#define vecswap(a, b, n)	if ((n) > 0) swapfunc(a, b, n, swaptype)
#if defined(I_AM_QSORT_R)
#define CMP(t, x, y) (cmp((t), (x), (y)))
#else
#define CMP(t, x, y) (cmp((x), (y), (t)))
#endif
static inline char *
med3(char *a, char *b, char *c, cmp_t *cmp, void *thunk)
{
    return CMP(thunk, a, b) < 0 ?
           (CMP(thunk, b, c) < 0 ? b : (CMP(thunk, a, c) < 0 ? c : a ))
          :(CMP(thunk, b, c) > 0 ? b : (CMP(thunk, a, c) < 0 ? a : c ));
}
#if defined(I_AM_QSORT_R)
void
qsort_r(void *a, size_t n, size_t es, void *thunk, cmp_t *cmp)
#else
static void
bsd_qsort(void *a, size_t n, size_t es, cmp_t *cmp, void *thunk)
#endif
{
    char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
    long d, r, swaptype, swap_cnt;

loop:	SWAPINIT(a, es);
    for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
        for (pl = pm;
             pl > (char *)a && CMP(thunk, pl - es, pl) > 0;
             pl -= es)
            swap(pl, pl - es);
    pm = (char *)a + (n / 2) * es;
    pn = (char *)a + (n - 1) * es;
    pl = med3(pl, pl + d, pl + 2 * d, cmp, thunk);
    pm = med3(pm - d, pm, pm + d, cmp, thunk);
    pn = med3(pn - 2 * d, pn - d, pn, cmp, thunk);
    pm = med3(pl, pm, pn, cmp, thunk);
    pa = pb = (char *)a + es;
    pc = pd = (char *)a + (n - 1) * es;
    while (pb <= pc && (r = CMP(thunk, pb, a)) <= 0) {
    while (pb <= pc && (r = CMP(thunk, pc, a)) >= 0) {
    if (swap_cnt == 0) {	/* Switch to insertion sort */
        for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
            for (pl = pm;
                 pl > (char *)a && CMP(thunk, pl - es, pl) > 0;
                 pl -= es)
                swap(pl, pl - es);
    pn = (char *)a + n * es;
    r = min(pa - (char *)a, pb - pa);
    vecswap(a, pb - r, r);
    r = min(pd - pc, pn - pd - es);
    vecswap(pb, pn - r, r);
    if ((r = pb - pa) > es)
#if defined(I_AM_QSORT_R)
        qsort_r(a, r / es, es, thunk, cmp);
#else
        bsd_qsort(a, r / es, es, cmp, thunk);
#endif
    if ((r = pd - pc) > es) {
        /* Iterate rather than recurse to save stack space */
        a = pn - r;
        n = r / es;
        goto loop;
    }
/*  qsort(pn - r, r / es, es, cmp);*/
}
/* And now for something not so completely different: a copy/paste version that
   uses write barriers, so as to notify the GC of changes as necessary. */
#define ASSIGN __CFObjCStrongAssign
//#define ASSIGN objc_assign_strongCast
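/* Editorial note (not in the original file): under GC, a store of an object
   pointer into collector-scanned memory must go through a strong-assign write
   barrier rather than a plain assignment, so the collector can track the new
   reference.  That is why the *_wb variants below replace plain word swaps
   with ASSIGN() calls, and why CFQSortArray/CFMergeSortArray take the *_wb
   path only when the buffer lives in scanned collectable memory. */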
#define swapcode_wb(TYPE, parmi, parmj, n) {	\
    long i = (n) / sizeof (TYPE);		\
    TYPE *pi = (TYPE *) (parmi);		\
    TYPE *pj = (TYPE *) (parmj);		\
static void
swapfunc_wb(char *a, char *b, long n, long swaptype)
{
    if (swaptype <= 1)
        swapcode_wb(void *, a, b, n)
    else
        swapcode(char, a, b, n)
}
#define swap_wb(a, b) \
    if (swaptype == 0) {				\
        const void *t = *(const void **)(a);		\
        ASSIGN(*(void **)(b), (const void **)a);	\
        ASSIGN(t, (const void **)(b));			\
    } else						\
        printf("bad things happening\n");
        //swapfunc_wb(a, b, es, swaptype)

#define vecswap_wb(a, b, n)	if ((n) > 0) swapfunc_wb(a, b, n, swaptype)
static void
bsd_qsort_wb(void *a, size_t n, size_t es, cmp_t *cmp, void *thunk)
{
    char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
    long d, r, swaptype, swap_cnt;

loop:	SWAPINIT(a, es);
    for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
        for (pl = pm;
             pl > (char *)a && CMP(thunk, pl - es, pl) > 0;
             pl -= es)
            swap_wb(pl, pl - es);
    pm = (char *)a + (n / 2) * es;
    pn = (char *)a + (n - 1) * es;
    pl = med3(pl, pl + d, pl + 2 * d, cmp, thunk);
    pm = med3(pm - d, pm, pm + d, cmp, thunk);
    pn = med3(pn - 2 * d, pn - d, pn, cmp, thunk);
    pm = med3(pl, pm, pn, cmp, thunk);
    pa = pb = (char *)a + es;
    pc = pd = (char *)a + (n - 1) * es;
    while (pb <= pc && (r = CMP(thunk, pb, a)) <= 0) {
    while (pb <= pc && (r = CMP(thunk, pc, a)) >= 0) {
    if (swap_cnt == 0) {	/* Switch to insertion sort */
        for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
            for (pl = pm;
                 pl > (char *)a && CMP(thunk, pl - es, pl) > 0;
                 pl -= es)
                swap_wb(pl, pl - es);
    pn = (char *)a + n * es;
    r = min(pa - (char *)a, pb - pa);
    vecswap_wb(a, pb - r, r);
    r = min(pd - pc, pn - pd - es);
    vecswap_wb(pb, pn - r, r);
    if ((r = pb - pa) > es)
        bsd_qsort_wb(a, r / es, es, cmp, thunk);
    if ((r = pd - pc) > es) {
        /* Iterate rather than recurse to save stack space */
        a = pn - r;
        n = r / es;
        goto loop;
    }
/*  qsort(pn - r, r / es, es, cmp);*/
}
/* Comparator is passed the address of the values. */
void CFQSortArray(void *list, CFIndex count, CFIndex elementSize, CFComparatorFunction comparator, void *context) {
    if (CF_USING_COLLECTABLE_MEMORY && (auto_zone_get_layout_type(__CFCollectableZone, list) & AUTO_UNSCANNED) == 0)
        bsd_qsort_wb(list, count, elementSize, comparator, context);
    else
        bsd_qsort(list, count, elementSize, comparator, context);
}
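/* Editorial sketch (not part of the original file): a hypothetical caller of
   CFQSortArray(), sorting a plain C array of longs.  The comparator name and
   the NULL context are assumptions made for this example.

       static CFComparisonResult compareLongs(const void *a, const void *b, void *context) {
           long x = *(const long *)a, y = *(const long *)b;
           if (x < y) return kCFCompareLessThan;
           if (x > y) return kCFCompareGreaterThan;
           return kCFCompareEqualTo;
       }

       long values[4] = {3, 1, 4, 2};
       CFQSortArray(values, 4, sizeof(long), compareLongs, NULL);

   As the comment above notes, the comparator receives the addresses of the two
   elements, not the element values themselves. */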
/* stdlib.subproj/mergesort.c ========================================== */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)merge.c	8.2 (Berkeley) 2/14/94";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/stdlib/merge.c,v 1.6 2002/03/21 22:48:42 obrien Exp $");
/*
 * Hybrid exponential search/linear search merge sort with hybrid
 * natural/pairwise first pass.  Requires about .3% more comparisons
 * for random data than LSMS with pairwise first pass alone.
 * It works for objects as small as two bytes.
 */

#define THRESHOLD 16	/* Best choice for natural merge cut-off. */

/* #define NATURAL to get hybrid natural merge.
 * (The default is pairwise merging.)
 */
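/* Editorial note (not in the BSD sources): with NATURAL defined, the first
   pass looks for stretches of the input that are already ordered instead of
   always starting from two-element runs.  Conceptually, an input such as
   1 3 5 2 4 9 8 7 begins as the runs [1 3 5], [2 4 9] and the descending run
   [8 7], which setup() reverses in place to [7 8]; the merge pass then
   combines whole runs rather than fixed-size pairs. */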
#include <sys/types.h>

static void setup(u_char *, u_char *, size_t, size_t, CFComparisonResult (*)(), void *);
static void insertionsort(u_char *, size_t, size_t, CFComparisonResult (*)(), void *);

#define ISIZE sizeof(long)
#define PSIZE sizeof(u_char *)
#define ICOPY_LIST(src, dst, last)				\
    do								\
    *(long*)dst = *(long*)src, src += ISIZE, dst += ISIZE;	\
    while(src < last)
#define ICOPY_ELT(src, dst, i)					\
    do								\
    *(long*) dst = *(long*) src, src += ISIZE, dst += ISIZE;	\
    while (i -= ISIZE)

#define CCOPY_LIST(src, dst, last)	\
    do					\
        *dst++ = *src++;		\
    while (src < last)
#define CCOPY_ELT(src, dst, i)		\
    do					\
        *dst++ = *src++;		\
    while (i -= 1)

/*
 * Find the next possible pointer head.  (Trickery for forcing an array
 * to do double duty as a linked list when objects do not align with word
 * boundaries.)
 */
/* Assumption: PSIZE is a power of 2. */
#define EVAL(p) (u_char **)						\
    ((u_char *)0 +							\
        (((u_char *)p + PSIZE - 1 - (u_char *) 0) & ~(PSIZE - 1)))
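/* Editorial note (not in the BSD sources): EVAL(p) rounds p up to the next
   PSIZE-aligned address and treats that location as a u_char ** link slot.
   For example, with PSIZE == 8, a pointer at offset 21 within the buffer maps
   to the link stored at offset 24, while an already aligned offset such as 24
   maps to itself. */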
/*
 * Arguments are as for qsort.
 */
bsd_mergesort(void *base, size_t nmemb, size_t size, CFComparisonResult (*cmp)(const void *, const void *, void *), void *context)
{
    u_char *f1, *f2, *t, *b, *tp2, *q, *l1, *l2;
    u_char *list2, *list1, *p2, *p, *last, **p1;
    if (size < PSIZE / 2) {		/* Pointers must fit into 2 * size. */
    /*
     * Stupid subtraction for the Cray.
     */
    if (!(size % ISIZE) && !(((char *)base - (char *)0) % ISIZE))
    if ((list2 = malloc(nmemb * size + PSIZE)) == NULL)
    setup(list1, list2, nmemb, size, cmp, context);
    last = list2 + nmemb * size;
    while (*EVAL(list2) != last) {
        for (tp2 = p2 = list2; p2 != last; p1 = EVAL(l2)) {
            f2 = l1 = list1 + (p2 - list2);
            l2 = list1 + (p2 - list2);
            while (f1 < l1 && f2 < l2) {
                if ((*cmp)(f1, f2, context) <= 0) {
                if (!big) {	/* here i = 0 */
                    while ((b += size) < t && cmp(q, b, context) > sense)
EXPONENTIAL:        for (i = size; ; i <<= 1)
                        if ((p = (b + i)) >= t) {
                            if ((p = t - size) > b &&
                                (*cmp)(q, p, context) <= sense)
                        } else if ((*cmp)(q, p, context) <= sense) {
                    i = (((t - b) / size) >> 1) * size;
                    if ((*cmp)(q, p = b + i, context) <= sense)
FASTCASE:           while (i > size)
                        if ((*cmp)(q,
                            p = b + (i >>= 1), context) <= sense)
                        ICOPY_LIST(f2, tp2, b);
                        ICOPY_ELT(f1, tp2, i);
                        CCOPY_LIST(f2, tp2, b);
                        CCOPY_ELT(f1, tp2, i);
                        ICOPY_LIST(f1, tp2, b);
                        ICOPY_ELT(f2, tp2, i);
                        CCOPY_LIST(f1, tp2, b);
                        CCOPY_ELT(f2, tp2, i);
                ICOPY_LIST(f2, tp2, l2);
                CCOPY_LIST(f2, tp2, l2);
            } else if (f1 < l1) {
                ICOPY_LIST(f1, tp2, l1);
                CCOPY_LIST(f1, tp2, l1);
        tp2 = list1;	/* swap list1, list2 */
        last = list2 + nmemb*size;
        memmove(list2, list1, nmemb*size);
#define swap(a, b) {				\
        tmp = *a; *a++ = *s; *s++ = tmp;	\
#define reverse(bot, top) {			\
            tmp = *bot; *bot++ = *s; *s++ = tmp; \
/*
 * Optional hybrid natural/pairwise first pass.  Eats up list1 in runs of
 * increasing order, list2 in a corresponding linked list.  Checks for runs
 * when THRESHOLD/2 pairs compare with same sense.  (Only used when NATURAL
 * is defined.  Otherwise simple pairwise merging is used.)
 */
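/* Editorial note (not in the BSD sources): setup() leaves list2 doubling as a
   linked list of run boundaries: the link read through *EVAL(list2 + off)
   records where the sorted run beginning at list1 + off ends, i.e. where the
   next run starts.  bsd_mergesort()'s outer loop keeps merging adjacent runs
   until the first run spans the whole array, which is the
   (*EVAL(list2) != last) test above. */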
static void
setup(u_char *list1, u_char *list2, size_t n, size_t size, CFComparisonResult (*cmp)(const void *, const void *, void *), void *context)
{
    long i, length, size2, tmp, sense;
    u_char *f1, *f2, *s, *l2, *last, *p2;
    insertionsort(list1, n, size, cmp, context);
    *EVAL(list2) = (u_char *) list2 + n*size;
    /*
     * Avoid running pointers out of bounds; limit n to evens
     */
    insertionsort(list1 + (n - i) * size, i, size, cmp, context);
    last = list1 + size * (n - i);
    *EVAL(list2 + (last - list1)) = list2 + n * size;
    sense = (cmp(f1, f1 + size, context) > 0);
    for (; f1 < last; sense = !sense)
        /* Find pairs with same sense. */
        for (f2 = f1 + size2; f2 < last; f2 += size2) {
            if ((cmp(f2, f2 + size, context) > 0) != sense)
        if (length < THRESHOLD) {		/* Pairwise merge */
                p2 = *EVAL(p2) = f1 + size2 - list1 + list2;
                    swap (f1, f1 + size);
            } while ((f1 += size2) < f2);
        } else {				/* Natural merge */
            for (f2 = f1 + size2; f2 < l2; f2 += size2) {
                if ((cmp(f2-size, f2, context) > 0) != sense) {
                    p2 = *EVAL(p2) = f2 - list1 + list2;
                        reverse(f1, f2-size);
                reverse (f1, f2-size);
            if (f2 < last || cmp(f2 - size, f2, context) > 0)
                p2 = *EVAL(p2) = f2 - list1 + list2;
                p2 = *EVAL(p2) = list2 + n*size;
#else /* pairwise merge only. */
    for (f1 = list1, p2 = list2; f1 < last; f1 += size2) {
        p2 = *EVAL(p2) = p2 + size2;
        if (cmp (f1, f1 + size, context) > 0)
/*
 * This is to avoid out-of-bounds addresses in sorting the
 * last 4 elements.
 */
static void
insertionsort(u_char *a, size_t n, size_t size, CFComparisonResult (*cmp)(const void *, const void *, void *), void *context)
{
    u_char *ai, *s, *t, *u, tmp;
    for (ai = a+size; --n >= 1; ai += size)
        for (t = ai; t > a; t -= size) {
            if (cmp(u, t, context) <= 0)
/* Another version, also not so completely different, in order to handle the
   necessary write barriers in the face of GC. */

#define ASSIGN __CFObjCStrongAssign
//#define ASSIGN log_assign
static void setup_wb(u_char *, u_char *, size_t, size_t, CFComparisonResult (*)(), void *);
static void insertionsort_wb(u_char *, size_t, size_t, CFComparisonResult (*)(), void *);
#define ICOPY_ELT(src, dst, i)							\
    do										\
    ASSIGN(*(const void**)src, (const void *)dst), src += ISIZE, dst += ISIZE;	\
    while (i -= ISIZE)

#define ICOPY_LIST(src, dst, last)						\
    do										\
    ASSIGN(*(const void **)src, (const void *)dst), src += ISIZE, dst += ISIZE; \
    while(src < last)
/*
 * Arguments are as for qsort.
 */
bsd_mergesort_wb(void *base, size_t nmemb, size_t size, CFComparisonResult (*cmp)(const void *, const void *, void *), void *context)
{
    u_char *f1, *f2, *t, *b, *tp2, *q, *l1, *l2;
    u_char *list2, *list1, *p2, *p, *last, **p1;
    if (size < PSIZE / 2) {		/* Pointers must fit into 2 * size. */
    /*
     * Stupid subtraction for the Cray.
     */
    if (!(size % ISIZE) && !(((char *)base - (char *)0) % ISIZE))

    return -1;	// only set up for "integer" swaps, e.g. long integer

    if ((list2 = CFAllocatorAllocate(NULL, (nmemb * size + PSIZE), __kCFAllocatorGCScannedMemory)) == NULL)
    setup_wb(list1, list2, nmemb, size, cmp, context);
    last = list2 + nmemb * size;
    while (*EVAL(list2) != last) {
        for (tp2 = p2 = list2; p2 != last; p1 = EVAL(l2)) {
            f2 = l1 = list1 + (p2 - list2);
            l2 = list1 + (p2 - list2);
            while (f1 < l1 && f2 < l2) {
                if ((*cmp)(f1, f2, context) <= 0) {
                if (!big) {	/* here i = 0 */
                    while ((b += size) < t && cmp(q, b, context) > sense)
EXPONENTIAL:        for (i = size; ; i <<= 1)
                        if ((p = (b + i)) >= t) {
                            if ((p = t - size) > b &&
                                (*cmp)(q, p, context) <= sense)
                        } else if ((*cmp)(q, p, context) <= sense) {
                    i = (((t - b) / size) >> 1) * size;
                    if ((*cmp)(q, p = b + i, context) <= sense)
FASTCASE:           while (i > size)
                        if ((*cmp)(q,
                            p = b + (i >>= 1), context) <= sense)
                        ICOPY_LIST(f2, tp2, b);
                        ICOPY_ELT(f1, tp2, i);
                        CCOPY_LIST(f2, tp2, b);
                        CCOPY_ELT(f1, tp2, i);
                        ICOPY_LIST(f1, tp2, b);
                        ICOPY_ELT(f2, tp2, i);
                        CCOPY_LIST(f1, tp2, b);
                        CCOPY_ELT(f2, tp2, i);
                ICOPY_LIST(f2, tp2, l2);
                CCOPY_LIST(f2, tp2, l2);
            } else if (f1 < l1) {
                ICOPY_LIST(f1, tp2, l1);
                CCOPY_LIST(f1, tp2, l1);
        tp2 = list1;	/* swap list1, list2 */
        last = list2 + nmemb*size;
        CF_WRITE_BARRIER_MEMMOVE(list2, list1, nmemb*size);
#define swap_wb(a, b) {				\
    const void *object = *(void **)a;		\
    ASSIGN(*(const void **)b, (const void *)a);	\
    ASSIGN(object, (const void *)b);		\

#define reverse_wb(bot, top) {			\
/*
 * Optional hybrid natural/pairwise first pass.  Eats up list1 in runs of
 * increasing order, list2 in a corresponding linked list.  Checks for runs
 * when THRESHOLD/2 pairs compare with same sense.  (Only used when NATURAL
 * is defined.  Otherwise simple pairwise merging is used.)
 */
static void
setup_wb(u_char *list1, u_char *list2, size_t n, size_t size, CFComparisonResult (*cmp)(const void *, const void *, void *), void *context)
{
    long i, length, size2, tmp, sense;
    u_char *f1, *f2, *s, *l2, *last, *p2;
    insertionsort_wb(list1, n, size, cmp, context);
    *EVAL(list2) = (u_char *) list2 + n*size;
    /*
     * Avoid running pointers out of bounds; limit n to evens
     */
    insertionsort_wb(list1 + (n - i) * size, i, size, cmp, context);
    last = list1 + size * (n - i);
    *EVAL(list2 + (last - list1)) = list2 + n * size;
    sense = (cmp(f1, f1 + size, context) > 0);
    for (; f1 < last; sense = !sense)
        /* Find pairs with same sense. */
        for (f2 = f1 + size2; f2 < last; f2 += size2) {
            if ((cmp(f2, f2 + size, context) > 0) != sense)
        if (length < THRESHOLD) {		/* Pairwise merge */
                p2 = *EVAL(p2) = f1 + size2 - list1 + list2;
                    swap (f1, f1 + size);
            } while ((f1 += size2) < f2);
        } else {				/* Natural merge */
            for (f2 = f1 + size2; f2 < l2; f2 += size2) {
                if ((cmp(f2-size, f2, context) > 0) != sense) {
                    p2 = *EVAL(p2) = f2 - list1 + list2;
                        reverse_wb(f1, f2-size);
                reverse_wb (f1, f2-size);
            if (f2 < last || cmp(f2 - size, f2, context) > 0) {
                p2 = *EVAL(p2) = f2 - list1 + list2;
                p2 = *EVAL(p2) = list2 + n*size;
#else /* pairwise merge only. */
    for (f1 = list1, p2 = list2; f1 < last; f1 += size2) {
        p2 = *EVAL(p2) = p2 + size2;
        if (cmp (f1, f1 + size, context) > 0)
            swap_wb(f1, f1 + size);
/*
 * This is to avoid out-of-bounds addresses in sorting the
 * last 4 elements.
 */
static void
insertionsort_wb(u_char *a, size_t n, size_t size, CFComparisonResult (*cmp)(const void *, const void *, void *), void *context)
{
    u_char *ai, *s, *t, *u, tmp;
    for (ai = a+size; --n >= 1; ai += size)
        for (t = ai; t > a; t -= size) {
            if (cmp(u, t, context) <= 0)
void CFMergeSortArray(void *list, CFIndex count, CFIndex elementSize, CFComparatorFunction comparator, void *context) {
    if (CF_USING_COLLECTABLE_MEMORY && (auto_zone_get_layout_type(__CFCollectableZone, list) & AUTO_UNSCANNED) == 0)
        bsd_mergesort_wb(list, count, elementSize, comparator, context);
    else
        bsd_mergesort(list, count, elementSize, comparator, context);
}
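/* Editorial sketch (not part of the original file): CFMergeSortArray() takes
   the same arguments as CFQSortArray() above, so the compareLongs example
   shown there applies unchanged:

       CFMergeSortArray(values, 4, sizeof(long), compareLongs, NULL);

   One practical difference is that mergesort is a stable sort, so elements
   that compare equal keep their relative order, which the quicksort path does
   not guarantee. */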
/* ===================================================================== */