/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Peter McIlroy.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)merge.c	8.2 (Berkeley) 2/14/94";
#endif /* LIBC_SCCS and not lint */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/lib/libc/stdlib/merge.c,v 1.8 2007/01/09 00:28:10 imp Exp $");
/*
 * Hybrid exponential search/linear search merge sort with hybrid
 * natural/pairwise first pass.  Requires about .3% more comparisons
 * for random data than LSMS with pairwise first pass alone.
 * It works for objects as small as two bytes.
 */

#define THRESHOLD 16	/* Best choice for natural merge cut-off. */

/* #define NATURAL to get hybrid natural merge.
 * (The default is pairwise merging.)
 */
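/*
 * A minimal caller sketch, assuming the BSD mergesort(3) prototype from
 * <stdlib.h>; the comparator name, array contents, and <stdio.h> output
 * below are illustrative choices, not part of this file.  The argument
 * order matches qsort(3); failure is reported as -1 with errno set.
 */
#include <stdio.h>
#include <stdlib.h>

static int
cmp_int(const void *a, const void *b)
{
	int x = *(const int *)a, y = *(const int *)b;

	/* Return <0, 0, >0 without risking integer overflow. */
	return ((x > y) - (x < y));
}

int
main(void)
{
	int v[] = { 4, 1, 3, 2 };
	size_t i;

	if (mergesort(v, sizeof(v) / sizeof(v[0]), sizeof(v[0]), cmp_int) == -1) {
		perror("mergesort");
		return (1);
	}
	for (i = 0; i < sizeof(v) / sizeof(v[0]); i++)
		printf("%d\n", v[i]);
	return (0);
}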
#include <sys/types.h>

#include <errno.h>
#include <stdlib.h>
#include <string.h>
static void setup(u_char *, u_char *, size_t, size_t,
    int (*)(const void *, const void *));
static void insertionsort(u_char *, size_t, size_t,
    int (*)(const void *, const void *));
#define ISIZE sizeof(int)
#define PSIZE sizeof(u_char *)
#define ICOPY_LIST(src, dst, last)				\
	do							\
	*(int*)dst = *(int*)src, src += ISIZE, dst += ISIZE;	\
	while(src < last)
#define ICOPY_ELT(src, dst, i)					\
	do							\
	*(int*) dst = *(int*) src, src += ISIZE, dst += ISIZE;	\
	while (i -= ISIZE)
#define CCOPY_LIST(src, dst, last)	\
	do *dst++ = *src++; while (src < last)
#define CCOPY_ELT(src, dst, i)		\
	do *dst++ = *src++; while (i -= 1)
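/*
 * Example (illustrative sizes): mergesort() decides once whether size is a
 * multiple of sizeof(int) and base is int-aligned.  If so, the ICOPY_*
 * macros copy an int per step (e.g. a malloc'd array of 4-byte ints);
 * otherwise the byte-wise CCOPY_* macros are used (e.g. packed 3-byte
 * records).
 */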
/*
 * Find the next possible pointer head.  (Trickery for forcing an array
 * to do double duty as a linked list when objects do not align with word
 * boundaries.)
 */
/* Assumption: PSIZE is a power of 2. */
#define	EVAL(p) (u_char **)						\
	((u_char *)0 +							\
	    (((u_char *)p + PSIZE - 1 - (u_char *) 0) & ~(PSIZE - 1)))
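/*
 * Worked example (illustrative numbers): with PSIZE == 8 and list2 itself
 * 8-byte aligned, EVAL(list2 + 3) yields (u_char **)(list2 + 8) -- the
 * address rounded up to the next pointer-aligned slot.  That slot is where
 * each run's "next" link is stored, which is what lets the byte array
 * double as a linked list of runs.
 */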
/*
 * Arguments are as for qsort.
 */
int
mergesort(base, nmemb, size, cmp)
	void *base;
	size_t nmemb;
	size_t size;
	int (*cmp)(const void *, const void *);
{
	size_t i;
	int sense;
	int big, iflag;
	u_char *f1, *f2, *t, *b, *tp2, *q, *l1, *l2;
	u_char *list2, *list1, *p2, *p, *last, **p1;

	if (size < PSIZE / 2) {		/* Pointers must fit into 2 * size. */
		errno = EINVAL;
		return (-1);
	}
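	/*
	 * Concretely (illustrative arithmetic): the run links stored in list2
	 * must fit a u_char * across two adjacent elements, so 2 * size has
	 * to be at least PSIZE.  With 4-byte pointers that permits the 2-byte
	 * objects mentioned in the header comment; with 8-byte pointers the
	 * minimum element size is 4, and anything smaller fails with EINVAL.
	 */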
	/*
	 * Stupid subtraction for the Cray.
	 */
	iflag = 0;
	if (!(size % ISIZE) && !(((char *)base - (char *)0) % ISIZE))
		iflag = 1;
	if ((list2 = malloc(nmemb * size + PSIZE)) == NULL)
		return (-1);

	list1 = base;
	setup(list1, list2, nmemb, size, cmp);
	last = list2 + nmemb * size;
	while (*EVAL(list2) != last) {
		for (tp2 = p2 = list2; p2 != last; p1 = EVAL(l2)) {
			f2 = l1 = list1 + (p2 - list2);
			if (p2 != last)
				p2 = *EVAL(p2);
			l2 = list1 + (p2 - list2);
			while (f1 < l1 && f2 < l2) {
				if ((*cmp)(f1, f2) <= 0) {
					if (!big) {	/* here i = 0 */
						while ((b += size) < t && cmp(q, b) > sense)
	EXPONENTIAL:			for (i = size; ; i <<= 1)
						if ((p = (b + i)) >= t) {
							if ((p = t - size) > b &&
							    (*cmp)(q, p) <= sense)
						} else if ((*cmp)(q, p) <= sense) {
					i = (((t - b) / size) >> 1) * size;
					if ((*cmp)(q, p = b + i) <= sense)
	FASTCASE:			while (i > size)
						if ((*cmp)(q,
						    p = b + (i >>= 1)) <= sense)
					if (iflag) {
						ICOPY_LIST(f2, tp2, b);
						ICOPY_ELT(f1, tp2, i);
					} else {
						CCOPY_LIST(f2, tp2, b);
						CCOPY_ELT(f1, tp2, i);
					}
				} else {
					if (iflag) {
						ICOPY_LIST(f1, tp2, b);
						ICOPY_ELT(f2, tp2, i);
					} else {
						CCOPY_LIST(f1, tp2, b);
						CCOPY_ELT(f2, tp2, i);
					}
				}
				if (iflag)
					ICOPY_LIST(f2, tp2, l2);
				else
					CCOPY_LIST(f2, tp2, l2);
			} else if (f1 < l1) {
				if (iflag)
					ICOPY_LIST(f1, tp2, l1);
				else
					CCOPY_LIST(f1, tp2, l1);
			}
		tp2 = list1;	/* swap list1, list2 */
		list1 = list2;
		list2 = tp2;
		last = list2 + nmemb*size;
	/* Copy the sorted data back if it ended up in the temporary buffer. */
	memmove(list2, list1, nmemb*size);
#define swap(a, b) { \
	tmp = *a; *a++ = *s; *s++ = tmp; \
#define reverse(bot, top) { \
	tmp = *bot; *bot++ = *s; *s++ = tmp; \
/*
 * Optional hybrid natural/pairwise first pass.  Eats up list1 in runs of
 * increasing order, list2 in a corresponding linked list.  Checks for runs
 * when THRESHOLD/2 pairs compare with same sense.  (Only used when NATURAL
 * is defined.  Otherwise simple pairwise merging is used.)
 */
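/*
 * For example (illustrative input): with pairwise merging, the first pass
 * over 3 1 4 1 5 9 sorts each adjacent pair in place, leaving list1 as
 * 1 3 | 1 4 | 5 9, and writes a link at each run head in list2 pointing to
 * the next run, so the merge passes in mergesort() can walk the runs
 * through list2 alone.
 */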
void
setup(list1, list2, n, size, cmp)
	size_t n, size;
	int (*cmp)(const void *, const void *);
	u_char *list1, *list2;
{
	int length, tmp, sense;
	u_char *f1, *f2, *s, *l2, *last, *p2;
		insertionsort(list1, n, size, cmp);
		*EVAL(list2) = (u_char*) list2 + n*size;
	/*
	 * Avoid running pointers out of bounds; limit n to evens
	 * for simplicity.
	 */
	insertionsort(list1 + (n - i) * size, i, size, cmp);
	last = list1 + size * (n - i);
	*EVAL(list2 + (last - list1)) = list2 + n * size;
	sense = (cmp(f1, f1 + size) > 0);
	for (; f1 < last; sense = !sense) {
		/* Find pairs with same sense. */
		for (f2 = f1 + size2; f2 < last; f2 += size2) {
			if ((cmp(f2, f2 + size) > 0) != sense)
		if (length < THRESHOLD) {		/* Pairwise merge */
			do {
				p2 = *EVAL(p2) = f1 + size2 - list1 + list2;
				if (sense > 0)
					swap (f1, f1 + size);
			} while ((f1 += size2) < f2);
		} else {				/* Natural merge */
			for (f2 = f1 + size2; f2 < l2; f2 += size2) {
				if ((cmp(f2 - size, f2) > 0) != sense) {
					p2 = *EVAL(p2) = f2 - list1 + list2;
						reverse(f1, f2 - size);
			reverse (f1, f2 - size);
			if (f2 < last || cmp(f2 - size, f2) > 0)
				p2 = *EVAL(p2) = f2 - list1 + list2;
	p2 = *EVAL(p2) = list2 + n*size;
#else /* pairwise merge only. */
	for (f1 = list1, p2 = list2; f1 < last; f1 += size2) {
		p2 = *EVAL(p2) = p2 + size2;
		if (cmp (f1, f1 + size) > 0)
			swap (f1, f1 + size);
	}
/*
 * This is to avoid out-of-bounds addresses in sorting the
 * last 4 elements.
 */
void
insertionsort(a, n, size, cmp)
	u_char *a;
	size_t n, size;
	int (*cmp)(const void *, const void *);
{
	u_char *ai, *s, *t, *u, tmp;
	for (ai = a + size; --n >= 1; ai += size)
		for (t = ai; t > a; t -= size) {