/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/******************************************************************************
 *
 * Copyright (C) 2008 Jason Evans <jasone@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer
 *    unmodified other than the allowable addition of one or more
 *    copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************
 *
 * cpp macro implementation of left-leaning red-black trees.
 *
 * Usage:
 *
 *   (Optional, see assert(3).)
 *   #define NDEBUG
 *
 *   (Required.)
 *   #include <assert.h>
 *   #include "RedBlackTree.h"
 *   ...
 *
 * All operations are done non-recursively.  Parent pointers are not used, and
 * color bits are stored in the least significant bit of right-child pointers,
 * thus making node linkage as compact as is possible for red-black trees.
 *
 * Some macros use a comparison function pointer, which is expected to have the
 * following prototype:
 *
 *   int (a_cmp *)(a_type *a_node, a_type *a_other);
 *                         ^^^^^^
 *                      or a_key
 *
 * Interpretation of comparison function return values:
 *
 *   -1 : a_node <  a_other
 *    0 : a_node == a_other
 *    1 : a_node >  a_other
 *
 * In all cases, the a_node or a_key macro argument is the first argument to
 * the comparison function, which makes it possible to write comparison
 * functions that treat the first argument specially.
 *
 ******************************************************************************/

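/*
 * Illustrative example (not part of the original header): a node type keyed
 * on an integer, with its embedded linkage and a comparison function that
 * satisfies the contract described above, might look like:
 *
 *   typedef struct my_node_s my_node_t;
 *   struct my_node_s {
 *       int key;
 *       rb_node(my_node_t) link;
 *   };
 *
 *   static int
 *   my_node_cmp(my_node_t *a, my_node_t *b)
 *   {
 *       // Returns -1, 0, or 1, matching the documented contract.
 *       return ((a->key > b->key) - (a->key < b->key));
 *   }
 */
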
#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type)                                                 \
struct {                                                                \
    a_type *rbn_left;                                                   \
    a_type *rbn_right_red;                                              \
}
#else
#define rb_node(a_type)                                                 \
struct {                                                                \
    a_type *rbn_left;                                                   \
    a_type *rbn_right;                                                  \
    bool rbn_red;                                                       \
}
#endif

/* Root structure. */
#define rb_tree(a_type)                                                 \
struct {                                                                \
    a_type *rbt_root;                                                   \
    a_type rbt_nil;                                                     \
}

/* Left accessors. */
#define rbp_left_get(a_type, a_field, a_node)                           \
    ((a_node)->a_field.rbn_left)
#define rbp_left_set(a_type, a_field, a_node, a_left) do {              \
    (a_node)->a_field.rbn_left = a_left;                                \
} while (0)

#ifdef RB_COMPACT
/* Right accessors. */
#define rbp_right_get(a_type, a_field, a_node)                          \
    ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red)           \
      & ((ssize_t)-2)))
#define rbp_right_set(a_type, a_field, a_node, a_right) do {            \
    (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
      | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
} while (0)

/* Color accessors. */
#define rbp_red_get(a_type, a_field, a_node)                            \
    ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red)              \
      & ((size_t)1)))
#define rbp_color_set(a_type, a_field, a_node, a_red) do {              \
    (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t)          \
      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2))                 \
      | ((ssize_t)a_red));                                              \
} while (0)
#define rbp_red_set(a_type, a_field, a_node) do {                       \
    (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t)          \
      (a_node)->a_field.rbn_right_red) | ((size_t)1));                  \
} while (0)
#define rbp_black_set(a_type, a_field, a_node) do {                     \
    (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t)           \
      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2));                \
} while (0)
#else

/* Right accessors. */
#define rbp_right_get(a_type, a_field, a_node)                          \
    ((a_node)->a_field.rbn_right)
#define rbp_right_set(a_type, a_field, a_node, a_right) do {            \
    (a_node)->a_field.rbn_right = a_right;                              \
} while (0)

/* Color accessors. */
#define rbp_red_get(a_type, a_field, a_node)                            \
    ((a_node)->a_field.rbn_red)
#define rbp_color_set(a_type, a_field, a_node, a_red) do {              \
    (a_node)->a_field.rbn_red = (a_red);                                \
} while (0)
#define rbp_red_set(a_type, a_field, a_node) do {                       \
    (a_node)->a_field.rbn_red = true;                                   \
} while (0)
#define rbp_black_set(a_type, a_field, a_node) do {                     \
    (a_node)->a_field.rbn_red = false;                                  \
} while (0)
#endif

/* Node initializer. */
#define rbp_node_new(a_type, a_field, a_tree, a_node) do {              \
    rbp_left_set(a_type, a_field, (a_node), &(a_tree)->rbt_nil);        \
    rbp_right_set(a_type, a_field, (a_node), &(a_tree)->rbt_nil);       \
    rbp_red_set(a_type, a_field, (a_node));                             \
} while (0)

/* Tree initializer. */
#define rb_new(a_type, a_field, a_tree) do {                            \
    (a_tree)->rbt_root = &(a_tree)->rbt_nil;                            \
    rbp_node_new(a_type, a_field, a_tree, &(a_tree)->rbt_nil);          \
    rbp_black_set(a_type, a_field, &(a_tree)->rbt_nil);                 \
} while (0)

/* Tree operations. */
#define rbp_black_height(a_type, a_field, a_tree, r_height) do {        \
    a_type *rbp_bh_t;                                                   \
    for (rbp_bh_t = (a_tree)->rbt_root, (r_height) = 0;                 \
      rbp_bh_t != &(a_tree)->rbt_nil;                                   \
      rbp_bh_t = rbp_left_get(a_type, a_field, rbp_bh_t)) {             \
        if (rbp_red_get(a_type, a_field, rbp_bh_t) == false) {          \
            (r_height)++;                                               \
        }                                                               \
    }                                                                   \
} while (0)

#define rbp_first(a_type, a_field, a_tree, a_root, r_node) do {         \
    for ((r_node) = (a_root);                                           \
      rbp_left_get(a_type, a_field, (r_node)) != &(a_tree)->rbt_nil;    \
      (r_node) = rbp_left_get(a_type, a_field, (r_node))) {             \
    }                                                                   \
} while (0)

#define rbp_last(a_type, a_field, a_tree, a_root, r_node) do {          \
    for ((r_node) = (a_root);                                           \
      rbp_right_get(a_type, a_field, (r_node)) != &(a_tree)->rbt_nil;   \
      (r_node) = rbp_right_get(a_type, a_field, (r_node))) {            \
    }                                                                   \
} while (0)

#define rbp_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do {   \
    if (rbp_right_get(a_type, a_field, (a_node)) != &(a_tree)->rbt_nil) {\
        rbp_first(a_type, a_field, a_tree, rbp_right_get(a_type,        \
          a_field, (a_node)), (r_node));                                \
    } else {                                                            \
        a_type *rbp_n_t = (a_tree)->rbt_root;                           \
        assert(rbp_n_t != &(a_tree)->rbt_nil);                          \
        (r_node) = &(a_tree)->rbt_nil;                                  \
        while (true) {                                                  \
            int rbp_n_cmp = (a_cmp)((a_node), rbp_n_t);                 \
            if (rbp_n_cmp < 0) {                                        \
                (r_node) = rbp_n_t;                                     \
                rbp_n_t = rbp_left_get(a_type, a_field, rbp_n_t);       \
            } else if (rbp_n_cmp > 0) {                                 \
                rbp_n_t = rbp_right_get(a_type, a_field, rbp_n_t);      \
            } else {                                                    \
                break;                                                  \
            }                                                           \
            assert(rbp_n_t != &(a_tree)->rbt_nil);                      \
        }                                                               \
    }                                                                   \
} while (0)

#define rbp_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do {   \
    if (rbp_left_get(a_type, a_field, (a_node)) != &(a_tree)->rbt_nil) {\
        rbp_last(a_type, a_field, a_tree, rbp_left_get(a_type,          \
          a_field, (a_node)), (r_node));                                \
    } else {                                                            \
        a_type *rbp_p_t = (a_tree)->rbt_root;                           \
        assert(rbp_p_t != &(a_tree)->rbt_nil);                          \
        (r_node) = &(a_tree)->rbt_nil;                                  \
        while (true) {                                                  \
            int rbp_p_cmp = (a_cmp)((a_node), rbp_p_t);                 \
            if (rbp_p_cmp < 0) {                                        \
                rbp_p_t = rbp_left_get(a_type, a_field, rbp_p_t);       \
            } else if (rbp_p_cmp > 0) {                                 \
                (r_node) = rbp_p_t;                                     \
                rbp_p_t = rbp_right_get(a_type, a_field, rbp_p_t);      \
            } else {                                                    \
                break;                                                  \
            }                                                           \
            assert(rbp_p_t != &(a_tree)->rbt_nil);                      \
        }                                                               \
    }                                                                   \
} while (0)

#define rb_first(a_type, a_field, a_tree, r_node) do {                  \
    rbp_first(a_type, a_field, a_tree, (a_tree)->rbt_root, (r_node));   \
    if ((r_node) == &(a_tree)->rbt_nil) {                               \
        (r_node) = NULL;                                                \
    }                                                                   \
} while (0)

#define rb_last(a_type, a_field, a_tree, r_node) do {                   \
    rbp_last(a_type, a_field, a_tree, (a_tree)->rbt_root, (r_node));    \
    if ((r_node) == &(a_tree)->rbt_nil) {                               \
        (r_node) = NULL;                                                \
    }                                                                   \
} while (0)

#define rb_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do {    \
    rbp_next(a_type, a_field, a_cmp, a_tree, (a_node), (r_node));       \
    if ((r_node) == &(a_tree)->rbt_nil) {                               \
        (r_node) = NULL;                                                \
    }                                                                   \
} while (0)

#define rb_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do {    \
    rbp_prev(a_type, a_field, a_cmp, a_tree, (a_node), (r_node));       \
    if ((r_node) == &(a_tree)->rbt_nil) {                               \
        (r_node) = NULL;                                                \
    }                                                                   \
} while (0)

#define rb_search(a_type, a_field, a_cmp, a_tree, a_key, r_node) do {   \
    int rbp_se_cmp;                                                     \
    (r_node) = (a_tree)->rbt_root;                                      \
    while ((r_node) != &(a_tree)->rbt_nil                               \
      && (rbp_se_cmp = (a_cmp)((a_key), (r_node))) != 0) {              \
        if (rbp_se_cmp < 0) {                                           \
            (r_node) = rbp_left_get(a_type, a_field, (r_node));         \
        } else {                                                        \
            (r_node) = rbp_right_get(a_type, a_field, (r_node));        \
        }                                                               \
    }                                                                   \
    if ((r_node) == &(a_tree)->rbt_nil) {                               \
        (r_node) = NULL;                                                \
    }                                                                   \
} while (0)

/*
 * Find a match if it exists.  Otherwise, find the next greater node, if one
 * exists.
 */
#define rb_nsearch(a_type, a_field, a_cmp, a_tree, a_key, r_node) do {  \
    a_type *rbp_ns_t = (a_tree)->rbt_root;                              \
    (r_node) = NULL;                                                    \
    while (rbp_ns_t != &(a_tree)->rbt_nil) {                            \
        int rbp_ns_cmp = (a_cmp)((a_key), rbp_ns_t);                    \
        if (rbp_ns_cmp < 0) {                                           \
            (r_node) = rbp_ns_t;                                        \
            rbp_ns_t = rbp_left_get(a_type, a_field, rbp_ns_t);         \
        } else if (rbp_ns_cmp > 0) {                                    \
            rbp_ns_t = rbp_right_get(a_type, a_field, rbp_ns_t);        \
        } else {                                                        \
            (r_node) = rbp_ns_t;                                        \
            break;                                                      \
        }                                                               \
    }                                                                   \
} while (0)

/*
 * Find a match if it exists.  Otherwise, find the previous lesser node, if
 * one exists.
 */
#define rb_psearch(a_type, a_field, a_cmp, a_tree, a_key, r_node) do {  \
    a_type *rbp_ps_t = (a_tree)->rbt_root;                              \
    (r_node) = NULL;                                                    \
    while (rbp_ps_t != &(a_tree)->rbt_nil) {                            \
        int rbp_ps_cmp = (a_cmp)((a_key), rbp_ps_t);                    \
        if (rbp_ps_cmp < 0) {                                           \
            rbp_ps_t = rbp_left_get(a_type, a_field, rbp_ps_t);         \
        } else if (rbp_ps_cmp > 0) {                                    \
            (r_node) = rbp_ps_t;                                        \
            rbp_ps_t = rbp_right_get(a_type, a_field, rbp_ps_t);        \
        } else {                                                        \
            (r_node) = rbp_ps_t;                                        \
            break;                                                      \
        }                                                               \
    }                                                                   \
} while (0)

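/*
 * Illustrative example of the search macro semantics: for a tree containing
 * keys {10, 20, 30}, searching for the (absent) key 25 yields NULL from
 * rb_search(), the node with key 30 from rb_nsearch() (next greater node),
 * and the node with key 20 from rb_psearch() (previous lesser node).
 */
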
#define rbp_rotate_left(a_type, a_field, a_node, r_node) do {           \
    (r_node) = rbp_right_get(a_type, a_field, (a_node));                \
    rbp_right_set(a_type, a_field, (a_node),                            \
      rbp_left_get(a_type, a_field, (r_node)));                         \
    rbp_left_set(a_type, a_field, (r_node), (a_node));                  \
} while (0)

#define rbp_rotate_right(a_type, a_field, a_node, r_node) do {          \
    (r_node) = rbp_left_get(a_type, a_field, (a_node));                 \
    rbp_left_set(a_type, a_field, (a_node),                             \
      rbp_right_get(a_type, a_field, (r_node)));                        \
    rbp_right_set(a_type, a_field, (r_node), (a_node));                 \
} while (0)

#define rbp_lean_left(a_type, a_field, a_node, r_node) do {             \
    bool rbp_ll_red;                                                    \
    rbp_rotate_left(a_type, a_field, (a_node), (r_node));               \
    rbp_ll_red = rbp_red_get(a_type, a_field, (a_node));                \
    rbp_color_set(a_type, a_field, (r_node), rbp_ll_red);               \
    rbp_red_set(a_type, a_field, (a_node));                             \
} while (0)

#define rbp_lean_right(a_type, a_field, a_node, r_node) do {            \
    bool rbp_lr_red;                                                    \
    rbp_rotate_right(a_type, a_field, (a_node), (r_node));              \
    rbp_lr_red = rbp_red_get(a_type, a_field, (a_node));                \
    rbp_color_set(a_type, a_field, (r_node), rbp_lr_red);               \
    rbp_red_set(a_type, a_field, (a_node));                             \
} while (0)

#define rbp_move_red_left(a_type, a_field, a_node, r_node) do {         \
    a_type *rbp_mrl_t, *rbp_mrl_u;                                      \
    rbp_mrl_t = rbp_left_get(a_type, a_field, (a_node));                \
    rbp_red_set(a_type, a_field, rbp_mrl_t);                            \
    rbp_mrl_t = rbp_right_get(a_type, a_field, (a_node));               \
    rbp_mrl_u = rbp_left_get(a_type, a_field, rbp_mrl_t);               \
    if (rbp_red_get(a_type, a_field, rbp_mrl_u)) {                      \
        rbp_rotate_right(a_type, a_field, rbp_mrl_t, rbp_mrl_u);        \
        rbp_right_set(a_type, a_field, (a_node), rbp_mrl_u);            \
        rbp_rotate_left(a_type, a_field, (a_node), (r_node));           \
        rbp_mrl_t = rbp_right_get(a_type, a_field, (a_node));           \
        if (rbp_red_get(a_type, a_field, rbp_mrl_t)) {                  \
            rbp_black_set(a_type, a_field, rbp_mrl_t);                  \
            rbp_red_set(a_type, a_field, (a_node));                     \
            rbp_rotate_left(a_type, a_field, (a_node), rbp_mrl_t);      \
            rbp_left_set(a_type, a_field, (r_node), rbp_mrl_t);         \
        } else {                                                        \
            rbp_black_set(a_type, a_field, (a_node));                   \
        }                                                               \
    } else {                                                            \
        rbp_red_set(a_type, a_field, (a_node));                         \
        rbp_rotate_left(a_type, a_field, (a_node), (r_node));           \
    }                                                                   \
} while (0)

#define rbp_move_red_right(a_type, a_field, a_node, r_node) do {        \
    a_type *rbp_mrr_t;                                                  \
    rbp_mrr_t = rbp_left_get(a_type, a_field, (a_node));                \
    if (rbp_red_get(a_type, a_field, rbp_mrr_t)) {                      \
        a_type *rbp_mrr_u, *rbp_mrr_v;                                  \
        rbp_mrr_u = rbp_right_get(a_type, a_field, rbp_mrr_t);          \
        rbp_mrr_v = rbp_left_get(a_type, a_field, rbp_mrr_u);           \
        if (rbp_red_get(a_type, a_field, rbp_mrr_v)) {                  \
            rbp_color_set(a_type, a_field, rbp_mrr_u,                   \
              rbp_red_get(a_type, a_field, (a_node)));                  \
            rbp_black_set(a_type, a_field, rbp_mrr_v);                  \
            rbp_rotate_left(a_type, a_field, rbp_mrr_t, rbp_mrr_u);     \
            rbp_left_set(a_type, a_field, (a_node), rbp_mrr_u);         \
            rbp_rotate_right(a_type, a_field, (a_node), (r_node));      \
            rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t);      \
            rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t);        \
        } else {                                                        \
            rbp_color_set(a_type, a_field, rbp_mrr_t,                   \
              rbp_red_get(a_type, a_field, (a_node)));                  \
            rbp_red_set(a_type, a_field, rbp_mrr_u);                    \
            rbp_rotate_right(a_type, a_field, (a_node), (r_node));      \
            rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t);      \
            rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t);        \
        }                                                               \
        rbp_red_set(a_type, a_field, (a_node));                         \
    } else {                                                            \
        rbp_red_set(a_type, a_field, rbp_mrr_t);                        \
        rbp_mrr_t = rbp_left_get(a_type, a_field, rbp_mrr_t);           \
        if (rbp_red_get(a_type, a_field, rbp_mrr_t)) {                  \
            rbp_black_set(a_type, a_field, rbp_mrr_t);                  \
            rbp_rotate_right(a_type, a_field, (a_node), (r_node));      \
            rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t);      \
            rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t);        \
        } else {                                                        \
            rbp_rotate_left(a_type, a_field, (a_node), (r_node));       \
        }                                                               \
    }                                                                   \
} while (0)

#define rb_insert(a_type, a_field, a_cmp, a_tree, a_node) do {          \
    a_type rbp_i_s;                                                     \
    a_type *rbp_i_g, *rbp_i_p, *rbp_i_c, *rbp_i_t, *rbp_i_u;            \
    int rbp_i_cmp = 0;                                                  \
    rbp_i_g = &(a_tree)->rbt_nil;                                       \
    rbp_left_set(a_type, a_field, &rbp_i_s, (a_tree)->rbt_root);        \
    rbp_right_set(a_type, a_field, &rbp_i_s, &(a_tree)->rbt_nil);       \
    rbp_black_set(a_type, a_field, &rbp_i_s);                           \
    rbp_i_p = &rbp_i_s;                                                 \
    rbp_i_c = (a_tree)->rbt_root;                                       \
    /* Iteratively search down the tree for the insertion point,      */\
    /* splitting 4-nodes as they are encountered.  At the end of each */\
    /* iteration, rbp_i_g->rbp_i_p->rbp_i_c is a 3-level path down    */\
    /* the tree, assuming a sufficiently deep tree.                   */\
    while (rbp_i_c != &(a_tree)->rbt_nil) {                             \
        rbp_i_t = rbp_left_get(a_type, a_field, rbp_i_c);               \
        rbp_i_u = rbp_left_get(a_type, a_field, rbp_i_t);               \
        if (rbp_red_get(a_type, a_field, rbp_i_t)                       \
          && rbp_red_get(a_type, a_field, rbp_i_u)) {                   \
            /* rbp_i_c is the top of a logical 4-node, so split it.   */\
            /* This iteration does not move down the tree, due to the */\
            /* disruptiveness of node splitting.                      */\
            /*                                                        */\
            /* Rotate right.                                          */\
            rbp_rotate_right(a_type, a_field, rbp_i_c, rbp_i_t);        \
            /* Pass red links up one level.                           */\
            rbp_i_u = rbp_left_get(a_type, a_field, rbp_i_t);           \
            rbp_black_set(a_type, a_field, rbp_i_u);                    \
            if (rbp_left_get(a_type, a_field, rbp_i_p) == rbp_i_c) {    \
                rbp_left_set(a_type, a_field, rbp_i_p, rbp_i_t);        \
                rbp_i_c = rbp_i_t;                                      \
            } else {                                                    \
                /* rbp_i_c was the right child of rbp_i_p, so rotate  */\
                /* left in order to maintain the left-leaning         */\
                /* invariant.                                         */\
                assert(rbp_right_get(a_type, a_field, rbp_i_p) == rbp_i_c); \
                rbp_right_set(a_type, a_field, rbp_i_p, rbp_i_t);       \
                rbp_lean_left(a_type, a_field, rbp_i_p, rbp_i_u);       \
                if (rbp_left_get(a_type, a_field, rbp_i_g) == rbp_i_p) {\
                    rbp_left_set(a_type, a_field, rbp_i_g, rbp_i_u);    \
                } else {                                                \
                    assert(rbp_right_get(a_type, a_field, rbp_i_g) == rbp_i_p); \
                    rbp_right_set(a_type, a_field, rbp_i_g, rbp_i_u);   \
                }                                                       \
                rbp_i_p = rbp_i_u;                                      \
                rbp_i_cmp = (a_cmp)((a_node), rbp_i_p);                 \
                if (rbp_i_cmp < 0) {                                    \
                    rbp_i_c = rbp_left_get(a_type, a_field, rbp_i_p);   \
                } else {                                                \
                    assert(rbp_i_cmp > 0);                              \
                    rbp_i_c = rbp_right_get(a_type, a_field, rbp_i_p);  \
                }                                                       \
                continue;                                               \
            }                                                           \
        }                                                               \
        rbp_i_g = rbp_i_p;                                              \
        rbp_i_p = rbp_i_c;                                              \
        rbp_i_cmp = (a_cmp)((a_node), rbp_i_c);                         \
        if (rbp_i_cmp < 0) {                                            \
            rbp_i_c = rbp_left_get(a_type, a_field, rbp_i_c);           \
        } else {                                                        \
            assert(rbp_i_cmp > 0);                                      \
            rbp_i_c = rbp_right_get(a_type, a_field, rbp_i_c);          \
        }                                                               \
    }                                                                   \
    /* rbp_i_p now refers to the node under which to insert.          */\
    rbp_node_new(a_type, a_field, a_tree, (a_node));                    \
    if (rbp_i_cmp > 0) {                                                \
        rbp_right_set(a_type, a_field, rbp_i_p, (a_node));              \
        rbp_lean_left(a_type, a_field, rbp_i_p, rbp_i_t);               \
        if (rbp_left_get(a_type, a_field, rbp_i_g) == rbp_i_p) {        \
            rbp_left_set(a_type, a_field, rbp_i_g, rbp_i_t);            \
        } else if (rbp_right_get(a_type, a_field, rbp_i_g) == rbp_i_p) {\
            rbp_right_set(a_type, a_field, rbp_i_g, rbp_i_t);           \
        }                                                               \
    } else {                                                            \
        rbp_left_set(a_type, a_field, rbp_i_p, (a_node));               \
    }                                                                   \
    /* Update the root and make sure that it is black.                */\
    (a_tree)->rbt_root = rbp_left_get(a_type, a_field, &rbp_i_s);       \
    rbp_black_set(a_type, a_field, (a_tree)->rbt_root);                 \
} while (0)

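/*
 * Illustrative example (using the hypothetical my_node_t type and
 * my_node_cmp() function sketched near the top of this header): direct use
 * of the insert/remove macros, without the rb_wrap() convenience layer
 * defined below:
 *
 *   typedef rb_tree(my_node_t) my_tree_t;
 *
 *   my_tree_t tree;
 *   my_node_t n;
 *
 *   rb_new(my_node_t, link, &tree);
 *   n.key = 42;
 *   rb_insert(my_node_t, link, my_node_cmp, &tree, &n);
 *   rb_remove(my_node_t, link, my_node_cmp, &tree, &n);
 */
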
#define rb_remove(a_type, a_field, a_cmp, a_tree, a_node) do {          \
    a_type rbp_r_s;                                                     \
    a_type *rbp_r_p, *rbp_r_c, *rbp_r_xp, *rbp_r_t, *rbp_r_u;           \
    int rbp_r_cmp;                                                      \
    rbp_left_set(a_type, a_field, &rbp_r_s, (a_tree)->rbt_root);        \
    rbp_right_set(a_type, a_field, &rbp_r_s, &(a_tree)->rbt_nil);       \
    rbp_black_set(a_type, a_field, &rbp_r_s);                           \
    rbp_r_p = &rbp_r_s;                                                 \
    rbp_r_c = (a_tree)->rbt_root;                                       \
    rbp_r_xp = &(a_tree)->rbt_nil;                                      \
    /* Iterate down the tree, but always transform 2-nodes to 3- or   */\
    /* 4-nodes in order to maintain the invariant that the current    */\
    /* node is not a 2-node.  This allows simple deletion once a leaf */\
    /* is reached.  Handle the root specially though, since there may */\
    /* be no way to convert it from a 2-node to a 3-node.             */\
    rbp_r_cmp = (a_cmp)((a_node), rbp_r_c);                             \
    if (rbp_r_cmp < 0) {                                                \
        rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c);               \
        rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t);               \
        if (rbp_red_get(a_type, a_field, rbp_r_t) == false              \
          && rbp_red_get(a_type, a_field, rbp_r_u) == false) {          \
            /* Apply standard transform to prepare for left move.     */\
            rbp_move_red_left(a_type, a_field, rbp_r_c, rbp_r_t);       \
            rbp_black_set(a_type, a_field, rbp_r_t);                    \
            rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);            \
            rbp_r_c = rbp_r_t;                                          \
        } else {                                                        \
            /* Move left.                                              */\
            rbp_r_p = rbp_r_c;                                          \
            rbp_r_c = rbp_left_get(a_type, a_field, rbp_r_c);           \
        }                                                               \
    } else {                                                            \
        if (rbp_r_cmp == 0) {                                           \
            assert((a_node) == rbp_r_c);                                \
            if (rbp_right_get(a_type, a_field, rbp_r_c) == &(a_tree)->rbt_nil) { \
                /* Delete root node (which is also a leaf node).      */\
                if (rbp_left_get(a_type, a_field, rbp_r_c) != &(a_tree)->rbt_nil) { \
                    rbp_lean_right(a_type, a_field, rbp_r_c, rbp_r_t);  \
                    rbp_right_set(a_type, a_field, rbp_r_t, &(a_tree)->rbt_nil); \
                } else {                                                \
                    rbp_r_t = &(a_tree)->rbt_nil;                       \
                }                                                       \
                rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);        \
            } else {                                                    \
                /* This is the node we want to delete, but we will    */\
                /* instead swap it with its successor and delete the  */\
                /* successor.  Record enough information to do the    */\
                /* swap later.  rbp_r_xp is the a_node's parent.      */\
                rbp_r_xp = rbp_r_p;                                     \
                rbp_r_cmp = 1; /* Note that deletion is incomplete.   */\
            }                                                           \
        }                                                               \
        if (rbp_r_cmp == 1) {                                           \
            if (rbp_red_get(a_type, a_field, rbp_left_get(a_type,       \
              a_field, rbp_right_get(a_type, a_field, rbp_r_c))) == false) { \
                rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c);       \
                if (rbp_red_get(a_type, a_field, rbp_r_t)) {            \
                    /* Standard transform.                             */\
                    rbp_move_red_right(a_type, a_field, rbp_r_c, rbp_r_t); \
                } else {                                                \
                    /* Root-specific transform.                        */\
                    rbp_red_set(a_type, a_field, rbp_r_c);              \
                    rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t);   \
                    if (rbp_red_get(a_type, a_field, rbp_r_u)) {        \
                        rbp_black_set(a_type, a_field, rbp_r_u);        \
                        rbp_rotate_right(a_type, a_field, rbp_r_c, rbp_r_t); \
                        rbp_rotate_left(a_type, a_field, rbp_r_c, rbp_r_u); \
                        rbp_right_set(a_type, a_field, rbp_r_t, rbp_r_u); \
                    } else {                                            \
                        rbp_red_set(a_type, a_field, rbp_r_t);          \
                        rbp_rotate_left(a_type, a_field, rbp_r_c, rbp_r_t); \
                    }                                                   \
                }                                                       \
                rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);        \
                rbp_r_c = rbp_r_t;                                      \
            } else {                                                    \
                /* Move right.                                         */\
                rbp_r_p = rbp_r_c;                                      \
                rbp_r_c = rbp_right_get(a_type, a_field, rbp_r_c);      \
            }                                                           \
        }                                                               \
    }                                                                   \
    if (rbp_r_cmp != 0) {                                               \
        while (true) {                                                  \
            assert(rbp_r_p != &(a_tree)->rbt_nil);                      \
            rbp_r_cmp = (a_cmp)((a_node), rbp_r_c);                     \
            if (rbp_r_cmp < 0) {                                        \
                rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c);       \
                if (rbp_r_t == &(a_tree)->rbt_nil) {                    \
                    /* rbp_r_c now refers to the successor node to    */\
                    /* relocate, and rbp_r_xp/a_node refer to the     */\
                    /* context for the relocation.                    */\
                    if (rbp_left_get(a_type, a_field, rbp_r_xp) == (a_node)) { \
                        rbp_left_set(a_type, a_field, rbp_r_xp, rbp_r_c); \
                    } else {                                            \
                        assert(rbp_right_get(a_type, a_field, rbp_r_xp) == (a_node)); \
                        rbp_right_set(a_type, a_field, rbp_r_xp, rbp_r_c); \
                    }                                                   \
                    rbp_left_set(a_type, a_field, rbp_r_c, rbp_left_get(a_type, a_field, (a_node))); \
                    rbp_right_set(a_type, a_field, rbp_r_c, rbp_right_get(a_type, a_field, (a_node))); \
                    rbp_color_set(a_type, a_field, rbp_r_c, rbp_red_get(a_type, a_field, (a_node))); \
                    if (rbp_left_get(a_type, a_field, rbp_r_p) == rbp_r_c) { \
                        rbp_left_set(a_type, a_field, rbp_r_p, &(a_tree)->rbt_nil); \
                    } else {                                            \
                        assert(rbp_right_get(a_type, a_field, rbp_r_p) == rbp_r_c); \
                        rbp_right_set(a_type, a_field, rbp_r_p, &(a_tree)->rbt_nil); \
                    }                                                   \
                    break;                                              \
                } else {                                                \
                    rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t);   \
                    if (rbp_red_get(a_type, a_field, rbp_r_t) == false  \
                      && rbp_red_get(a_type, a_field, rbp_r_u) == false) { \
                        rbp_move_red_left(a_type, a_field, rbp_r_c, rbp_r_t); \
                        if (rbp_left_get(a_type, a_field, rbp_r_p) == rbp_r_c) { \
                            rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t); \
                        } else {                                        \
                            rbp_right_set(a_type, a_field, rbp_r_p, rbp_r_t); \
                        }                                               \
                        rbp_r_c = rbp_r_t;                              \
                    } else {                                            \
                        rbp_r_p = rbp_r_c;                              \
                        rbp_r_c = rbp_left_get(a_type, a_field, rbp_r_c); \
                    }                                                   \
                }                                                       \
            } else {                                                    \
                /* Check whether to delete this node (it has to be    */\
                /* the correct node and a leaf node).                 */\
                if (rbp_r_cmp == 0) {                                   \
                    assert((a_node) == rbp_r_c);                        \
                    if (rbp_right_get(a_type, a_field, rbp_r_c) == &(a_tree)->rbt_nil) { \
                        /* Delete leaf node.                           */\
                        if (rbp_left_get(a_type, a_field, rbp_r_c) != &(a_tree)->rbt_nil) { \
                            rbp_lean_right(a_type, a_field, rbp_r_c,    \
                              rbp_r_t);                                 \
                            rbp_right_set(a_type, a_field, rbp_r_t, &(a_tree)->rbt_nil); \
                        } else {                                        \
                            rbp_r_t = &(a_tree)->rbt_nil;               \
                        }                                               \
                        if (rbp_left_get(a_type, a_field, rbp_r_p) == rbp_r_c) { \
                            rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t); \
                        } else {                                        \
                            rbp_right_set(a_type, a_field, rbp_r_p, rbp_r_t); \
                        }                                               \
                        break;                                          \
                    } else {                                            \
                        /* This is the node we want to delete, but we */\
                        /* will instead swap it with its successor    */\
                        /* and delete the successor.  Record enough   */\
                        /* information to do the swap later.          */\
                        /* rbp_r_xp is a_node's parent.               */\
                        rbp_r_xp = rbp_r_p;                             \
                    }                                                   \
                }                                                       \
                rbp_r_t = rbp_right_get(a_type, a_field, rbp_r_c);      \
                rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t);       \
                if (rbp_red_get(a_type, a_field, rbp_r_u) == false) {   \
                    rbp_move_red_right(a_type, a_field, rbp_r_c,        \
                      rbp_r_t);                                         \
                    if (rbp_left_get(a_type, a_field, rbp_r_p) == rbp_r_c) { \
                        rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);\
                    } else {                                            \
                        rbp_right_set(a_type, a_field, rbp_r_p, rbp_r_t); \
                    }                                                   \
                    rbp_r_c = rbp_r_t;                                  \
                } else {                                                \
                    rbp_r_p = rbp_r_c;                                  \
                    rbp_r_c = rbp_right_get(a_type, a_field, rbp_r_c);  \
                }                                                       \
            }                                                           \
        }                                                               \
    }                                                                   \
    /* Update root.                                                   */\
    (a_tree)->rbt_root = rbp_left_get(a_type, a_field, &rbp_r_s);       \
} while (0)

/*
 * The rb_wrap() macro provides a convenient way to wrap functions around the
 * cpp macros.  The main benefits of wrapping are that 1) repeated macro
 * expansion can cause code bloat, especially for rb_{insert,remove}(), and
 * 2) type, linkage, comparison functions, etc. need not be specified at
 * every call point.
 */
#define rb_wrap(a_attr, a_prefix, a_tree_type, a_type, a_field, a_cmp)  \
a_attr void                                                             \
a_prefix##new(a_tree_type *tree) {                                      \
    rb_new(a_type, a_field, tree);                                      \
}                                                                       \
a_attr a_type *                                                         \
a_prefix##first(a_tree_type *tree) {                                    \
    a_type *ret;                                                        \
    rb_first(a_type, a_field, tree, ret);                               \
    return (ret);                                                       \
}                                                                       \
a_attr a_type *                                                         \
a_prefix##last(a_tree_type *tree) {                                     \
    a_type *ret;                                                        \
    rb_last(a_type, a_field, tree, ret);                                \
    return (ret);                                                       \
}                                                                       \
a_attr a_type *                                                         \
a_prefix##next(a_tree_type *tree, a_type *node) {                       \
    a_type *ret;                                                        \
    rb_next(a_type, a_field, a_cmp, tree, node, ret);                   \
    return (ret);                                                       \
}                                                                       \
a_attr a_type *                                                         \
a_prefix##prev(a_tree_type *tree, a_type *node) {                       \
    a_type *ret;                                                        \
    rb_prev(a_type, a_field, a_cmp, tree, node, ret);                   \
    return (ret);                                                       \
}                                                                       \
a_attr a_type *                                                         \
a_prefix##search(a_tree_type *tree, a_type *key) {                      \
    a_type *ret;                                                        \
    rb_search(a_type, a_field, a_cmp, tree, key, ret);                  \
    return (ret);                                                       \
}                                                                       \
a_attr a_type *                                                         \
a_prefix##nsearch(a_tree_type *tree, a_type *key) {                     \
    a_type *ret;                                                        \
    rb_nsearch(a_type, a_field, a_cmp, tree, key, ret);                 \
    return (ret);                                                       \
}                                                                       \
a_attr a_type *                                                         \
a_prefix##psearch(a_tree_type *tree, a_type *key) {                     \
    a_type *ret;                                                        \
    rb_psearch(a_type, a_field, a_cmp, tree, key, ret);                 \
    return (ret);                                                       \
}                                                                       \
a_attr void                                                             \
a_prefix##insert(a_tree_type *tree, a_type *node) {                     \
    rb_insert(a_type, a_field, a_cmp, tree, node);                      \
}                                                                       \
a_attr void                                                             \
a_prefix##remove(a_tree_type *tree, a_type *node) {                     \
    rb_remove(a_type, a_field, a_cmp, tree, node);                      \
}

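/*
 * Illustrative example (identifiers are hypothetical): generating a set of
 * static wrapper functions for the my_node_t/my_tree_t types sketched above:
 *
 *   rb_wrap(static, my_tree_, my_tree_t, my_node_t, link, my_node_cmp)
 *
 * This expands to definitions of my_tree_new(), my_tree_first(),
 * my_tree_last(), my_tree_next(), my_tree_prev(), my_tree_search(),
 * my_tree_nsearch(), my_tree_psearch(), my_tree_insert(), and
 * my_tree_remove().
 */
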
/*
 * The iterators simulate recursion via an array of pointers that store the
 * current path.  This is critical to performance, since a series of calls to
 * rb_{next,prev}() would require time proportional to (n lg n), whereas this
 * implementation only requires time proportional to (n).
 *
 * Since the iterators cache a path down the tree, any tree modification may
 * cause the cached path to become invalid.  In order to continue iteration,
 * use something like the following sequence:
 *
 *   {
 *       a_type *node, *tnode;
 *
 *       rb_foreach_begin(a_type, a_field, a_tree, node) {
 *           ...
 *           rb_next(a_type, a_field, a_cmp, a_tree, node, tnode);
 *           rb_remove(a_type, a_field, a_cmp, a_tree, node);
 *           rb_foreach_next(a_type, a_field, a_cmp, a_tree, tnode);
 *           ...
 *       } rb_foreach_end(a_type, a_field, a_tree, node)
 *   }
 *
 * Note that this idiom is not advised if every iteration modifies the tree,
 * since in that case there is no algorithmic complexity improvement over a
 * series of rb_{next,prev}() calls, thus making the setup overhead wasted
 * effort.
 */

#define rb_foreach_begin(a_type, a_field, a_tree, a_var) { /* brace A */ \
    /* Compute the maximum possible tree depth (3X the black height). */\
    unsigned rbp_f_height;                                              \
    rbp_black_height(a_type, a_field, a_tree, rbp_f_height);            \
    rbp_f_height *= 3;                                                  \
    { /* brace B */                                                     \
        /* Initialize the path to contain the left spine.             */\
        a_type *rbp_f_path[rbp_f_height];                               \
        a_type *rbp_f_node;                                             \
        bool rbp_f_synced = false;                                      \
        unsigned rbp_f_depth = 0;                                       \
        if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) {                 \
            rbp_f_path[rbp_f_depth] = (a_tree)->rbt_root;               \
            rbp_f_depth++;                                              \
            while ((rbp_f_node = rbp_left_get(a_type, a_field,          \
              rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) {      \
                rbp_f_path[rbp_f_depth] = rbp_f_node;                   \
                rbp_f_depth++;                                          \
            }                                                           \
        }                                                               \
        /* While the path is non-empty, iterate. */                     \
        while (rbp_f_depth > 0) { /* brace C */                         \
            (a_var) = rbp_f_path[rbp_f_depth-1];

/*
 * Note that rb_foreach_begin omits closing }'s because it expects to be
 * succeeded by a call to rb_foreach_end, which supplies the matching
 * closing }'s.
 */

/* Only use if modifying the tree during iteration. */
#define rb_foreach_next(a_type, a_field, a_cmp, a_tree, a_node)         \
            /* Re-initialize the path to contain the path to a_node.  */\
            rbp_f_depth = 0;                                            \
            if (a_node != NULL) {                                       \
                if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) {         \
                    rbp_f_path[rbp_f_depth] = (a_tree)->rbt_root;       \
                    rbp_f_depth++;                                      \
                    rbp_f_node = rbp_f_path[0];                         \
                    while (true) {                                      \
                        int rbp_f_cmp = (a_cmp)((a_node),               \
                          rbp_f_path[rbp_f_depth-1]);                   \
                        if (rbp_f_cmp < 0) {                            \
                            rbp_f_node = rbp_left_get(a_type, a_field,  \
                              rbp_f_path[rbp_f_depth-1]);               \
                        } else if (rbp_f_cmp > 0) {                     \
                            rbp_f_node = rbp_right_get(a_type, a_field, \
                              rbp_f_path[rbp_f_depth-1]);               \
                        } else {                                        \
                            break;                                      \
                        }                                               \
                        assert(rbp_f_node != &(a_tree)->rbt_nil);       \
                        rbp_f_path[rbp_f_depth] = rbp_f_node;           \
                        rbp_f_depth++;                                  \
                    }                                                   \
                }                                                       \
            }                                                           \
            rbp_f_synced = true;

#define rb_foreach_end(a_type, a_field, a_tree, a_var)                  \
            if (rbp_f_synced) {                                         \
                rbp_f_synced = false;                                   \
                continue;                                               \
            }                                                           \
            /* Find the successor.                                     */\
            if ((rbp_f_node = rbp_right_get(a_type, a_field,            \
              rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) {      \
                /* The successor is the left-most node in the right   */\
                /* subtree.                                           */\
                rbp_f_path[rbp_f_depth] = rbp_f_node;                   \
                rbp_f_depth++;                                          \
                while ((rbp_f_node = rbp_left_get(a_type, a_field,      \
                  rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) {  \
                    rbp_f_path[rbp_f_depth] = rbp_f_node;               \
                    rbp_f_depth++;                                      \
                }                                                       \
            } else {                                                    \
                /* The successor is above the current node.  Unwind   */\
                /* until a left-leaning edge is removed from the      */\
                /* path, or the path is empty.                        */\
                for (rbp_f_depth--; rbp_f_depth > 0; rbp_f_depth--) {   \
                    if (rbp_left_get(a_type, a_field,                   \
                      rbp_f_path[rbp_f_depth-1])                        \
                      == rbp_f_path[rbp_f_depth]) {                     \
                        break;                                          \
                    }                                                   \
                }                                                       \
            }                                                           \
        } /* close brace C */                                           \
    } /* close brace B */                                               \
} /* close brace A */

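/*
 * Illustrative example (using the hypothetical types sketched above): a
 * read-only in-order traversal that does not modify the tree, so no
 * rb_foreach_next() resynchronization is needed:
 *
 *   my_node_t *node;
 *   rb_foreach_begin(my_node_t, link, &tree, node) {
 *       printf("%d\n", node->key);
 *   } rb_foreach_end(my_node_t, link, &tree, node)
 */
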
#define rb_foreach_reverse_begin(a_type, a_field, a_tree, a_var) { /* brace A */ \
    /* Compute the maximum possible tree depth (3X the black height). */\
    unsigned rbp_fr_height;                                             \
    rbp_black_height(a_type, a_field, a_tree, rbp_fr_height);           \
    rbp_fr_height *= 3;                                                 \
    { /* brace B */                                                     \
        /* Initialize the path to contain the right spine.            */\
        a_type *rbp_fr_path[rbp_fr_height];                             \
        a_type *rbp_fr_node;                                            \
        bool rbp_fr_synced = false;                                     \
        unsigned rbp_fr_depth = 0;                                      \
        if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) {                 \
            rbp_fr_path[rbp_fr_depth] = (a_tree)->rbt_root;             \
            rbp_fr_depth++;                                             \
            while ((rbp_fr_node = rbp_right_get(a_type, a_field,        \
              rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) {    \
                rbp_fr_path[rbp_fr_depth] = rbp_fr_node;                \
                rbp_fr_depth++;                                         \
            }                                                           \
        }                                                               \
        /* While the path is non-empty, iterate. */                     \
        while (rbp_fr_depth > 0) { /* brace C */                        \
            (a_var) = rbp_fr_path[rbp_fr_depth-1];

/* Only use if modifying the tree during iteration. */
#define rb_foreach_reverse_prev(a_type, a_field, a_cmp, a_tree, a_node) \
            /* Re-initialize the path to contain the path to a_node.  */\
            rbp_fr_depth = 0;                                           \
            if (a_node != NULL) {                                       \
                if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) {         \
                    rbp_fr_path[rbp_fr_depth] = (a_tree)->rbt_root;     \
                    rbp_fr_depth++;                                     \
                    rbp_fr_node = rbp_fr_path[0];                       \
                    while (true) {                                      \
                        int rbp_fr_cmp = (a_cmp)((a_node),              \
                          rbp_fr_path[rbp_fr_depth-1]);                 \
                        if (rbp_fr_cmp < 0) {                           \
                            rbp_fr_node = rbp_left_get(a_type, a_field, \
                              rbp_fr_path[rbp_fr_depth-1]);             \
                        } else if (rbp_fr_cmp > 0) {                    \
                            rbp_fr_node = rbp_right_get(a_type, a_field,\
                              rbp_fr_path[rbp_fr_depth-1]);             \
                        } else {                                        \
                            break;                                      \
                        }                                               \
                        assert(rbp_fr_node != &(a_tree)->rbt_nil);      \
                        rbp_fr_path[rbp_fr_depth] = rbp_fr_node;        \
                        rbp_fr_depth++;                                 \
                    }                                                   \
                }                                                       \
            }                                                           \
            rbp_fr_synced = true;

#define rb_foreach_reverse_end(a_type, a_field, a_tree, a_var)          \
            if (rbp_fr_synced) {                                        \
                rbp_fr_synced = false;                                  \
                continue;                                               \
            }                                                           \
            if (rbp_fr_depth == 0) {                                    \
                /* rb_foreach_reverse_sync() was called with a NULL   */\
                /* a_node.                                            */\
                break;                                                  \
            }                                                           \
            /* Find the predecessor.                                   */\
            if ((rbp_fr_node = rbp_left_get(a_type, a_field,            \
              rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) {    \
                /* The predecessor is the right-most node in the left */\
                /* subtree.                                           */\
                rbp_fr_path[rbp_fr_depth] = rbp_fr_node;                \
                rbp_fr_depth++;                                         \
                while ((rbp_fr_node = rbp_right_get(a_type, a_field,    \
                  rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) {\
                    rbp_fr_path[rbp_fr_depth] = rbp_fr_node;            \
                    rbp_fr_depth++;                                     \
                }                                                       \
            } else {                                                    \
                /* The predecessor is above the current node.  Unwind */\
                /* until a right-leaning edge is removed from the     */\
                /* path, or the path is empty.                        */\
                for (rbp_fr_depth--; rbp_fr_depth > 0; rbp_fr_depth--) {\
                    if (rbp_right_get(a_type, a_field,                  \
                      rbp_fr_path[rbp_fr_depth-1])                      \
                      == rbp_fr_path[rbp_fr_depth]) {                   \
                        break;                                          \
                    }                                                   \
                }                                                       \
            }                                                           \
        } /* close brace C */                                           \
    } /* close brace B */                                               \
} /* close brace A */