/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1988, 1989, 1993
 *  The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *  This product includes software developed by the University of
 *  California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *  @(#)radix.c 8.4 (Berkeley) 11/2/94
 * $FreeBSD: src/sys/net/radix.c,v 1.20.2.2 2001/03/06 00:56:50 obrien Exp $
 */

/*
 * Routines to build and maintain radix trees for routing lookups.
 */
#ifndef _RADIX_H_
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#define M_DONTWAIT M_NOWAIT
#include <sys/domain.h>
#include <sys/syslog.h>
#include <net/radix.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <kern/locks.h>
#endif

static int rn_walktree_from(struct radix_node_head *h, void *a,
    void *m, walktree_f_t *f, void *w);
static int rn_walktree(struct radix_node_head *, walktree_f_t *, void *);
static struct radix_node
*rn_insert(void *, struct radix_node_head *, int *,
    struct radix_node[2]),
*rn_newpair(void *, int, struct radix_node[2]),
*rn_search(void *, struct radix_node *),
*rn_search_m(void *, struct radix_node *, void *);

static int max_keylen;
static struct radix_mask *rn_mkfreelist;
static struct radix_node_head *mask_rnhead;
static char *addmask_key;
static char normal_chars[] = {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1};
static char *rn_zeros, *rn_ones;


#define rn_masktop (mask_rnhead->rnh_treetop)
#undef Bcmp
#define Bcmp(a, b, l) \
    (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (uint32_t)l))

static int rn_lexobetter(void *m_arg, void *n_arg);
static struct radix_mask *
rn_new_radix_mask(struct radix_node *tt,
    struct radix_mask *next);
static int rn_satisfies_leaf(char *trial, struct radix_node *leaf, int skip,
    rn_matchf_t *f, void *w);

#define RN_MATCHF(rn, f, arg)   (f == NULL || (*f)((rn), arg))

/*
 * The data structure for the keys is a radix tree with one way
 * branching removed.  The index rn_bit at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_bit - 1.
 * (We say the index of n is rn_bit.)
 *
 * There is at least one descendant which has a one bit at position rn_bit,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bit-wise logical and of the key and mask be the key.
 * We define the index of the route associated with a mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit is 0 past the index of the mask.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_bit,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_bit, this implies that the trailing few bits of k
 * before bit rn_bit are all 0 (and hence the same is true of every
 * descendant of n), so the route applies to all descendants of the node
 * as well.
 *
 * Similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 *
 * The present version of the code makes use of normal routes in short-
 * circuiting an explicit mask and compare operation when testing whether
 * a key satisfies a normal route, and also in remembering the unique leaf
 * that governs a subtree.
 */

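/*
 * Worked example (illustrative addition, not part of the original sources):
 * for the IPv4 mask 255.255.255.0 the first zero bit is bit 24 (bit 0 being
 * the high-order bit), so index(mask) == 24, and every bit past bit 24 is
 * also zero, so the mask is "normal".  A non-contiguous mask such as
 * 255.0.255.0 has index 8, but one bits appear after bit 8, so it is not
 * normal; such masks must live on an rn_mklist and be checked with an
 * explicit mask-and-compare.
 */
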
static struct radix_node *
rn_search(void *v_arg, struct radix_node *head)
{
    struct radix_node *x;
    caddr_t v;

    for (x = head, v = v_arg; x->rn_bit >= 0;) {
        if (x->rn_bmask & v[x->rn_offset]) {
            x = x->rn_right;
        } else {
            x = x->rn_left;
        }
    }
    return x;
}

static struct radix_node *
rn_search_m(void *v_arg, struct radix_node *head, void *m_arg)
{
    struct radix_node *x;
    caddr_t v = v_arg, m = m_arg;

    for (x = head; x->rn_bit >= 0;) {
        if ((x->rn_bmask & m[x->rn_offset]) &&
            (x->rn_bmask & v[x->rn_offset])) {
            x = x->rn_right;
        } else {
            x = x->rn_left;
        }
    }
    return x;
}

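/*
 * Added summary (not in the original sources): rn_refines(m, n) returns
 * non-zero when mask m is strictly more specific than mask n, i.e. every
 * bit set in n is also set in m and the two masks differ; otherwise it
 * returns zero.
 */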
int
rn_refines(void *m_arg, void *n_arg)
{
    caddr_t m = m_arg, n = n_arg;
    caddr_t lim, lim2 = lim = n + *(u_char *)n;
    int longer = (*(u_char *)n++) - (int)(*(u_char *)m++);
    int masks_are_equal = 1;

    if (longer > 0) {
        lim -= longer;
    }
    while (n < lim) {
        if (*n & ~(*m)) {
            return 0;
        }
        if (*n++ != *m++) {
            masks_are_equal = 0;
        }
    }
    while (n < lim2) {
        if (*n++) {
            return 0;
        }
    }
    if (masks_are_equal && (longer < 0)) {
        for (lim2 = m - longer; m < lim2;) {
            if (*m++) {
                return 1;
            }
        }
    }
    return !masks_are_equal;
}

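/*
 * Added summary (not in the original sources): unlike rn_match(), which
 * finds the best route for an address, rn_lookup() looks up the exact
 * entry that was inserted with key v_arg and mask m_arg, walking the
 * duped-key chain for the canonical mask pointer; when m_arg is NULL it
 * degenerates to rn_match().
 */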
struct radix_node *
rn_lookup(void *v_arg, void *m_arg, struct radix_node_head *head)
{
    return rn_lookup_args(v_arg, m_arg, head, NULL, NULL);
}

struct radix_node *
rn_lookup_args(void *v_arg, void *m_arg, struct radix_node_head *head,
    rn_matchf_t *f, void *w)
{
    struct radix_node *x;
    caddr_t netmask = NULL;

    if (m_arg) {
        x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_offset);
        if (x == 0) {
            return NULL;
        }
        netmask = x->rn_key;
    }
    x = rn_match_args(v_arg, head, f, w);
    if (x && netmask) {
        while (x && x->rn_mask != netmask) {
            x = x->rn_dupedkey;
        }
    }
    return x;
}

/*
 * Returns true if address 'trial' has no bits differing from the
 * leaf's key when compared under the leaf's mask.  In other words,
 * returns true when 'trial' matches leaf.  If a leaf-matching
 * routine is passed in, it is also used to find a match on the
 * conditions defined by the caller of rn_match.
 */
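/*
 * Illustrative example (added, not part of the original sources): with a
 * leaf whose key is 10.1.2.0 and whose mask is 255.255.255.0, the trial
 * address 10.1.2.57 satisfies the leaf, since (trial ^ key) & mask is
 * zero in every byte examined; 10.1.3.57 does not.
 */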
static int
rn_satisfies_leaf(char *trial, struct radix_node *leaf, int skip,
    rn_matchf_t *f, void *w)
{
    char *cp = trial, *cp2 = leaf->rn_key, *cp3 = leaf->rn_mask;
    char *cplim;
    int length = min(*(u_char *)cp, *(u_char *)cp2);

    if (cp3 == 0) {
        cp3 = rn_ones;
    } else {
        length = min(length, *(u_char *)cp3);
    }
    cplim = cp + length; cp3 += skip; cp2 += skip;
    for (cp += skip; cp < cplim; cp++, cp2++, cp3++) {
        if ((*cp ^ *cp2) & *cp3) {
            return 0;
        }
    }

    return RN_MATCHF(leaf, f, w);
}

struct radix_node *
rn_match(void *v_arg, struct radix_node_head *head)
{
    return rn_match_args(v_arg, head, NULL, NULL);
}

struct radix_node *
rn_match_args(void *v_arg, struct radix_node_head *head,
    rn_matchf_t *f, void *w)
{
    caddr_t v = v_arg;
    struct radix_node *t = head->rnh_treetop, *x;
    caddr_t cp = v, cp2;
    caddr_t cplim;
    struct radix_node *saved_t, *top = t;
    int off = t->rn_offset, vlen = *(u_char *)cp, matched_off;
    int test, b, rn_bit;

    /*
     * Open code rn_search(v, top) to avoid overhead of extra
     * subroutine call.
     */
    for (; t->rn_bit >= 0;) {
        if (t->rn_bmask & cp[t->rn_offset]) {
            t = t->rn_right;
        } else {
            t = t->rn_left;
        }
    }
    /*
     * See if we match exactly as a host destination
     * or at least learn how many bits match, for normal mask finesse.
     *
     * It doesn't hurt us to limit how many bytes to check
     * to the length of the mask, since if it matches we had a genuine
     * match and the leaf we have is the most specific one anyway;
     * if it didn't match with a shorter length it would fail
     * with a long one.  This wins big for class B&C netmasks which
     * are probably the most common case...
     */
    if (t->rn_mask) {
        vlen = *(u_char *)t->rn_mask;
    }
    cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
    for (; cp < cplim; cp++, cp2++) {
        if (*cp != *cp2) {
            goto on1;
        }
    }
    /*
     * This extra grot is in case we are explicitly asked
     * to look up the default.  Ugh!
     *
     * Never return the root node itself, it seems to cause a
     * lot of confusion.
     */
    if (t->rn_flags & RNF_ROOT) {
        t = t->rn_dupedkey;
    }
    if (t == NULL || RN_MATCHF(t, f, w)) {
        return t;
    } else {
        /*
         * Although we found an exact match on the key,
         * f() is looking for some other criteria as well.
         * Continue looking as if the exact match failed.
         */
        if (t->rn_parent->rn_flags & RNF_ROOT) {
            /* Hit the top; have to give up */
            return NULL;
        }
        b = 0;
        goto keeplooking;
    }
on1:
    test = (*cp ^ *cp2) & 0xff; /* find first bit that differs */
    for (b = 7; (test >>= 1) > 0;) {
        b--;
    }
keeplooking:
    matched_off = cp - v;
    b += matched_off << 3;
    rn_bit = -1 - b;
    /*
     * If there is a host route in a duped-key chain, it will be first.
     */
    if ((saved_t = t)->rn_mask == 0) {
        t = t->rn_dupedkey;
    }
    for (; t; t = t->rn_dupedkey) {
        /*
         * Even if we don't match exactly as a host,
         * we may match if the leaf we wound up at is
         * a route to a net.
         */
        if (t->rn_flags & RNF_NORMAL) {
            if ((rn_bit <= t->rn_bit) && RN_MATCHF(t, f, w)) {
                return t;
            }
        } else if (rn_satisfies_leaf(v, t, matched_off, f, w)) {
            return t;
        }
    }
    t = saved_t;
    /* start searching up the tree */
    do {
        struct radix_mask *m;
        t = t->rn_parent;
        m = t->rn_mklist;
        /*
         * If non-contiguous masks ever become important
         * we can restore the masking and open coding of
         * the search and satisfaction test and put the
         * calculation of "off" back before the "do".
         */
        while (m) {
            if (m->rm_flags & RNF_NORMAL) {
                if ((rn_bit <= m->rm_bit) &&
                    RN_MATCHF(m->rm_leaf, f, w)) {
                    return m->rm_leaf;
                }
            } else {
                off = min(t->rn_offset, matched_off);
                x = rn_search_m(v, t, m->rm_mask);
                while (x && x->rn_mask != m->rm_mask) {
                    x = x->rn_dupedkey;
                }
                if (x && rn_satisfies_leaf(v, x, off, f, w)) {
                    return x;
                }
            }
            m = m->rm_mklist;
        }
    } while (t != top);
    return NULL;
}

#ifdef RN_DEBUG
int rn_nodenum;
struct radix_node *rn_clist;
int rn_saveinfo;
int rn_debug = 1;
#endif

static struct radix_node *
rn_newpair(void *v, int b, struct radix_node nodes[2])
{
    struct radix_node *tt = nodes, *t = tt + 1;
    t->rn_bit = b;
    t->rn_bmask = 0x80 >> (b & 7);
    t->rn_left = tt;
    t->rn_offset = b >> 3;
    tt->rn_bit = -1;
    tt->rn_key = (caddr_t)v;
    tt->rn_parent = t;
    tt->rn_flags = t->rn_flags = RNF_ACTIVE;
    tt->rn_mklist = t->rn_mklist = NULL;
#ifdef RN_DEBUG
    tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
    tt->rn_twin = t;
    tt->rn_ybro = rn_clist;
    rn_clist = tt;
#endif
    return t;
}

static struct radix_node *
rn_insert(void *v_arg, struct radix_node_head *head, int *dupentry,
    struct radix_node nodes[2])
{
    caddr_t v = v_arg;
    struct radix_node *top = head->rnh_treetop;
    int head_off = top->rn_offset, vlen = (int)*((u_char *)v);
    struct radix_node *t = rn_search(v_arg, top);
    caddr_t cp = v + head_off;
    int b;
    struct radix_node *tt;
    /*
     * Find first bit at which v and t->rn_key differ
     */
    {
        caddr_t cp2 = t->rn_key + head_off;
        int cmp_res;
        caddr_t cplim = v + vlen;

        while (cp < cplim) {
            if (*cp2++ != *cp++) {
                goto on1;
            }
        }
        *dupentry = 1;
        return t;
on1:
        *dupentry = 0;
        cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
        for (b = (cp - v) << 3; cmp_res; b--) {
            cmp_res >>= 1;
        }
    }
    {
        struct radix_node *p, *x = top;
        cp = v;
        do {
            p = x;
            if (cp[x->rn_offset] & x->rn_bmask) {
                x = x->rn_right;
            } else {
                x = x->rn_left;
            }
        } while (b > (unsigned) x->rn_bit);
        /* x->rn_bit < b && x->rn_bit >= 0 */
#ifdef RN_DEBUG
        if (rn_debug) {
            log(LOG_DEBUG, "rn_insert: Going In:\n"), traverse(p);
        }
#endif
        t = rn_newpair(v_arg, b, nodes);
        tt = t->rn_left;
        if ((cp[p->rn_offset] & p->rn_bmask) == 0) {
            p->rn_left = t;
        } else {
            p->rn_right = t;
        }
        x->rn_parent = t;
        t->rn_parent = p; /* frees x, p as temp vars below */
        if ((cp[t->rn_offset] & t->rn_bmask) == 0) {
            t->rn_right = x;
        } else {
            t->rn_right = tt;
            t->rn_left = x;
        }
#ifdef RN_DEBUG
        if (rn_debug) {
            log(LOG_DEBUG, "rn_insert: Coming Out:\n"), traverse(p);
        }
#endif
    }
    return tt;
}

struct radix_node *
rn_addmask(void *n_arg, int search, int skip)
{
    caddr_t netmask = (caddr_t)n_arg;
    struct radix_node *x;
    caddr_t cp, cplim;
    int b = 0, mlen, j;
    int maskduplicated, m0, isnormal;
    struct radix_node *saved_x;
    static int last_zeroed = 0;

    if ((mlen = *(u_char *)netmask) > max_keylen) {
        mlen = max_keylen;
    }
    if (skip == 0) {
        skip = 1;
    }
    if (mlen <= skip) {
        return mask_rnhead->rnh_nodes;
    }
    if (skip > 1) {
        Bcopy(rn_ones + 1, addmask_key + 1, skip - 1);
    }
    if ((m0 = mlen) > skip) {
        Bcopy(netmask + skip, addmask_key + skip, mlen - skip);
    }
    /*
     * Trim trailing zeroes.
     */
    for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;) {
        cp--;
    }
    mlen = cp - addmask_key;
    if (mlen <= skip) {
        if (m0 >= last_zeroed) {
            last_zeroed = mlen;
        }
        return mask_rnhead->rnh_nodes;
    }
    if (m0 < last_zeroed) {
        Bzero(addmask_key + m0, last_zeroed - m0);
    }
    *addmask_key = last_zeroed = mlen;
    x = rn_search(addmask_key, rn_masktop);
    if (Bcmp(addmask_key, x->rn_key, mlen) != 0) {
        x = NULL;
    }
    if (x || search) {
        return x;
    }
    R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof(*x));
    if ((saved_x = x) == 0) {
        return NULL;
    }
    Bzero(x, max_keylen + 2 * sizeof(*x));
    netmask = cp = (caddr_t)(x + 2);
    Bcopy(addmask_key, cp, mlen);
    x = rn_insert(cp, mask_rnhead, &maskduplicated, x);
    if (maskduplicated) {
        log(LOG_ERR, "rn_addmask: mask impossibly already in tree");
        R_Free(saved_x);
        return x;
    }
    mask_rnhead->rnh_cnt++;
    /*
     * Calculate index of mask, and check for normalcy.
     */
    cplim = netmask + mlen; isnormal = 1;
    for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff;) {
        cp++;
    }
    if (cp != cplim) {
        for (j = 0x80; (j & *cp) != 0; j >>= 1) {
            b++;
        }
        if (*cp != normal_chars[b] || cp != (cplim - 1)) {
            isnormal = 0;
        }
    }
    b += (cp - netmask) << 3;
    x->rn_bit = -1 - b;
    if (isnormal) {
        x->rn_flags |= RNF_NORMAL;
    }
    return x;
}

/* XXX: arbitrary ordering for non-contiguous masks */
static int
rn_lexobetter(void *m_arg, void *n_arg)
{
    u_char *mp = m_arg, *np = n_arg, *lim;

    if (*mp > *np) {
        return 1; /* not really, but need to check longer one first */
    }
    if (*mp == *np) {
        for (lim = mp + *mp; mp < lim;) {
            if (*mp++ > *np++) {
                return 1;
            }
        }
    }
    return 0;
}

static struct radix_mask *
rn_new_radix_mask(struct radix_node *tt, struct radix_mask *next)
{
    struct radix_mask *m;

    MKGet(m);
    if (m == 0) {
        log(LOG_ERR, "Mask for route not entered\n");
        return NULL;
    }
    Bzero(m, sizeof *m);
    m->rm_bit = tt->rn_bit;
    m->rm_flags = tt->rn_flags;
    if (tt->rn_flags & RNF_NORMAL) {
        m->rm_leaf = tt;
    } else {
        m->rm_mask = tt->rn_mask;
    }
    m->rm_mklist = next;
    tt->rn_mklist = m;
    return m;
}

struct radix_node *
rn_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
    struct radix_node treenodes[2])
{
    caddr_t v = (caddr_t)v_arg, netmask = (caddr_t)n_arg;
    struct radix_node *t, *x = NULL, *tt;
    struct radix_node *saved_tt, *top = head->rnh_treetop;
    short b = 0, b_leaf = 0;
    int keyduplicated;
    caddr_t mmask;
    struct radix_mask *m, **mp;

    /*
     * In dealing with non-contiguous masks, there may be
     * many different routes which have the same mask.
     * We will find it useful to have a unique pointer to
     * the mask to speed avoiding duplicate references at
     * nodes and possibly save time in calculating indices.
     */
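    /*
     * Illustrative note (added): because rn_addmask() hands back the same
     * radix_node for every route that uses a given mask, e.g. for both
     * 10.0.0.0/8 and 192.0.0.0/8, netmask pointers can be compared with
     * == throughout this file instead of byte-by-byte.
     */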
    if (netmask) {
        if ((x = rn_addmask(netmask, 0, top->rn_offset)) == 0) {
            return NULL;
        }
        b_leaf = x->rn_bit;
        b = -1 - x->rn_bit;
        netmask = x->rn_key;
    }
    /*
     * Deal with duplicated keys: attach node to previous instance
     */
    saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
    if (keyduplicated) {
        for (t = tt; tt; t = tt, tt = tt->rn_dupedkey) {
            if (tt->rn_mask == netmask) {
                return NULL;
            }
            if (netmask == 0 ||
                (tt->rn_mask &&
                ((b_leaf < tt->rn_bit) /* index(netmask) > node */
                || rn_refines(netmask, tt->rn_mask)
                || rn_lexobetter(netmask, tt->rn_mask)))) {
                break;
            }
        }
        /*
         * If the mask is not duplicated, we wouldn't
         * find it among possible duplicate key entries
         * anyway, so the above test doesn't hurt.
         *
         * We sort the masks for a duplicated key the same way as
         * in a masklist -- most specific to least specific.
         * This may require the unfortunate nuisance of relocating
         * the head of the list.
         */
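        /*
         * Illustrative note (added): if 10.1.0.0/24 and 10.1.0.0/16 share
         * the key 10.1.0.0, the more specific /24 leaf is kept ahead of the
         * /16 leaf on the rn_dupedkey chain, with a host route for
         * 10.1.0.0, if any, ahead of both.
         */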
        if (tt == saved_tt) {
            struct radix_node *xx = x;
            /* link in at head of list */
            (tt = treenodes)->rn_dupedkey = t;
            tt->rn_flags = t->rn_flags;
            tt->rn_parent = x = t->rn_parent;
            t->rn_parent = tt; /* parent */
            if (x->rn_left == t) {
                x->rn_left = tt;
            } else {
                x->rn_right = tt;
            }
            saved_tt = tt; x = xx;
        } else {
            (tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
            t->rn_dupedkey = tt;
            tt->rn_parent = t; /* parent */
            if (tt->rn_dupedkey) { /* parent */
                tt->rn_dupedkey->rn_parent = tt; /* parent */
            }
        }
#ifdef RN_DEBUG
        t = tt + 1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
        tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
        tt->rn_key = (caddr_t) v;
        tt->rn_bit = -1;
        tt->rn_flags = RNF_ACTIVE;
    }
    head->rnh_cnt++;
    /*
     * Put mask in tree.
     */
    if (netmask) {
        tt->rn_mask = netmask;
        tt->rn_bit = x->rn_bit;
        tt->rn_flags |= x->rn_flags & RNF_NORMAL;
    }
    t = saved_tt->rn_parent;
    if (keyduplicated) {
        goto on2;
    }
    b_leaf = -1 - t->rn_bit;
    if (t->rn_right == saved_tt) {
        x = t->rn_left;
    } else {
        x = t->rn_right;
    }
    /* Promote general routes from below */
    if (x->rn_bit < 0) {
        for (mp = &t->rn_mklist; x; x = x->rn_dupedkey) {
            if (x->rn_mask && (x->rn_bit >= b_leaf) && x->rn_mklist == 0) {
                *mp = m = rn_new_radix_mask(x, NULL);
                if (m) {
                    mp = &m->rm_mklist;
                }
            }
        }
    } else if (x->rn_mklist) {
        /*
         * Skip over masks whose index is > that of new node
         */
        for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) {
            if (m->rm_bit >= b_leaf) {
                break;
            }
        }
        t->rn_mklist = m; *mp = NULL;
    }
on2:
    /* Add new route to highest possible ancestor's list */
    if ((netmask == 0) || (b > t->rn_bit)) {
        return tt; /* can't lift at all */
    }
    b_leaf = tt->rn_bit;
    do {
        x = t;
        t = t->rn_parent;
    } while (b <= t->rn_bit && x != top);
    /*
     * Search through routes associated with node to
     * insert new route according to index.
     * Need same criteria as when sorting dupedkeys to avoid
     * double loop on deletion.
     */
    for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) {
        if (m->rm_bit < b_leaf) {
            continue;
        }
        if (m->rm_bit > b_leaf) {
            break;
        }
        if (m->rm_flags & RNF_NORMAL) {
            mmask = m->rm_leaf->rn_mask;
            if (tt->rn_flags & RNF_NORMAL) {
                log(LOG_ERR,
                    "Non-unique normal route, mask not entered");
                return tt;
            }
        } else {
            mmask = m->rm_mask;
        }
        if (mmask == netmask) {
            m->rm_refs++;
            tt->rn_mklist = m;
            return tt;
        }
        if (rn_refines(netmask, mmask)
            || rn_lexobetter(netmask, mmask)) {
            break;
        }
    }
    *mp = rn_new_radix_mask(tt, *mp);
    return tt;
}

struct radix_node *
rn_delete(void *v_arg, void *netmask_arg, struct radix_node_head *head)
{
    struct radix_node *t, *p, *x, *tt;
    struct radix_mask *m, *saved_m, **mp;
    struct radix_node *dupedkey, *saved_tt, *top;
    caddr_t v, netmask;
    int b, head_off, vlen;

    v = v_arg;
    netmask = netmask_arg;
    x = head->rnh_treetop;
    tt = rn_search(v, x);
    head_off = x->rn_offset;
    vlen = *(u_char *)v;
    saved_tt = tt;
    top = x;
    if (tt == 0 ||
        Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off)) {
        return NULL;
    }
    /*
     * Delete our route from mask lists.
     */
    if (netmask) {
        if ((x = rn_addmask(netmask, 1, head_off)) == 0) {
            return NULL;
        }
        netmask = x->rn_key;
        while (tt->rn_mask != netmask) {
            if ((tt = tt->rn_dupedkey) == 0) {
                return NULL;
            }
        }
    }
    if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0) {
        goto on1;
    }
    if (tt->rn_flags & RNF_NORMAL) {
        if (m->rm_leaf != tt || m->rm_refs > 0) {
            log(LOG_ERR, "rn_delete: inconsistent annotation\n");
            return NULL; /* dangling ref could cause disaster */
        }
    } else {
        if (m->rm_mask != tt->rn_mask) {
            log(LOG_ERR, "rn_delete: inconsistent annotation\n");
            goto on1;
        }
        if (--m->rm_refs >= 0) {
            goto on1;
        }
    }
    b = -1 - tt->rn_bit;
    t = saved_tt->rn_parent;
    if (b > t->rn_bit) {
        goto on1; /* Wasn't lifted at all */
    }
    do {
        x = t;
        t = t->rn_parent;
    } while (b <= t->rn_bit && x != top);
    for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) {
        if (m == saved_m) {
            *mp = m->rm_mklist;
            MKFree(m);
            break;
        }
    }
    if (m == 0) {
        log(LOG_ERR, "rn_delete: couldn't find our annotation\n");
        if (tt->rn_flags & RNF_NORMAL) {
            return NULL; /* Dangling ref to us */
        }
    }
on1:
    /*
     * Eliminate us from tree
     */
    if (tt->rn_flags & RNF_ROOT) {
        return NULL;
    }
    head->rnh_cnt--;
#ifdef RN_DEBUG
    /* Get us out of the creation list */
    for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {
    }
    if (t) {
        t->rn_ybro = tt->rn_ybro;
    }
#endif
    t = tt->rn_parent;
    dupedkey = saved_tt->rn_dupedkey;
    if (dupedkey) {
        /*
         * at this point, tt is the deletion target and saved_tt
         * is the head of the dupekey chain
         */
        if (tt == saved_tt) {
            /* remove from head of chain */
            x = dupedkey; x->rn_parent = t;
            if (t->rn_left == tt) {
                t->rn_left = x;
            } else {
                t->rn_right = x;
            }
        } else {
            /* find node in front of tt on the chain */
            for (x = p = saved_tt; p && p->rn_dupedkey != tt;) {
                p = p->rn_dupedkey;
            }
            if (p) {
                p->rn_dupedkey = tt->rn_dupedkey;
                if (tt->rn_dupedkey) { /* parent */
                    tt->rn_dupedkey->rn_parent = p;
                }
                /* parent */
            } else {
                log(LOG_ERR, "rn_delete: couldn't find us\n");
            }
        }
        t = tt + 1;
        if (t->rn_flags & RNF_ACTIVE) {
#ifndef RN_DEBUG
            *++x = *t;
            p = t->rn_parent;
#else
            b = t->rn_info;
            *++x = *t;
            t->rn_info = b;
            p = t->rn_parent;
#endif
            if (p->rn_left == t) {
                p->rn_left = x;
            } else {
                p->rn_right = x;
            }
            x->rn_left->rn_parent = x;
            x->rn_right->rn_parent = x;
        }
        goto out;
    }
    if (t->rn_left == tt) {
        x = t->rn_right;
    } else {
        x = t->rn_left;
    }
    p = t->rn_parent;
    if (p->rn_right == t) {
        p->rn_right = x;
    } else {
        p->rn_left = x;
    }
    x->rn_parent = p;
    /*
     * Demote routes attached to us.
     */
    if (t->rn_mklist) {
        if (x->rn_bit >= 0) {
            for (mp = &x->rn_mklist; (m = *mp);) {
                mp = &m->rm_mklist;
            }
            *mp = t->rn_mklist;
        } else {
            /* If there are any key,mask pairs in a sibling
             * duped-key chain, some subset will appear sorted
             * in the same order attached to our mklist */
            for (m = t->rn_mklist; m && x; x = x->rn_dupedkey) {
                if (m == x->rn_mklist) {
                    struct radix_mask *mm = m->rm_mklist;
                    x->rn_mklist = NULL;
                    if (--(m->rm_refs) < 0) {
                        MKFree(m);
                    }
                    m = mm;
                }
            }
            if (m) {
                log(LOG_ERR, "rn_delete: Orphaned Mask "
                    "0x%llx at 0x%llx\n",
                    (uint64_t)VM_KERNEL_ADDRPERM(m),
                    (uint64_t)VM_KERNEL_ADDRPERM(x));
            }
        }
    }
    /*
     * We may be holding an active internal node in the tree.
     */
    x = tt + 1;
    if (t != x) {
#ifndef RN_DEBUG
        *t = *x;
#else
        b = t->rn_info;
        *t = *x;
        t->rn_info = b;
#endif
        t->rn_left->rn_parent = t;
        t->rn_right->rn_parent = t;
        p = x->rn_parent;
        if (p->rn_left == x) {
            p->rn_left = t;
        } else {
            p->rn_right = t;
        }
    }
out:
    tt->rn_flags &= ~RNF_ACTIVE;
    tt[1].rn_flags &= ~RNF_ACTIVE;
    return tt;
}

/*
 * This is the same as rn_walktree() except for the parameters and the
 * exit.
 */
static int
rn_walktree_from(struct radix_node_head *h, void *a, void *m, walktree_f_t *f,
    void *w)
{
    int error;
    struct radix_node *base, *next;
    u_char *xa = (u_char *)a;
    u_char *xm = (u_char *)m;
    struct radix_node *rn, *last;
    int stopping;
    int lastb;
    int rnh_cnt;

    /*
     * This gets complicated because we may delete the node while
     * applying the function f to it; we cannot simply use the next
     * leaf as the successor node in advance, because that leaf may
     * be removed as well during deletion when it is a clone of the
     * current node.  When that happens, we would end up referring
     * to an already-freed radix node as the successor node.  To get
     * around this issue, if we detect that the radix tree has changed
     * in dimension (smaller than before), we simply restart the walk
     * from the top of tree.
     */
restart:
    last = NULL;
    stopping = 0;
    rnh_cnt = h->rnh_cnt;

    /*
     * rn_search_m is sort-of-open-coded here.
     */
    for (rn = h->rnh_treetop; rn->rn_bit >= 0;) {
        last = rn;
        if (!(rn->rn_bmask & xm[rn->rn_offset])) {
            break;
        }

        if (rn->rn_bmask & xa[rn->rn_offset]) {
            rn = rn->rn_right;
        } else {
            rn = rn->rn_left;
        }
    }

    /*
     * Two cases: either we stepped off the end of our mask,
     * in which case last == rn, or we reached a leaf, in which
     * case we want to start from the last node we looked at.
     * Either way, last is the node we want to start from.
     */
    rn = last;
    lastb = rn->rn_bit;

    /* First time through node, go left */
    while (rn->rn_bit >= 0) {
        rn = rn->rn_left;
    }

    while (!stopping) {
        base = rn;
        /* If at right child go back up, otherwise, go right */
        while (rn->rn_parent->rn_right == rn
            && !(rn->rn_flags & RNF_ROOT)) {
            rn = rn->rn_parent;

            /* if went up beyond last, stop */
            if (rn->rn_bit <= lastb) {
                stopping = 1;
                /*
                 * XXX we should jump to the 'Process leaves'
                 * part, because the values of 'rn' and 'next'
                 * we compute will not be used. Not a big deal
                 * because this loop will terminate, but it is
                 * inefficient and hard to understand!
                 */
            }
        }

        /*
         * The following code (bug fix) inherited from FreeBSD is
         * currently disabled, because our implementation uses the
         * RTF_PRCLONING scheme that has been abandoned in current
         * FreeBSD release.  The scheme involves setting such a flag
         * for the default route entry, and therefore all off-link
         * destinations would become clones of that entry.  Enabling
         * the following code would be problematic at this point,
         * because the removal of the default route would cause only
         * the left half of the tree to be traversed, leaving the
         * right half untouched.  If there are clones of the entry
         * that reside in that right half, they would not be deleted
         * and would linger around until they expire or are explicitly
         * deleted, which is a very bad thing.
         *
         * This code should be uncommented only after we get rid
         * of the RTF_PRCLONING scheme.
         */
#if 0
        /*
         * At the top of the tree, no need to traverse the right
         * half, prevent the traversal of the entire tree in the
         * case of default route.
         */
        if (rn->rn_parent->rn_flags & RNF_ROOT) {
            stopping = 1;
        }
#endif

        /* Find the next *leaf* to start from */
        for (rn = rn->rn_parent->rn_right; rn->rn_bit >= 0;) {
            rn = rn->rn_left;
        }
        next = rn;
        /* Process leaves */
        while ((rn = base) != 0) {
            base = rn->rn_dupedkey;
            if (!(rn->rn_flags & RNF_ROOT)
                && (error = (*f)(rn, w))) {
                return error;
            }
        }
        /* If one or more nodes got deleted, restart from top */
        if (h->rnh_cnt < rnh_cnt) {
            goto restart;
        }
        rn = next;
        if (rn->rn_flags & RNF_ROOT) {
            stopping = 1;
        }
    }
    return 0;
}

static int
rn_walktree(struct radix_node_head *h, walktree_f_t *f, void *w)
{
    int error;
    struct radix_node *base, *next;
    struct radix_node *rn;
    int rnh_cnt;

    /*
     * This gets complicated because we may delete the node while
     * applying the function f to it; we cannot simply use the next
     * leaf as the successor node in advance, because that leaf may
     * be removed as well during deletion when it is a clone of the
     * current node.  When that happens, we would end up referring
     * to an already-freed radix node as the successor node.  To get
     * around this issue, if we detect that the radix tree has changed
     * in dimension (smaller than before), we simply restart the walk
     * from the top of tree.
     */
restart:
    rn = h->rnh_treetop;
    rnh_cnt = h->rnh_cnt;

    /* First time through node, go left */
    while (rn->rn_bit >= 0) {
        rn = rn->rn_left;
    }
    for (;;) {
        base = rn;
        /* If at right child go back up, otherwise, go right */
        while (rn->rn_parent->rn_right == rn &&
            (rn->rn_flags & RNF_ROOT) == 0) {
            rn = rn->rn_parent;
        }
        /* Find the next *leaf* to start from */
        for (rn = rn->rn_parent->rn_right; rn->rn_bit >= 0;) {
            rn = rn->rn_left;
        }
        next = rn;
        /* Process leaves */
        while ((rn = base) != NULL) {
            base = rn->rn_dupedkey;
            if (!(rn->rn_flags & RNF_ROOT)
                && (error = (*f)(rn, w))) {
                return error;
            }
        }
        /* If one or more nodes got deleted, restart from top */
        if (h->rnh_cnt < rnh_cnt) {
            goto restart;
        }
        rn = next;
        if (rn->rn_flags & RNF_ROOT) {
            return 0;
        }
    }
    /* NOTREACHED */
}

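/*
 * Illustrative sketch only, not part of the original sources: a minimal
 * walker callback of the walktree_f_t shape consumed by rn_walktree() and
 * rn_walktree_from() above.  The callback is applied to every non-root
 * leaf; a non-zero return value aborts the walk and is propagated back to
 * the caller, and deleting nodes from the callback is what forces the
 * restart logic described in the comments above.  The names
 * example_count_leaf and the counter argument are hypothetical.
 */
#if 0
static int
example_count_leaf(struct radix_node *rn, void *w)
{
    int *counter = (int *)w;

    (*counter)++;               /* rn is a leaf handed to us by the walk */
    return 0;                   /* zero means: keep walking */
}

/*
 * Possible use, given an initialized head rnh:
 *
 *      int n = 0;
 *      (void) rnh->rnh_walktree(rnh, example_count_leaf, &n);
 */
#endif
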
int
rn_inithead(void **head, int off)
{
    struct radix_node_head *rnh;
    struct radix_node *t, *tt, *ttt;
    if (*head) {
        return 1;
    }
    R_Malloc(rnh, struct radix_node_head *, sizeof(*rnh));
    if (rnh == 0) {
        return 0;
    }
    Bzero(rnh, sizeof(*rnh));
    *head = rnh;
    t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
    ttt = rnh->rnh_nodes + 2;
    t->rn_right = ttt;
    t->rn_parent = t;
    tt = t->rn_left;
    tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
    tt->rn_bit = -1 - off;
    *ttt = *tt;
    ttt->rn_key = rn_ones;
    rnh->rnh_addaddr = rn_addroute;
    rnh->rnh_deladdr = rn_delete;
    rnh->rnh_matchaddr = rn_match;
    rnh->rnh_matchaddr_args = rn_match_args;
    rnh->rnh_lookup = rn_lookup;
    rnh->rnh_lookup_args = rn_lookup_args;
    rnh->rnh_walktree = rn_walktree;
    rnh->rnh_walktree_from = rn_walktree_from;
    rnh->rnh_treetop = t;
    rnh->rnh_cnt = 3;
    return 1;
}

void
rn_init(void)
{
    char *cp, *cplim;
    struct domain *dom;

    /* lock already held when rn_init is called */
    TAILQ_FOREACH(dom, &domains, dom_entry) {
        if (dom->dom_maxrtkey > max_keylen) {
            max_keylen = dom->dom_maxrtkey;
        }
    }
    if (max_keylen == 0) {
        log(LOG_ERR,
            "rn_init: radix functions require max_keylen be set\n");
        return;
    }
    R_Malloc(rn_zeros, char *, 3 * max_keylen);
    if (rn_zeros == NULL) {
        panic("rn_init");
    }
    Bzero(rn_zeros, 3 * max_keylen);
    rn_ones = cp = rn_zeros + max_keylen;
    addmask_key = cplim = rn_ones + max_keylen;
    while (cp < cplim) {
        *cp++ = -1;
    }
    if (rn_inithead((void **)&mask_rnhead, 0) == 0) {
        panic("rn_init 2");
    }
}