1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1980, 1986, 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)route.c 8.2 (Berkeley) 11/15/93
61 * $FreeBSD: src/sys/net/route.c,v 1.59.2.3 2001/07/29 19:18:02 ume Exp $
62 */
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/malloc.h>
67 #include <sys/mbuf.h>
68 #include <sys/socket.h>
69 #include <sys/domain.h>
70 #include <sys/syslog.h>
71 #include <sys/queue.h>
72 #include <kern/lock.h>
73 #include <kern/zalloc.h>
74
75 #include <net/if.h>
76 #include <net/route.h>
77
78 #include <netinet/in.h>
79 #include <netinet/in_var.h>
80 #include <netinet/ip_mroute.h>
81 #include <netinet/ip_var.h>
82
83 #include <net/if_dl.h>
84
85 #include <libkern/OSAtomic.h>
86 #include <libkern/OSDebug.h>
87
88 #include <pexpert/pexpert.h>
89
90 /*
91 * Synchronization notes:
92 *
93 * Routing entries fall under two locking domains: the global routing table
94 * lock (rnh_lock) and the per-entry lock (rt_lock); the latter is a mutex that
95 * resides (statically defined) in the rtentry structure.
96 *
97 * The locking domains for routing are defined as follows:
98 *
99 * The global routing lock is used to serialize all accesses to the radix
100 * trees defined by rt_tables[], as well as the tree of masks. This includes
101 * lookups, insertions and removals of nodes to/from the respective tree.
102 * It is also used to protect certain fields in the route entry that aren't
103 * often modified and/or require global serialization (more details below.)
104 *
105 * The per-route entry lock is used to serialize accesses to several routing
106 * entry fields (more details below.) Acquiring and releasing this lock is
107 * done via RT_LOCK() and RT_UNLOCK() routines.
108 *
109 * In cases where both rnh_lock and rt_lock must be held, the former must be
110 * acquired first in order to maintain lock ordering. It is not a requirement
111 * that rnh_lock be held whenever rt_lock is taken, but when both must be
112 * acquired in succession, this ordering must be followed.
113 *
114 * The fields of the rtentry structure are protected in the following way:
115 *
116 * rt_nodes[]
117 *
118 * - Routing table lock (rnh_lock).
119 *
120 * rt_parent, rt_mask, rt_llinfo_free
121 *
122 * - Set once during creation and never changes; no locks to read.
123 *
124 * rt_flags, rt_genmask, rt_llinfo, rt_rmx, rt_refcnt, rt_gwroute
125 *
126 * - Routing entry lock (rt_lock) for read/write access.
127 *
128 * - Some values of rt_flags are either set once at creation time,
129 * or aren't currently used, and thus checking against them can
130 * be done without rt_lock: RTF_GATEWAY, RTF_HOST, RTF_DYNAMIC,
131 * RTF_DONE, RTF_XRESOLVE, RTF_STATIC, RTF_BLACKHOLE, RTF_ANNOUNCE,
132 * RTF_USETRAILERS, RTF_WASCLONED, RTF_PINNED, RTF_LOCAL,
133 * RTF_BROADCAST, RTF_MULTICAST, RTF_IFSCOPE.
134 *
135 * rt_key, rt_gateway, rt_ifp, rt_ifa
136 *
137 * - Always written/modified with both rnh_lock and rt_lock held.
138 *
139 * - May be read freely with rnh_lock held, else must hold rt_lock
140 * for read access; holding both locks for read is also okay.
141 *
142 * - In the event rnh_lock is not acquired, or cannot be held
143 * across the operation, setting RTF_CONDEMNED on a route
144 * entry will prevent its rt_key, rt_gateway, rt_ifp and rt_ifa
145 * from being modified. This is typically done on a route that
146 * has been chosen for removal (from the tree) prior to dropping
147 * the rt_lock, so that those values will remain the same until
148 * the route is freed.
149 *
150 * When rnh_lock is held, rt_setgate(), rt_setif(), and rtsetifa() are
151 * single-threaded, thus exclusive. This flag will also prevent the
152 * route from being looked up via rt_lookup().
153 *
154 * generation_id
155 *
156 * - Assumes that 32-bit writes are atomic; no locks.
157 *
158 * rt_dlt, rt_output
159 *
160 * - Currently unused; no locks.
161 *
162 * Operations on a route entry can be described as follows:
163 *
164 * CREATE an entry with reference count set to 0 as part of RTM_ADD/RESOLVE.
165 *
166 * INSERTION of an entry into the radix tree holds the rnh_lock, checks
167 * for duplicates and then adds the entry. rtrequest returns the entry
168 * after bumping up the reference count to 1 (for the caller).
169 *
170 * LOOKUP of an entry holds the rnh_lock and bumps up the reference count
171 * before returning; it is valid to also bump up the reference count using
172 * RT_ADDREF after the lookup has returned an entry.
173 *
174 * REMOVAL of an entry from the radix tree holds the rnh_lock, removes the
175 * entry but does not decrement the reference count. Removal happens when
176 * the route is explicitly deleted (RTM_DELETE) or when it is in the cached
177 * state and it expires. The route is said to be "down" when it is no
178 * longer present in the tree. Freeing the entry will happen on the last
179 * reference release of such a "down" route.
180 *
181 * RT_ADDREF/RT_REMREF operate on the routing entry and increment/
182 * decrement the reference count, rt_refcnt, atomically on the rtentry.
183 * rt_refcnt is modified only using this routine. The general rule is to
184 * do RT_ADDREF in the function that is passing the entry as an argument,
185 * in order to prevent the entry from being freed by the callee.
186 */
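
/*
 * Editor's note: the following is an illustrative, non-compiled sketch of
 * the reference-counting and locking discipline described above.  It uses
 * the real rtalloc1()/RT_LOCK()/RT_UNLOCK()/rtfree() interfaces from this
 * file; the helper name example_route_is_up() is hypothetical.
 */
#if 0
static boolean_t
example_route_is_up(struct sockaddr *dst)
{
	struct rtentry *rt;
	boolean_t up = FALSE;

	/* rtalloc1() takes rnh_lock internally and returns rt with a reference */
	rt = rtalloc1(dst, 1, 0);
	if (rt != NULL) {
		RT_LOCK(rt);		/* rt_flags is protected by rt_lock */
		up = (rt->rt_flags & RTF_UP) ? TRUE : FALSE;
		RT_UNLOCK(rt);
		rtfree(rt);		/* drop the reference from rtalloc1() */
	}
	return (up);
}
#endif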
187
188 #define equal(a1, a2) (bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
189 #define SA(p) ((struct sockaddr *)(p))
190
191 extern void kdp_set_gateway_mac (void *gatewaymac);
192
193 extern struct domain routedomain;
194 struct route_cb route_cb;
195 __private_extern__ struct rtstat rtstat = { 0, 0, 0, 0, 0 };
196 struct radix_node_head *rt_tables[AF_MAX+1];
197
198 lck_mtx_t *rnh_lock; /* global routing tables mutex */
199 static lck_attr_t *rnh_lock_attr;
200 static lck_grp_t *rnh_lock_grp;
201 static lck_grp_attr_t *rnh_lock_grp_attr;
202
203 /* Lock group and attribute for routing entry locks */
204 static lck_attr_t *rte_mtx_attr;
205 static lck_grp_t *rte_mtx_grp;
206 static lck_grp_attr_t *rte_mtx_grp_attr;
207
208 lck_mtx_t *route_domain_mtx; /*### global routing tables mutex for now */
209 int rttrash = 0; /* routes not in table but not freed */
210
211 unsigned int rte_debug;
212
213 /* Possible flags for rte_debug */
214 #define RTD_DEBUG 0x1 /* enable or disable rtentry debug facility */
215 #define RTD_TRACE 0x2 /* trace alloc, free, refcnt and lock */
216 #define RTD_NO_FREE 0x4 /* don't free (good to catch corruptions) */
217
218 #define RTE_NAME "rtentry" /* name for zone and rt_lock */
219
220 static struct zone *rte_zone; /* special zone for rtentry */
221 #define RTE_ZONE_MAX 65536 /* maximum elements in zone */
222 #define RTE_ZONE_NAME RTE_NAME /* name of rtentry zone */
223
224 #define RTD_INUSE 0xFEEDFACE /* entry is in use */
225 #define RTD_FREED 0xDEADBEEF /* entry is freed */
226
227 /* For gdb */
228 __private_extern__ unsigned int ctrace_stack_size = CTRACE_STACK_SIZE;
229 __private_extern__ unsigned int ctrace_hist_size = CTRACE_HIST_SIZE;
230
231 /*
232 * Debug variant of rtentry structure.
233 */
234 struct rtentry_dbg {
235 struct rtentry rtd_entry; /* rtentry */
236 struct rtentry rtd_entry_saved; /* saved rtentry */
237 uint32_t rtd_inuse; /* in use pattern */
238 uint16_t rtd_refhold_cnt; /* # of rtref */
239 uint16_t rtd_refrele_cnt; /* # of rtunref */
240 uint32_t rtd_lock_cnt; /* # of locks */
241 uint32_t rtd_unlock_cnt; /* # of unlocks */
242 /*
243 * Alloc and free callers.
244 */
245 ctrace_t rtd_alloc;
246 ctrace_t rtd_free;
247 /*
248 * Circular lists of rtref and rtunref callers.
249 */
250 ctrace_t rtd_refhold[CTRACE_HIST_SIZE];
251 ctrace_t rtd_refrele[CTRACE_HIST_SIZE];
252 /*
253 * Circular lists of locks and unlocks.
254 */
255 ctrace_t rtd_lock[CTRACE_HIST_SIZE];
256 ctrace_t rtd_unlock[CTRACE_HIST_SIZE];
257 /*
258 * Trash list linkage
259 */
260 TAILQ_ENTRY(rtentry_dbg) rtd_trash_link;
261 };
262
263 #define atomic_add_16_ov(a, n) \
264 ((uint16_t) OSAddAtomic16(n, (volatile SInt16 *)a))
265 #define atomic_add_32_ov(a, n) \
266 ((uint32_t) OSAddAtomic(n, a))
267
268 /* List of trash route entries protected by rnh_lock */
269 static TAILQ_HEAD(, rtentry_dbg) rttrash_head;
270
271 static void rte_lock_init(struct rtentry *);
272 static void rte_lock_destroy(struct rtentry *);
273 static inline struct rtentry *rte_alloc_debug(void);
274 static inline void rte_free_debug(struct rtentry *);
275 static inline void rte_lock_debug(struct rtentry_dbg *);
276 static inline void rte_unlock_debug(struct rtentry_dbg *);
277 static void rt_maskedcopy(struct sockaddr *,
278 struct sockaddr *, struct sockaddr *);
279 static void rtable_init(void **);
280 static inline void rtref_audit(struct rtentry_dbg *);
281 static inline void rtunref_audit(struct rtentry_dbg *);
282 static struct rtentry *rtalloc1_common_locked(struct sockaddr *, int, uint32_t,
283 unsigned int);
284 static int rtrequest_common_locked(int, struct sockaddr *,
285 struct sockaddr *, struct sockaddr *, int, struct rtentry **,
286 unsigned int);
287 static void rtalloc_ign_common_locked(struct route *, uint32_t, unsigned int);
288 static inline void sa_set_ifscope(struct sockaddr *, unsigned int);
289 static struct sockaddr *sin_copy(struct sockaddr_in *, struct sockaddr_in *,
290 unsigned int);
291 static struct sockaddr *mask_copy(struct sockaddr *, struct sockaddr_in *,
292 unsigned int);
293 static struct sockaddr *sa_trim(struct sockaddr *, int);
294 static struct radix_node *node_lookup(struct sockaddr *, struct sockaddr *,
295 unsigned int);
296 static struct radix_node *node_lookup_default(void);
297 static int rn_match_ifscope(struct radix_node *, void *);
298 static struct ifaddr *ifa_ifwithroute_common_locked(int,
299 const struct sockaddr *, const struct sockaddr *, unsigned int);
300 static struct rtentry *rte_alloc(void);
301 static void rte_free(struct rtentry *);
302 static void rtfree_common(struct rtentry *, boolean_t);
303
304 uint32_t route_generation = 0;
305
306 /*
307 * sockaddr_in with embedded interface scope; this is used internally
308 * to keep track of scoped route entries in the routing table. The
309 * fact that such a scope is embedded in the structure is an artifact
310 * of the current implementation which could change in future.
311 */
312 struct sockaddr_inifscope {
313 __uint8_t sin_len;
314 sa_family_t sin_family;
315 in_port_t sin_port;
316 struct in_addr sin_addr;
317 /*
318 * To avoid possible conflict with an overlaid sockaddr_inarp
319 * having sin_other set to SIN_PROXY, we use the first 4-bytes
320 * of sin_zero since sin_srcaddr is one of the unused fields
321 * in sockaddr_inarp.
322 */
323 union {
324 char sin_zero[8];
325 struct {
326 __uint32_t ifscope;
327 } _in_index;
328 } un;
329 #define sin_ifscope un._in_index.ifscope
330 };
331
332 #define SIN(sa) ((struct sockaddr_in *)(size_t)(sa))
333 #define SINIFSCOPE(sa) ((struct sockaddr_inifscope *)(size_t)(sa))
334
335 #define ASSERT_SINIFSCOPE(sa) { \
336 if ((sa)->sa_family != AF_INET || \
337 (sa)->sa_len < sizeof (struct sockaddr_in)) \
338 panic("%s: bad sockaddr_in %p\n", __func__, sa); \
339 }
340
341 /*
342 * Argument to leaf-matching routine; at present it is scoped routing
343 * specific but can be expanded in future to include other search filters.
344 */
345 struct matchleaf_arg {
346 unsigned int ifscope; /* interface scope */
347 };
348
349 /*
350 * For looking up the non-scoped default route (sockaddr instead
351 * of sockaddr_in for convenience).
352 */
353 static struct sockaddr sin_def = {
354 sizeof (struct sockaddr_in), AF_INET, { 0, }
355 };
356
357 /*
358 * Interface index (scope) of the primary interface; determined at
359 * the time when the default, non-scoped route gets added, changed
360 * or deleted. Protected by rnh_lock.
361 */
362 static unsigned int primary_ifscope = IFSCOPE_NONE;
363
364 #define INET_DEFAULT(dst) \
365 ((dst)->sa_family == AF_INET && SIN(dst)->sin_addr.s_addr == 0)
366
367 #define RT(r) ((struct rtentry *)r)
368 #define RT_HOST(r) (RT(r)->rt_flags & RTF_HOST)
369
370 /*
371 * Given a route, determine whether or not it is the non-scoped default
372 * route; dst typically comes from rt_key(rt) but may be coming from
373 * a separate place when rt is in the process of being created.
374 */
375 boolean_t
376 rt_inet_default(struct rtentry *rt, struct sockaddr *dst)
377 {
378 return (INET_DEFAULT(dst) && !(rt->rt_flags & RTF_IFSCOPE));
379 }
380
381 /*
382 * Set the ifscope of the primary interface; caller holds rnh_lock.
383 */
384 void
385 set_primary_ifscope(unsigned int ifscope)
386 {
387 primary_ifscope = ifscope;
388 }
389
390 /*
391 * Return the ifscope of the primary interface; caller holds rnh_lock.
392 */
393 unsigned int
394 get_primary_ifscope(void)
395 {
396 return (primary_ifscope);
397 }
398
399 /*
400 * Embed ifscope into a given sockaddr_in.
401 */
402 static inline void
403 sa_set_ifscope(struct sockaddr *sa, unsigned int ifscope)
404 {
405 /* Caller must pass in sockaddr_in */
406 ASSERT_SINIFSCOPE(sa);
407
408 SINIFSCOPE(sa)->sin_ifscope = ifscope;
409 }
410
411 /*
412 * Given a sockaddr_in, return the embedded ifscope to the caller.
413 */
414 unsigned int
415 sa_get_ifscope(struct sockaddr *sa)
416 {
417 /* Caller must pass in sockaddr_in */
418 ASSERT_SINIFSCOPE(sa);
419
420 return (SINIFSCOPE(sa)->sin_ifscope);
421 }
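
/*
 * Editor's note: an illustrative, non-compiled sketch of embedding a scope
 * into a sockaddr_in and reading it back, as done internally for scoped
 * routes.  The function name and the scope value 1 are placeholders; dst
 * must be a valid AF_INET sockaddr_in.
 */
#if 0
static void
example_embed_ifscope(struct sockaddr_in *dst)
{
	struct sockaddr_in sin;

	/* Make a local, scoped copy of dst (see sin_copy() below) */
	(void) sin_copy(dst, &sin, 1);

	/* The embedded scope can be read back from the copy */
	if (sa_get_ifscope(SA(&sin)) != 1)
		panic("example_embed_ifscope: scope not embedded");
}
#endif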
422
423 /*
424 * Copy a sockaddr_in src to dst and embed ifscope into dst.
425 */
426 static struct sockaddr *
427 sin_copy(struct sockaddr_in *src, struct sockaddr_in *dst, unsigned int ifscope)
428 {
429 *dst = *src;
430 sa_set_ifscope(SA(dst), ifscope);
431
432 return (SA(dst));
433 }
434
435 /*
436 * Copy a mask from src to a sockaddr_in dst and embed ifscope into dst.
437 */
438 static struct sockaddr *
439 mask_copy(struct sockaddr *src, struct sockaddr_in *dst, unsigned int ifscope)
440 {
441 /* We know dst is at least the size of sockaddr{_in} */
442 bzero(dst, sizeof (*dst));
443 rt_maskedcopy(src, SA(dst), src);
444
445 /*
446 * The length of the mask sockaddr would need to be adjusted
447 * to cover the additional sin_ifscope field; when ifscope is
448 * IFSCOPE_NONE, we'd end up clearing the embedded ifscope on
449 * the destination mask in addition to extending the length
450 * of the sockaddr, as a side effect. This is okay, as any
451 * trailing zeroes would be skipped by rn_addmask prior to
452 * inserting or looking up the mask in the mask tree.
453 */
454 SINIFSCOPE(dst)->sin_ifscope = ifscope;
455 SINIFSCOPE(dst)->sin_len =
456 offsetof(struct sockaddr_inifscope, sin_ifscope) +
457 sizeof (SINIFSCOPE(dst)->sin_ifscope);
458
459 return (SA(dst));
460 }
461
462 /*
463 * Trim trailing zeroes on a sockaddr and update its length.
464 */
465 static struct sockaddr *
466 sa_trim(struct sockaddr *sa, int skip)
467 {
468 caddr_t cp, base = (caddr_t)sa + skip;
469
470 if (sa->sa_len <= skip)
471 return (sa);
472
473 for (cp = base + (sa->sa_len - skip); cp > base && cp[-1] == 0;)
474 cp--;
475
476 sa->sa_len = (cp - base) + skip;
477 if (sa->sa_len < skip) {
478 /* Must not happen, and if so, panic */
479 panic("%s: broken logic (sa_len %d < skip %d )", __func__,
480 sa->sa_len, skip);
481 /* NOTREACHED */
482 } else if (sa->sa_len == skip) {
483 /* If we end up with all zeroes, then there's no mask */
484 sa->sa_len = 0;
485 }
486
487 return (sa);
488 }
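
/*
 * Editor's note: worked example of sa_trim().  For an AF_INET netmask of
 * 255.255.0.0 held in a sockaddr_in, skip is the offset of sin_addr (4);
 * the two 0xff bytes follow the skipped header and the trailing zero bytes
 * are dropped, so sa_len is rewritten to 4 + 2 == 6.  A mask of all zeroes
 * would instead end up with sa_len == 0, i.e. "no mask".
 */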
489
490 /*
491 * Called by rtm_msg{1,2} routines to "scrub" the embedded interface scope
492 * away from the socket address structure, so that clients of the routing
493 * socket will not be confused by the presence of the embedded scope, or the
494 * side effect of the increased length due to that. The source sockaddr is
495 * not modified; instead, the scrubbing happens on the destination sockaddr
496 * storage that is passed in by the caller.
497 */
498 struct sockaddr *
499 rtm_scrub_ifscope(int idx, struct sockaddr *hint, struct sockaddr *sa,
500 struct sockaddr_storage *ss)
501 {
502 struct sockaddr *ret = sa;
503
504 switch (idx) {
505 case RTAX_DST:
506 /*
507 * If this is for an AF_INET destination address, call
508 * sin_copy() with IFSCOPE_NONE as it does what we need.
509 */
510 if (sa->sa_family == AF_INET &&
511 SINIFSCOPE(sa)->sin_ifscope != IFSCOPE_NONE) {
512 bzero(ss, sizeof (*ss));
513 ret = sin_copy(SIN(sa), SIN(ss), IFSCOPE_NONE);
514 }
515 break;
516
517 case RTAX_NETMASK: {
518 /*
519 * If this is for a mask, we can't tell whether or not
520 * there is an embedded interface scope, as the span of
521 * bytes between sa_len and the beginning of the mask
522 * (offset of sin_addr in the case of AF_INET) may be
523 * filled with all-ones by rn_addmask(), and hence we
524 * cannot rely on sa_family. Because of this, we use
525 * the sa_family of the hint sockaddr (RTAX_{DST,IFA})
526 * as indicator as to whether or not the mask is to be
527 * treated as one for AF_INET. Clearing the embedded
528 * scope involves setting it to IFSCOPE_NONE followed
529 * by calling sa_trim() to trim trailing zeroes from
530 * the storage sockaddr, which reverses what was done
531 * earlier by mask_copy() on the source sockaddr.
532 */
533 int skip = offsetof(struct sockaddr_in, sin_addr);
534 if (sa->sa_len > skip && sa->sa_len <= sizeof (*ss) &&
535 hint != NULL && hint->sa_family == AF_INET) {
536 bzero(ss, sizeof (*ss));
537 bcopy(sa, ss, sa->sa_len);
538 SINIFSCOPE(ss)->sin_ifscope = IFSCOPE_NONE;
539 ret = sa_trim(SA(ss), skip);
540 }
541 break;
542 }
543 default:
544 break;
545 }
546
547 return (ret);
548 }
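
/*
 * Editor's note: an illustrative, non-compiled sketch of how a routing
 * socket writer would use rtm_scrub_ifscope(); the scrubbed copy lands in
 * caller-provided sockaddr_storage, so the route entry's own key is never
 * modified.  The helper name is hypothetical.
 */
#if 0
static struct sockaddr *
example_scrub_dst(struct rtentry *rt, struct sockaddr_storage *ss)
{
	/* RTAX_DST needs no hint; the result points either at ss or the key */
	return (rtm_scrub_ifscope(RTAX_DST, NULL, rt_key(rt), ss));
}
#endif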
549
550 /*
551 * Callback leaf-matching routine for rn_matchaddr_args used
552 * for looking up an exact match for a scoped route entry.
553 */
554 static int
555 rn_match_ifscope(struct radix_node *rn, void *arg)
556 {
557 struct rtentry *rt = (struct rtentry *)rn;
558 struct matchleaf_arg *ma = arg;
559
560 if (!(rt->rt_flags & RTF_IFSCOPE) || rt_key(rt)->sa_family != AF_INET)
561 return (0);
562
563 return (SINIFSCOPE(rt_key(rt))->sin_ifscope == ma->ifscope);
564 }
565
566 static void
567 rtable_init(void **table)
568 {
569 struct domain *dom;
570 for (dom = domains; dom; dom = dom->dom_next)
571 if (dom->dom_rtattach)
572 dom->dom_rtattach(&table[dom->dom_family],
573 dom->dom_rtoffset);
574 }
575
576 void
577 route_init(void)
578 {
579 int size;
580
581 PE_parse_boot_argn("rte_debug", &rte_debug, sizeof (rte_debug));
582 if (rte_debug != 0)
583 rte_debug |= RTD_DEBUG;
584
585 rnh_lock_grp_attr = lck_grp_attr_alloc_init();
586 rnh_lock_grp = lck_grp_alloc_init("route", rnh_lock_grp_attr);
587 rnh_lock_attr = lck_attr_alloc_init();
588 if ((rnh_lock = lck_mtx_alloc_init(rnh_lock_grp,
589 rnh_lock_attr)) == NULL) {
590 printf("route_init: can't alloc rnh_lock\n");
591 return;
592 }
593
594 rte_mtx_grp_attr = lck_grp_attr_alloc_init();
595 rte_mtx_grp = lck_grp_alloc_init(RTE_NAME, rte_mtx_grp_attr);
596 rte_mtx_attr = lck_attr_alloc_init();
597
598 lck_mtx_lock(rnh_lock);
599 rn_init(); /* initialize all zeroes, all ones, mask table */
600 lck_mtx_unlock(rnh_lock);
601 rtable_init((void **)rt_tables);
602 route_domain_mtx = routedomain.dom_mtx;
603
604 if (rte_debug & RTD_DEBUG)
605 size = sizeof (struct rtentry_dbg);
606 else
607 size = sizeof (struct rtentry);
608
609 rte_zone = zinit(size, RTE_ZONE_MAX * size, 0, RTE_ZONE_NAME);
610 if (rte_zone == NULL)
611 panic("route_init: failed allocating rte_zone");
612
613 zone_change(rte_zone, Z_EXPAND, TRUE);
614
615 TAILQ_INIT(&rttrash_head);
616 }
617
618 /*
619 * Atomically increment route generation counter
620 */
621 void
622 routegenid_update(void)
623 {
624 (void) atomic_add_32_ov(&route_generation, 1);
625 }
626
627 /*
628 * Packet routing routines.
629 */
630 void
631 rtalloc(struct route *ro)
632 {
633 rtalloc_ign(ro, 0);
634 }
635
636 void
637 rtalloc_ign_locked(struct route *ro, uint32_t ignore)
638 {
639 return (rtalloc_ign_common_locked(ro, ignore, IFSCOPE_NONE));
640 }
641
642 void
643 rtalloc_scoped_ign_locked(struct route *ro, uint32_t ignore,
644 unsigned int ifscope)
645 {
646 return (rtalloc_ign_common_locked(ro, ignore, ifscope));
647 }
648
649 static void
650 rtalloc_ign_common_locked(struct route *ro, uint32_t ignore,
651 unsigned int ifscope)
652 {
653 struct rtentry *rt;
654
655 if ((rt = ro->ro_rt) != NULL) {
656 RT_LOCK_SPIN(rt);
657 if (rt->rt_ifp != NULL && (rt->rt_flags & RTF_UP) &&
658 rt->generation_id == route_generation) {
659 RT_UNLOCK(rt);
660 return;
661 }
662 RT_UNLOCK(rt);
663 rtfree_locked(rt);
664 ro->ro_rt = NULL;
665 }
666 ro->ro_rt = rtalloc1_common_locked(&ro->ro_dst, 1, ignore, ifscope);
667 if (ro->ro_rt != NULL) {
668 ro->ro_rt->generation_id = route_generation;
669 RT_LOCK_ASSERT_NOTHELD(ro->ro_rt);
670 }
671 }
672
673 void
674 rtalloc_ign(struct route *ro, uint32_t ignore)
675 {
676 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
677 lck_mtx_lock(rnh_lock);
678 rtalloc_ign_locked(ro, ignore);
679 lck_mtx_unlock(rnh_lock);
680 }
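
/*
 * Editor's note: an illustrative, non-compiled sketch of the "cached route"
 * pattern served by rtalloc_ign() above: the caller keeps a struct route,
 * fills in ro_dst once, and lets rtalloc_ign() validate or replace ro_rt
 * against route_generation.  The destination address is arbitrary.
 */
#if 0
static void
example_cached_route(void)
{
	struct route ro;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ro.ro_dst;

	bzero(&ro, sizeof (ro));
	sin->sin_len = sizeof (*sin);
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = htonl(0x0a000001);	/* 10.0.0.1, arbitrary */

	rtalloc_ign(&ro, 0);		/* takes rnh_lock and fills in ro.ro_rt */
	if (ro.ro_rt != NULL) {
		/* ... use ro.ro_rt ... */
		rtfree(ro.ro_rt);	/* drop the reference when done */
		ro.ro_rt = NULL;
	}
}
#endif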
681
682 void
683 rtalloc_scoped_ign(struct route *ro, uint32_t ignore, unsigned int ifscope)
684 {
685 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
686 lck_mtx_lock(rnh_lock);
687 rtalloc_scoped_ign_locked(ro, ignore, ifscope);
688 lck_mtx_unlock(rnh_lock);
689 }
690
691 struct rtentry *
692 rtalloc1_locked(struct sockaddr *dst, int report, uint32_t ignflags)
693 {
694 return (rtalloc1_common_locked(dst, report, ignflags, IFSCOPE_NONE));
695 }
696
697 struct rtentry *
698 rtalloc1_scoped_locked(struct sockaddr *dst, int report, uint32_t ignflags,
699 unsigned int ifscope)
700 {
701 return (rtalloc1_common_locked(dst, report, ignflags, ifscope));
702 }
703
704 /*
705 * Look up the route that matches the given address,
706 * or at least try to.  Create a cloned route if needed.
707 */
708 static struct rtentry *
709 rtalloc1_common_locked(struct sockaddr *dst, int report, uint32_t ignflags,
710 unsigned int ifscope)
711 {
712 struct radix_node_head *rnh = rt_tables[dst->sa_family];
713 struct rtentry *rt, *newrt = NULL;
714 struct rt_addrinfo info;
715 uint32_t nflags;
716 int err = 0, msgtype = RTM_MISS;
717
718 if (rnh == NULL)
719 goto unreachable;
720
721 /*
722 * Find the longest prefix or exact (in the scoped case) address match;
723 * callee adds a reference to entry and checks for root node as well
724 */
725 rt = rt_lookup(FALSE, dst, NULL, rnh, ifscope);
726 if (rt == NULL)
727 goto unreachable;
728
729 RT_LOCK_SPIN(rt);
730 newrt = rt;
731 nflags = rt->rt_flags & ~ignflags;
732 RT_UNLOCK(rt);
733 if (report && (nflags & (RTF_CLONING | RTF_PRCLONING))) {
734 /*
735 * We are apparently adding (report = 0 in delete).
736 * If it requires that it be cloned, do so.
737 * (This implies it wasn't a HOST route.)
738 */
739 err = rtrequest_locked(RTM_RESOLVE, dst, NULL, NULL, 0, &newrt);
740 if (err) {
741 /*
742 * If the cloning didn't succeed, maybe what we
743 * have from lookup above will do. Return that;
744 * no need to hold another reference since it's
745 * already done.
746 */
747 newrt = rt;
748 goto miss;
749 }
750
751 /*
752 * We cloned it; drop the original route found during lookup.
753 * The resulting cloned route (newrt) now has an extra
754 * reference held from rtrequest.
755 */
756 rtfree_locked(rt);
757 if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) {
758 /*
759 * If the new route specifies it be
760 * externally resolved, then go do that.
761 */
762 msgtype = RTM_RESOLVE;
763 goto miss;
764 }
765 }
766 goto done;
767
768 unreachable:
769 /*
770 * Either we hit the root or couldn't find any match,
771 * which basically means "can't get there from here".
772 */
773 rtstat.rts_unreach++;
774 miss:
775 if (report) {
776 /*
777 * If required, report the failure to the supervising
778 * authorities.  For a delete (report == 0), this is
779 * not an error.
780 */
781 bzero((caddr_t)&info, sizeof(info));
782 info.rti_info[RTAX_DST] = dst;
783 rt_missmsg(msgtype, &info, 0, err);
784 }
785 done:
786 return (newrt);
787 }
788
789 struct rtentry *
790 rtalloc1(struct sockaddr *dst, int report, uint32_t ignflags)
791 {
792 struct rtentry * entry;
793 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
794 lck_mtx_lock(rnh_lock);
795 entry = rtalloc1_locked(dst, report, ignflags);
796 lck_mtx_unlock(rnh_lock);
797 return (entry);
798 }
799
800 struct rtentry *
801 rtalloc1_scoped(struct sockaddr *dst, int report, uint32_t ignflags,
802 unsigned int ifscope)
803 {
804 struct rtentry * entry;
805 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
806 lck_mtx_lock(rnh_lock);
807 entry = rtalloc1_scoped_locked(dst, report, ignflags, ifscope);
808 lck_mtx_unlock(rnh_lock);
809 return (entry);
810 }
811
812 /*
813 * Remove a reference from an rtentry.
814 * If the count gets low enough, take it out of the routing table.
815 */
816 void
817 rtfree_locked(struct rtentry *rt)
818 {
819 rtfree_common(rt, TRUE);
820 }
821
822 static void
823 rtfree_common(struct rtentry *rt, boolean_t locked)
824 {
825 struct radix_node_head *rnh;
826
827 /*
828 * Atomically decrement the reference count; if it reaches 0 and
829 * there is a close function defined, call the close function.
830 */
831 RT_LOCK_SPIN(rt);
832 if (rtunref(rt) > 0) {
833 RT_UNLOCK(rt);
834 return;
835 }
836
837 /*
838 * To avoid violating lock ordering, we must drop rt_lock before
839 * trying to acquire the global rnh_lock. If we are called with
840 * rnh_lock held, then we already have exclusive access; otherwise
841 * we do the lock dance.
842 */
843 if (!locked) {
844 /*
845 * Note that we check it again below after grabbing rnh_lock,
846 * since it is possible that another thread doing a lookup wins
847 * the race, grabs the rnh_lock first, and bumps up the reference
848 * count in which case the route should be left alone as it is
849 * still in use. It's also possible that another thread frees
850 * the route after we drop rt_lock; to prevent the route from
851 * being freed, we hold an extra reference.
852 */
853 RT_ADDREF_LOCKED(rt);
854 RT_UNLOCK(rt);
855 lck_mtx_lock(rnh_lock);
856 RT_LOCK_SPIN(rt);
857 RT_REMREF_LOCKED(rt);
858 if (rt->rt_refcnt > 0) {
859 /* We've lost the race, so abort */
860 RT_UNLOCK(rt);
861 goto done;
862 }
863 }
864
865 /*
866 * We may be blocked on other lock(s) as part of freeing
867 * the entry below, so convert from spin to full mutex.
868 */
869 RT_CONVERT_LOCK(rt);
870
871 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
872
873 /* Negative refcnt must never happen */
874 if (rt->rt_refcnt != 0)
875 panic("rt %p invalid refcnt %d", rt, rt->rt_refcnt);
876
877 /*
878 * Find the tree for that address family.
879 * Note: in the case of IGMP packets, there might not be an rnh.
880 */
881 rnh = rt_tables[rt_key(rt)->sa_family];
882
883 /*
884 * On last reference give the "close method" a chance to cleanup
885 * private state. This also permits (for IPv4 and IPv6) a chance
886 * to decide if the routing table entry should be purged immediately
887 * or at a later time. When an immediate purge is to happen the
888 * close routine typically issues RTM_DELETE which clears the RTF_UP
889 * flag on the entry so that the code below reclaims the storage.
890 */
891 if (rnh != NULL && rnh->rnh_close != NULL)
892 rnh->rnh_close((struct radix_node *)rt, rnh);
893
894 /*
895 * If we are no longer "up" (and ref == 0) then we can free the
896 * resources associated with the route.
897 */
898 if (!(rt->rt_flags & RTF_UP)) {
899 if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
900 panic("rt %p freed while in radix tree\n", rt);
901 /*
902 * the rtentry must have been removed from the routing table
903 * so it is represented in rttrash; remove that now.
904 */
905 (void) OSDecrementAtomic(&rttrash);
906 if (rte_debug & RTD_DEBUG) {
907 TAILQ_REMOVE(&rttrash_head, (struct rtentry_dbg *)rt,
908 rtd_trash_link);
909 }
910
911 /*
912 * Route is no longer in the tree and refcnt is 0;
913 * we have exclusive access, so destroy it.
914 */
915 RT_UNLOCK(rt);
916
917 /*
918 * Release the references we hold on other items,
919 * e.g. other routes and ifaddrs.
920 */
921 if (rt->rt_parent != NULL) {
922 rtfree_locked(rt->rt_parent);
923 rt->rt_parent = NULL;
924 }
925
926 if (rt->rt_ifa != NULL) {
927 ifafree(rt->rt_ifa);
928 rt->rt_ifa = NULL;
929 }
930
931 /*
932 * Now free any attached link-layer info.
933 */
934 if (rt->rt_llinfo != NULL) {
935 if (rt->rt_llinfo_free != NULL)
936 (*rt->rt_llinfo_free)(rt->rt_llinfo);
937 else
938 R_Free(rt->rt_llinfo);
939 rt->rt_llinfo = NULL;
940 }
941
942 /*
943 * The key is separately alloc'd so free it (see rt_setgate()).
944 * This also frees the gateway, as they are always malloc'd
945 * together.
946 */
947 R_Free(rt_key(rt));
948
949 /*
950 * and the rtentry itself of course
951 */
952 rte_lock_destroy(rt);
953 rte_free(rt);
954 } else {
955 /*
956 * The "close method" has been called, but the route is
957 * still in the radix tree with zero refcnt, i.e. "up"
958 * and in the cached state.
959 */
960 RT_UNLOCK(rt);
961 }
962 done:
963 if (!locked)
964 lck_mtx_unlock(rnh_lock);
965 }
966
967 void
968 rtfree(struct rtentry *rt)
969 {
970 rtfree_common(rt, FALSE);
971 }
972
973 /*
974 * Decrements the refcount but does not free the route when
975 * the refcount reaches zero.  Unless you have a really good reason,
976 * use rtfree, not rtunref.
977 */
978 int
979 rtunref(struct rtentry *p)
980 {
981 RT_LOCK_ASSERT_HELD(p);
982
983 if (p->rt_refcnt == 0)
984 panic("%s(%p) bad refcnt\n", __func__, p);
985
986 --p->rt_refcnt;
987
988 if (rte_debug & RTD_DEBUG)
989 rtunref_audit((struct rtentry_dbg *)p);
990
991 /* Return new value */
992 return (p->rt_refcnt);
993 }
994
995 static inline void
996 rtunref_audit(struct rtentry_dbg *rte)
997 {
998 uint16_t idx;
999
1000 if (rte->rtd_inuse != RTD_INUSE)
1001 panic("rtunref: on freed rte=%p\n", rte);
1002
1003 idx = atomic_add_16_ov(&rte->rtd_refrele_cnt, 1) % CTRACE_HIST_SIZE;
1004 if (rte_debug & RTD_TRACE)
1005 ctrace_record(&rte->rtd_refrele[idx]);
1006 }
1007
1008 /*
1009 * Add a reference to an rtentry.
1010 */
1011 void
1012 rtref(struct rtentry *p)
1013 {
1014 RT_LOCK_ASSERT_HELD(p);
1015
1016 if (++p->rt_refcnt == 0)
1017 panic("%s(%p) bad refcnt\n", __func__, p);
1018
1019 if (rte_debug & RTD_DEBUG)
1020 rtref_audit((struct rtentry_dbg *)p);
1021 }
1022
1023 static inline void
1024 rtref_audit(struct rtentry_dbg *rte)
1025 {
1026 uint16_t idx;
1027
1028 if (rte->rtd_inuse != RTD_INUSE)
1029 panic("rtref_audit: on freed rte=%p\n", rte);
1030
1031 idx = atomic_add_16_ov(&rte->rtd_refhold_cnt, 1) % CTRACE_HIST_SIZE;
1032 if (rte_debug & RTD_TRACE)
1033 ctrace_record(&rte->rtd_refhold[idx]);
1034 }
1035
1036 void
1037 rtsetifa(struct rtentry *rt, struct ifaddr* ifa)
1038 {
1039 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
1040
1041 RT_LOCK_ASSERT_HELD(rt);
1042
1043 if (rt->rt_ifa == ifa)
1044 return;
1045
1046 /* Release the old ifa */
1047 if (rt->rt_ifa)
1048 ifafree(rt->rt_ifa);
1049
1050 /* Set rt_ifa */
1051 rt->rt_ifa = ifa;
1052
1053 /* Take a reference to the ifa */
1054 if (rt->rt_ifa)
1055 ifaref(rt->rt_ifa);
1056 }
1057
1058 /*
1059 * Force a routing table entry to the specified
1060 * destination to go through the given gateway.
1061 * Normally called as a result of a routing redirect
1062 * message from the network layer.
1063 */
1064 void
1065 rtredirect(struct ifnet *ifp, struct sockaddr *dst, struct sockaddr *gateway,
1066 struct sockaddr *netmask, int flags, struct sockaddr *src,
1067 struct rtentry **rtp)
1068 {
1069 struct rtentry *rt = NULL;
1070 int error = 0;
1071 short *stat = 0;
1072 struct rt_addrinfo info;
1073 struct ifaddr *ifa = NULL;
1074 unsigned int ifscope = (ifp != NULL) ? ifp->if_index : IFSCOPE_NONE;
1075 struct sockaddr_in sin;
1076
1077 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
1078 lck_mtx_lock(rnh_lock);
1079
1080 /*
1081 * Verify the gateway is directly reachable; if scoped routing
1082 * is enabled, verify that it is reachable from the interface
1083 * on which the ICMP redirect arrived.
1084 */
1085 if ((ifa = ifa_ifwithnet_scoped(gateway, ifscope)) == NULL) {
1086 error = ENETUNREACH;
1087 goto out;
1088 }
1089
1090 /* Lookup route to the destination (from the original IP header) */
1091 rt = rtalloc1_scoped_locked(dst, 0, RTF_CLONING|RTF_PRCLONING, ifscope);
1092 if (rt != NULL)
1093 RT_LOCK(rt);
1094
1095 /* Embed scope in src for comparison against rt_gateway below */
1096 if (ip_doscopedroute && src->sa_family == AF_INET)
1097 src = sin_copy(SIN(src), &sin, ifscope);
1098
1099 /*
1100 * If the redirect isn't from our current router for this dst,
1101 * it's either old or wrong. If it redirects us to ourselves,
1102 * we have a routing loop, perhaps as a result of an interface
1103 * going down recently.
1104 */
1105 if (!(flags & RTF_DONE) && rt != NULL &&
1106 (!equal(src, rt->rt_gateway) || !equal(rt->rt_ifa->ifa_addr,
1107 ifa->ifa_addr))) {
1108 error = EINVAL;
1109 } else {
1110 ifafree(ifa);
1111 if ((ifa = ifa_ifwithaddr(gateway))) {
1112 ifafree(ifa);
1113 ifa = NULL;
1114 error = EHOSTUNREACH;
1115 }
1116 }
1117
1118 if (ifa) {
1119 ifafree(ifa);
1120 ifa = NULL;
1121 }
1122
1123 if (error) {
1124 if (rt != NULL)
1125 RT_UNLOCK(rt);
1126 goto done;
1127 }
1128
1129 /*
1130 * Create a new entry if we just got back a wildcard entry
1131 * or the lookup failed. This is necessary for hosts
1132 * which use routing redirects generated by smart gateways
1133 * to dynamically build the routing tables.
1134 */
1135 if ((rt == NULL) || (rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2))
1136 goto create;
1137 /*
1138 * Don't listen to the redirect if it's
1139 * for a route to an interface.
1140 */
1141 RT_LOCK_ASSERT_HELD(rt);
1142 if (rt->rt_flags & RTF_GATEWAY) {
1143 if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
1144 /*
1145 * Changing from route to net => route to host.
1146 * Create new route, rather than smashing route
1147 * to net; similar to cloned routes, the newly
1148 * created host route is scoped as well.
1149 */
1150 create:
1151 if (rt != NULL)
1152 RT_UNLOCK(rt);
1153 flags |= RTF_GATEWAY | RTF_DYNAMIC;
1154 error = rtrequest_scoped_locked(RTM_ADD, dst,
1155 gateway, netmask, flags, NULL, ifscope);
1156 stat = &rtstat.rts_dynamic;
1157 } else {
1158 /*
1159 * Smash the current notion of the gateway to
1160 * this destination. Should check about netmask!!!
1161 */
1162 rt->rt_flags |= RTF_MODIFIED;
1163 flags |= RTF_MODIFIED;
1164 stat = &rtstat.rts_newgateway;
1165 /*
1166 * add the key and gateway (in one malloc'd chunk).
1167 */
1168 error = rt_setgate(rt, rt_key(rt), gateway);
1169 RT_UNLOCK(rt);
1170 }
1171 } else {
1172 RT_UNLOCK(rt);
1173 error = EHOSTUNREACH;
1174 }
1175 done:
1176 if (rt != NULL) {
1177 RT_LOCK_ASSERT_NOTHELD(rt);
1178 if (rtp && !error)
1179 *rtp = rt;
1180 else
1181 rtfree_locked(rt);
1182 }
1183 out:
1184 if (error) {
1185 rtstat.rts_badredirect++;
1186 } else {
1187 if (stat != NULL)
1188 (*stat)++;
1189 if (use_routegenid)
1190 routegenid_update();
1191 }
1192 lck_mtx_unlock(rnh_lock);
1193 bzero((caddr_t)&info, sizeof(info));
1194 info.rti_info[RTAX_DST] = dst;
1195 info.rti_info[RTAX_GATEWAY] = gateway;
1196 info.rti_info[RTAX_NETMASK] = netmask;
1197 info.rti_info[RTAX_AUTHOR] = src;
1198 rt_missmsg(RTM_REDIRECT, &info, flags, error);
1199 }
1200
1201 /*
1202 * Routing table ioctl interface.
1203 */
1204 int
1205 rtioctl(unsigned long req, caddr_t data, struct proc *p)
1206 {
1207 #pragma unused(p)
1208 #if INET && MROUTING
1209 return mrt_ioctl(req, data);
1210 #else
1211 return ENXIO;
1212 #endif
1213 }
1214
1215 struct ifaddr *
1216 ifa_ifwithroute(
1217 int flags,
1218 const struct sockaddr *dst,
1219 const struct sockaddr *gateway)
1220 {
1221 struct ifaddr *ifa;
1222
1223 lck_mtx_lock(rnh_lock);
1224 ifa = ifa_ifwithroute_locked(flags, dst, gateway);
1225 lck_mtx_unlock(rnh_lock);
1226
1227 return (ifa);
1228 }
1229
1230 struct ifaddr *
1231 ifa_ifwithroute_locked(int flags, const struct sockaddr *dst,
1232 const struct sockaddr *gateway)
1233 {
1234 return (ifa_ifwithroute_common_locked((flags & ~RTF_IFSCOPE), dst,
1235 gateway, IFSCOPE_NONE));
1236 }
1237
1238 struct ifaddr *
1239 ifa_ifwithroute_scoped_locked(int flags, const struct sockaddr *dst,
1240 const struct sockaddr *gateway, unsigned int ifscope)
1241 {
1242 if (ifscope != IFSCOPE_NONE)
1243 flags |= RTF_IFSCOPE;
1244 else
1245 flags &= ~RTF_IFSCOPE;
1246
1247 return (ifa_ifwithroute_common_locked(flags, dst, gateway, ifscope));
1248 }
1249
1250 static struct ifaddr *
1251 ifa_ifwithroute_common_locked(int flags, const struct sockaddr *dst,
1252 const struct sockaddr *gateway, unsigned int ifscope)
1253 {
1254 struct ifaddr *ifa = NULL;
1255 struct rtentry *rt = NULL;
1256 struct sockaddr_in dst_in, gw_in;
1257
1258 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
1259
1260 if (ip_doscopedroute) {
1261 /*
1262 * Just in case the sockaddr passed in by the caller
1263 * contains embedded scope, make sure to clear it since
1264 * IPv4 interface addresses aren't scoped.
1265 */
1266 if (dst != NULL && dst->sa_family == AF_INET)
1267 dst = sin_copy(SIN(dst), &dst_in, IFSCOPE_NONE);
1268 if (gateway != NULL && gateway->sa_family == AF_INET)
1269 gateway = sin_copy(SIN(gateway), &gw_in, IFSCOPE_NONE);
1270 }
1271
1272 if (!(flags & RTF_GATEWAY)) {
1273 /*
1274 * If we are adding a route to an interface,
1275 * and the interface is a pt to pt link
1276 * we should search for the destination
1277 * as our clue to the interface. Otherwise
1278 * we can use the local address.
1279 */
1280 if (flags & RTF_HOST) {
1281 ifa = ifa_ifwithdstaddr(dst);
1282 }
1283 if (ifa == NULL)
1284 ifa = ifa_ifwithaddr_scoped(gateway, ifscope);
1285 } else {
1286 /*
1287 * If we are adding a route to a remote net
1288 * or host, the gateway may still be on the
1289 * other end of a pt to pt link.
1290 */
1291 ifa = ifa_ifwithdstaddr(gateway);
1292 }
1293 if (ifa == NULL)
1294 ifa = ifa_ifwithnet_scoped(gateway, ifscope);
1295 if (ifa == NULL) {
1296 /* Workaround to avoid gcc warning regarding const variable */
1297 rt = rtalloc1_scoped_locked((struct sockaddr *)(size_t)dst,
1298 0, 0, ifscope);
1299 if (rt != NULL) {
1300 RT_LOCK_SPIN(rt);
1301 ifa = rt->rt_ifa;
1302 if (ifa != NULL)
1303 ifaref(ifa);
1304 RT_REMREF_LOCKED(rt);
1305 RT_UNLOCK(rt);
1306 rt = NULL;
1307 }
1308 }
1309 if (ifa != NULL && ifa->ifa_addr->sa_family != dst->sa_family) {
1310 struct ifaddr *newifa;
1311 /* Callee adds reference to newifa upon success */
1312 newifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
1313 if (newifa != NULL) {
1314 ifafree(ifa);
1315 ifa = newifa;
1316 }
1317 }
1318 /*
1319 * If we are adding a gateway, it is quite possible that the
1320 * routing table has a static entry in place for the gateway,
1321 * which may not agree with info garnered from the interfaces.
1322 * The routing table should take precedence over the
1323 * interfaces in this matter.  Must be careful not to stomp
1324 * on new entries from rtinit, hence (ifa->ifa_addr != gateway).
1325 */
1326 if ((ifa == NULL ||
1327 !equal(ifa->ifa_addr, (struct sockaddr *)(size_t)gateway)) &&
1328 (rt = rtalloc1_scoped_locked((struct sockaddr *)(size_t)gateway,
1329 0, 0, ifscope)) != NULL) {
1330 if (ifa != NULL)
1331 ifafree(ifa);
1332 RT_LOCK_SPIN(rt);
1333 ifa = rt->rt_ifa;
1334 if (ifa != NULL)
1335 ifaref(ifa);
1336 RT_REMREF_LOCKED(rt);
1337 RT_UNLOCK(rt);
1338 }
1339 /*
1340 * If an interface scope was specified, the interface index of
1341 * the found ifaddr must be equivalent to that of the scope;
1342 * otherwise there is no match.
1343 */
1344 if ((flags & RTF_IFSCOPE) &&
1345 ifa != NULL && ifa->ifa_ifp->if_index != ifscope) {
1346 ifafree(ifa);
1347 ifa = NULL;
1348 }
1349
1350 return (ifa);
1351 }
1352
1353 static int rt_fixdelete(struct radix_node *, void *);
1354 static int rt_fixchange(struct radix_node *, void *);
1355
1356 struct rtfc_arg {
1357 struct rtentry *rt0;
1358 struct radix_node_head *rnh;
1359 };
1360
1361 int
1362 rtrequest_locked(int req, struct sockaddr *dst, struct sockaddr *gateway,
1363 struct sockaddr *netmask, int flags, struct rtentry **ret_nrt)
1364 {
1365 return (rtrequest_common_locked(req, dst, gateway, netmask,
1366 (flags & ~RTF_IFSCOPE), ret_nrt, IFSCOPE_NONE));
1367 }
1368
1369 int
1370 rtrequest_scoped_locked(int req, struct sockaddr *dst,
1371 struct sockaddr *gateway, struct sockaddr *netmask, int flags,
1372 struct rtentry **ret_nrt, unsigned int ifscope)
1373 {
1374 if (ifscope != IFSCOPE_NONE)
1375 flags |= RTF_IFSCOPE;
1376 else
1377 flags &= ~RTF_IFSCOPE;
1378
1379 return (rtrequest_common_locked(req, dst, gateway, netmask,
1380 flags, ret_nrt, ifscope));
1381 }
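
/*
 * Editor's note: an illustrative, non-compiled sketch of adding a scoped
 * host route via rtrequest_scoped_locked(); rnh_lock must be held across
 * the call, and the entry returned through ret_nrt carries a reference
 * that must be dropped with rtfree_locked().  All names and flag choices
 * here are placeholders.
 */
#if 0
static int
example_add_scoped_host_route(struct sockaddr *dst, struct sockaddr *gateway,
    unsigned int ifscope)
{
	struct rtentry *rt = NULL;
	int error;

	lck_mtx_lock(rnh_lock);
	error = rtrequest_scoped_locked(RTM_ADD, dst, gateway, NULL,
	    RTF_HOST | RTF_GATEWAY | RTF_STATIC, &rt, ifscope);
	if (error == 0 && rt != NULL)
		rtfree_locked(rt);	/* drop the reference from rtrequest */
	lck_mtx_unlock(rnh_lock);

	return (error);
}
#endif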
1382
1383 /*
1384 * Do appropriate manipulations of a routing tree given all the bits of
1385 * info needed.
1386 *
1387 * Embedding the scope in the radix key is an internal job that should be
1388 * left to routines in this module. Callers should specify the scope value
1389 * to the "scoped" variants of route routines instead of manipulating the
1390 * key itself. This is typically done when creating a scoped route, e.g.
1391 * rtrequest(RTM_ADD). Once such a route is created and marked with the
1392 * RTF_IFSCOPE flag, callers can simply use its rt_key(rt) to clone it
1393 * (RTM_RESOLVE) or to remove it (RTM_DELETE). An exception to this is
1394 * during certain routing socket operations where the search key might be
1395 * derived from the routing message itself, in which case the caller must
1396 * specify the destination address and scope value for RTM_ADD/RTM_DELETE.
1397 */
1398 static int
1399 rtrequest_common_locked(int req, struct sockaddr *dst0,
1400 struct sockaddr *gateway, struct sockaddr *netmask, int flags,
1401 struct rtentry **ret_nrt, unsigned int ifscope)
1402 {
1403 int error = 0;
1404 struct rtentry *rt;
1405 struct radix_node *rn;
1406 struct radix_node_head *rnh;
1407 struct ifaddr *ifa = NULL;
1408 struct sockaddr *ndst, *dst = dst0;
1409 struct sockaddr_in sin, mask;
1410 #define senderr(x) { error = x ; goto bad; }
1411
1412 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
1413 /*
1414 * Find the correct routing tree to use for this Address Family
1415 */
1416 if ((rnh = rt_tables[dst->sa_family]) == 0)
1417 senderr(ESRCH);
1418 /*
1419 * If we are adding a host route then we don't want to put
1420 * a netmask in the tree
1421 */
1422 if (flags & RTF_HOST)
1423 netmask = 0;
1424
1425 /*
1426 * If RTF_IFSCOPE is specified, use a local copy of the destination
1427 * address to embed the scope into. This logic is repeated below
1428 * in the RTM_RESOLVE handler since the caller does not normally
1429 * specify such a flag during a resolve; instead it passes in the
1430 * route used for cloning, from which the scope info is derived.
1431 * Note also that in the case of RTM_DELETE, the address passed in
1432 * by the caller might already contain the embedded scope info when
1433 * it is the key itself, thus making RTF_IFSCOPE unnecessary; one
1434 * instance where it is explicitly set is inside route_output()
1435 * as part of handling a routing socket request.
1436 */
1437 if (req != RTM_RESOLVE && (flags & RTF_IFSCOPE)) {
1438 /* Scoped routing is for AF_INET only */
1439 if (dst->sa_family != AF_INET ||
1440 (req == RTM_ADD && !ip_doscopedroute))
1441 senderr(EINVAL);
1442
1443 if (ifscope == IFSCOPE_NONE) {
1444 flags &= ~RTF_IFSCOPE;
1445 } else {
1446 /* Embed ifscope into the key (local copy) */
1447 dst = sin_copy(SIN(dst), &sin, ifscope);
1448
1449 /* Embed ifscope into netmask (local copy) */
1450 if (netmask != NULL)
1451 netmask = mask_copy(netmask, &mask, ifscope);
1452 }
1453 }
1454
1455 switch (req) {
1456 case RTM_DELETE:
1457 /*
1458 * Remove the item from the tree and return it.
1459 * Complain if it is not there and do no more processing.
1460 */
1461 if ((rn = rnh->rnh_deladdr(dst, netmask, rnh)) == 0)
1462 senderr(ESRCH);
1463 if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
1464 panic ("rtrequest delete");
1465 rt = (struct rtentry *)rn;
1466
1467 /*
1468 * Take an extra reference to handle the deletion of a route
1469 * entry whose reference count is already 0; e.g. an expiring
1470 * cloned route entry or an entry that was added to the table
1471 * with 0 reference. If the caller is interested in this route,
1472 * we will return it with the reference intact. Otherwise we
1473 * will decrement the reference via rtfree_locked() and then
1474 * possibly deallocate it.
1475 */
1476 RT_LOCK(rt);
1477 RT_ADDREF_LOCKED(rt);
1478 rt->rt_flags &= ~RTF_UP;
1479
1480 /*
1481 * For consistency, in case the caller didn't set the flag.
1482 */
1483 rt->rt_flags |= RTF_CONDEMNED;
1484
1485 /*
1486 * Now search what's left of the subtree for any cloned
1487 * routes which might have been formed from this node.
1488 */
1489 if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
1490 rt_mask(rt)) {
1491 RT_UNLOCK(rt);
1492 rnh->rnh_walktree_from(rnh, dst, rt_mask(rt),
1493 rt_fixdelete, rt);
1494 RT_LOCK(rt);
1495 }
1496
1497 /*
1498 * Remove any external references we may have.
1499 * This might result in another rtentry being freed if
1500 * we held its last reference.
1501 */
1502 if (rt->rt_gwroute != NULL) {
1503 rtfree_locked(rt->rt_gwroute);
1504 rt->rt_gwroute = NULL;
1505 }
1506
1507 /*
1508 * give the protocol a chance to keep things in sync.
1509 */
1510 if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
1511 ifa->ifa_rtrequest(RTM_DELETE, rt, SA(0));
1512 ifa = NULL;
1513
1514 /*
1515 * one more rtentry floating around that is not
1516 * linked to the routing table.
1517 */
1518 (void) OSIncrementAtomic(&rttrash);
1519 if (rte_debug & RTD_DEBUG) {
1520 TAILQ_INSERT_TAIL(&rttrash_head,
1521 (struct rtentry_dbg *)rt, rtd_trash_link);
1522 }
1523
1524 /*
1525 * If this is the (non-scoped) default route, clear
1526 * the interface index used for the primary ifscope.
1527 */
1528 if (rt_inet_default(rt, rt_key(rt)))
1529 set_primary_ifscope(IFSCOPE_NONE);
1530
1531 RT_UNLOCK(rt);
1532
1533 /*
1534 * If the caller wants it, then it can have it,
1535 * but it's up to it to free the rtentry as we won't be
1536 * doing it.
1537 */
1538 if (ret_nrt != NULL) {
1539 /* Return the route to caller with reference intact */
1540 *ret_nrt = rt;
1541 } else {
1542 /* Dereference or deallocate the route */
1543 rtfree_locked(rt);
1544 }
1545 break;
1546
1547 case RTM_RESOLVE:
1548 if (ret_nrt == 0 || (rt = *ret_nrt) == 0)
1549 senderr(EINVAL);
1550 /*
1551 * If cloning, we have the parent route given by the caller
1552 * and will use its rt_gateway, rt_rmx as part of the cloning
1553 * process below. Since rnh_lock is held at this point, the
1554 * parent's rt_ifa and rt_gateway will not change, and its
1555 * relevant rt_flags will not change as well. The only thing
1556 * that could change are the metrics, and thus we hold the
1557 * parent route's rt_lock later on during the actual copying
1558 * of rt_rmx.
1559 */
1560 ifa = rt->rt_ifa;
1561 ifaref(ifa);
1562 flags = rt->rt_flags &
1563 ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
1564 flags |= RTF_WASCLONED;
1565 gateway = rt->rt_gateway;
1566 if ((netmask = rt->rt_genmask) == 0)
1567 flags |= RTF_HOST;
1568
1569 if (!ip_doscopedroute || dst->sa_family != AF_INET)
1570 goto makeroute;
1571 /*
1572 * When scoped routing is enabled, cloned entries are
1573 * always scoped according to the interface portion of
1574 * the parent route. The exception to this is IPv4
1575 * link-local addresses.
1576 */
1577 if (!IN_LINKLOCAL(ntohl(SIN(dst)->sin_addr.s_addr))) {
1578 if (flags & RTF_IFSCOPE) {
1579 ifscope = sa_get_ifscope(rt_key(rt));
1580 } else {
1581 ifscope = rt->rt_ifp->if_index;
1582 flags |= RTF_IFSCOPE;
1583 }
1584 } else {
1585 ifscope = IFSCOPE_NONE;
1586 flags &= ~RTF_IFSCOPE;
1587 }
1588
1589 /* Embed or clear ifscope into/from the key (local copy) */
1590 dst = sin_copy(SIN(dst), &sin, ifscope);
1591
1592 /* Embed or clear ifscope into/from netmask (local copy) */
1593 if (netmask != NULL)
1594 netmask = mask_copy(netmask, &mask, ifscope);
1595
1596 goto makeroute;
1597
1598 case RTM_ADD:
1599 if ((flags & RTF_GATEWAY) && !gateway)
1600 panic("rtrequest: RTF_GATEWAY but no gateway");
1601
1602 if (flags & RTF_IFSCOPE) {
1603 ifa = ifa_ifwithroute_scoped_locked(flags, dst0,
1604 gateway, ifscope);
1605 } else {
1606 ifa = ifa_ifwithroute_locked(flags, dst0, gateway);
1607 }
1608 if (ifa == NULL)
1609 senderr(ENETUNREACH);
1610 makeroute:
1611 if ((rt = rte_alloc()) == NULL)
1612 senderr(ENOBUFS);
1613 Bzero(rt, sizeof(*rt));
1614 rte_lock_init(rt);
1615 RT_LOCK(rt);
1616 rt->rt_flags = RTF_UP | flags;
1617
1618 /*
1619 * Add the gateway, possibly re-malloc-ing the storage for it;
1620 * also add the rt_gwroute if possible.
1621 */
1622 if ((error = rt_setgate(rt, dst, gateway)) != 0) {
1623 RT_UNLOCK(rt);
1624 rte_lock_destroy(rt);
1625 rte_free(rt);
1626 senderr(error);
1627 }
1628
1629 /*
1630 * point to the (possibly newly malloc'd) dest address.
1631 */
1632 ndst = rt_key(rt);
1633
1634 /*
1635 * make sure it contains the value we want (masked if needed).
1636 */
1637 if (netmask)
1638 rt_maskedcopy(dst, ndst, netmask);
1639 else
1640 Bcopy(dst, ndst, dst->sa_len);
1641
1642 /*
1643 * Note that we now have a reference to the ifa.
1644 * This moved from below so that rnh->rnh_addaddr() can
1645 * examine the ifa and ifa->ifa_ifp if it so desires.
1646 */
1647 rtsetifa(rt, ifa);
1648 rt->rt_ifp = rt->rt_ifa->ifa_ifp;
1649
1650 /* XXX mtu manipulation will be done in rnh_addaddr -- itojun */
1651
1652 rn = rnh->rnh_addaddr((caddr_t)ndst, (caddr_t)netmask,
1653 rnh, rt->rt_nodes);
1654 if (rn == 0) {
1655 struct rtentry *rt2;
1656 /*
1657 * Uh-oh, we already have one of these in the tree.
1658 * We do a special hack: if the route that's already
1659 * there was generated by the protocol-cloning
1660 * mechanism, then we just blow it away and retry
1661 * the insertion of the new one.
1662 */
1663 if (flags & RTF_IFSCOPE) {
1664 rt2 = rtalloc1_scoped_locked(dst0, 0,
1665 RTF_CLONING | RTF_PRCLONING, ifscope);
1666 } else {
1667 rt2 = rtalloc1_locked(dst, 0,
1668 RTF_CLONING | RTF_PRCLONING);
1669 }
1670 if (rt2 && rt2->rt_parent) {
1671 /*
1672 * rnh_lock is held here, so rt_key and
1673 * rt_gateway of rt2 will not change.
1674 */
1675 (void) rtrequest_locked(RTM_DELETE, rt_key(rt2),
1676 rt2->rt_gateway, rt_mask(rt2),
1677 rt2->rt_flags, 0);
1678 rtfree_locked(rt2);
1679 rn = rnh->rnh_addaddr((caddr_t)ndst,
1680 (caddr_t)netmask,
1681 rnh, rt->rt_nodes);
1682 } else if (rt2) {
1683 /* undo the extra ref we got */
1684 rtfree_locked(rt2);
1685 }
1686 }
1687
1688 /*
1689 * If it still failed to go into the tree,
1690 * then un-make it (this should be a function)
1691 */
1692 if (rn == 0) {
1693 if (rt->rt_gwroute) {
1694 rtfree_locked(rt->rt_gwroute);
1695 rt->rt_gwroute = NULL;
1696 }
1697 if (rt->rt_ifa) {
1698 ifafree(rt->rt_ifa);
1699 rt->rt_ifa = NULL;
1700 }
1701 R_Free(rt_key(rt));
1702 RT_UNLOCK(rt);
1703 rte_lock_destroy(rt);
1704 rte_free(rt);
1705 senderr(EEXIST);
1706 }
1707
1708 rt->rt_parent = 0;
1709
1710 /*
1711 * If we got here from RESOLVE, then we are cloning so clone
1712 * the rest, and note that we are a clone (and increment the
1713 * parent's references). rnh_lock is still held, which prevents
1714 * a lookup from returning the newly-created route. Hence
1715 * holding and releasing the parent's rt_lock while still
1716 * holding the route's rt_lock is safe since the new route
1717 * is not yet externally visible.
1718 */
1719 if (req == RTM_RESOLVE) {
1720 RT_LOCK_SPIN(*ret_nrt);
1721 rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
1722 if ((*ret_nrt)->rt_flags & (RTF_CLONING | RTF_PRCLONING)) {
1723 rt->rt_parent = (*ret_nrt);
1724 RT_ADDREF_LOCKED(*ret_nrt);
1725 }
1726 RT_UNLOCK(*ret_nrt);
1727 }
1728
1729 /*
1730 * if this protocol has something to add to this then
1731 * allow it to do that as well.
1732 */
1733 if (ifa->ifa_rtrequest)
1734 ifa->ifa_rtrequest(req, rt, SA(ret_nrt ? *ret_nrt : 0));
1735 ifafree(ifa);
1736 ifa = 0;
1737
1738 /*
1739 * If this is the (non-scoped) default route, record
1740 * the interface index used for the primary ifscope.
1741 */
1742 if (rt_inet_default(rt, rt_key(rt)))
1743 set_primary_ifscope(rt->rt_ifp->if_index);
1744
1745 /*
1746 * actually return a resultant rtentry and
1747 * give the caller a single reference.
1748 */
1749 if (ret_nrt) {
1750 *ret_nrt = rt;
1751 RT_ADDREF_LOCKED(rt);
1752 }
1753
1754 /*
1755 * We repeat the same procedure from rt_setgate() here because
1756 * it doesn't fire when we call it there, since at that point the node
1757 * hasn't been added to the tree yet.
1758 */
1759 if (req == RTM_ADD &&
1760 !(rt->rt_flags & RTF_HOST) && rt_mask(rt) != 0) {
1761 struct rtfc_arg arg;
1762 arg.rnh = rnh;
1763 arg.rt0 = rt;
1764 RT_UNLOCK(rt);
1765 rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
1766 rt_fixchange, &arg);
1767 } else {
1768 RT_UNLOCK(rt);
1769 }
1770 break;
1771 }
1772 bad:
1773 if (ifa)
1774 ifafree(ifa);
1775 return (error);
1776 }
1777
1778 int
1779 rtrequest(
1780 int req,
1781 struct sockaddr *dst,
1782 struct sockaddr *gateway,
1783 struct sockaddr *netmask,
1784 int flags,
1785 struct rtentry **ret_nrt)
1786 {
1787 int error;
1788 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
1789 lck_mtx_lock(rnh_lock);
1790 error = rtrequest_locked(req, dst, gateway, netmask, flags, ret_nrt);
1791 lck_mtx_unlock(rnh_lock);
1792 return (error);
1793 }
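
/*
 * Editor's note: an illustrative, non-compiled sketch of the unlocked
 * wrapper above, deleting a host route.  rtrequest() takes rnh_lock
 * itself, so the caller must not hold it; passing a NULL ret_nrt lets the
 * request release the entry on the caller's behalf.
 */
#if 0
static int
example_delete_host_route(struct sockaddr *dst)
{
	return (rtrequest(RTM_DELETE, dst, NULL, NULL, RTF_HOST, NULL));
}
#endif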
1794 /*
1795 * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
1796 * (i.e., the routes related to it by the operation of cloning). This
1797 * routine is iterated over all potential former-child-routes by way of
1798 * rnh->rnh_walktree_from() above, and those that actually are children of
1799 * the late parent (passed in as VP here) are themselves deleted.
1800 */
1801 static int
1802 rt_fixdelete(struct radix_node *rn, void *vp)
1803 {
1804 struct rtentry *rt = (struct rtentry *)rn;
1805 struct rtentry *rt0 = vp;
1806
1807 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
1808
1809 RT_LOCK(rt);
1810 if (rt->rt_parent == rt0 &&
1811 !(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1812 /*
1813 * Safe to drop rt_lock and use rt_key, since holding
1814 * rnh_lock here prevents another thread from calling
1815 * rt_setgate() on this route.
1816 */
1817 RT_UNLOCK(rt);
1818 return (rtrequest_locked(RTM_DELETE, rt_key(rt), NULL,
1819 rt_mask(rt), rt->rt_flags, NULL));
1820 }
1821 RT_UNLOCK(rt);
1822 return 0;
1823 }
1824
1825 /*
1826 * This routine is called from rt_setgate() to do the analogous thing for
1827 * adds and changes. There is the added complication in this case of a
1828 * middle insert; i.e., insertion of a new network route between an older
1829 * network route and (cloned) host routes. For this reason, a simple check
1830 * of rt->rt_parent is insufficient; each candidate route must be tested
1831 * against the (mask, value) of the new route (passed as before in vp)
1832 * to see if the new route matches it.
1833 *
1834 * XXX - it may be possible to do fixdelete() for changes and reserve this
1835 * routine just for adds. I'm not sure why I thought it was necessary to do
1836 * changes this way.
1837 */
1838 static int
1839 rt_fixchange(struct radix_node *rn, void *vp)
1840 {
1841 struct rtentry *rt = (struct rtentry *)rn;
1842 struct rtfc_arg *ap = vp;
1843 struct rtentry *rt0 = ap->rt0;
1844 struct radix_node_head *rnh = ap->rnh;
1845 u_char *xk1, *xm1, *xk2, *xmp;
1846 int i, len;
1847
1848 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
1849
1850 RT_LOCK(rt);
1851
1852 if (!rt->rt_parent ||
1853 (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1854 RT_UNLOCK(rt);
1855 return (0);
1856 }
1857
1858 if (rt->rt_parent == rt0)
1859 goto delete_rt;
1860
1861 /*
1862 * There probably is a function somewhere which does this...
1863 * if not, there should be.
1864 */
1865 len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);
1866
1867 xk1 = (u_char *)rt_key(rt0);
1868 xm1 = (u_char *)rt_mask(rt0);
1869 xk2 = (u_char *)rt_key(rt);
1870
1871 /*
1872 * Avoid applying a less specific route; do this only if the parent
1873 * route (rt->rt_parent) is a network route, since otherwise its mask
1874 * will be NULL if it is a cloning host route.
1875 */
1876 if ((xmp = (u_char *)rt_mask(rt->rt_parent)) != NULL) {
1877 int mlen = rt_mask(rt->rt_parent)->sa_len;
1878 if (mlen > rt_mask(rt0)->sa_len) {
1879 RT_UNLOCK(rt);
1880 return (0);
1881 }
1882
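/*
 * (xmp[i] & ~(xmp[i] ^ xm1[i])) reduces to (xmp[i] & xm1[i]), so the
 * loop below rejects the candidate whenever the parent's mask has a
 * bit set that the new route's mask lacks, i.e. whenever the parent
 * is the more specific of the two.
 */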
1883 for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
1884 if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
1885 RT_UNLOCK(rt);
1886 return (0);
1887 }
1888 }
1889 }
1890
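/*
 * The candidate is affected only if its key, masked with the new
 * route's netmask, equals the new route's key, i.e. only if it falls
 * within the network being added or changed.
 */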
1891 for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
1892 if ((xk2[i] & xm1[i]) != xk1[i]) {
1893 RT_UNLOCK(rt);
1894 return (0);
1895 }
1896 }
1897
1898 /*
1899 * OK, this node is a clone, and matches the node currently being
1900 * changed/added under the node's mask. So, get rid of it.
1901 */
1902 delete_rt:
1903 /*
1904 * Safe to drop rt_lock and use rt_key, since holding rnh_lock here
1905 * prevents another thread from calling rt_setgate() on this route.
1906 */
1907 RT_UNLOCK(rt);
1908 return (rtrequest_locked(RTM_DELETE, rt_key(rt), NULL,
1909 rt_mask(rt), rt->rt_flags, NULL));
1910 }
1911
1912 /*
1913 * Round up the sockaddr length to a multiple of 32 bytes. This will reduce
1914 * or even eliminate the need to re-allocate the chunk of memory used
1915 * for rt_key and rt_gateway in the event the gateway portion changes.
1916 * Certain code paths (e.g. IPSec) are notorious for caching the address
1917 * of rt_gateway; this rounding-up would help ensure that the gateway
1918 * portion never gets deallocated (though it may change contents) and
1919 * thus greatly simplifies things.
1920 */
1921 #define SA_SIZE(x) (-(-((uintptr_t)(x)) & -(32)))
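
/*
 * For example, with the definition above, SA_SIZE(1) through SA_SIZE(32)
 * all evaluate to 32 and SA_SIZE(33) through SA_SIZE(64) to 64; a 16-byte
 * sockaddr_in and a 20-byte sockaddr_dl therefore occupy the same
 * 32-byte slot.
 */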
1922
1923 /*
1924 * Sets the gateway and/or gateway route portion of a route; may be
1925 * called on an existing route to modify the gateway portion. Both
1926 * rt_key and rt_gateway are allocated out of the same memory chunk.
1927 * Route entry lock must be held by caller; this routine will return
1928 * with the lock held.
1929 */
1930 int
1931 rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate)
1932 {
1933 int dlen = SA_SIZE(dst->sa_len), glen = SA_SIZE(gate->sa_len);
1934 struct radix_node_head *rnh = rt_tables[dst->sa_family];
1935
1936 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
1937 RT_LOCK_ASSERT_HELD(rt);
1938
1939 /*
1940 * If this is for a route that is on its way to being removed,
1941 * or is temporarily frozen, reject the modification request.
1942 */
1943 if (rt->rt_flags & RTF_CONDEMNED)
1944 return (EBUSY);
1945
1946 /* Add an extra ref for ourselves */
1947 RT_ADDREF_LOCKED(rt);
1948
1949 /*
1950 * A host route with the destination equal to the gateway
1951 * will interfere with keeping LLINFO in the routing
1952 * table, so disallow it.
1953 */
1954 if (((rt->rt_flags & (RTF_HOST|RTF_GATEWAY|RTF_LLINFO)) ==
1955 (RTF_HOST|RTF_GATEWAY)) && (dst->sa_len == gate->sa_len) &&
1956 (bcmp(dst, gate, dst->sa_len) == 0)) {
1957 /*
1958 * The route might already exist if this is an RTM_CHANGE
1959 * or a routing redirect, so try to delete it.
1960 */
1961 if (rt_key(rt) != NULL) {
1962 /*
1963 * Safe to drop rt_lock and use rt_key, rt_gateway,
1964 * since holding rnh_lock here prevents another thread
1965 * from calling rt_setgate() on this route.
1966 */
1967 RT_UNLOCK(rt);
1968 (void) rtrequest_locked(RTM_DELETE, rt_key(rt),
1969 rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL);
1970 RT_LOCK(rt);
1971 }
1972 /* Release extra ref */
1973 RT_REMREF_LOCKED(rt);
1974 return (EADDRNOTAVAIL);
1975 }
1976
1977 /*
1978 * The destination is not directly reachable. Get a route
1979 * to the next-hop gateway and store it in rt_gwroute.
1980 */
1981 if (rt->rt_flags & RTF_GATEWAY) {
1982 struct rtentry *gwrt;
1983 unsigned int ifscope;
1984
1985 ifscope = (dst->sa_family == AF_INET) ?
1986 sa_get_ifscope(dst) : IFSCOPE_NONE;
1987
1988 RT_UNLOCK(rt);
1989 gwrt = rtalloc1_scoped_locked(gate, 1, RTF_PRCLONING, ifscope);
1990 if (gwrt != NULL)
1991 RT_LOCK_ASSERT_NOTHELD(gwrt);
1992 RT_LOCK(rt);
1993
1994 /*
1995 * Cloning loop avoidance:
1996 *
1997 * In the presence of protocol-cloning and bad configuration,
1998 * it is possible to get stuck in bottomless mutual recursion
1999 * (rtrequest rt_setgate rtalloc1). We avoid this by not
2000 * allowing protocol-cloning to operate for gateways (which
2001 * is probably the correct choice anyway), and avoid the
2002 * resulting reference loops by disallowing any route to run
2003 * through itself as a gateway. This is obviously mandatory
2004 * when we get rt->rt_output(). It implies that a route to
2005 * the gateway must already be present in the system in order
2006 * for the gateway to be referred to by another route.
2007 */
2008 if (gwrt == rt) {
2009 RT_REMREF_LOCKED(gwrt);
2010 /* Release extra ref */
2011 RT_REMREF_LOCKED(rt);
2012 return (EADDRINUSE); /* failure */
2013 }
2014
2015 /*
2016 * If scoped, the gateway route must use the same interface;
2017 * we're holding rnh_lock now, so rt_gateway and rt_ifp of gwrt
2018 * should not change and are freely accessible.
2019 */
2020 if (ifscope != IFSCOPE_NONE && (rt->rt_flags & RTF_IFSCOPE) &&
2021 gwrt != NULL && gwrt->rt_ifp != NULL &&
2022 gwrt->rt_ifp->if_index != ifscope) {
2023 rtfree_locked(gwrt); /* rt != gwrt, no deadlock */
2024 /* Release extra ref */
2025 RT_REMREF_LOCKED(rt);
2026 return ((rt->rt_flags & RTF_HOST) ?
2027 EHOSTUNREACH : ENETUNREACH);
2028 }
2029
2030 /* Check again since we dropped the lock above */
2031 if (rt->rt_flags & RTF_CONDEMNED) {
2032 if (gwrt != NULL)
2033 rtfree_locked(gwrt);
2034 /* Release extra ref */
2035 RT_REMREF_LOCKED(rt);
2036 return (EBUSY);
2037 }
2038
2039 if (rt->rt_gwroute != NULL)
2040 rtfree_locked(rt->rt_gwroute);
2041 rt->rt_gwroute = gwrt;
2042
2043 /*
2044 * In case the (non-scoped) default route gets modified via
2045 * an ICMP redirect, record the interface index used for the
2046 * primary ifscope. Also done in rt_setif() to take care
2047 * of the non-redirect cases.
2048 */
2049 if (rt_inet_default(rt, dst) && rt->rt_ifp != NULL)
2050 set_primary_ifscope(rt->rt_ifp->if_index);
2051
2052 /*
2053 * Tell the kernel debugger about the new default gateway
2054 * if the gateway route uses the primary interface, or
2055 * if we are in a transient state before the non-scoped
2056 * default gateway is installed (similar to how the system
2057 * was behaving in the past). In the future, it would be good
2058 * to do all this only when KDP is enabled.
2059 */
2060 if ((dst->sa_family == AF_INET) &&
2061 gwrt != NULL && gwrt->rt_gateway->sa_family == AF_LINK &&
2062 (gwrt->rt_ifp->if_index == get_primary_ifscope() ||
2063 get_primary_ifscope() == IFSCOPE_NONE))
2064 kdp_set_gateway_mac(SDL(gwrt->rt_gateway)->sdl_data);
2065 }
2066
2067 /*
2068 * Prepare to store the gateway in rt_gateway. Both dst and gateway
2069 * are stored one after the other in the same malloc'd chunk. If we
2070 * have room, reuse the old buffer since rt_gateway already points
2071 * to the right place. Otherwise, malloc a new block and update
2072 * the 'dst' address and point rt_gateway to the right place.
2073 */
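/*
 * Schematically, the chunk allocated below is laid out as
 * [ dst, padded to dlen bytes | gateway, padded to glen bytes ],
 * with rt_key(rt) pointing at the start and rt_gateway at offset dlen.
 */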
2074 if (rt->rt_gateway == NULL || glen > SA_SIZE(rt->rt_gateway->sa_len)) {
2075 caddr_t new;
2076
2077 /* The underlying allocation is done with M_WAITOK set */
2078 R_Malloc(new, caddr_t, dlen + glen);
2079 if (new == NULL) {
2080 if (rt->rt_gwroute != NULL)
2081 rtfree_locked(rt->rt_gwroute);
2082 rt->rt_gwroute = NULL;
2083 /* Release extra ref */
2084 RT_REMREF_LOCKED(rt);
2085 return (ENOBUFS);
2086 }
2087
2088 /*
2089 * Copy from 'dst' and not rt_key(rt) because we can get
2090 * here to initialize a newly allocated route entry, in
2091 * which case rt_key(rt) is NULL (and so is rt_gateway).
2092 */
2093 bzero(new, dlen + glen);
2094 Bcopy(dst, new, dst->sa_len);
2095 R_Free(rt_key(rt)); /* free old block; NULL is okay */
2096 rt->rt_nodes->rn_key = new;
2097 rt->rt_gateway = (struct sockaddr *)(new + dlen);
2098 }
2099
2100 /*
2101 * Copy the new gateway value into the memory chunk.
2102 */
2103 Bcopy(gate, rt->rt_gateway, gate->sa_len);
2104
2105 /*
2106 * For consistency between rt_gateway and rt_key(gwrt).
2107 */
2108 if ((rt->rt_flags & RTF_GATEWAY) && rt->rt_gwroute != NULL &&
2109 (rt->rt_gwroute->rt_flags & RTF_IFSCOPE) &&
2110 rt->rt_gateway->sa_family == AF_INET &&
2111 rt_key(rt->rt_gwroute)->sa_family == AF_INET) {
2112 sa_set_ifscope(rt->rt_gateway,
2113 sa_get_ifscope(rt_key(rt->rt_gwroute)));
2114 }
2115
2116 /*
2117 * This isn't going to do anything useful for host routes, so
2118 * don't bother. Also make sure we have a reasonable mask
2119 * (we don't yet have one during adds).
2120 */
2121 if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != 0) {
2122 struct rtfc_arg arg;
2123 arg.rnh = rnh;
2124 arg.rt0 = rt;
2125 RT_UNLOCK(rt);
2126 rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt),
2127 rt_fixchange, &arg);
2128 RT_LOCK(rt);
2129 }
2130
2131 /* Release extra ref */
2132 RT_REMREF_LOCKED(rt);
2133 return (0);
2134 }
2135
2136 #undef SA_SIZE
2137
2138 static void
2139 rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst,
2140 struct sockaddr *netmask)
2141 {
2142 u_char *cp1 = (u_char *)src;
2143 u_char *cp2 = (u_char *)dst;
2144 u_char *cp3 = (u_char *)netmask;
2145 u_char *cplim = cp2 + *cp3;
2146 u_char *cplim2 = cp2 + *cp1;
2147
2148 *cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
2149 cp3 += 2;
2150 if (cplim > cplim2)
2151 cplim = cplim2;
2152 while (cp2 < cplim)
2153 *cp2++ = *cp1++ & *cp3++;
2154 if (cp2 < cplim2)
2155 bzero((caddr_t)cp2, (unsigned)(cplim2 - cp2));
2156 }
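
/*
 * For example, masking an AF_INET source address of 192.168.1.42 with a
 * 255.255.255.0 netmask leaves 192.168.1.0 in dst: sa_len and sa_family
 * are copied verbatim, the following bytes are AND'ed with the netmask,
 * and bytes of dst beyond the netmask's sa_len (up to the source's
 * sa_len) are zeroed.
 */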
2157
2158 /*
2159 * Lookup an AF_INET scoped or non-scoped route depending on the ifscope
2160 * value passed in by the caller (IFSCOPE_NONE implies non-scoped).
2161 */
2162 static struct radix_node *
2163 node_lookup(struct sockaddr *dst, struct sockaddr *netmask,
2164 unsigned int ifscope)
2165 {
2166 struct radix_node_head *rnh = rt_tables[AF_INET];
2167 struct radix_node *rn;
2168 struct sockaddr_in sin, mask;
2169 struct matchleaf_arg ma = { ifscope };
2170 rn_matchf_t *f = rn_match_ifscope;
2171 void *w = &ma;
2172
2173 if (dst->sa_family != AF_INET)
2174 return (NULL);
2175
2176 /*
2177 * Embed ifscope into the search key; for a non-scoped
2178 * search this will clear out any embedded scope value.
2179 */
2180 dst = sin_copy(SIN(dst), &sin, ifscope);
2181
2182 /* Embed (or clear) ifscope into netmask */
2183 if (netmask != NULL)
2184 netmask = mask_copy(netmask, &mask, ifscope);
2185
2186 if (ifscope == IFSCOPE_NONE)
2187 f = w = NULL;
2188
2189 rn = rnh->rnh_lookup_args(dst, netmask, rnh, f, w);
2190 if (rn != NULL && (rn->rn_flags & RNF_ROOT))
2191 rn = NULL;
2192
2193 return (rn);
2194 }
2195
2196 /*
2197 * Lookup the AF_INET non-scoped default route.
2198 */
2199 static struct radix_node *
2200 node_lookup_default(void)
2201 {
2202 struct radix_node_head *rnh = rt_tables[AF_INET];
2203 return (rnh->rnh_lookup(&sin_def, NULL, rnh));
2204 }
2205
2206 /*
2207 * Common routine to lookup/match a route. It invokes the lookup/matchaddr
2208 * callback which could be address family-specific. The main difference
2209 * between the two (at least for AF_INET/AF_INET6) is that a lookup does
2210 * not alter the expiring state of a route, whereas a match would unexpire
2211 * or revalidate the route.
2212 *
2213 * The optional scope or interface index property of a route allows for a
2214 * per-interface route instance. This permits multiple route entries having
2215 * the same destination (but not necessarily the same gateway) to exist in
2216 * the routing table; each of these entries is specific to the corresponding
2217 * interface. This is made possible by embedding the scope value into the
2218 * radix key, thus making each route entry unique. These scoped entries
2219 * exist along with the regular, non-scoped entries in the same radix tree
2220 * for a given address family (currently AF_INET only); the scope logically
2221 * partitions it into multiple per-interface sub-trees.
2222 *
2223 * When a scoped route lookup is performed, the routing table is searched for
2224 * the best match that would result in a route using the same interface as the
2225 * one associated with the scope (the exception to this are routes that point
2226 * to the loopback interface). The search rule follows the longest matching
2227 * prefix with the additional interface constraint.
2228 */
2229 struct rtentry *
2230 rt_lookup(boolean_t lookup_only, struct sockaddr *dst, struct sockaddr *netmask,
2231 struct radix_node_head *rnh, unsigned int ifscope)
2232 {
2233 struct radix_node *rn0, *rn;
2234 boolean_t dontcare = (ifscope == IFSCOPE_NONE);
2235
2236 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
2237
2238 if (!lookup_only)
2239 netmask = NULL;
2240
2241 /*
2242 * Non-scoped route lookup.
2243 */
2244 if (!ip_doscopedroute || dst->sa_family != AF_INET) {
2245 if (lookup_only)
2246 rn = rnh->rnh_lookup(dst, netmask, rnh);
2247 else
2248 rn = rnh->rnh_matchaddr(dst, rnh);
2249
2250 /*
2251 * Don't return a root node; also, rnh_matchaddr callback
2252 * would have done the necessary work to clear RTPRF_OURS
2253 * for certain protocol families.
2254 */
2255 if (rn != NULL && (rn->rn_flags & RNF_ROOT))
2256 rn = NULL;
2257 if (rn != NULL) {
2258 RT_LOCK_SPIN(RT(rn));
2259 if (!(RT(rn)->rt_flags & RTF_CONDEMNED)) {
2260 RT_ADDREF_LOCKED(RT(rn));
2261 RT_UNLOCK(RT(rn));
2262 } else {
2263 RT_UNLOCK(RT(rn));
2264 rn = NULL;
2265 }
2266 }
2267 return (RT(rn));
2268 }
2269
2270 /*
2271 * Scoped route lookup:
2272 *
2273 * We first perform a non-scoped lookup for the original result.
2274 * Afterwards, depending on whether or not the caller has specified
2275 * a scope, we perform a more specific scoped search and fallback
2276 * to this original result upon failure.
2277 */
2278 rn0 = rn = node_lookup(dst, netmask, IFSCOPE_NONE);
2279
2280 /*
2281 * If the caller did not specify a scope, use the primary scope
2282 * derived from the system's non-scoped default route. If, for
2283 * any reason, there is no primary interface, return what we have.
2284 */
2285 if (dontcare && (ifscope = get_primary_ifscope()) == IFSCOPE_NONE)
2286 goto done;
2287
2288 /*
2289 * Keep the original result if either of the following is true:
2290 *
2291 * 1) The interface portion of the route has the same interface
2292 * index as the scope value and it is marked with RTF_IFSCOPE.
2293 * 2) The route uses the loopback interface, in which case the
2294 * destination (host/net) is local/loopback.
2295 *
2296 * Otherwise, do a more specific search using the scope;
2297 * we're holding rnh_lock now, so rt_ifp should not change.
2298 */
2299 if (rn != NULL) {
2300 struct rtentry *rt = RT(rn);
2301 if (rt->rt_ifp != lo_ifp) {
2302 if (rt->rt_ifp->if_index != ifscope) {
2303 /*
2304 * Wrong interface; keep the original result
2305 * only if the caller did not specify a scope,
2306 * and do a more specific scoped search using
2307 * the scope of the found route. Otherwise,
2308 * start again from scratch.
2309 */
2310 rn = NULL;
2311 if (dontcare)
2312 ifscope = rt->rt_ifp->if_index;
2313 else
2314 rn0 = NULL;
2315 } else if (!(rt->rt_flags & RTF_IFSCOPE)) {
2316 /*
2317 * Right interface, except that this route
2318 * isn't marked with RTF_IFSCOPE. Do a more
2319 * specific scoped search. Keep the original
2320 * result and return it in case the scoped
2321 * search fails.
2322 */
2323 rn = NULL;
2324 }
2325 }
2326 }
2327
2328 /*
2329 * Scoped search. Find the most specific entry having the same
2330 * interface scope as the one requested. The following will result
2331 * in searching for the longest prefix scoped match.
2332 */
2333 if (rn == NULL)
2334 rn = node_lookup(dst, netmask, ifscope);
2335
2336 /*
2337 * Use the original result if either of the following is true:
2338 *
2339 * 1) The scoped search did not yield any result.
2340 * 2) The result from the scoped search is a scoped default route,
2341 * and the original (non-scoped) result is not a default route,
2342 * i.e. the original result is a more specific host/net route.
2343 * 3) The scoped search yielded a net route but the original
2344 * result is a host route, i.e. the original result is treated
2345 * as a more specific route.
2346 */
2347 if (rn == NULL || (rn0 != NULL &&
2348 ((INET_DEFAULT(rt_key(RT(rn))) && !INET_DEFAULT(rt_key(RT(rn0)))) ||
2349 (!RT_HOST(rn) && RT_HOST(rn0)))))
2350 rn = rn0;
2351
2352 /*
2353 * If we still don't have a route, use the non-scoped default
2354 * route as long as the interface portion satisfies the scope.
2355 */
2356 if (rn == NULL && (rn = node_lookup_default()) != NULL &&
2357 RT(rn)->rt_ifp->if_index != ifscope)
2358 rn = NULL;
2359
2360 done:
2361 if (rn != NULL) {
2362 /*
2363 * Manually clear RTPRF_OURS using in_validate() and
2364 * bump up the reference count after, and not before;
2365 * we only get here for AF_INET. node_lookup() has
2366 * done the check against RNF_ROOT, so we can be sure
2367 * that we're not returning a root node here.
2368 */
2369 RT_LOCK_SPIN(RT(rn));
2370 if (!(RT(rn)->rt_flags & RTF_CONDEMNED)) {
2371 if (!lookup_only)
2372 (void) in_validate(rn);
2373 RT_ADDREF_LOCKED(RT(rn));
2374 RT_UNLOCK(RT(rn));
2375 } else {
2376 RT_UNLOCK(RT(rn));
2377 rn = NULL;
2378 }
2379 }
2380
2381 return (RT(rn));
2382 }
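
/*
 * Illustrative sketch: example_scoped_lookup() below is a hypothetical
 * caller performing a scoped AF_INET lookup; 'dst' and 'ifscope' are
 * assumed to be supplied by its caller.
 */
#if 0
static struct rtentry *
example_scoped_lookup(struct sockaddr *dst, unsigned int ifscope)
{
	struct rtentry *rt;

	lck_mtx_lock(rnh_lock);
	rt = rt_lookup(TRUE, dst, NULL, rt_tables[dst->sa_family], ifscope);
	lck_mtx_unlock(rnh_lock);

	/* if non-NULL, rt is returned with one reference held */
	return (rt);
}
#endif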
2383
2384 /*
2385 * Set up a routing table entry, normally
2386 * for an interface.
2387 */
2388 int
2389 rtinit(struct ifaddr *ifa, int cmd, int flags)
2390 {
2391 int error;
2392 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
2393 lck_mtx_lock(rnh_lock);
2394 error = rtinit_locked(ifa, cmd, flags);
2395 lck_mtx_unlock(rnh_lock);
2396 return (error);
2397 }
2398
2399 int
2400 rtinit_locked(struct ifaddr *ifa, int cmd, int flags)
2401 {
2402 struct rtentry *rt;
2403 struct sockaddr *dst;
2404 struct sockaddr *deldst;
2405 struct mbuf *m = 0;
2406 struct rtentry *nrt = 0;
2407 int error;
2408
2409 dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
2410 /*
2411 * If it's a delete, check that if it exists, it's on the correct
2412 * interface or we might scrub a route to another ifa which would
2413 * be confusing at best and possibly worse.
2414 */
2415 if (cmd == RTM_DELETE) {
2416 /*
2417 * It's a delete, so it should already exist.
2418 * If it's a net route, mask off the host bits
2419 * (assuming we have a mask).
2420 */
2421 if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
2422 m = m_get(M_DONTWAIT, MT_SONAME);
2423 if (m == NULL) {
2424 return(ENOBUFS);
2425 }
2426 deldst = mtod(m, struct sockaddr *);
2427 rt_maskedcopy(dst, deldst, ifa->ifa_netmask);
2428 dst = deldst;
2429 }
2430 /*
2431 * Get an rtentry that is in the routing tree and
2432 * contains the correct info. (if this fails, can't get there).
2433 * We set "report" to FALSE so that if it doesn't exist,
2434 * it doesn't report an error or clone a route, etc. etc.
2435 */
2436 rt = rtalloc1_locked(dst, 0, 0);
2437 if (rt) {
2438 /*
2439 * OK, so we found the rtentry. It has an extra reference
2440 * for us at this stage; we won't need that, so
2441 * lop it off now.
2442 */
2443 RT_LOCK_SPIN(rt);
2444 if (rt->rt_ifa != ifa) {
2445 RT_REMREF_LOCKED(rt);
2446 RT_UNLOCK(rt);
2447 /*
2448 * If the interface in the rtentry doesn't match
2449 * the interface we are using, then we don't
2450 * want to delete it, so return an error.
2451 * This seems to be the only point of
2452 * this whole RTM_DELETE clause.
2453 */
2454 if (m)
2455 (void) m_free(m);
2456 return (flags & RTF_HOST ? EHOSTUNREACH
2457 : ENETUNREACH);
2458 } else {
2459 RT_REMREF_LOCKED(rt);
2460 RT_UNLOCK(rt);
2461 }
2462 }
2463 /* XXX */
2464 #if 0
2465 else {
2466 /*
2467 * One would think that as we are deleting, and we know
2468 * it doesn't exist, we could just return at this point
2469 * with an "ELSE" clause, but apparently not..
2470 */
2471 lck_mtx_unlock(rnh_lock);
2472 return (flags & RTF_HOST ? EHOSTUNREACH
2473 : ENETUNREACH);
2474 }
2475 #endif
2476 }
2477 /*
2478 * Do the actual request
2479 */
2480 error = rtrequest_locked(cmd, dst, ifa->ifa_addr, ifa->ifa_netmask,
2481 flags | ifa->ifa_flags, &nrt);
2482 if (m)
2483 (void) m_free(m);
2484 /*
2485 * If we are deleting, and we found an entry, then
2486 * it's been removed from the tree; now throw it away.
2487 */
2488 if (cmd == RTM_DELETE && error == 0 && (rt = nrt)) {
2489 /*
2490 * notify any listening routing agents of the change
2491 */
2492 RT_LOCK(rt);
2493 rt_newaddrmsg(cmd, ifa, error, nrt);
2494 if (use_routegenid)
2495 routegenid_update();
2496 RT_UNLOCK(rt);
2497 rtfree_locked(rt);
2498 }
2499
2500 /*
2501 * We are adding, and we have a returned routing entry.
2502 * We need to sanity check the result.
2503 */
2504 if (cmd == RTM_ADD && error == 0 && (rt = nrt)) {
2505 RT_LOCK(rt);
2506 /*
2507 * If it came back with an unexpected interface, then it must
2508 * have already existed or something. (XXX)
2509 */
2510 if (rt->rt_ifa != ifa) {
2511 if (!(rt->rt_ifa->ifa_ifp->if_flags &
2512 (IFF_POINTOPOINT|IFF_LOOPBACK)))
2513 printf("rtinit: wrong ifa (%p) was (%p)\n",
2514 ifa, rt->rt_ifa);
2515 /*
2516 * Ask that the protocol in question
2517 * remove anything it has associated with
2518 * this route and ifaddr.
2519 */
2520 if (rt->rt_ifa->ifa_rtrequest)
2521 rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt, SA(0));
2522 /*
2523 * Set the route's ifa.
2524 */
2525 rtsetifa(rt, ifa);
2526 /*
2527 * And substitute in references to the ifaddr
2528 * we are adding.
2529 */
2530 rt->rt_ifp = ifa->ifa_ifp;
2531 rt->rt_rmx.rmx_mtu = ifa->ifa_ifp->if_mtu; /*XXX*/
2532 /*
2533 * Now ask the protocol to check if it needs
2534 * any special processing in its new form.
2535 */
2536 if (ifa->ifa_rtrequest)
2537 ifa->ifa_rtrequest(RTM_ADD, rt, SA(0));
2538 }
2539 /*
2540 * notify any listening routing agents of the change
2541 */
2542 rt_newaddrmsg(cmd, ifa, error, nrt);
2543 if (use_routegenid)
2544 routegenid_update();
2545 /*
2546 * We just wanted to add it; we don't actually need a
2547 * reference. This will result in a route that's added
2548 * to the routing table without a reference count. The
2549 * RTM_DELETE code will do the necessary step to adjust
2550 * the reference count at deletion time.
2551 */
2552 RT_REMREF_LOCKED(rt);
2553 RT_UNLOCK(rt);
2554 }
2555 return (error);
2556 }
2557
2558 static void
2559 rte_lock_init(struct rtentry *rt)
2560 {
2561 lck_mtx_init(&rt->rt_lock, rte_mtx_grp, rte_mtx_attr);
2562 }
2563
2564 static void
2565 rte_lock_destroy(struct rtentry *rt)
2566 {
2567 RT_LOCK_ASSERT_NOTHELD(rt);
2568 lck_mtx_destroy(&rt->rt_lock, rte_mtx_grp);
2569 }
2570
2571 void
2572 rt_lock(struct rtentry *rt, boolean_t spin)
2573 {
2574 RT_LOCK_ASSERT_NOTHELD(rt);
2575 if (spin)
2576 lck_mtx_lock_spin(&rt->rt_lock);
2577 else
2578 lck_mtx_lock(&rt->rt_lock);
2579 if (rte_debug & RTD_DEBUG)
2580 rte_lock_debug((struct rtentry_dbg *)rt);
2581 }
2582
2583 void
2584 rt_unlock(struct rtentry *rt)
2585 {
2586 RT_LOCK_ASSERT_HELD(rt);
2587 if (rte_debug & RTD_DEBUG)
2588 rte_unlock_debug((struct rtentry_dbg *)rt);
2589 lck_mtx_unlock(&rt->rt_lock);
2591 }
2592
2593 static inline void
2594 rte_lock_debug(struct rtentry_dbg *rte)
2595 {
2596 uint32_t idx;
2597
2598 idx = atomic_add_32_ov(&rte->rtd_lock_cnt, 1) % CTRACE_HIST_SIZE;
2599 if (rte_debug & RTD_TRACE)
2600 ctrace_record(&rte->rtd_lock[idx]);
2601 }
2602
2603 static inline void
2604 rte_unlock_debug(struct rtentry_dbg *rte)
2605 {
2606 uint32_t idx;
2607
2608 idx = atomic_add_32_ov(&rte->rtd_unlock_cnt, 1) % CTRACE_HIST_SIZE;
2609 if (rte_debug & RTD_TRACE)
2610 ctrace_record(&rte->rtd_unlock[idx]);
2611 }
2612
2613 static struct rtentry *
2614 rte_alloc(void)
2615 {
2616 if (rte_debug & RTD_DEBUG)
2617 return (rte_alloc_debug());
2618
2619 return ((struct rtentry *)zalloc(rte_zone));
2620 }
2621
2622 static void
2623 rte_free(struct rtentry *p)
2624 {
2625 if (rte_debug & RTD_DEBUG) {
2626 rte_free_debug(p);
2627 return;
2628 }
2629
2630 if (p->rt_refcnt != 0)
2631 panic("rte_free: rte=%p refcnt=%d non-zero\n", p, p->rt_refcnt);
2632
2633 zfree(rte_zone, p);
2634 }
2635
2636 static inline struct rtentry *
2637 rte_alloc_debug(void)
2638 {
2639 struct rtentry_dbg *rte;
2640
2641 rte = ((struct rtentry_dbg *)zalloc(rte_zone));
2642 if (rte != NULL) {
2643 bzero(rte, sizeof (*rte));
2644 if (rte_debug & RTD_TRACE)
2645 ctrace_record(&rte->rtd_alloc);
2646 rte->rtd_inuse = RTD_INUSE;
2647 }
2648 return ((struct rtentry *)rte);
2649 }
2650
2651 static inline void
2652 rte_free_debug(struct rtentry *p)
2653 {
2654 struct rtentry_dbg *rte = (struct rtentry_dbg *)p;
2655
2656 if (p->rt_refcnt != 0)
2657 panic("rte_free: rte=%p refcnt=%d\n", p, p->rt_refcnt);
2658
2659 if (rte->rtd_inuse == RTD_FREED)
2660 panic("rte_free: double free rte=%p\n", rte);
2661 else if (rte->rtd_inuse != RTD_INUSE)
2662 panic("rte_free: corrupted rte=%p\n", rte);
2663
2664 bcopy((caddr_t)p, (caddr_t)&rte->rtd_entry_saved, sizeof (*p));
2665 /* Preserve rt_lock to help catch use-after-free cases */
2666 bzero((caddr_t)p, offsetof(struct rtentry, rt_lock));
2667
2668 rte->rtd_inuse = RTD_FREED;
2669
2670 if (rte_debug & RTD_TRACE)
2671 ctrace_record(&rte->rtd_free);
2672
2673 if (!(rte_debug & RTD_NO_FREE))
2674 zfree(rte_zone, p);
2675 }
2676
2677 void
2678 ctrace_record(ctrace_t *tr)
2679 {
2680 tr->th = current_thread();
2681 bzero(tr->pc, sizeof (tr->pc));
2682 (void) OSBacktrace(tr->pc, CTRACE_STACK_SIZE);
2683 }