1 /*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/nd6.c,v 1.20 2002/08/02 20:49:14 rwatson Exp $ */
30 /* $KAME: nd6.c,v 1.144 2001/05/24 07:44:00 itojun Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 /*
62 * XXX
63 * KAME 970409 note:
64 * BSD/OS version heavily modifies this code, related to llinfo.
65 * Since we don't have BSD/OS version of net/route.c in our hand,
66 * I left the code mostly as it was in 970310. -- itojun
67 */
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/malloc.h>
72 #include <sys/mbuf.h>
73 #include <sys/socket.h>
74 #include <sys/sockio.h>
75 #include <sys/time.h>
76 #include <sys/kernel.h>
77 #include <sys/sysctl.h>
78 #include <sys/errno.h>
79 #include <sys/syslog.h>
80 #include <sys/protosw.h>
81 #include <sys/proc.h>
82 #include <sys/mcache.h>
83
84 #include <kern/queue.h>
85 #include <kern/zalloc.h>
86
87 #define DONT_WARN_OBSOLETE
88 #include <net/if.h>
89 #include <net/if_dl.h>
90 #include <net/if_types.h>
91 #include <net/if_llreach.h>
92 #include <net/route.h>
93 #include <net/dlil.h>
94 #include <net/ntstat.h>
95
96 #include <netinet/in.h>
97 #include <netinet/in_arp.h>
98 #include <netinet/if_ether.h>
99 #include <netinet6/in6_var.h>
100 #include <netinet/ip6.h>
101 #include <netinet6/ip6_var.h>
102 #include <netinet6/nd6.h>
103 #include <netinet6/scope6_var.h>
104 #include <netinet/icmp6.h>
105
106 #include "loop.h"
107
108 #include <net/net_osdep.h>
109
110 #define ND6_SLOWTIMER_INTERVAL (60 * 60) /* 1 hour */
111 #define ND6_RECALC_REACHTM_INTERVAL (60 * 120) /* 2 hours */
112
113 #define equal(a1, a2) (bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
114
115 /* timer values */
116 int nd6_prune = 1; /* walk list every 1 second */
117 int nd6_delay = 5; /* delay first probe by 5 seconds */
118 int nd6_umaxtries = 3; /* maximum unicast query */
119 int nd6_mmaxtries = 3; /* maximum multicast query */
120 int nd6_useloopback = 1; /* use loopback interface for local traffic */
121 int nd6_gctimer = (60 * 60 * 24); /* 1 day: garbage collection timer */
122
123 /* preventing too many loops in ND option parsing */
124 int nd6_maxndopt = 10; /* max # of ND options allowed */
125
126 int nd6_maxnudhint = 0; /* max # of subsequent upper layer hints */
127 int nd6_maxqueuelen = 1; /* max # of packets cached in unresolved ND entries */
128
129 #if ND6_DEBUG
130 int nd6_debug = 1;
131 #else
132 int nd6_debug = 0;
133 #endif
134
135 int nd6_optimistic_dad =
136 (ND6_OPTIMISTIC_DAD_LINKLOCAL|ND6_OPTIMISTIC_DAD_AUTOCONF|
137 ND6_OPTIMISTIC_DAD_TEMPORARY|ND6_OPTIMISTIC_DAD_DYNAMIC);
138
139 static int nd6_is_new_addr_neighbor (struct sockaddr_in6 *, struct ifnet *);
140
141 /* for debugging? */
142 static int nd6_inuse, nd6_allocated;
143
144 /*
145 * Synchronization notes:
146 *
147 * The global list of ND entries is stored in llinfo_nd6; an entry
148 * gets inserted into the list when the route is created and gets
149 * removed from the list when it is deleted; this is done as part
150 * of RTM_ADD/RTM_RESOLVE/RTM_DELETE in nd6_rtrequest().
151 *
152 * Because rnh_lock and rt_lock for the entry are held during those
153 * operations, the same locks (and thus lock ordering) must be used
154 * elsewhere to access the relevant data structure fields:
155 *
156 * ln_next, ln_prev, ln_rt
157 *
158 * - Routing lock (rnh_lock)
159 *
160 * ln_hold, ln_asked, ln_expire, ln_state, ln_router, ln_byhint, ln_flags,
161 * ln_llreach, ln_lastused
162 *
163 * - Routing entry lock (rt_lock)
164 *
165 * Due to the dependency on rt_lock, llinfo_nd6 has the same lifetime
166 * as the route entry itself. When a route is deleted (RTM_DELETE),
167 * it is simply removed from the global list but the memory is not
168 * freed until the route itself is freed.
169 */
170 struct llinfo_nd6 llinfo_nd6 = {
171 &llinfo_nd6, &llinfo_nd6, NULL, NULL, 0, 0, 0, 0, 0, 0, NULL, 0
172 };
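/*
 * Illustrative sketch, inferred from the initializer above: llinfo_nd6 is
 * the sentinel of a circular, doubly-linked list whose ln_next/ln_prev
 * initially point back at itself.  The timer and purge paths later in
 * this file walk it under rnh_lock, taking each entry's rt_lock in turn:
 *
 *	struct llinfo_nd6 *ln = llinfo_nd6.ln_next;
 *	while (ln != NULL && ln != &llinfo_nd6) {
 *		struct llinfo_nd6 *next = ln->ln_next;
 *		... examine ln->ln_rt with RT_LOCK() held ...
 *		ln = next;
 *	}
 */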
173
174 /* Protected by nd_if_rwlock */
175 size_t nd_ifinfo_indexlim = 32; /* increased for 5589193 */
176 struct nd_ifinfo *nd_ifinfo = NULL;
177
178 static lck_grp_attr_t *nd_if_lock_grp_attr;
179 static lck_grp_t *nd_if_lock_grp;
180 static lck_attr_t *nd_if_lock_attr;
181 decl_lck_rw_data(, nd_if_rwlock_data);
182 lck_rw_t *nd_if_rwlock = &nd_if_rwlock_data;
183
184 /* Protected by nd6_mutex */
185 struct nd_drhead nd_defrouter;
186 struct nd_prhead nd_prefix = { 0 };
187
188 /* Serialization variables for nd6_drain() */
189 static boolean_t nd6_drain_busy;
190 static void *nd6_drain_waitchan = &nd6_drain_busy;
191 static int nd6_drain_waiters = 0;
192
193 int nd6_recalc_reachtm_interval = ND6_RECALC_REACHTM_INTERVAL;
194 static struct sockaddr_in6 all1_sa;
195
196 static int regen_tmpaddr(struct in6_ifaddr *);
197 extern lck_mtx_t *nd6_mutex;
198
199 static void nd6_slowtimo(void *ignored_arg);
200 static struct llinfo_nd6 *nd6_llinfo_alloc(void);
201 static void nd6_llinfo_free(void *);
202 static void nd6_llinfo_purge(struct rtentry *);
203 static void nd6_llinfo_get_ri(struct rtentry *, struct rt_reach_info *);
204 static void nd6_llinfo_get_iflri(struct rtentry *, struct ifnet_llreach_info *);
205
206 static int nd6_siocgdrlst(void *, int);
207 static int nd6_siocgprlst(void *, int);
208
209 /*
210 * Insertion and removal from llinfo_nd6 must be done with rnh_lock held.
211 */
212 #define LN_DEQUEUE(_ln) do { \
213 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED); \
214 RT_LOCK_ASSERT_HELD((_ln)->ln_rt); \
215 (_ln)->ln_next->ln_prev = (_ln)->ln_prev; \
216 (_ln)->ln_prev->ln_next = (_ln)->ln_next; \
217 (_ln)->ln_prev = (_ln)->ln_next = NULL; \
218 (_ln)->ln_flags &= ~ND6_LNF_IN_USE; \
219 } while (0)
220
221 #define LN_INSERTHEAD(_ln) do { \
222 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED); \
223 RT_LOCK_ASSERT_HELD((_ln)->ln_rt); \
224 (_ln)->ln_next = llinfo_nd6.ln_next; \
225 llinfo_nd6.ln_next = (_ln); \
226 (_ln)->ln_prev = &llinfo_nd6; \
227 (_ln)->ln_next->ln_prev = (_ln); \
228 (_ln)->ln_flags |= ND6_LNF_IN_USE; \
229 } while (0)
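/*
 * Usage sketch: the neighbor-cache garbage collector in nd6_rtrequest()
 * below pairs these macros to move a "less recently used" entry to the
 * head of the list before scheduling it for purging, with both rnh_lock
 * and the entry's rt_lock held, roughly:
 *
 *	RT_LOCK(rt_end);
 *	LN_DEQUEUE(ln_end);
 *	LN_INSERTHEAD(ln_end);
 *	...
 *	RT_UNLOCK(rt_end);
 */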
230
231 static struct zone *llinfo_nd6_zone;
232 #define LLINFO_ND6_ZONE_MAX 256 /* maximum elements in zone */
233 #define LLINFO_ND6_ZONE_NAME "llinfo_nd6" /* name for zone */
234
235 void
236 nd6_init()
237 {
238 static int nd6_init_done = 0;
239 int i;
240
241 if (nd6_init_done) {
242 log(LOG_NOTICE, "nd6_init called more than once (ignored)\n");
243 return;
244 }
245
246 all1_sa.sin6_family = AF_INET6;
247 all1_sa.sin6_len = sizeof(struct sockaddr_in6);
248 for (i = 0; i < sizeof(all1_sa.sin6_addr); i++)
249 all1_sa.sin6_addr.s6_addr[i] = 0xff;
250
251 /* initialization of the default router list */
252 TAILQ_INIT(&nd_defrouter);
253
254 nd_if_lock_grp_attr = lck_grp_attr_alloc_init();
255 nd_if_lock_grp = lck_grp_alloc_init("nd_if_lock", nd_if_lock_grp_attr);
256 nd_if_lock_attr = lck_attr_alloc_init();
257 lck_rw_init(nd_if_rwlock, nd_if_lock_grp, nd_if_lock_attr);
258
259 llinfo_nd6_zone = zinit(sizeof (struct llinfo_nd6),
260 LLINFO_ND6_ZONE_MAX * sizeof (struct llinfo_nd6), 0,
261 LLINFO_ND6_ZONE_NAME);
262 if (llinfo_nd6_zone == NULL)
263 panic("%s: failed allocating llinfo_nd6_zone", __func__);
264
265 zone_change(llinfo_nd6_zone, Z_EXPAND, TRUE);
266 zone_change(llinfo_nd6_zone, Z_CALLERACCT, FALSE);
267
268 nd6_nbr_init();
269 nd6_rtr_init();
270 nd6_prproxy_init();
271
272 nd6_init_done = 1;
273
274 /* start timer */
275 timeout(nd6_slowtimo, (caddr_t)0, ND6_SLOWTIMER_INTERVAL * hz);
276 }
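/*
 * Note on all1_sa initialized above: it is an all-ones sockaddr_in6 that
 * nd6_lookup() later passes as the netmask when adding a neighbor route,
 * which makes the resulting entry a /128 host route for the neighbor.
 */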
277
278 static struct llinfo_nd6 *
279 nd6_llinfo_alloc(void)
280 {
281 return (zalloc(llinfo_nd6_zone));
282 }
283
284 static void
285 nd6_llinfo_free(void *arg)
286 {
287 struct llinfo_nd6 *ln = arg;
288
289 if (ln->ln_next != NULL || ln->ln_prev != NULL) {
290 panic("%s: trying to free %p when it is in use", __func__, ln);
291 /* NOTREACHED */
292 }
293
294 /* Just in case there's anything there, free it */
295 if (ln->ln_hold != NULL) {
296 m_freem(ln->ln_hold);
297 ln->ln_hold = NULL;
298 }
299
300 /* Purge any link-layer info caching */
301 VERIFY(ln->ln_rt->rt_llinfo == ln);
302 if (ln->ln_rt->rt_llinfo_purge != NULL)
303 ln->ln_rt->rt_llinfo_purge(ln->ln_rt);
304
305 zfree(llinfo_nd6_zone, ln);
306 }
307
308 static void
309 nd6_llinfo_purge(struct rtentry *rt)
310 {
311 struct llinfo_nd6 *ln = rt->rt_llinfo;
312
313 RT_LOCK_ASSERT_HELD(rt);
314 VERIFY(rt->rt_llinfo_purge == nd6_llinfo_purge && ln != NULL);
315
316 if (ln->ln_llreach != NULL) {
317 RT_CONVERT_LOCK(rt);
318 ifnet_llreach_free(ln->ln_llreach);
319 ln->ln_llreach = NULL;
320 }
321 ln->ln_lastused = 0;
322 }
323
324 static void
325 nd6_llinfo_get_ri(struct rtentry *rt, struct rt_reach_info *ri)
326 {
327 struct llinfo_nd6 *ln = rt->rt_llinfo;
328 struct if_llreach *lr = ln->ln_llreach;
329
330 if (lr == NULL) {
331 bzero(ri, sizeof (*ri));
332 ri->ri_rssi = IFNET_RSSI_UNKNOWN;
333 ri->ri_lqm = IFNET_LQM_THRESH_OFF;
334 ri->ri_npm = IFNET_NPM_THRESH_UNKNOWN;
335 } else {
336 IFLR_LOCK(lr);
337 /* Export to rt_reach_info structure */
338 ifnet_lr2ri(lr, ri);
339 /* Export ND6 send expiration (calendar) time */
340 ri->ri_snd_expire =
341 ifnet_llreach_up2calexp(lr, ln->ln_lastused);
342 IFLR_UNLOCK(lr);
343 }
344 }
345
346 static void
347 nd6_llinfo_get_iflri(struct rtentry *rt, struct ifnet_llreach_info *iflri)
348 {
349 struct llinfo_nd6 *ln = rt->rt_llinfo;
350 struct if_llreach *lr = ln->ln_llreach;
351
352 if (lr == NULL) {
353 bzero(iflri, sizeof (*iflri));
354 iflri->iflri_rssi = IFNET_RSSI_UNKNOWN;
355 iflri->iflri_lqm = IFNET_LQM_THRESH_OFF;
356 iflri->iflri_npm = IFNET_NPM_THRESH_UNKNOWN;
357 } else {
358 IFLR_LOCK(lr);
359 /* Export to ifnet_llreach_info structure */
360 ifnet_lr2iflri(lr, iflri);
361 /* Export ND6 send expiration (uptime) time */
362 iflri->iflri_snd_expire =
363 ifnet_llreach_up2upexp(lr, ln->ln_lastused);
364 IFLR_UNLOCK(lr);
365 }
366 }
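/*
 * The two exporters above differ only in the output structure and in the
 * clock used for the send-expiration time: nd6_llinfo_get_ri() converts
 * ln_lastused to calendar time via ifnet_llreach_up2calexp(), while
 * nd6_llinfo_get_iflri() keeps it on the uptime clock via
 * ifnet_llreach_up2upexp().
 */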
367
368 int
369 nd6_ifattach(struct ifnet *ifp)
370 {
371 /*
372 * We have some arrays that should be indexed by if_index.
373 * Since if_index will grow dynamically, they should grow too.
374 */
375 lck_rw_lock_exclusive(nd_if_rwlock);
376 if (nd_ifinfo == NULL || if_index >= nd_ifinfo_indexlim) {
377 size_t n;
378 caddr_t q;
379 size_t newlim = nd_ifinfo_indexlim;
380
381 while (if_index >= newlim)
382 newlim <<= 1;
383
384 /* grow nd_ifinfo */
385 n = newlim * sizeof(struct nd_ifinfo);
386 q = (caddr_t)_MALLOC(n, M_IP6NDP, M_WAITOK);
387 if (q == NULL) {
388 lck_rw_done(nd_if_rwlock);
389 return (ENOBUFS);
390 }
391 bzero(q, n);
392 nd_ifinfo_indexlim = newlim;
393 if (nd_ifinfo) {
394 bcopy((caddr_t)nd_ifinfo, q, n/2);
395 /*
396 * We might want to pattern fill the old
397 * array to catch use-after-free cases.
398 */
399 FREE((caddr_t)nd_ifinfo, M_IP6NDP);
400 }
401 nd_ifinfo = (struct nd_ifinfo *)(void *)q;
402 }
403
404 #define ND nd_ifinfo[ifp->if_index]
405 /*
406 * Don't initialize if called twice.
407 */
408 if (ND.initialized) {
409 lck_rw_done(nd_if_rwlock);
410 return (0);
411 }
412 lck_mtx_init(&ND.lock, nd_if_lock_grp, nd_if_lock_attr);
413 ND.initialized = TRUE;
414 ND.linkmtu = ifp->if_mtu;
415 ND.chlim = IPV6_DEFHLIM;
416 ND.basereachable = REACHABLE_TIME;
417 ND.reachable = ND_COMPUTE_RTIME(ND.basereachable);
418 ND.retrans = RETRANS_TIMER;
419 ND.flags = ND6_IFF_PERFORMNUD;
420 lck_rw_done(nd_if_rwlock);
421 #undef ND
422
423 nd6_setmtu(ifp);
424
425 return (0);
426 }
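/*
 * Sketch of the growth policy in nd6_ifattach() above: nd_ifinfo holds
 * nd_ifinfo_indexlim entries and the limit is doubled until if_index
 * fits, e.g.
 *
 *	newlim = nd_ifinfo_indexlim;	(e.g. 32)
 *	while (if_index >= newlim)
 *		newlim <<= 1;		(64, 128, ...)
 *
 * Only n/2 bytes are copied from the old array, which appears to assume
 * that the limit grows by exactly one doubling per call.
 */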
427
428 /*
429 * Reset ND level link MTU. This function is called when the physical MTU
430 * changes, which means we might have to adjust the ND level MTU.
431 */
432 void
433 nd6_setmtu(struct ifnet *ifp)
434 {
435 struct nd_ifinfo *ndi;
436 u_int32_t oldmaxmtu, maxmtu;
437
438 /*
439 * Make sure IPv6 is enabled for the interface first,
440 * because this can be called directly from SIOCSIFMTU for IPv4
441 */
442 lck_rw_lock_shared(nd_if_rwlock);
443 if (ifp->if_index >= nd_ifinfo_indexlim ||
444 !nd_ifinfo[ifp->if_index].initialized) {
445 lck_rw_done(nd_if_rwlock);
446 return; /* nd_ifinfo out of bound, or not yet initialized */
447 }
448
449 ndi = &nd_ifinfo[ifp->if_index];
450 VERIFY(ndi->initialized);
451 lck_mtx_lock(&ndi->lock);
452 oldmaxmtu = ndi->maxmtu;
453
454 /*
455 * The ND level maxmtu is somewhat redundant to the interface MTU
456 * and is an implementation artifact of KAME. Instead of hard-
457 * limiting the maxmtu based on the interface type here, we simply
458 * take the if_mtu value since SIOCSIFMTU would have taken care of
459 * the sanity checks related to the maximum MTU allowed for the
460 * interface (a value that is known only by the interface layer),
461 * by sending the request down via ifnet_ioctl(). The use of the
462 * ND level maxmtu and linkmtu are done via IN6_LINKMTU() which
463 * does further checking against if_mtu.
464 */
465 maxmtu = ndi->maxmtu = ifp->if_mtu;
466
467 /*
468 * Decreasing the interface MTU under the IPv6 minimum MTU may cause
469 * an undesirable situation. We thus notify the operator of the change
470 * explicitly. The check for oldmaxmtu is necessary to restrict the
471 * log to the case of changing the MTU, not initializing it.
472 */
473 if (oldmaxmtu >= IPV6_MMTU && ndi->maxmtu < IPV6_MMTU) {
474 log(LOG_NOTICE, "nd6_setmtu: "
475 "new link MTU on %s%d (%u) is too small for IPv6\n",
476 ifp->if_name, ifp->if_unit, (uint32_t)ndi->maxmtu);
477 }
478 ndi->linkmtu = ifp->if_mtu;
479 lck_mtx_unlock(&ndi->lock);
480 lck_rw_done(nd_if_rwlock);
481
482 /* also adjust in6_maxmtu if necessary. */
483 if (maxmtu > in6_maxmtu)
484 in6_setmaxmtu();
485 }
486
487 void
488 nd6_option_init(
489 void *opt,
490 int icmp6len,
491 union nd_opts *ndopts)
492 {
493 bzero(ndopts, sizeof(*ndopts));
494 ndopts->nd_opts_search = (struct nd_opt_hdr *)opt;
495 ndopts->nd_opts_last
496 = (struct nd_opt_hdr *)(((u_char *)opt) + icmp6len);
497
498 if (icmp6len == 0) {
499 ndopts->nd_opts_done = 1;
500 ndopts->nd_opts_search = NULL;
501 }
502 }
503
504 /*
505 * Take one ND option.
506 */
507 struct nd_opt_hdr *
508 nd6_option(
509 union nd_opts *ndopts)
510 {
511 struct nd_opt_hdr *nd_opt;
512 int olen;
513
514 if (!ndopts)
515 panic("ndopts == NULL in nd6_option\n");
516 if (!ndopts->nd_opts_last)
517 panic("uninitialized ndopts in nd6_option\n");
518 if (!ndopts->nd_opts_search)
519 return NULL;
520 if (ndopts->nd_opts_done)
521 return NULL;
522
523 nd_opt = ndopts->nd_opts_search;
524
525 /* make sure nd_opt_len is inside the buffer */
526 if ((caddr_t)&nd_opt->nd_opt_len >= (caddr_t)ndopts->nd_opts_last) {
527 bzero(ndopts, sizeof(*ndopts));
528 return NULL;
529 }
530
531 olen = nd_opt->nd_opt_len << 3;
532 if (olen == 0) {
533 /*
534 * Message validation requires that all included
535 * options have a length that is greater than zero.
536 */
537 bzero(ndopts, sizeof(*ndopts));
538 return NULL;
539 }
540
541 ndopts->nd_opts_search = (struct nd_opt_hdr *)((caddr_t)nd_opt + olen);
542 if (ndopts->nd_opts_search > ndopts->nd_opts_last) {
543 /* option overruns the end of buffer, invalid */
544 bzero(ndopts, sizeof(*ndopts));
545 return NULL;
546 } else if (ndopts->nd_opts_search == ndopts->nd_opts_last) {
547 /* reached the end of options chain */
548 ndopts->nd_opts_done = 1;
549 ndopts->nd_opts_search = NULL;
550 }
551 return nd_opt;
552 }
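/*
 * For reference, the parser above relies on the RFC 4861 TLV layout of ND
 * options: a 1-byte nd_opt_type, a 1-byte nd_opt_len expressed in units
 * of 8 octets (hence olen = nd_opt_len << 3), then the option body.  A
 * zero length is invalid and causes the entire option block to be
 * discarded.
 */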
553
554 /*
555 * Parse multiple ND options.
556 * This function is much easier to use, for ND routines that do not need
557 * multiple options of the same type.
558 */
559 int
560 nd6_options(
561 union nd_opts *ndopts)
562 {
563 struct nd_opt_hdr *nd_opt;
564 int i = 0;
565
566 if (ndopts == NULL)
567 panic("ndopts == NULL in nd6_options");
568 if (ndopts->nd_opts_last == NULL)
569 panic("uninitialized ndopts in nd6_options");
570 if (ndopts->nd_opts_search == NULL)
571 return 0;
572
573 while (1) {
574 nd_opt = nd6_option(ndopts);
575 if (nd_opt == NULL && ndopts->nd_opts_last == NULL) {
576 /*
577 * Message validation requires that all included
578 * options have a length that is greater than zero.
579 */
580 icmp6stat.icp6s_nd_badopt++;
581 bzero(ndopts, sizeof(*ndopts));
582 return -1;
583 }
584
585 if (nd_opt == NULL)
586 goto skip1;
587
588 switch (nd_opt->nd_opt_type) {
589 case ND_OPT_SOURCE_LINKADDR:
590 case ND_OPT_TARGET_LINKADDR:
591 case ND_OPT_MTU:
592 case ND_OPT_REDIRECTED_HEADER:
593 if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) {
594 nd6log((LOG_INFO,
595 "duplicated ND6 option found (type=%d)\n",
596 nd_opt->nd_opt_type));
597 /* XXX bark? */
598 } else {
599 ndopts->nd_opt_array[nd_opt->nd_opt_type]
600 = nd_opt;
601 }
602 break;
603 case ND_OPT_PREFIX_INFORMATION:
604 if (ndopts->nd_opt_array[nd_opt->nd_opt_type] == 0) {
605 ndopts->nd_opt_array[nd_opt->nd_opt_type]
606 = nd_opt;
607 }
608 ndopts->nd_opts_pi_end =
609 (struct nd_opt_prefix_info *)nd_opt;
610 break;
611 case ND_OPT_RDNSS:
612 /* ignore */
613 break;
614 default:
615 /*
616 * Unknown options must be silently ignored,
617 * to accommodate future extension to the protocol.
618 */
619 nd6log((LOG_DEBUG,
620 "nd6_options: unsupported option %d - "
621 "option ignored\n", nd_opt->nd_opt_type));
622 }
623
624 skip1:
625 i++;
626 if (i > nd6_maxndopt) {
627 icmp6stat.icp6s_nd_toomanyopt++;
628 nd6log((LOG_INFO, "too many loop in nd opt\n"));
629 break;
630 }
631
632 if (ndopts->nd_opts_done)
633 break;
634 }
635
636 return 0;
637 }
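/*
 * Hedged caller sketch (names other than the two functions and the
 * nd_opt_array accessor are hypothetical): an ND input routine typically
 * initializes the option cursor over the bytes following the fixed
 * ICMPv6 header and then parses them in one shot:
 *
 *	union nd_opts ndopts;
 *
 *	nd6_option_init(opt_bytes, opt_len, &ndopts);
 *	if (nd6_options(&ndopts) < 0)
 *		goto bad;	(malformed; icmp6stat already bumped)
 *	if (ndopts.nd_opt_array[ND_OPT_SOURCE_LINKADDR] != NULL)
 *		... use the source link-layer address option ...
 */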
638
639 void
640 nd6_drain(__unused void *ignored_arg)
641 {
642 struct llinfo_nd6 *ln;
643 struct nd_defrouter *dr;
644 struct nd_prefix *pr;
645 struct ifnet *ifp = NULL;
646 struct in6_ifaddr *ia6, *nia6;
647 struct in6_addrlifetime *lt6;
648 struct timeval timenow;
649
650 getmicrotime(&timenow);
651 again:
652 /*
653 * The global list llinfo_nd6 is modified by nd6_rtrequest() and is
654 * therefore protected by rnh_lock. For obvious reasons, we cannot
655 * hold rnh_lock across calls that might lead to code paths which
656 * attempt to acquire rnh_lock, else we deadlock. Hence for such
657 * cases we drop rt_lock and rnh_lock, make the calls, and repeat the
658 * loop. To ensure that we don't process the same entry more than
659 * once in a single timeout, we mark the "already-seen" entries with
660 * ND6_LNF_TIMER_SKIP flag. At the end of the loop, we do a second
661 * pass thru the entries and clear the flag so they can be processed
662 * during the next timeout.
663 */
664 lck_mtx_lock(rnh_lock);
665 ln = llinfo_nd6.ln_next;
666 while (ln != NULL && ln != &llinfo_nd6) {
667 struct rtentry *rt;
668 struct sockaddr_in6 *dst;
669 struct llinfo_nd6 *next;
670 struct nd_ifinfo *ndi;
671 u_int32_t retrans, flags;
672
673 /* ln_next/prev/rt is protected by rnh_lock */
674 next = ln->ln_next;
675 rt = ln->ln_rt;
676 RT_LOCK(rt);
677
678 /* We've seen this already; skip it */
679 if (ln->ln_flags & ND6_LNF_TIMER_SKIP) {
680 RT_UNLOCK(rt);
681 ln = next;
682 continue;
683 }
684
685 /* rt->rt_ifp should never be NULL */
686 if ((ifp = rt->rt_ifp) == NULL) {
687 panic("%s: ln(%p) rt(%p) rt_ifp == NULL", __func__,
688 ln, rt);
689 /* NOTREACHED */
690 }
691
692 /* rt_llinfo must always be equal to ln */
693 if ((struct llinfo_nd6 *)rt->rt_llinfo != ln) {
694 panic("%s: rt_llinfo(%p) is not equal to ln(%p)",
695 __func__, rt->rt_llinfo, ln);
696 /* NOTREACHED */
697 }
698
699 /* rt_key should never be NULL */
700 dst = (struct sockaddr_in6 *)(void *)rt_key(rt);
701 if (dst == NULL) {
702 panic("%s: rt(%p) key is NULL ln(%p)", __func__,
703 rt, ln);
704 /* NOTREACHED */
705 }
706
707 /* Set the flag in case we jump to "again" */
708 ln->ln_flags |= ND6_LNF_TIMER_SKIP;
709
710 if (ln->ln_expire > timenow.tv_sec) {
711 RT_UNLOCK(rt);
712 ln = next;
713 continue;
714 }
715
716 lck_rw_lock_shared(nd_if_rwlock);
717 if (ifp->if_index >= nd_ifinfo_indexlim) {
718 lck_rw_done(nd_if_rwlock);
719 RT_UNLOCK(rt);
720 ln = next;
721 continue;
722 }
723 ndi = ND_IFINFO(ifp);
724 VERIFY(ndi->initialized);
725 lck_mtx_lock(&ndi->lock);
726 retrans = ndi->retrans;
727 flags = ndi->flags;
728 lck_mtx_unlock(&ndi->lock);
729 lck_rw_done(nd_if_rwlock);
730
731 RT_LOCK_ASSERT_HELD(rt);
732
733 switch (ln->ln_state) {
734 case ND6_LLINFO_INCOMPLETE:
735 if (ln->ln_asked < nd6_mmaxtries) {
736 ln->ln_asked++;
737 ln->ln_expire = timenow.tv_sec + retrans / 1000;
738 RT_ADDREF_LOCKED(rt);
739 RT_UNLOCK(rt);
740 lck_mtx_unlock(rnh_lock);
741 if (ip6_forwarding) {
742 nd6_prproxy_ns_output(ifp, NULL,
743 &dst->sin6_addr, ln);
744 } else {
745 nd6_ns_output(ifp, NULL,
746 &dst->sin6_addr, ln, 0);
747 }
748 RT_REMREF(rt);
749 } else {
750 struct mbuf *m = ln->ln_hold;
751 ln->ln_hold = NULL;
752 if (m != NULL) {
753 /*
754 * Fake rcvif to make ICMP error
755 * more helpful in diagnosing
756 * for the receiver.
757 * XXX: should we consider
758 * older rcvif?
759 */
760 m->m_pkthdr.rcvif = ifp;
761 RT_UNLOCK(rt);
762 lck_mtx_unlock(rnh_lock);
763 icmp6_error(m, ICMP6_DST_UNREACH,
764 ICMP6_DST_UNREACH_ADDR, 0);
765 } else {
766 RT_UNLOCK(rt);
767 lck_mtx_unlock(rnh_lock);
768 }
769 nd6_free(rt);
770 }
771 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
772 goto again;
773
774 case ND6_LLINFO_REACHABLE:
775 if (ln->ln_expire) {
776 ln->ln_state = ND6_LLINFO_STALE;
777 ln->ln_expire = rt_expiry(rt, timenow.tv_sec,
778 nd6_gctimer);
779 }
780 RT_UNLOCK(rt);
781 break;
782
783 case ND6_LLINFO_STALE:
784 case ND6_LLINFO_PURGE:
785 /* Garbage Collection(RFC 2461 5.3) */
786 if (ln->ln_expire) {
787 RT_UNLOCK(rt);
788 lck_mtx_unlock(rnh_lock);
789 nd6_free(rt);
790 lck_mtx_assert(rnh_lock,
791 LCK_MTX_ASSERT_NOTOWNED);
792 goto again;
793 } else {
794 RT_UNLOCK(rt);
795 }
796 break;
797
798 case ND6_LLINFO_DELAY:
799 if ((flags & ND6_IFF_PERFORMNUD) != 0) {
800 /* We need NUD */
801 ln->ln_asked = 1;
802 ln->ln_state = ND6_LLINFO_PROBE;
803 ln->ln_expire = timenow.tv_sec + retrans / 1000;
804 RT_ADDREF_LOCKED(rt);
805 RT_UNLOCK(rt);
806 lck_mtx_unlock(rnh_lock);
807 nd6_ns_output(ifp, &dst->sin6_addr,
808 &dst->sin6_addr, ln, 0);
809 lck_mtx_assert(rnh_lock,
810 LCK_MTX_ASSERT_NOTOWNED);
811 RT_REMREF(rt);
812 goto again;
813 }
814 ln->ln_state = ND6_LLINFO_STALE; /* XXX */
815 ln->ln_expire = rt_expiry(rt, timenow.tv_sec,
816 nd6_gctimer);
817 RT_UNLOCK(rt);
818 break;
819
820 case ND6_LLINFO_PROBE:
821 if (ln->ln_asked < nd6_umaxtries) {
822 ln->ln_asked++;
823 ln->ln_expire = timenow.tv_sec + retrans / 1000;
824 RT_ADDREF_LOCKED(rt);
825 RT_UNLOCK(rt);
826 lck_mtx_unlock(rnh_lock);
827 nd6_ns_output(ifp, &dst->sin6_addr,
828 &dst->sin6_addr, ln, 0);
829 RT_REMREF(rt);
830 } else {
831 RT_UNLOCK(rt);
832 lck_mtx_unlock(rnh_lock);
833 nd6_free(rt);
834 }
835 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
836 goto again;
837
838 default:
839 RT_UNLOCK(rt);
840 break;
841 }
842 ln = next;
843 }
844 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
845
846 /* Now clear the flag from all entries */
847 ln = llinfo_nd6.ln_next;
848 while (ln != NULL && ln != &llinfo_nd6) {
849 struct rtentry *rt = ln->ln_rt;
850 struct llinfo_nd6 *next = ln->ln_next;
851
852 RT_LOCK_SPIN(rt);
853 if (ln->ln_flags & ND6_LNF_TIMER_SKIP)
854 ln->ln_flags &= ~ND6_LNF_TIMER_SKIP;
855 RT_UNLOCK(rt);
856 ln = next;
857 }
858 lck_mtx_unlock(rnh_lock);
859
860 /* expire default router list */
861 lck_mtx_lock(nd6_mutex);
862 dr = TAILQ_FIRST(&nd_defrouter);
863 while (dr) {
864 if (dr->expire && dr->expire < timenow.tv_sec) {
865 struct nd_defrouter *t;
866 t = TAILQ_NEXT(dr, dr_entry);
867 defrtrlist_del(dr);
868 dr = t;
869 } else {
870 dr = TAILQ_NEXT(dr, dr_entry);
871 }
872 }
873 lck_mtx_unlock(nd6_mutex);
874
875 /*
876 * expire interface addresses.
877 * in the past the loop was inside prefix expiry processing.
878 * However, from a stricter spec-conformance standpoint, we should
879 * rather separate address lifetimes and prefix lifetimes.
880 */
881 addrloop:
882 lck_rw_lock_exclusive(&in6_ifaddr_rwlock);
883 for (ia6 = in6_ifaddrs; ia6; ia6 = nia6) {
884 nia6 = ia6->ia_next;
885 IFA_LOCK(&ia6->ia_ifa);
886 /*
887 * Extra reference for ourselves; it's a no-op if
888 * we don't have to regenerate a temporary address,
889 * otherwise it protects the address from going
890 * away since we drop in6_ifaddr_rwlock below.
891 */
892 IFA_ADDREF_LOCKED(&ia6->ia_ifa);
893 /* check address lifetime */
894 lt6 = &ia6->ia6_lifetime;
895 if (IFA6_IS_INVALID(ia6)) {
896 /*
897 * If the expiring address is temporary, try
898 * regenerating a new one. This would be useful when
899 * we suspended a laptop PC, then turned it on after a
900 * period that could invalidate all temporary
901 * addresses. Although we may have to restart the
902 * loop (see below), it must be after purging the
903 * address. Otherwise, we'd see an infinite loop of
904 * regeneration.
905 */
906 if (ip6_use_tempaddr &&
907 (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0) {
908 /*
909 * NOTE: We have to drop the lock here
910 * because regen_tmpaddr() eventually calls
911 * in6_update_ifa(), which must take the lock
912 * and would otherwise cause a hang. This is
913 * safe because the goto addrloop leads to a
914 * re-evaluation of the in6_ifaddrs list
915 */
916 IFA_UNLOCK(&ia6->ia_ifa);
917 lck_rw_done(&in6_ifaddr_rwlock);
918 (void) regen_tmpaddr(ia6);
919 } else {
920 IFA_UNLOCK(&ia6->ia_ifa);
921 lck_rw_done(&in6_ifaddr_rwlock);
922 }
923
924 /*
925 * Purging the address would have caused
926 * in6_ifaddr_rwlock to be dropped and reacquired;
927 * therefore search again from the beginning
928 * of in6_ifaddrs list.
929 */
930 in6_purgeaddr(&ia6->ia_ifa);
931
932 /* Release extra reference taken above */
933 IFA_REMREF(&ia6->ia_ifa);
934 goto addrloop;
935 }
936 IFA_LOCK_ASSERT_HELD(&ia6->ia_ifa);
937 if (IFA6_IS_DEPRECATED(ia6)) {
938 int oldflags = ia6->ia6_flags;
939
940 ia6->ia6_flags |= IN6_IFF_DEPRECATED;
941
942 /*
943 * If a temporary address has just become deprecated,
944 * regenerate a new one if possible.
945 */
946 if (ip6_use_tempaddr &&
947 (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0 &&
948 (oldflags & IN6_IFF_DEPRECATED) == 0) {
949
950 /* see NOTE above */
951 IFA_UNLOCK(&ia6->ia_ifa);
952 lck_rw_done(&in6_ifaddr_rwlock);
953 if (regen_tmpaddr(ia6) == 0) {
954 /*
955 * A new temporary address is
956 * generated.
957 * XXX: this means the address chain
958 * has changed while we are still in
959 * the loop. Although the change
960 * would not cause disaster (because
961 * it's not a deletion, but an
962 * addition,) we'd rather restart the
963 * loop just for safety. Or does this
964 * significantly reduce performance??
965 */
966 /* Release extra reference */
967 IFA_REMREF(&ia6->ia_ifa);
968 goto addrloop;
969 }
970 lck_rw_lock_exclusive(&in6_ifaddr_rwlock);
971 } else {
972 IFA_UNLOCK(&ia6->ia_ifa);
973 }
974 } else {
975 /*
976 * A new RA might have made a deprecated address
977 * preferred.
978 */
979 ia6->ia6_flags &= ~IN6_IFF_DEPRECATED;
980 IFA_UNLOCK(&ia6->ia_ifa);
981 }
982 lck_rw_assert(&in6_ifaddr_rwlock, LCK_RW_ASSERT_EXCLUSIVE);
983 /* Release extra reference taken above */
984 IFA_REMREF(&ia6->ia_ifa);
985 }
986 lck_rw_done(&in6_ifaddr_rwlock);
987
988 lck_mtx_lock(nd6_mutex);
989 /*
990 * Since we drop the nd6_mutex in prelist_remove, we want to run this
991 * section single threaded.
992 */
993 while (nd6_drain_busy) {
994 nd6_drain_waiters++;
995 msleep(nd6_drain_waitchan, nd6_mutex, (PZERO-1),
996 __func__, NULL);
997 lck_mtx_assert(nd6_mutex, LCK_MTX_ASSERT_OWNED);
998 }
999 nd6_drain_busy = TRUE;
1000
1001 /* expire prefix list */
1002 pr = nd_prefix.lh_first;
1003 while (pr) {
1004 /*
1005 * check prefix lifetime.
1006 * since pltime is just for autoconf, pltime processing for
1007 * prefix is not necessary.
1008 */
1009 NDPR_LOCK(pr);
1010 if (pr->ndpr_stateflags & NDPRF_PROCESSED) {
1011 NDPR_UNLOCK(pr);
1012 pr = pr->ndpr_next;
1013 continue;
1014 }
1015 if (pr->ndpr_expire && pr->ndpr_expire < timenow.tv_sec) {
1016 /*
1017 * address expiration and prefix expiration are
1018 * separate. NEVER perform in6_purgeaddr here.
1019 */
1020 pr->ndpr_stateflags |= NDPRF_PROCESSED;
1021 NDPR_ADDREF_LOCKED(pr);
1022 prelist_remove(pr);
1023 NDPR_UNLOCK(pr);
1024 NDPR_REMREF(pr);
1025 pr = nd_prefix.lh_first;
1026 } else {
1027 pr->ndpr_stateflags |= NDPRF_PROCESSED;
1028 NDPR_UNLOCK(pr);
1029 pr = pr->ndpr_next;
1030 }
1031 }
1032 LIST_FOREACH(pr, &nd_prefix, ndpr_entry) {
1033 NDPR_LOCK(pr);
1034 pr->ndpr_stateflags &= ~NDPRF_PROCESSED;
1035 NDPR_UNLOCK(pr);
1036 }
1037 nd6_drain_busy = FALSE;
1038 if (nd6_drain_waiters > 0) {
1039 nd6_drain_waiters = 0;
1040 wakeup(nd6_drain_waitchan);
1041 }
1042 lck_mtx_unlock(nd6_mutex);
1043 }
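/*
 * Summary of the per-entry state handling in nd6_drain() above, which
 * roughly follows RFC 2461/4861 Neighbor Unreachability Detection:
 *
 *	INCOMPLETE : retransmit NS up to nd6_mmaxtries, then free the
 *	             entry and return an ICMPv6 unreachable for any
 *	             queued mbuf held in ln_hold;
 *	REACHABLE  : demoted to STALE once ln_expire passes;
 *	STALE/PURGE: garbage-collected via nd6_free();
 *	DELAY      : promoted to PROBE (unicast NS) when
 *	             ND6_IFF_PERFORMNUD is set, otherwise demoted to STALE;
 *	PROBE      : retransmit NS up to nd6_umaxtries, then nd6_free().
 */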
1044
1045 /*
1046 * ND6 router advertisement kernel notification
1047 */
1048 void
1049 nd6_post_msg(u_int32_t code, struct nd_prefix_list *prefix_list,
1050 u_int32_t list_length, u_int32_t mtu, char *dl_addr, u_int32_t dl_addr_len)
1051 {
1052 struct kev_msg ev_msg;
1053 struct kev_nd6_ra_data nd6_ra_msg_data;
1054 struct nd_prefix_list *itr = prefix_list;
1055
1056 bzero(&ev_msg, sizeof(struct kev_msg));
1057 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1058 ev_msg.kev_class = KEV_NETWORK_CLASS;
1059 ev_msg.kev_subclass = KEV_ND6_SUBCLASS;
1060 ev_msg.event_code = code;
1061
1062 bzero(&nd6_ra_msg_data, sizeof(nd6_ra_msg_data));
1063 nd6_ra_msg_data.lladdrlen = (dl_addr_len <= ND6_ROUTER_LL_SIZE) ?
1064 dl_addr_len : ND6_ROUTER_LL_SIZE;
1065 bcopy(dl_addr, &nd6_ra_msg_data.lladdr, nd6_ra_msg_data.lladdrlen);
1066
1067 if (mtu > 0 && mtu >= IPV6_MMTU) {
1068 nd6_ra_msg_data.mtu = mtu;
1069 nd6_ra_msg_data.flags |= KEV_ND6_DATA_VALID_MTU;
1070 }
1071
1072 if (list_length > 0 && prefix_list != NULL) {
1073 nd6_ra_msg_data.list_length = list_length;
1074 nd6_ra_msg_data.flags |= KEV_ND6_DATA_VALID_PREFIX;
1075 }
1076
1077 while (itr != NULL && nd6_ra_msg_data.list_index < list_length) {
1078 bcopy(&itr->pr.ndpr_prefix, &nd6_ra_msg_data.prefix.prefix,
1079 sizeof (nd6_ra_msg_data.prefix.prefix));
1080 nd6_ra_msg_data.prefix.raflags = itr->pr.ndpr_raf;
1081 nd6_ra_msg_data.prefix.prefixlen = itr->pr.ndpr_plen;
1082 nd6_ra_msg_data.prefix.origin = PR_ORIG_RA;
1083 nd6_ra_msg_data.prefix.vltime = itr->pr.ndpr_vltime;
1084 nd6_ra_msg_data.prefix.pltime = itr->pr.ndpr_pltime;
1085 nd6_ra_msg_data.prefix.expire = itr->pr.ndpr_expire;
1086 nd6_ra_msg_data.prefix.flags = itr->pr.ndpr_stateflags;
1087 nd6_ra_msg_data.prefix.refcnt = itr->pr.ndpr_addrcnt;
1088 nd6_ra_msg_data.prefix.if_index = itr->pr.ndpr_ifp->if_index;
1089
1090 /* send the message up */
1091 ev_msg.dv[0].data_ptr = &nd6_ra_msg_data;
1092 ev_msg.dv[0].data_length = sizeof(nd6_ra_msg_data);
1093 ev_msg.dv[1].data_length = 0;
1094 kev_post_msg(&ev_msg);
1095
1096 /* clean up for the next prefix */
1097 bzero(&nd6_ra_msg_data.prefix, sizeof(nd6_ra_msg_data.prefix));
1098 itr = itr->next;
1099 nd6_ra_msg_data.list_index++;
1100 }
1101 }
1102
1103 /*
1104 * ND6 timer routine to expire default route list and prefix list
1105 */
1106 void
1107 nd6_timer(__unused void *ignored_arg)
1108 {
1109 nd6_drain(NULL);
1110 timeout(nd6_timer, (caddr_t)0, nd6_prune * hz);
1111 }
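/*
 * nd6_timer() is self-rearming: each run drains expired neighbor-cache,
 * router, prefix and address state and then schedules itself again
 * nd6_prune seconds later.  Only the initial arm (presumably done at
 * initialization time elsewhere) needs to come from outside, e.g.:
 *
 *	timeout(nd6_timer, (caddr_t)0, nd6_prune * hz);
 */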
1112
1113 static int
1114 regen_tmpaddr(
1115 struct in6_ifaddr *ia6) /* deprecated/invalidated temporary address */
1116 {
1117 struct ifaddr *ifa;
1118 struct ifnet *ifp;
1119 struct in6_ifaddr *public_ifa6 = NULL;
1120 struct timeval timenow;
1121
1122 getmicrotime(&timenow);
1123
1124 ifp = ia6->ia_ifa.ifa_ifp;
1125 ifnet_lock_shared(ifp);
1126 for (ifa = ifp->if_addrlist.tqh_first; ifa;
1127 ifa = ifa->ifa_list.tqe_next)
1128 {
1129 struct in6_ifaddr *it6;
1130
1131 IFA_LOCK(ifa);
1132 if (ifa->ifa_addr->sa_family != AF_INET6) {
1133 IFA_UNLOCK(ifa);
1134 continue;
1135 }
1136 it6 = (struct in6_ifaddr *)ifa;
1137
1138 /* ignore no autoconf addresses. */
1139 if ((it6->ia6_flags & IN6_IFF_AUTOCONF) == 0) {
1140 IFA_UNLOCK(ifa);
1141 continue;
1142 }
1143 /* ignore autoconf addresses with different prefixes. */
1144 if (it6->ia6_ndpr == NULL || it6->ia6_ndpr != ia6->ia6_ndpr) {
1145 IFA_UNLOCK(ifa);
1146 continue;
1147 }
1148 /*
1149 * Now we are looking at an autoconf address with the same
1150 * prefix as ours. If the address is temporary and is still
1151 * preferred, do not create another one. It would be rare, but
1152 * could happen, for example, when we resume a laptop PC after
1153 * a long period.
1154 */
1155 if ((it6->ia6_flags & IN6_IFF_TEMPORARY) != 0 &&
1156 !IFA6_IS_DEPRECATED(it6)) {
1157 IFA_UNLOCK(ifa);
1158 if (public_ifa6 != NULL)
1159 IFA_REMREF(&public_ifa6->ia_ifa);
1160 public_ifa6 = NULL;
1161 break;
1162 }
1163
1164 /*
1165 * This is a public autoconf address that has the same prefix
1166 * as ours. If it is preferred, keep it. We can't break the
1167 * loop here, because there may be a still-preferred temporary
1168 * address with the prefix.
1169 */
1170 if (!IFA6_IS_DEPRECATED(it6)) {
1171 IFA_ADDREF_LOCKED(ifa); /* for public_ifa6 */
1172 IFA_UNLOCK(ifa);
1173 if (public_ifa6 != NULL)
1174 IFA_REMREF(&public_ifa6->ia_ifa);
1175 public_ifa6 = it6;
1176 } else {
1177 IFA_UNLOCK(ifa);
1178 }
1179 }
1180 ifnet_lock_done(ifp);
1181
1182 if (public_ifa6 != NULL) {
1183 int e;
1184
1185 if ((e = in6_tmpifadd(public_ifa6, 0, M_WAITOK)) != 0) {
1186 log(LOG_NOTICE, "regen_tmpaddr: failed to create a new"
1187 " tmp addr,errno=%d\n", e);
1188 IFA_REMREF(&public_ifa6->ia_ifa);
1189 return(-1);
1190 }
1191 IFA_REMREF(&public_ifa6->ia_ifa);
1192 return(0);
1193 }
1194
1195 return(-1);
1196 }
1197
1198 /*
1199 * Nuke neighbor cache/prefix/default router management table, right before
1200 * ifp goes away.
1201 */
1202 void
1203 nd6_purge(
1204 struct ifnet *ifp)
1205 {
1206 struct llinfo_nd6 *ln;
1207 struct nd_defrouter *dr, *ndr;
1208 struct nd_prefix *pr, *npr;
1209
1210 /* Nuke default router list entries toward ifp */
1211 lck_mtx_lock(nd6_mutex);
1212 if ((dr = TAILQ_FIRST(&nd_defrouter)) != NULL) {
1213 /*
1214 * The first entry of the list may be stored in
1215 * the routing table, so we'll delete it later.
1216 */
1217 for (dr = TAILQ_NEXT(dr, dr_entry); dr; dr = ndr) {
1218 ndr = TAILQ_NEXT(dr, dr_entry);
1219 if (dr->stateflags & NDDRF_INSTALLED)
1220 continue;
1221 if (dr->ifp == ifp)
1222 defrtrlist_del(dr);
1223 }
1224 dr = TAILQ_FIRST(&nd_defrouter);
1225 if (dr->ifp == ifp)
1226 defrtrlist_del(dr);
1227 }
1228
1229 for (dr = TAILQ_FIRST(&nd_defrouter); dr; dr = ndr) {
1230 ndr = TAILQ_NEXT(dr, dr_entry);
1231 if (!(dr->stateflags & NDDRF_INSTALLED))
1232 continue;
1233
1234 if (dr->ifp == ifp)
1235 defrtrlist_del(dr);
1236 }
1237
1238 /* Nuke prefix list entries toward ifp */
1239 for (pr = nd_prefix.lh_first; pr; pr = npr) {
1240 npr = pr->ndpr_next;
1241 NDPR_LOCK(pr);
1242 if (pr->ndpr_ifp == ifp) {
1243 /*
1244 * Because if_detach() does *not* release prefixes
1245 * while purging addresses, the reference count will
1246 * still be above zero. We therefore reset it to
1247 * make sure that the prefix really gets purged.
1248 */
1249 pr->ndpr_addrcnt = 0;
1250
1251 /*
1252 * Previously, pr->ndpr_addr is removed as well,
1253 * but I strongly believe we don't have to do it.
1254 * nd6_purge() is only called from in6_ifdetach(),
1255 * which removes all the associated interface addresses
1256 * by itself.
1257 * (jinmei@kame.net 20010129)
1258 */
1259 NDPR_ADDREF_LOCKED(pr);
1260 prelist_remove(pr);
1261 NDPR_UNLOCK(pr);
1262 NDPR_REMREF(pr);
1263 } else {
1264 NDPR_UNLOCK(pr);
1265 }
1266 }
1267 lck_mtx_unlock(nd6_mutex);
1268
1269 /* cancel default outgoing interface setting */
1270 if (nd6_defifindex == ifp->if_index) {
1271 nd6_setdefaultiface(0);
1272 }
1273
1274 /*
1275 * Perform default router selection even when we are a router,
1276 * if Scoped Routing is enabled.
1277 */
1278 if (ip6_doscopedroute || !ip6_forwarding) {
1279 lck_mtx_lock(nd6_mutex);
1280 /* refresh default router list */
1281 defrouter_select(ifp);
1282 lck_mtx_unlock(nd6_mutex);
1283 }
1284
1285 /*
1286 * Nuke neighbor cache entries for the ifp.
1287 * Note that rt->rt_ifp may not be the same as ifp,
1288 * due to KAME goto ours hack. See RTM_RESOLVE case in
1289 * nd6_rtrequest(), and ip6_input().
1290 */
1291 again:
1292 lck_mtx_lock(rnh_lock);
1293 ln = llinfo_nd6.ln_next;
1294 while (ln != NULL && ln != &llinfo_nd6) {
1295 struct rtentry *rt;
1296 struct llinfo_nd6 *nln;
1297
1298 nln = ln->ln_next;
1299 rt = ln->ln_rt;
1300 RT_LOCK(rt);
1301 if (rt->rt_gateway != NULL &&
1302 rt->rt_gateway->sa_family == AF_LINK &&
1303 SDL(rt->rt_gateway)->sdl_index == ifp->if_index) {
1304 RT_UNLOCK(rt);
1305 lck_mtx_unlock(rnh_lock);
1306 /*
1307 * See comments on nd6_timer() for reasons why
1308 * this loop is repeated; we bite the costs of
1309 * going thru the same llinfo_nd6 more than once
1310 * here, since this purge happens during detach,
1311 * and that, unlike the timer case, it's possible
1312 * there's more than one purge happening at the
1313 * same time (thus a flag wouldn't buy anything).
1314 */
1315 nd6_free(rt);
1316 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
1317 goto again;
1318 } else {
1319 RT_UNLOCK(rt);
1320 }
1321 ln = nln;
1322 }
1323 lck_mtx_unlock(rnh_lock);
1324 }
1325
1326 /*
1327 * Upon success, the returned route will be locked and the caller is
1328 * responsible for releasing the reference and doing RT_UNLOCK(rt).
1329 * This routine does not require rnh_lock to be held by the caller,
1330 * although it needs to be indicated of such a case in order to call
1331 * the correct variant of the relevant routing routines.
1332 */
1333 struct rtentry *
1334 nd6_lookup(
1335 struct in6_addr *addr6,
1336 int create,
1337 struct ifnet *ifp,
1338 int rt_locked)
1339 {
1340 struct rtentry *rt;
1341 struct sockaddr_in6 sin6;
1342 unsigned int ifscope;
1343
1344 bzero(&sin6, sizeof(sin6));
1345 sin6.sin6_len = sizeof(struct sockaddr_in6);
1346 sin6.sin6_family = AF_INET6;
1347 sin6.sin6_addr = *addr6;
1348
1349 ifscope = (ifp != NULL) ? ifp->if_index : IFSCOPE_NONE;
1350 if (rt_locked) {
1351 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
1352 rt = rtalloc1_scoped_locked((struct sockaddr *)&sin6,
1353 create, 0, ifscope);
1354 } else {
1355 rt = rtalloc1_scoped((struct sockaddr *)&sin6,
1356 create, 0, ifscope);
1357 }
1358
1359 if (rt != NULL) {
1360 RT_LOCK(rt);
1361 if ((rt->rt_flags & RTF_LLINFO) == 0) {
1362 /*
1363 * This is the case for the default route.
1364 * If we want to create a neighbor cache for the
1365 * address, we should free the route for the
1366 * destination and allocate an interface route.
1367 */
1368 if (create) {
1369 RT_UNLOCK(rt);
1370 if (rt_locked)
1371 rtfree_locked(rt);
1372 else
1373 rtfree(rt);
1374 rt = NULL;
1375 }
1376 }
1377 }
1378 if (rt == NULL) {
1379 if (create && ifp) {
1380 struct ifaddr *ifa;
1381 u_int32_t ifa_flags;
1382 int e;
1383
1384 /*
1385 * If no route is available and create is set,
1386 * we allocate a host route for the destination
1387 * and treat it like an interface route.
1388 * This hack is necessary for a neighbor which can't
1389 * be covered by our own prefix.
1390 */
1391 ifa = ifaof_ifpforaddr((struct sockaddr *)&sin6, ifp);
1392 if (ifa == NULL)
1393 return(NULL);
1394
1395 /*
1396 * Create a new route. RTF_LLINFO is necessary
1397 * to create a Neighbor Cache entry for the
1398 * destination in nd6_rtrequest which will be
1399 * called in rtrequest via ifa->ifa_rtrequest.
1400 */
1401 if (!rt_locked)
1402 lck_mtx_lock(rnh_lock);
1403 IFA_LOCK_SPIN(ifa);
1404 ifa_flags = ifa->ifa_flags;
1405 IFA_UNLOCK(ifa);
1406 if ((e = rtrequest_scoped_locked(RTM_ADD,
1407 (struct sockaddr *)&sin6, ifa->ifa_addr,
1408 (struct sockaddr *)&all1_sa,
1409 (ifa_flags | RTF_HOST | RTF_LLINFO) &
1410 ~RTF_CLONING, &rt, ifscope)) != 0) {
1411 if (e != EEXIST)
1412 log(LOG_ERR, "%s: failed to add route "
1413 "for a neighbor(%s), errno=%d\n",
1414 __func__, ip6_sprintf(addr6), e);
1415 }
1416 if (!rt_locked)
1417 lck_mtx_unlock(rnh_lock);
1418 IFA_REMREF(ifa);
1419 if (rt == NULL)
1420 return(NULL);
1421
1422 RT_LOCK(rt);
1423 if (rt->rt_llinfo) {
1424 struct llinfo_nd6 *ln = rt->rt_llinfo;
1425 ln->ln_state = ND6_LLINFO_NOSTATE;
1426 }
1427 } else {
1428 return(NULL);
1429 }
1430 }
1431 RT_LOCK_ASSERT_HELD(rt);
1432 /*
1433 * Validation for the entry.
1434 * Note that the check for rt_llinfo is necessary because a cloned
1435 * route from a parent route that has the L flag (e.g. the default
1436 * route to a p2p interface) may have the flag, too, while the
1437 * destination is not actually a neighbor.
1438 * XXX: we can't use rt->rt_ifp to check for the interface, since
1439 * it might be the loopback interface if the entry is for our
1440 * own address on a non-loopback interface. Instead, we should
1441 * use rt->rt_ifa->ifa_ifp, which would specify the REAL
1442 * interface.
1443 * Note also that ifa_ifp and ifp may differ when we connect two
1444 * interfaces to the same link, install a link prefix to an interface,
1445 * and try to install a neighbor cache on an interface that does not
1446 * have a route to the prefix.
1447 *
1448 * If the address is from a proxied prefix, the ifa_ifp and ifp might
1449 * not match, because nd6_na_input() could have modified the ifp
1450 * of the route to point to the interface where the NA arrived on,
1451 * hence the test for RTF_PROXY.
1452 */
1453 if ((rt->rt_flags & RTF_GATEWAY) || (rt->rt_flags & RTF_LLINFO) == 0 ||
1454 rt->rt_gateway->sa_family != AF_LINK || rt->rt_llinfo == NULL ||
1455 (ifp && rt->rt_ifa->ifa_ifp != ifp &&
1456 !(rt->rt_flags & RTF_PROXY))) {
1457 RT_REMREF_LOCKED(rt);
1458 RT_UNLOCK(rt);
1459 if (create) {
1460 log(LOG_DEBUG, "%s: failed to lookup %s "
1461 "(if = %s)\n", __func__, ip6_sprintf(addr6),
1462 ifp ? if_name(ifp) : "unspec");
1463 /* xxx more logs... kazu */
1464 }
1465 return(NULL);
1466 }
1467 /*
1468 * Caller needs to release reference and call RT_UNLOCK(rt).
1469 */
1470 return(rt);
1471 }
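/*
 * Hedged usage sketch of the contract described above nd6_lookup(): on
 * success the route is returned locked and referenced, so callers follow
 * the pattern used by nd6_is_addr_neighbor() below:
 *
 *	if ((rt = nd6_lookup(&addr6, 0, ifp, 0)) != NULL) {
 *		RT_LOCK_ASSERT_HELD(rt);
 *		... inspect rt->rt_llinfo, rt->rt_gateway, etc. ...
 *		RT_REMREF_LOCKED(rt);
 *		RT_UNLOCK(rt);
 *	}
 */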
1472
1473 /*
1474 * Test whether a given IPv6 address is a neighbor or not, ignoring
1475 * the actual neighbor cache. The neighbor cache is ignored in order
1476 * to not reenter the routing code from within itself.
1477 */
1478 static int
1479 nd6_is_new_addr_neighbor(
1480 struct sockaddr_in6 *addr,
1481 struct ifnet *ifp)
1482 {
1483 struct nd_prefix *pr;
1484 struct ifaddr *dstaddr;
1485
1486 lck_mtx_assert(nd6_mutex, LCK_MTX_ASSERT_OWNED);
1487
1488 /*
1489 * A link-local address is always a neighbor.
1490 * XXX: a link does not necessarily specify a single interface.
1491 */
1492 if (IN6_IS_ADDR_LINKLOCAL(&addr->sin6_addr)) {
1493 struct sockaddr_in6 sin6_copy;
1494 u_int32_t zone;
1495
1496 /*
1497 * We need sin6_copy since sa6_recoverscope() may modify the
1498 * content (XXX).
1499 */
1500 sin6_copy = *addr;
1501 if (sa6_recoverscope(&sin6_copy, FALSE))
1502 return (0); /* XXX: should be impossible */
1503 if (in6_setscope(&sin6_copy.sin6_addr, ifp, &zone))
1504 return (0);
1505 if (sin6_copy.sin6_scope_id == zone)
1506 return (1);
1507 else
1508 return (0);
1509 }
1510
1511 /*
1512 * If the address matches one of our addresses,
1513 * it should be a neighbor.
1514 * If the address matches one of our on-link prefixes, it should be a
1515 * neighbor.
1516 */
1517 for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) {
1518 NDPR_LOCK(pr);
1519 if (pr->ndpr_ifp != ifp) {
1520 NDPR_UNLOCK(pr);
1521 continue;
1522 }
1523 if (!(pr->ndpr_stateflags & NDPRF_ONLINK)) {
1524 NDPR_UNLOCK(pr);
1525 continue;
1526 }
1527 if (IN6_ARE_MASKED_ADDR_EQUAL(&pr->ndpr_prefix.sin6_addr,
1528 &addr->sin6_addr, &pr->ndpr_mask)) {
1529 NDPR_UNLOCK(pr);
1530 return (1);
1531 }
1532 NDPR_UNLOCK(pr);
1533 }
1534
1535 /*
1536 * If the address is assigned on the node of the other side of
1537 * a p2p interface, the address should be a neighbor.
1538 */
1539 dstaddr = ifa_ifwithdstaddr((struct sockaddr *)addr);
1540 if (dstaddr != NULL) {
1541 if (dstaddr->ifa_ifp == ifp) {
1542 IFA_REMREF(dstaddr);
1543 return (1);
1544 }
1545 IFA_REMREF(dstaddr);
1546 dstaddr = NULL;
1547 }
1548
1549 /*
1550 * If the default router list is empty, all addresses are regarded
1551 * as on-link, and thus, as a neighbor.
1552 * XXX: we restrict the condition to hosts, because routers usually do
1553 * not have the "default router list".
1554 * XXX: this block should eventually be removed (it is disabled when
1555 * Scoped Routing is in effect); treating all destinations as on-link
1556 * in the absence of a router is rather harmful.
1557 */
1558 if (!ip6_doscopedroute && !ip6_forwarding &&
1559 TAILQ_FIRST(&nd_defrouter) == NULL &&
1560 nd6_defifindex == ifp->if_index) {
1561 return (1);
1562 }
1563
1564 return (0);
1565 }
1566
1567
1568 /*
1569 * Detect if a given IPv6 address identifies a neighbor on a given link.
1570 * XXX: should take care of the destination of a p2p link?
1571 */
1572 int
1573 nd6_is_addr_neighbor(struct sockaddr_in6 *addr, struct ifnet *ifp, int rt_locked)
1574 {
1575 struct rtentry *rt;
1576
1577 lck_mtx_assert(nd6_mutex, LCK_MTX_ASSERT_NOTOWNED);
1578 lck_mtx_lock(nd6_mutex);
1579 if (nd6_is_new_addr_neighbor(addr, ifp)) {
1580 lck_mtx_unlock(nd6_mutex);
1581 return (1);
1582 }
1583 lck_mtx_unlock(nd6_mutex);
1584
1585 /*
1586 * Even if the address matches none of our addresses, it might be
1587 * in the neighbor cache.
1588 */
1589 if ((rt = nd6_lookup(&addr->sin6_addr, 0, ifp, rt_locked)) != NULL) {
1590 RT_LOCK_ASSERT_HELD(rt);
1591 RT_REMREF_LOCKED(rt);
1592 RT_UNLOCK(rt);
1593 return (1);
1594 }
1595
1596 return (0);
1597 }
1598
1599 /*
1600 * Free an nd6 llinfo entry.
1601 * Since the function would cause significant changes in the kernel, DO NOT
1602 * make it global, unless you have a strong reason for the change, and are sure
1603 * that the change is safe.
1604 */
1605 void
1606 nd6_free(
1607 struct rtentry *rt)
1608 {
1609 struct llinfo_nd6 *ln;
1610 struct in6_addr in6;
1611 struct nd_defrouter *dr;
1612
1613 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
1614 RT_LOCK_ASSERT_NOTHELD(rt);
1615 lck_mtx_lock(nd6_mutex);
1616
1617 RT_LOCK(rt);
1618 RT_ADDREF_LOCKED(rt); /* Extra ref */
1619 ln = rt->rt_llinfo;
1620 in6 = ((struct sockaddr_in6 *)(void *)rt_key(rt))->sin6_addr;
1621
1622 /*
1623 * Prevent another thread from modifying rt_key, rt_gateway
1624 * via rt_setgate() after the rt_lock is dropped by marking
1625 * the route as defunct.
1626 */
1627 rt->rt_flags |= RTF_CONDEMNED;
1628
1629 /*
1630 * We used to have pfctlinput(PRC_HOSTDEAD) here. Even though it is
1631 * not harmful, it was not really necessary. Perform default router
1632 * selection even when we are a router, if Scoped Routing is enabled.
1633 */
1634 if (ip6_doscopedroute || !ip6_forwarding) {
1635 dr = defrouter_lookup(&((struct sockaddr_in6 *)(void *)
1636 rt_key(rt))->sin6_addr, rt->rt_ifp);
1637
1638 if ((ln && ln->ln_router) || dr) {
1639 /*
1640 * rt6_flush must be called whether or not the neighbor
1641 * is in the Default Router List.
1642 * See a corresponding comment in nd6_na_input().
1643 */
1644 RT_UNLOCK(rt);
1645 lck_mtx_unlock(nd6_mutex);
1646 rt6_flush(&in6, rt->rt_ifp);
1647 lck_mtx_lock(nd6_mutex);
1648 } else {
1649 RT_UNLOCK(rt);
1650 }
1651
1652 if (dr) {
1653 NDDR_REMREF(dr);
1654 /*
1655 * Unreachability of a router might affect the default
1656 * router selection and on-link detection of advertised
1657 * prefixes.
1658 */
1659
1660 /*
1661 * Temporarily fake the state to choose a new default
1662 * router and to perform on-link determination of
1663 * prefixes correctly.
1664 * Below the state will be set correctly,
1665 * or the entry itself will be deleted.
1666 */
1667 RT_LOCK_SPIN(rt);
1668 ln->ln_state = ND6_LLINFO_INCOMPLETE;
1669
1670 /*
1671 * Since defrouter_select() does not affect the
1672 * on-link determination and MIP6 needs the check
1673 * before the default router selection, we perform
1674 * the check now.
1675 */
1676 RT_UNLOCK(rt);
1677 pfxlist_onlink_check();
1678
1679 /*
1680 * refresh default router list
1681 */
1682 defrouter_select(rt->rt_ifp);
1683 }
1684 RT_LOCK_ASSERT_NOTHELD(rt);
1685 } else {
1686 RT_UNLOCK(rt);
1687 }
1688
1689 lck_mtx_unlock(nd6_mutex);
1690 /*
1691 * Detach the route from the routing tree and the list of neighbor
1692 * caches, and disable the route entry not to be used in already
1693 * cached routes.
1694 */
1695 (void) rtrequest(RTM_DELETE, rt_key(rt), (struct sockaddr *)0,
1696 rt_mask(rt), 0, (struct rtentry **)0);
1697
1698 /* Extra ref held above; now free it */
1699 rtfree(rt);
1700 }
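/*
 * In short, nd6_free() above (1) marks the route RTF_CONDEMNED so that no
 * other thread can change rt_key/rt_gateway once rt_lock is dropped,
 * (2) if the neighbor was acting as a router (ln_router set or present in
 * the default router list), flushes routes via rt6_flush(), and if it was
 * a default router, re-runs pfxlist_onlink_check() and defrouter_select(),
 * and (3) deletes the cached route with RTM_DELETE and drops the extra
 * reference taken at entry.
 */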
1701
1702 /*
1703 * Upper-layer reachability hint for Neighbor Unreachability Detection.
1704 *
1705 * XXX cost-effective methods?
1706 */
1707 void
1708 nd6_nud_hint(
1709 struct rtentry *rt,
1710 struct in6_addr *dst6,
1711 int force)
1712 {
1713 struct llinfo_nd6 *ln;
1714 struct timeval timenow;
1715
1716 getmicrotime(&timenow);
1717
1718 /*
1719 * If the caller specified "rt", use that. Otherwise, resolve the
1720 * routing table by supplied "dst6".
1721 */
1722 if (!rt) {
1723 if (!dst6)
1724 return;
1725 /* Callee returns a locked route upon success */
1726 if ((rt = nd6_lookup(dst6, 0, NULL, 0)) == NULL)
1727 return;
1728 RT_LOCK_ASSERT_HELD(rt);
1729 } else {
1730 RT_LOCK(rt);
1731 RT_ADDREF_LOCKED(rt);
1732 }
1733
1734 if ((rt->rt_flags & RTF_GATEWAY) != 0 ||
1735 (rt->rt_flags & RTF_LLINFO) == 0 ||
1736 !rt->rt_llinfo || !rt->rt_gateway ||
1737 rt->rt_gateway->sa_family != AF_LINK) {
1738 /* This is not a host route. */
1739 goto done;
1740 }
1741
1742 ln = rt->rt_llinfo;
1743 if (ln->ln_state < ND6_LLINFO_REACHABLE)
1744 goto done;
1745
1746 /*
1747 * if we get upper-layer reachability confirmation many times,
1748 * it is possible we have false information.
1749 */
1750 if (!force) {
1751 ln->ln_byhint++;
1752 if (ln->ln_byhint > nd6_maxnudhint)
1753 goto done;
1754 }
1755
1756 ln->ln_state = ND6_LLINFO_REACHABLE;
1757 if (ln->ln_expire) {
1758 struct nd_ifinfo *ndi;
1759
1760 lck_rw_lock_shared(nd_if_rwlock);
1761 ndi = ND_IFINFO(rt->rt_ifp);
1762 VERIFY(ndi != NULL && ndi->initialized);
1763 lck_mtx_lock(&ndi->lock);
1764 ln->ln_expire = timenow.tv_sec + ndi->reachable;
1765 lck_mtx_unlock(&ndi->lock);
1766 lck_rw_done(nd_if_rwlock);
1767 }
1768 done:
1769 RT_REMREF_LOCKED(rt);
1770 RT_UNLOCK(rt);
1771 }
1772
1773 void
1774 nd6_rtrequest(
1775 int req,
1776 struct rtentry *rt,
1777 __unused struct sockaddr *sa)
1778 {
1779 struct sockaddr *gate = rt->rt_gateway;
1780 struct llinfo_nd6 *ln = rt->rt_llinfo;
1781 static struct sockaddr_dl null_sdl = {sizeof(null_sdl), AF_LINK, 0, 0, 0, 0, 0,
1782 {0,0,0,0,0,0,0,0,0,0,0,0,} };
1783 struct ifnet *ifp = rt->rt_ifp;
1784 struct ifaddr *ifa;
1785 struct timeval timenow;
1786
1787 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
1788 RT_LOCK_ASSERT_HELD(rt);
1789
1790 if ((rt->rt_flags & RTF_GATEWAY))
1791 return;
1792
1793 if (nd6_need_cache(ifp) == 0 && (rt->rt_flags & RTF_HOST) == 0) {
1794 /*
1795 * This is probably an interface direct route for a link
1796 * which does not need neighbor caches (e.g. fe80::%lo0/64).
1797 * We do not need special treatment below for such a route.
1798 * Moreover, the RTF_LLINFO flag which would be set below
1799 * would annoy the ndp(8) command.
1800 */
1801 return;
1802 }
1803
1804 if (req == RTM_RESOLVE) {
1805 int no_nd_cache;
1806
1807 if (!nd6_need_cache(ifp)) { /* stf case */
1808 no_nd_cache = 1;
1809 } else {
1810 struct sockaddr_in6 sin6;
1811
1812 rtkey_to_sa6(rt, &sin6);
1813 /*
1814 * nd6_is_addr_neighbor() may call nd6_lookup(),
1815 * therefore we drop rt_lock to avoid deadlock
1816 * during the lookup.
1817 */
1818 RT_ADDREF_LOCKED(rt);
1819 RT_UNLOCK(rt);
1820 no_nd_cache = !nd6_is_addr_neighbor(&sin6, ifp, 1);
1821 RT_LOCK(rt);
1822 RT_REMREF_LOCKED(rt);
1823 }
1824
1825 /*
1826 * FreeBSD and BSD/OS often make a cloned host route based
1827 * on a less-specific route (e.g. the default route).
1828 * If the less specific route does not have a "gateway"
1829 * (this is the case when the route just goes to a p2p or an
1830 * stf interface), we'll mistakenly make a neighbor cache for
1831 * the host route, and will see strange neighbor solicitation
1832 * for the corresponding destination. In order to avoid the
1833 * confusion, we check if the destination of the route is
1834 * a neighbor in terms of neighbor discovery, and stop the
1835 * process if not. Additionally, we remove the LLINFO flag
1836 * so that ndp(8) will not try to get the neighbor information
1837 * of the destination.
1838 */
1839 if (no_nd_cache) {
1840 rt->rt_flags &= ~RTF_LLINFO;
1841 return;
1842 }
1843 }
1844
1845 getmicrotime(&timenow);
1846 switch (req) {
1847 case RTM_ADD:
1848 /*
1849 * There is no backward compatibility :)
1850 *
1851 * if ((rt->rt_flags & RTF_HOST) == 0 &&
1852 * SIN(rt_mask(rt))->sin_addr.s_addr != 0xffffffff)
1853 * rt->rt_flags |= RTF_CLONING;
1854 */
1855 if ((rt->rt_flags & RTF_CLONING) ||
1856 ((rt->rt_flags & RTF_LLINFO) && ln == NULL)) {
1857 /*
1858 * Case 1: This route should come from a route to
1859 * interface (RTF_CLONING case) or the route should be
1860 * treated as on-link but is currently not
1861 * (RTF_LLINFO && ln == NULL case).
1862 */
1863 if (rt_setgate(rt, rt_key(rt),
1864 (struct sockaddr *)&null_sdl) == 0) {
1865 gate = rt->rt_gateway;
1866 SDL(gate)->sdl_type = ifp->if_type;
1867 SDL(gate)->sdl_index = ifp->if_index;
1868 /*
1869 * In case we're called before 1.0 sec.
1870 * has elapsed.
1871 */
1872 if (ln != NULL)
1873 ln->ln_expire =
1874 (ifp->if_eflags & IFEF_IPV6_ND6ALT)
1875 ? 0 : MAX(timenow.tv_sec, 1);
1876 }
1877 if ((rt->rt_flags & RTF_CLONING))
1878 break;
1879 }
1880 /*
1881 * In IPv4 code, we try to announce a new RTF_ANNOUNCE entry here.
1882 * We don't do that here since llinfo is not ready yet.
1883 *
1884 * There are also a couple of other things to be discussed:
1885 * - unsolicited NA code needs improvement beforehand
1886 * - RFC2461 says we MAY send multicast unsolicited NA
1887 * (7.2.6 paragraph 4), however, it also says that we
1888 * SHOULD provide a mechanism to prevent multicast NA storm.
1889 * we don't have anything like it right now.
1890 * note that the mechanism needs a mutual agreement
1891 * between proxies, which means that we need to implement
1892 * a new protocol, or a new kludge.
1893 * - from RFC2461 6.2.4, host MUST NOT send an unsolicited NA.
1894 * we need to check ip6forwarding before sending it.
1895 * (or should we allow proxy ND configuration only for
1896 * routers? there's no mention about proxy ND from hosts)
1897 */
1898 /* FALLTHROUGH */
1899 case RTM_RESOLVE:
1900 if ((ifp->if_flags & (IFF_POINTOPOINT | IFF_LOOPBACK)) == 0) {
1901 /*
1902 * Address resolution isn't necessary for a point to
1903 * point link, so we can skip this test for a p2p link.
1904 */
1905 if (gate->sa_family != AF_LINK ||
1906 gate->sa_len < sizeof(null_sdl)) {
1907 /* Don't complain in case of RTM_ADD */
1908 if (req == RTM_RESOLVE) {
1909 log(LOG_DEBUG,
1910 "nd6_rtrequest: bad gateway "
1911 "value: %s\n", if_name(ifp));
1912 }
1913 break;
1914 }
1915 SDL(gate)->sdl_type = ifp->if_type;
1916 SDL(gate)->sdl_index = ifp->if_index;
1917 }
1918 if (ln != NULL)
1919 break; /* This happens on a route change */
1920 /*
1921 * Case 2: This route may come from cloning, or a manual route
1922 * add with a LL address.
1923 */
1924 rt->rt_llinfo = ln = nd6_llinfo_alloc();
1925 if (ln == NULL) {
1926 log(LOG_DEBUG, "nd6_rtrequest: malloc failed\n");
1927 break;
1928 }
1929 rt->rt_llinfo_get_ri = nd6_llinfo_get_ri;
1930 rt->rt_llinfo_get_iflri = nd6_llinfo_get_iflri;
1931 rt->rt_llinfo_purge = nd6_llinfo_purge;
1932 rt->rt_llinfo_free = nd6_llinfo_free;
1933
1934 nd6_inuse++;
1935 nd6_allocated++;
1936 Bzero(ln, sizeof(*ln));
1937 ln->ln_rt = rt;
1938 /* this is required for "ndp" command. - shin */
1939 if (req == RTM_ADD) {
1940 /*
1941 * gate should have some valid AF_LINK entry,
1942 * and ln->ln_expire should have some lifetime
1943 * which is specified by ndp command.
1944 */
1945 ln->ln_state = ND6_LLINFO_REACHABLE;
1946 ln->ln_byhint = 0;
1947 } else {
1948 /*
1949 * When req == RTM_RESOLVE, rt is created and
1950 * initialized in rtrequest(), so rt_expire is 0.
1951 */
1952 ln->ln_state = ND6_LLINFO_NOSTATE;
1953
1954 /* In case we're called before 1.0 sec. has elapsed */
1955 ln->ln_expire = (ifp->if_eflags & IFEF_IPV6_ND6ALT)
1956 ? 0 : MAX(timenow.tv_sec, 1);
1957 }
1958 rt->rt_flags |= RTF_LLINFO;
1959 LN_INSERTHEAD(ln);
1960
1961 /*
1962 * If we have too many cache entries, initiate immediate
1963 * purging for some "less recently used" entries. Note that
1964 * we cannot directly call nd6_free() here because it would
1965 * cause re-entering rtable related routines triggering an LOR
1966 * problem.
1967 */
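/*
 * Instead, at most ten of the least recently used entries are
 * demoted here (to STALE or PURGE with an immediate expiration),
 * deferring the actual reclamation to the periodic ND timer.
 */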
1968 if (ip6_neighborgcthresh >= 0 &&
1969 nd6_inuse >= ip6_neighborgcthresh) {
1970 int i;
1971
1972 for (i = 0; i < 10 && llinfo_nd6.ln_prev != ln; i++) {
1973 struct llinfo_nd6 *ln_end = llinfo_nd6.ln_prev;
1974 struct rtentry *rt_end = ln_end->ln_rt;
1975
1976 /* Move this entry to the head */
1977 RT_LOCK(rt_end);
1978 LN_DEQUEUE(ln_end);
1979 LN_INSERTHEAD(ln_end);
1980
1981 if (ln_end->ln_expire == 0) {
1982 RT_UNLOCK(rt_end);
1983 continue;
1984 }
1985 if (ln_end->ln_state > ND6_LLINFO_INCOMPLETE)
1986 ln_end->ln_state = ND6_LLINFO_STALE;
1987 else
1988 ln_end->ln_state = ND6_LLINFO_PURGE;
1989 ln_end->ln_expire = timenow.tv_sec;
1990 RT_UNLOCK(rt_end);
1991 }
1992 }
1993
1994 /*
1995 * Check if rt_key(rt) is one of my addresses assigned
1996 * to the interface.
1997 */
1998 ifa = (struct ifaddr *)in6ifa_ifpwithaddr(rt->rt_ifp,
1999 &SIN6(rt_key(rt))->sin6_addr);
2000 if (ifa) {
2001 caddr_t macp = nd6_ifptomac(ifp);
2002 ln->ln_expire = 0;
2003 ln->ln_state = ND6_LLINFO_REACHABLE;
2004 ln->ln_byhint = 0;
2005 if (macp) {
2006 Bcopy(macp, LLADDR(SDL(gate)), ifp->if_addrlen);
2007 SDL(gate)->sdl_alen = ifp->if_addrlen;
2008 }
2009 if (nd6_useloopback) {
2010 if (rt->rt_ifp != lo_ifp) {
2011 /*
2012 * Purge any link-layer info caching.
2013 */
2014 if (rt->rt_llinfo_purge != NULL)
2015 rt->rt_llinfo_purge(rt);
2016
2017 /*
2018 * Adjust route ref count for the
2019 * interfaces.
2020 */
2021 if (rt->rt_if_ref_fn != NULL) {
2022 rt->rt_if_ref_fn(lo_ifp, 1);
2023 rt->rt_if_ref_fn(rt->rt_ifp, -1);
2024 }
2025 }
2026 rt->rt_ifp = lo_ifp; /* XXX */
2027 /*
2028 * Make sure rt_ifa is equal to the ifaddr
2029 * corresponding to the address.
2030 * We need this because when we refer to
2031 * rt_ifa->ia6_flags in ip6_input, we assume
2032 * that rt_ifa points to the address instead
2033 * of the loopback address.
2034 */
2035 if (ifa != rt->rt_ifa) {
2036 rtsetifa(rt, ifa);
2037 }
2038 }
2039 IFA_REMREF(ifa);
2040 } else if (rt->rt_flags & RTF_ANNOUNCE) {
2041 ln->ln_expire = 0;
2042 ln->ln_state = ND6_LLINFO_REACHABLE;
2043 ln->ln_byhint = 0;
2044
2045 /* join solicited node multicast for proxy ND */
2046 if (ifp->if_flags & IFF_MULTICAST) {
2047 struct in6_addr llsol;
2048 struct in6_multi *in6m;
2049 int error;
2050
2051 llsol = SIN6(rt_key(rt))->sin6_addr;
2052 llsol.s6_addr32[0] = IPV6_ADDR_INT32_MLL;
2053 llsol.s6_addr32[1] = 0;
2054 llsol.s6_addr32[2] = htonl(1);
2055 llsol.s6_addr8[12] = 0xff;
2056 if (in6_setscope(&llsol, ifp, NULL))
2057 break;
2058 error = in6_mc_join(ifp, &llsol, NULL, &in6m, 0);
2059 if (error) {
2060 nd6log((LOG_ERR, "%s: failed to join "
2061 "%s (errno=%d)\n", if_name(ifp),
2062 ip6_sprintf(&llsol), error));
2063 } else {
2064 IN6M_REMREF(in6m);
2065 }
2066 }
2067 }
2068 break;
2069
2070 case RTM_DELETE:
2071 if (ln == NULL)
2072 break;
2073 /* leave from solicited node multicast for proxy ND */
2074 if ((rt->rt_flags & RTF_ANNOUNCE) != 0 &&
2075 (ifp->if_flags & IFF_MULTICAST) != 0) {
2076 struct in6_addr llsol;
2077 struct in6_multi *in6m;
2078
2079 llsol = SIN6(rt_key(rt))->sin6_addr;
2080 llsol.s6_addr32[0] = IPV6_ADDR_INT32_MLL;
2081 llsol.s6_addr32[1] = 0;
2082 llsol.s6_addr32[2] = htonl(1);
2083 llsol.s6_addr8[12] = 0xff;
2084 if (in6_setscope(&llsol, ifp, NULL) == 0) {
2085 in6_multihead_lock_shared();
2086 IN6_LOOKUP_MULTI(&llsol, ifp, in6m);
2087 in6_multihead_lock_done();
2088 if (in6m != NULL) {
2089 in6_mc_leave(in6m, NULL);
2090 IN6M_REMREF(in6m);
2091 }
2092 }
2093 }
2094 nd6_inuse--;
2095 /*
2096 * Unchain it but defer the actual freeing until the route
2097 * itself is to be freed. rt->rt_llinfo still points to
2098 * llinfo_nd6, and likewise, ln->ln_rt still points to this
2099 * route entry, except that RTF_LLINFO is now cleared.
2100 */
2101 if (ln->ln_flags & ND6_LNF_IN_USE)
2102 LN_DEQUEUE(ln);
2103
2104 /*
2105 * Purge any link-layer info caching.
2106 */
2107 if (rt->rt_llinfo_purge != NULL)
2108 rt->rt_llinfo_purge(rt);
2109
2110 rt->rt_flags &= ~RTF_LLINFO;
2111 if (ln->ln_hold != NULL) {
2112 m_freem(ln->ln_hold);
2113 ln->ln_hold = NULL;
2114 }
2115 }
2116 }
2117
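/*
 * SIOCGDRLST_IN6 handler: copy the kernel default router list into
 * the caller-supplied in6_drlist structure, using the 32-bit or
 * 64-bit layout as requested. Called with nd6_mutex held.
 */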
2118 static int
2119 nd6_siocgdrlst(void *data, int data_is_64)
2120 {
2121 struct in6_drlist_32 *drl_32;
2122 struct nd_defrouter *dr;
2123 int i = 0;
2124
2125 lck_mtx_assert(nd6_mutex, LCK_MTX_ASSERT_OWNED);
2126
2127 dr = TAILQ_FIRST(&nd_defrouter);
2128
2129 /* For 64-bit process */
2130 if (data_is_64) {
2131 struct in6_drlist_64 *drl_64;
2132
2133 drl_64 = _MALLOC(sizeof (*drl_64), M_TEMP, M_WAITOK|M_ZERO);
2134 if (drl_64 == NULL)
2135 return (ENOMEM);
2136
2137 /* preserve the interface name */
2138 bcopy(data, drl_64, sizeof (drl_64->ifname));
2139
2140 while (dr && i < DRLSTSIZ) {
2141 drl_64->defrouter[i].rtaddr = dr->rtaddr;
2142 if (IN6_IS_ADDR_LINKLOCAL(&drl_64->defrouter[i].rtaddr)) {
2143 /* XXX: need this hack for the KAME stack */
2144 drl_64->defrouter[i].rtaddr.s6_addr16[1] = 0;
2145 } else {
2146 log(LOG_ERR,
2147 "default router list contains a "
2148 "non-linklocal address(%s)\n",
2149 ip6_sprintf(&drl_64->defrouter[i].rtaddr));
2150 }
2151 drl_64->defrouter[i].flags = dr->flags;
2152 drl_64->defrouter[i].rtlifetime = dr->rtlifetime;
2153 drl_64->defrouter[i].expire = dr->expire;
2154 drl_64->defrouter[i].if_index = dr->ifp->if_index;
2155 i++;
2156 dr = TAILQ_NEXT(dr, dr_entry);
2157 }
2158 bcopy(drl_64, data, sizeof (*drl_64));
2159 _FREE(drl_64, M_TEMP);
2160 return (0);
2161 }
2162
2163 /* For 32-bit process */
2164 drl_32 = _MALLOC(sizeof (*drl_32), M_TEMP, M_WAITOK|M_ZERO);
2165 if (drl_32 == NULL)
2166 return (ENOMEM);
2167
2168 /* preserve the interface name */
2169 bcopy(data, drl_32, sizeof (drl_32->ifname));
2170
2171 while (dr && i < DRLSTSIZ) {
2172 drl_32->defrouter[i].rtaddr = dr->rtaddr;
2173 if (IN6_IS_ADDR_LINKLOCAL(&drl_32->defrouter[i].rtaddr)) {
2174 /* XXX: need this hack for the KAME stack */
2175 drl_32->defrouter[i].rtaddr.s6_addr16[1] = 0;
2176 } else {
2177 log(LOG_ERR,
2178 "default router list contains a "
2179 "non-linklocal address(%s)\n",
2180 ip6_sprintf(&drl_32->defrouter[i].rtaddr));
2181 }
2182 drl_32->defrouter[i].flags = dr->flags;
2183 drl_32->defrouter[i].rtlifetime = dr->rtlifetime;
2184 drl_32->defrouter[i].expire = dr->expire;
2185 drl_32->defrouter[i].if_index = dr->ifp->if_index;
2186 i++;
2187 dr = TAILQ_NEXT(dr, dr_entry);
2188 }
2189 bcopy(drl_32, data, sizeof (*drl_32));
2190 _FREE(drl_32, M_TEMP);
2191 return (0);
2192 }
2193
2194 /*
2195 * XXX The meaning of the fields, especially "raflags", is very
2196 * different between the RA prefix list and the RR/static prefix list.
2197 * How about separating the ioctls into two?
2198 */
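/*
 * SIOCGPRLST_IN6 handler: copy the kernel prefix list, including the
 * advertising routers of each prefix, into the caller-supplied
 * in6_prlist structure (32-bit or 64-bit layout).
 */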
2199 static int
2200 nd6_siocgprlst(void *data, int data_is_64)
2201 {
2202 struct in6_prlist_32 *prl_32;
2203 struct nd_prefix *pr;
2204 int i = 0;
2205
2206 lck_mtx_assert(nd6_mutex, LCK_MTX_ASSERT_OWNED);
2207
2208 pr = nd_prefix.lh_first;
2209
2210 /* For 64-bit process */
2211 if (data_is_64) {
2212 struct in6_prlist_64 *prl_64;
2213
2214 prl_64 = _MALLOC(sizeof (*prl_64), M_TEMP, M_WAITOK|M_ZERO);
2215 if (prl_64 == NULL)
2216 return (ENOMEM);
2217
2218 /* preserve the interface name */
2219 bcopy(data, prl_64, sizeof (prl_64->ifname));
2220
2221 while (pr && i < PRLSTSIZ) {
2222 struct nd_pfxrouter *pfr;
2223 int j;
2224
2225 NDPR_LOCK(pr);
2226 (void) in6_embedscope(&prl_64->prefix[i].prefix,
2227 &pr->ndpr_prefix, NULL, NULL, NULL);
2228 prl_64->prefix[i].raflags = pr->ndpr_raf;
2229 prl_64->prefix[i].prefixlen = pr->ndpr_plen;
2230 prl_64->prefix[i].vltime = pr->ndpr_vltime;
2231 prl_64->prefix[i].pltime = pr->ndpr_pltime;
2232 prl_64->prefix[i].if_index = pr->ndpr_ifp->if_index;
2233 prl_64->prefix[i].expire = pr->ndpr_expire;
2234
2235 pfr = pr->ndpr_advrtrs.lh_first;
2236 j = 0;
2237 while (pfr) {
2238 if (j < DRLSTSIZ) {
2239 #define RTRADDR prl_64->prefix[i].advrtr[j]
2240 RTRADDR = pfr->router->rtaddr;
2241 if (IN6_IS_ADDR_LINKLOCAL(&RTRADDR)) {
2242 /* XXX: hack for KAME */
2243 RTRADDR.s6_addr16[1] = 0;
2244 } else {
2245 log(LOG_ERR,
2246 "a router(%s) advertises "
2247 "a prefix with "
2248 "non-link local address\n",
2249 ip6_sprintf(&RTRADDR));
2250 }
2251 #undef RTRADDR
2252 }
2253 j++;
2254 pfr = pfr->pfr_next;
2255 }
2256 prl_64->prefix[i].advrtrs = j;
2257 prl_64->prefix[i].origin = PR_ORIG_RA;
2258 NDPR_UNLOCK(pr);
2259
2260 i++;
2261 pr = pr->ndpr_next;
2262 }
2263 bcopy(prl_64, data, sizeof (*prl_64));
2264 _FREE(prl_64, M_TEMP);
2265 return (0);
2266 }
2267
2268 /* For 32-bit process */
2269 prl_32 = _MALLOC(sizeof (*prl_32), M_TEMP, M_WAITOK|M_ZERO);
2270 if (prl_32 == NULL)
2271 return (ENOMEM);
2272
2273 /* preserve the interface name */
2274 bcopy(data, prl_32, sizeof (prl_32->ifname));
2275
2276 while (pr && i < PRLSTSIZ) {
2277 struct nd_pfxrouter *pfr;
2278 int j;
2279
2280 NDPR_LOCK(pr);
2281 (void) in6_embedscope(&prl_32->prefix[i].prefix,
2282 &pr->ndpr_prefix, NULL, NULL, NULL);
2283 prl_32->prefix[i].raflags = pr->ndpr_raf;
2284 prl_32->prefix[i].prefixlen = pr->ndpr_plen;
2285 prl_32->prefix[i].vltime = pr->ndpr_vltime;
2286 prl_32->prefix[i].pltime = pr->ndpr_pltime;
2287 prl_32->prefix[i].if_index = pr->ndpr_ifp->if_index;
2288 prl_32->prefix[i].expire = pr->ndpr_expire;
2289
2290 pfr = pr->ndpr_advrtrs.lh_first;
2291 j = 0;
2292 while (pfr) {
2293 if (j < DRLSTSIZ) {
2294 #define RTRADDR prl_32->prefix[i].advrtr[j]
2295 RTRADDR = pfr->router->rtaddr;
2296 if (IN6_IS_ADDR_LINKLOCAL(&RTRADDR)) {
2297 /* XXX: hack for KAME */
2298 RTRADDR.s6_addr16[1] = 0;
2299 } else {
2300 log(LOG_ERR,
2301 "a router(%s) advertises "
2302 "a prefix with "
2303 "non-link local address\n",
2304 ip6_sprintf(&RTRADDR));
2305 }
2306 #undef RTRADDR
2307 }
2308 j++;
2309 pfr = pfr->pfr_next;
2310 }
2311 prl_32->prefix[i].advrtrs = j;
2312 prl_32->prefix[i].origin = PR_ORIG_RA;
2313 NDPR_UNLOCK(pr);
2314
2315 i++;
2316 pr = pr->ndpr_next;
2317 }
2318 bcopy(prl_32, data, sizeof (*prl_32));
2319 _FREE(prl_32, M_TEMP);
2320 return (0);
2321 }
2322
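/*
 * Handle ND6-related ioctls issued on an interface (e.g. by ndp(8)):
 * router/prefix list retrieval, per-interface ND parameters, flushing
 * of default routers and prefixes, neighbor cache queries, and the
 * default interface index.
 */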
2323 int
2324 nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp)
2325 {
2326 struct nd_defrouter *dr;
2327 struct nd_prefix *pr;
2328 struct rtentry *rt;
2329 int i = ifp->if_index, error = 0;
2330
2331 switch (cmd) {
2332 case SIOCGDRLST_IN6_32: /* struct in6_drlist_32 */
2333 case SIOCGDRLST_IN6_64: /* struct in6_drlist_64 */
2334 /*
2335 * obsolete API, use sysctl under net.inet6.icmp6
2336 */
2337 lck_mtx_lock(nd6_mutex);
2338 error = nd6_siocgdrlst(data, cmd == SIOCGDRLST_IN6_64);
2339 lck_mtx_unlock(nd6_mutex);
2340 break;
2341
2342 case SIOCGPRLST_IN6_32: /* struct in6_prlist_32 */
2343 case SIOCGPRLST_IN6_64: /* struct in6_prlist_64 */
2344 /*
2345 * obsolete API, use sysctl under net.inet6.icmp6
2346 */
2347 lck_mtx_lock(nd6_mutex);
2348 error = nd6_siocgprlst(data, cmd == SIOCGPRLST_IN6_64);
2349 lck_mtx_unlock(nd6_mutex);
2350 break;
2351
2352 case OSIOCGIFINFO_IN6: /* struct in6_ondireq */
2353 case SIOCGIFINFO_IN6: { /* struct in6_ondireq */
2354 u_int32_t linkmtu;
2355 struct in6_ondireq *ondi = (struct in6_ondireq *)(void *)data;
2356 struct nd_ifinfo *ndi;
2357 /*
2358 * SIOCGIFINFO_IN6 ioctl is encoded with in6_ondireq
2359 * instead of in6_ndireq, so we treat it as such.
2360 */
2361 lck_rw_lock_shared(nd_if_rwlock);
2362 ndi = ND_IFINFO(ifp);
2363 if (!nd_ifinfo || i >= nd_ifinfo_indexlim ||
2364 !ndi->initialized) {
2365 lck_rw_done(nd_if_rwlock);
2366 error = EINVAL;
2367 break;
2368 }
2369 lck_mtx_lock(&ndi->lock);
2370 linkmtu = IN6_LINKMTU(ifp);
2371 bcopy(&linkmtu, &ondi->ndi.linkmtu, sizeof (linkmtu));
2372 bcopy(&nd_ifinfo[i].maxmtu, &ondi->ndi.maxmtu,
2373 sizeof (u_int32_t));
2374 bcopy(&nd_ifinfo[i].basereachable, &ondi->ndi.basereachable,
2375 sizeof (u_int32_t));
2376 bcopy(&nd_ifinfo[i].reachable, &ondi->ndi.reachable,
2377 sizeof (u_int32_t));
2378 bcopy(&nd_ifinfo[i].retrans, &ondi->ndi.retrans,
2379 sizeof (u_int32_t));
2380 bcopy(&nd_ifinfo[i].flags, &ondi->ndi.flags,
2381 sizeof (u_int32_t));
2382 bcopy(&nd_ifinfo[i].recalctm, &ondi->ndi.recalctm,
2383 sizeof (int));
2384 ondi->ndi.chlim = nd_ifinfo[i].chlim;
2385 ondi->ndi.receivedra = 0;
2386 lck_mtx_unlock(&ndi->lock);
2387 lck_rw_done(nd_if_rwlock);
2388 break;
2389 }
2390
2391 case SIOCSIFINFO_FLAGS: { /* struct in6_ndireq */
2392 struct in6_ndireq *cndi = (struct in6_ndireq *)(void *)data;
2393 u_int32_t oflags, flags;
2394 struct nd_ifinfo *ndi;
2395
2396 /* XXX: almost all other fields of cndi->ndi are unused */
2397 lck_rw_lock_shared(nd_if_rwlock);
2398 ndi = ND_IFINFO(ifp);
2399 if (!nd_ifinfo || i >= nd_ifinfo_indexlim ||
2400 !ndi->initialized) {
2401 lck_rw_done(nd_if_rwlock);
2402 error = EINVAL;
2403 break;
2404 }
2405 lck_mtx_lock(&ndi->lock);
2406 oflags = nd_ifinfo[i].flags;
2407 bcopy(&cndi->ndi.flags, &nd_ifinfo[i].flags, sizeof (flags));
2408 flags = nd_ifinfo[i].flags;
2409 lck_mtx_unlock(&ndi->lock);
2410 lck_rw_done(nd_if_rwlock);
2411
2412 if (oflags == flags)
2413 break;
2414
2415 error = nd6_setifinfo(ifp, oflags, flags);
2416 break;
2417 }
2418
2419 case SIOCSNDFLUSH_IN6: /* struct in6_ifreq */
2420 /* flush default router list */
2421 /*
2422 * xxx sumikawa: should not delete the route if the default
2423 * route equals the top of the default router list
2424 */
2425 lck_mtx_lock(nd6_mutex);
2426 defrouter_reset();
2427 defrouter_select(ifp);
2428 lck_mtx_unlock(nd6_mutex);
2429 /* xxx sumikawa: flush prefix list */
2430 break;
2431
2432 case SIOCSPFXFLUSH_IN6: { /* struct in6_ifreq */
2433 /* flush all the prefixes advertised by routers */
2434 struct nd_prefix *next;
2435
2436 lck_mtx_lock(nd6_mutex);
2437 for (pr = nd_prefix.lh_first; pr; pr = next) {
2438 struct in6_ifaddr *ia;
2439
2440 next = pr->ndpr_next;
2441
2442 NDPR_LOCK(pr);
2443 if (IN6_IS_ADDR_LINKLOCAL(&pr->ndpr_prefix.sin6_addr)) {
2444 NDPR_UNLOCK(pr);
2445 continue; /* XXX */
2446 }
2447 if (ifp != lo_ifp && pr->ndpr_ifp != ifp) {
2448 NDPR_UNLOCK(pr);
2449 continue;
2450 }
2451 /* do we really have to remove addresses as well? */
2452 NDPR_ADDREF_LOCKED(pr);
2453 NDPR_UNLOCK(pr);
2454 lck_rw_lock_exclusive(&in6_ifaddr_rwlock);
2455 ia = in6_ifaddrs;
2456 while (ia != NULL) {
2457 IFA_LOCK(&ia->ia_ifa);
2458 if ((ia->ia6_flags & IN6_IFF_AUTOCONF) == 0) {
2459 IFA_UNLOCK(&ia->ia_ifa);
2460 ia = ia->ia_next;
2461 continue;
2462 }
2463
2464 if (ia->ia6_ndpr == pr) {
2465 IFA_ADDREF_LOCKED(&ia->ia_ifa);
2466 IFA_UNLOCK(&ia->ia_ifa);
2467 lck_rw_done(&in6_ifaddr_rwlock);
2468 lck_mtx_unlock(nd6_mutex);
2469 in6_purgeaddr(&ia->ia_ifa);
2470 IFA_REMREF(&ia->ia_ifa);
2471 lck_mtx_lock(nd6_mutex);
2472 lck_rw_lock_exclusive(&in6_ifaddr_rwlock);
2473 /*
2474 * Purging the address caused
2475 * in6_ifaddr_rwlock to be dropped
2476 * and reacquired; therefore search
2477 * again from the beginning of
2478 * in6_ifaddrs. The same applies to
2479 * the prefix list.
2480 */
2481 ia = in6_ifaddrs;
2482 next = nd_prefix.lh_first;
2483 continue;
2484
2485 }
2486 IFA_UNLOCK(&ia->ia_ifa);
2487 ia = ia->ia_next;
2488 }
2489 lck_rw_done(&in6_ifaddr_rwlock);
2490 NDPR_LOCK(pr);
2491 prelist_remove(pr);
2492 NDPR_UNLOCK(pr);
2493 /*
2494 * If we were trying to restart this loop
2495 * above by changing the value of 'next', we might
2496 * end up freeing the only element on the list
2497 * when we call NDPR_REMREF().
2498 * When this happens, we also have to get out of this
2499 * loop because we have nothing else to do.
2500 */
2501 if (pr == next)
2502 next = NULL;
2503 NDPR_REMREF(pr);
2504 }
2505 lck_mtx_unlock(nd6_mutex);
2506 break;
2507 }
2508
2509 case SIOCSRTRFLUSH_IN6: { /* struct in6_ifreq */
2510 /* flush all the default routers */
2511 struct nd_defrouter *next;
2512
2513 lck_mtx_lock(nd6_mutex);
2514 if ((dr = TAILQ_FIRST(&nd_defrouter)) != NULL) {
2515 /*
2516 * The first entry of the list may be stored in
2517 * the routing table, so we'll delete it later.
2518 */
2519 for (dr = TAILQ_NEXT(dr, dr_entry); dr; dr = next) {
2520 next = TAILQ_NEXT(dr, dr_entry);
2521 if (ifp == lo_ifp || dr->ifp == ifp)
2522 defrtrlist_del(dr);
2523 }
2524 if (ifp == lo_ifp ||
2525 TAILQ_FIRST(&nd_defrouter)->ifp == ifp)
2526 defrtrlist_del(TAILQ_FIRST(&nd_defrouter));
2527 }
2528 lck_mtx_unlock(nd6_mutex);
2529 break;
2530 }
2531
2532 case SIOCGNBRINFO_IN6_32: { /* struct in6_nbrinfo_32 */
2533 struct llinfo_nd6 *ln;
2534 struct in6_nbrinfo_32 nbi_32;
2535 struct in6_addr nb_addr; /* make local for safety */
2536
2537 bcopy(data, &nbi_32, sizeof (nbi_32));
2538 nb_addr = nbi_32.addr;
2539 /*
2540 * XXX: KAME specific hack for scoped addresses
2541 * XXXX: what about scopes other than link-local?
2542 */
2543 if (IN6_IS_ADDR_LINKLOCAL(&nbi_32.addr) ||
2544 IN6_IS_ADDR_MC_LINKLOCAL(&nbi_32.addr)) {
2545 u_int16_t *idp =
2546 (u_int16_t *)(void *)&nb_addr.s6_addr[2];
2547
2548 if (*idp == 0)
2549 *idp = htons(ifp->if_index);
2550 }
2551
2552 /* Callee returns a locked route upon success */
2553 if ((rt = nd6_lookup(&nb_addr, 0, ifp, 0)) == NULL) {
2554 error = EINVAL;
2555 break;
2556 }
2557 RT_LOCK_ASSERT_HELD(rt);
2558 ln = rt->rt_llinfo;
2559 nbi_32.state = ln->ln_state;
2560 nbi_32.asked = ln->ln_asked;
2561 nbi_32.isrouter = ln->ln_router;
2562 nbi_32.expire = ln->ln_expire;
2563 RT_REMREF_LOCKED(rt);
2564 RT_UNLOCK(rt);
2565 bcopy(&nbi_32, data, sizeof (nbi_32));
2566 break;
2567 }
2568
2569 case SIOCGNBRINFO_IN6_64: { /* struct in6_nbrinfo_64 */
2570 struct llinfo_nd6 *ln;
2571 struct in6_nbrinfo_64 nbi_64;
2572 struct in6_addr nb_addr; /* make local for safety */
2573
2574 bcopy(data, &nbi_64, sizeof (nbi_64));
2575 nb_addr = nbi_64.addr;
2576 /*
2577 * XXX: KAME specific hack for scoped addresses
2578 * XXXX: what about scopes other than link-local?
2579 */
2580 if (IN6_IS_ADDR_LINKLOCAL(&nbi_64.addr) ||
2581 IN6_IS_ADDR_MC_LINKLOCAL(&nbi_64.addr)) {
2582 u_int16_t *idp =
2583 (u_int16_t *)(void *)&nb_addr.s6_addr[2];
2584
2585 if (*idp == 0)
2586 *idp = htons(ifp->if_index);
2587 }
2588
2589 /* Callee returns a locked route upon success */
2590 if ((rt = nd6_lookup(&nb_addr, 0, ifp, 0)) == NULL) {
2591 error = EINVAL;
2592 break;
2593 }
2594 RT_LOCK_ASSERT_HELD(rt);
2595 ln = rt->rt_llinfo;
2596 nbi_64.state = ln->ln_state;
2597 nbi_64.asked = ln->ln_asked;
2598 nbi_64.isrouter = ln->ln_router;
2599 nbi_64.expire = ln->ln_expire;
2600 RT_REMREF_LOCKED(rt);
2601 RT_UNLOCK(rt);
2602 bcopy(&nbi_64, data, sizeof (nbi_64));
2603 break;
2604 }
2605
2606 case SIOCGDEFIFACE_IN6_32: /* struct in6_ndifreq_32 */
2607 case SIOCGDEFIFACE_IN6_64: { /* struct in6_ndifreq_64 */
2608 struct in6_ndifreq_64 *ndif_64 =
2609 (struct in6_ndifreq_64 *)(void *)data;
2610 struct in6_ndifreq_32 *ndif_32 =
2611 (struct in6_ndifreq_32 *)(void *)data;
2612
2613 if (cmd == SIOCGDEFIFACE_IN6_64) {
2614 u_int64_t j = nd6_defifindex;
2615 bcopy(&j, &ndif_64->ifindex, sizeof (j));
2616 } else {
2617 bcopy(&nd6_defifindex, &ndif_32->ifindex,
2618 sizeof (u_int32_t));
2619 }
2620 break;
2621 }
2622
2623 case SIOCSDEFIFACE_IN6_32: /* struct in6_ndifreq_32 */
2624 case SIOCSDEFIFACE_IN6_64: { /* struct in6_ndifreq_64 */
2625 struct in6_ndifreq_64 *ndif_64 =
2626 (struct in6_ndifreq_64 *)(void *)data;
2627 struct in6_ndifreq_32 *ndif_32 =
2628 (struct in6_ndifreq_32 *)(void *)data;
2629 u_int32_t idx;
2630
2631 if (cmd == SIOCSDEFIFACE_IN6_64) {
2632 u_int64_t j;
2633 bcopy(&ndif_64->ifindex, &j, sizeof (j));
2634 idx = (u_int32_t)j;
2635 } else {
2636 bcopy(&ndif_32->ifindex, &idx, sizeof (idx));
2637 }
2638
2639 error = nd6_setdefaultiface(idx);
2640 return (error);
2641 /* NOTREACHED */
2642 }
2643 }
2644 return (error);
2645 }
2646
2647 /*
2648 * Create neighbor cache entry and cache link-layer address,
2649 * on reception of inbound ND6 packets. (RS/RA/NS/redirect)
2650 */
2651 void
2652 nd6_cache_lladdr(
2653 struct ifnet *ifp,
2654 struct in6_addr *from,
2655 char *lladdr,
2656 __unused int lladdrlen,
2657 int type, /* ICMP6 type */
2658 int code) /* type dependent information */
2659 {
2660 struct rtentry *rt = NULL;
2661 struct llinfo_nd6 *ln = NULL;
2662 int is_newentry;
2663 struct sockaddr_dl *sdl = NULL;
2664 int do_update;
2665 int olladdr;
2666 int llchange;
2667 int newstate = 0;
2668 struct timeval timenow;
2669
2670 if (ifp == NULL)
2671 panic("ifp == NULL in nd6_cache_lladdr");
2672 if (from == NULL)
2673 panic("from == NULL in nd6_cache_lladdr");
2674
2675 /* nothing must be updated for unspecified address */
2676 if (IN6_IS_ADDR_UNSPECIFIED(from))
2677 return;
2678
2679 /*
2680 * Validation of ifp->if_addrlen and lladdrlen must be done by
2681 * the caller.
2682 *
2683 * XXX If the link does not have a link-layer address, what should
2684 * we do? (ifp->if_addrlen == 0)
2685 * The spec says nothing in the sections for RA, RS and NA. There's a
2686 * small description of it in the NS section (RFC 2461 7.2.3).
2687 */
2688 getmicrotime(&timenow);
2689
2690 rt = nd6_lookup(from, 0, ifp, 0);
2691 if (rt == NULL) {
2692 if ((rt = nd6_lookup(from, 1, ifp, 0)) == NULL)
2693 return;
2694 RT_LOCK_ASSERT_HELD(rt);
2695 is_newentry = 1;
2696 } else {
2697 RT_LOCK_ASSERT_HELD(rt);
2698 /* do nothing if static ndp is set */
2699 if (rt->rt_flags & RTF_STATIC) {
2700 RT_REMREF_LOCKED(rt);
2701 RT_UNLOCK(rt);
2702 return;
2703 }
2704 is_newentry = 0;
2705 }
2706
2707 if (rt == NULL)
2708 return;
2709 if ((rt->rt_flags & (RTF_GATEWAY | RTF_LLINFO)) != RTF_LLINFO) {
2710 fail:
2711 RT_UNLOCK(rt);
2712 nd6_free(rt);
2713 rtfree(rt);
2714 return;
2715 }
2716 ln = (struct llinfo_nd6 *)rt->rt_llinfo;
2717 if (ln == NULL)
2718 goto fail;
2719 if (rt->rt_gateway == NULL)
2720 goto fail;
2721 if (rt->rt_gateway->sa_family != AF_LINK)
2722 goto fail;
2723 sdl = SDL(rt->rt_gateway);
2724
2725 olladdr = (sdl->sdl_alen) ? 1 : 0;
2726 if (olladdr && lladdr) {
2727 if (bcmp(lladdr, LLADDR(sdl), ifp->if_addrlen))
2728 llchange = 1;
2729 else
2730 llchange = 0;
2731 } else
2732 llchange = 0;
2733
2734 /*
2735 * newentry olladdr lladdr llchange (*=record)
2736 * 0 n n -- (1)
2737 * 0 y n -- (2)
2738 * 0 n y -- (3) * STALE
2739 * 0 y y n (4) *
2740 * 0 y y y (5) * STALE
2741 * 1 -- n -- (6) NOSTATE(= PASSIVE)
2742 * 1 -- y -- (7) * STALE
2743 */
2744
2745 if (lladdr) { /* (3-5) and (7) */
2746 /*
2747 * Record source link-layer address
2748 * XXX is it dependent on ifp->if_type?
2749 */
2750 sdl->sdl_alen = ifp->if_addrlen;
2751 bcopy(lladdr, LLADDR(sdl), ifp->if_addrlen);
2752
2753 /* cache the gateway (sender HW) address */
2754 nd6_llreach_alloc(rt, ifp, LLADDR(sdl), sdl->sdl_alen, FALSE);
2755 }
2756
2757 if (!is_newentry) {
2758 if ((!olladdr && lladdr != NULL) || /* (3) */
2759 (olladdr && lladdr != NULL && llchange)) { /* (5) */
2760 do_update = 1;
2761 newstate = ND6_LLINFO_STALE;
2762 } else /* (1-2,4) */
2763 do_update = 0;
2764 } else {
2765 do_update = 1;
2766 if (lladdr == NULL) /* (6) */
2767 newstate = ND6_LLINFO_NOSTATE;
2768 else /* (7) */
2769 newstate = ND6_LLINFO_STALE;
2770 }
2771
2772 if (do_update) {
2773 /*
2774 * Update the state of the neighbor cache.
2775 */
2776 ln->ln_state = newstate;
2777
2778 if (ln->ln_state == ND6_LLINFO_STALE) {
2779 struct mbuf *m = ln->ln_hold;
2780 /*
2781 * XXX: since nd6_output() below will cause a
2782 * state transition to DELAY and reset the timer,
2783 * we must set the timer now, although it is actually
2784 * meaningless.
2785 */
2786 ln->ln_expire = timenow.tv_sec + nd6_gctimer;
2787 ln->ln_hold = NULL;
2788
2789 if (m != NULL) {
2790 struct sockaddr_in6 sin6;
2791
2792 rtkey_to_sa6(rt, &sin6);
2793 /*
2794 * we assume ifp is not a p2p interface here, so just
2795 * set the 2nd argument to the 1st one.
2796 */
2797 RT_UNLOCK(rt);
2798 nd6_output(ifp, ifp, m, &sin6, rt, NULL);
2799 RT_LOCK(rt);
2800 }
2801 } else if (ln->ln_state == ND6_LLINFO_INCOMPLETE) {
2802 /* probe right away */
2803 ln->ln_expire = timenow.tv_sec;
2804 }
2805 }
2806
2807 /*
2808 * ICMP6 type dependent behavior.
2809 *
2810 * NS: clear IsRouter if new entry
2811 * RS: clear IsRouter
2812 * RA: set IsRouter if there's lladdr
2813 * redir: clear IsRouter if new entry
2814 *
2815 * RA case, (1):
2816 * The spec says that we must set IsRouter in the following cases:
2817 * - If lladdr exists, set IsRouter. This means (1-5).
2818 * - If it is an old entry (!newentry), set IsRouter. This means (7).
2819 * So, based on the spec, in cases (1-5) and (7) we must set IsRouter.
2820 * A question arises for case (1). Case (1) has no lladdr in the
2821 * neighbor cache; this is similar to (6).
2822 * This case is rare but we figured that we MUST NOT set IsRouter.
2823 *
2824 * newentry olladdr lladdr llchange NS RS RA redir
2825 * D R
2826 * 0 n n -- (1) c ? s
2827 * 0 y n -- (2) c s s
2828 * 0 n y -- (3) c s s
2829 * 0 y y n (4) c s s
2830 * 0 y y y (5) c s s
2831 * 1 -- n -- (6) c c c s
2832 * 1 -- y -- (7) c c s c s
2833 *
2834 * (c=clear s=set)
2835 */
2836 switch (type & 0xff) {
2837 case ND_NEIGHBOR_SOLICIT:
2838 /*
2839 * New entry must have is_router flag cleared.
2840 */
2841 if (is_newentry) /* (6-7) */
2842 ln->ln_router = 0;
2843 break;
2844 case ND_REDIRECT:
2845 /*
2846 * If the icmp is a redirect to a better router, always set the
2847 * is_router flag. Otherwise, if the entry is newly created,
2848 * clear the flag. [RFC 2461, sec 8.3]
2849 */
2850 if (code == ND_REDIRECT_ROUTER)
2851 ln->ln_router = 1;
2852 else if (is_newentry) /* (6-7) */
2853 ln->ln_router = 0;
2854 break;
2855 case ND_ROUTER_SOLICIT:
2856 /*
2857 * is_router flag must always be cleared.
2858 */
2859 ln->ln_router = 0;
2860 break;
2861 case ND_ROUTER_ADVERT:
2862 /*
2863 * Mark an entry with lladdr as a router.
2864 */
2865 if ((!is_newentry && (olladdr || lladdr)) || /* (2-5) */
2866 (is_newentry && lladdr)) { /* (7) */
2867 ln->ln_router = 1;
2868 }
2869 break;
2870 }
2871
2872 /*
2873 * When the link-layer address of a router changes, select the
2874 * best router again. In particular, when the neighbor entry is newly
2875 * created, it might affect the selection policy.
2876 * Question: can we restrict the first condition to the "is_newentry"
2877 * case?
2878 *
2879 * Note: Perform default router selection even when we are a router,
2880 * if Scoped Routing is enabled.
2881 */
2882 if (do_update && ln->ln_router &&
2883 (ip6_doscopedroute || !ip6_forwarding)) {
2884 RT_REMREF_LOCKED(rt);
2885 RT_UNLOCK(rt);
2886 lck_mtx_lock(nd6_mutex);
2887 defrouter_select(ifp);
2888 lck_mtx_unlock(nd6_mutex);
2889 } else {
2890 RT_REMREF_LOCKED(rt);
2891 RT_UNLOCK(rt);
2892 }
2893 }
2894
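/*
 * Slow timer: for each interface, periodically recompute ReachableTime
 * from BaseReachableTime so that a new random value is picked at least
 * once every few hours (RFC 2461, 6.3.4), then rearm itself.
 */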
2895 static void
2896 nd6_slowtimo(
2897 __unused void *ignored_arg)
2898 {
2899 int i;
2900 struct nd_ifinfo *nd6if;
2901
2902 lck_rw_lock_shared(nd_if_rwlock);
2903 for (i = 1; i < if_index + 1; i++) {
2904 if (!nd_ifinfo || i >= nd_ifinfo_indexlim)
2905 break;
2906 nd6if = &nd_ifinfo[i];
2907 if (!nd6if->initialized)
2908 break;
2909 lck_mtx_lock(&nd6if->lock);
2910 if (nd6if->basereachable && /* already initialized */
2911 (nd6if->recalctm -= ND6_SLOWTIMER_INTERVAL) <= 0) {
2912 /*
2913 * Since reachable time rarely changes by router
2914 * advertisements, we SHOULD insure that a new random
2915 * value gets recomputed at least once every few hours.
2916 * (RFC 2461, 6.3.4)
2917 */
2918 nd6if->recalctm = nd6_recalc_reachtm_interval;
2919 nd6if->reachable = ND_COMPUTE_RTIME(nd6if->basereachable);
2920 }
2921 lck_mtx_unlock(&nd6if->lock);
2922 }
2923 lck_rw_done(nd_if_rwlock);
2924 timeout(nd6_slowtimo, (caddr_t)0, ND6_SLOWTIMER_INTERVAL * hz);
2925 }
2926
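/*
 * ND output routine: resolve the next hop (following the gateway
 * route when RTF_GATEWAY is set), drive the neighbor cache state
 * machine (STALE -> DELAY, NUD timers), and either hand the packet to
 * dlil_output() or park it in ln_hold while a Neighbor Solicitation
 * is sent for an INCOMPLETE entry.
 */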
2927 #define senderr(e) { error = (e); goto bad;}
2928 int
2929 nd6_output(struct ifnet *ifp, struct ifnet *origifp, struct mbuf *m0,
2930 struct sockaddr_in6 *dst, struct rtentry *hint0, struct flowadv *adv)
2931 {
2932 struct mbuf *m = m0;
2933 struct rtentry *rt = hint0, *hint = hint0;
2934 struct llinfo_nd6 *ln = NULL;
2935 int error = 0;
2936 struct timeval timenow;
2937 struct rtentry *rtrele = NULL;
2938 struct nd_ifinfo *ndi;
2939
2940 if (rt != NULL) {
2941 RT_LOCK_SPIN(rt);
2942 RT_ADDREF_LOCKED(rt);
2943 }
2944
2945 if (IN6_IS_ADDR_MULTICAST(&dst->sin6_addr) || !nd6_need_cache(ifp)) {
2946 if (rt != NULL)
2947 RT_UNLOCK(rt);
2948 goto sendpkt;
2949 }
2950
2951 /*
2952 * Next hop determination. Because we may involve the gateway route
2953 * in addition to the original route, locking is rather complicated.
2954 * The general concept is that regardless of whether the route points
2955 * to the original route or to the gateway route, this routine takes
2956 * an extra reference on such a route. This extra reference will be
2957 * released at the end.
2958 *
2959 * Care must be taken to ensure that the "hint0" route never gets freed
2960 * via rtfree(), since the caller may have stored it inside a struct
2961 * route with a reference held for that placeholder.
2962 *
2963 * This logic is similar to, though not exactly the same as the one
2964 * used by route_to_gwroute().
2965 */
2966 if (rt != NULL) {
2967 /*
2968 * We have a reference to "rt" by now (or below via rtalloc1),
2969 * which will either be released or freed at the end of this
2970 * routine.
2971 */
2972 RT_LOCK_ASSERT_HELD(rt);
2973 if (!(rt->rt_flags & RTF_UP)) {
2974 RT_REMREF_LOCKED(rt);
2975 RT_UNLOCK(rt);
2976 if ((hint = rt = rtalloc1_scoped((struct sockaddr *)dst,
2977 1, 0, ifp->if_index)) != NULL) {
2978 RT_LOCK_SPIN(rt);
2979 if (rt->rt_ifp != ifp) {
2980 /* XXX: loop care? */
2981 RT_UNLOCK(rt);
2982 error = nd6_output(ifp, origifp, m0,
2983 dst, rt, adv);
2984 rtfree(rt);
2985 return (error);
2986 }
2987 } else {
2988 senderr(EHOSTUNREACH);
2989 }
2990 }
2991
2992 if (rt->rt_flags & RTF_GATEWAY) {
2993 struct rtentry *gwrt;
2994 struct in6_ifaddr *ia6 = NULL;
2995 struct sockaddr_in6 gw6;
2996
2997 rtgw_to_sa6(rt, &gw6);
2998 /*
2999 * Must drop rt_lock since nd6_is_addr_neighbor()
3000 * calls nd6_lookup() and acquires rnh_lock.
3001 */
3002 RT_UNLOCK(rt);
3003
3004 /*
3005 * We skip link-layer address resolution and NUD
3006 * if the gateway is not a neighbor from the ND point
3007 * of view, regardless of the value of nd_ifinfo.flags.
3008 * The second condition is a bit tricky; we skip
3009 * if the gateway is our own address, which is
3010 * sometimes used to install a route to a p2p link.
3011 */
3012 if (!nd6_is_addr_neighbor(&gw6, ifp, 0) ||
3013 (ia6 = in6ifa_ifpwithaddr(ifp, &gw6.sin6_addr))) {
3014 /*
3015 * We allow this kind of tricky route only
3016 * when the outgoing interface is p2p.
3017 * XXX: we may need a more generic rule here.
3018 */
3019 if (ia6 != NULL)
3020 IFA_REMREF(&ia6->ia_ifa);
3021 if ((ifp->if_flags & IFF_POINTOPOINT) == 0)
3022 senderr(EHOSTUNREACH);
3023 goto sendpkt;
3024 }
3025
3026 RT_LOCK_SPIN(rt);
3027 gw6 = *((struct sockaddr_in6 *)(void *)rt->rt_gateway);
3028
3029 /* If hint is now down, give up */
3030 if (!(rt->rt_flags & RTF_UP)) {
3031 RT_UNLOCK(rt);
3032 senderr(EHOSTUNREACH);
3033 }
3034
3035 /* If there's no gateway route, look it up */
3036 if ((gwrt = rt->rt_gwroute) == NULL) {
3037 RT_UNLOCK(rt);
3038 goto lookup;
3039 }
3040 /* Become a regular mutex */
3041 RT_CONVERT_LOCK(rt);
3042
3043 /*
3044 * Take gwrt's lock while holding route's lock;
3045 * this is okay since gwrt never points back
3046 * to rt, so no lock ordering issues.
3047 */
3048 RT_LOCK_SPIN(gwrt);
3049 if (!(gwrt->rt_flags & RTF_UP)) {
3050 rt->rt_gwroute = NULL;
3051 RT_UNLOCK(gwrt);
3052 RT_UNLOCK(rt);
3053 rtfree(gwrt);
3054 lookup:
3055 lck_mtx_lock(rnh_lock);
3056 gwrt = rtalloc1_scoped_locked(
3057 (struct sockaddr *)&gw6, 1, 0,
3058 ifp->if_index);
3059
3060 RT_LOCK(rt);
3061 /*
3062 * Bail out if the route is down, no route
3063 * to gateway, circular route, or if the
3064 * gateway portion of "rt" has changed.
3065 */
3066 if (!(rt->rt_flags & RTF_UP) ||
3067 gwrt == NULL || gwrt == rt ||
3068 !equal(SA(&gw6), rt->rt_gateway)) {
3069 if (gwrt == rt) {
3070 RT_REMREF_LOCKED(gwrt);
3071 gwrt = NULL;
3072 }
3073 RT_UNLOCK(rt);
3074 if (gwrt != NULL)
3075 rtfree_locked(gwrt);
3076 lck_mtx_unlock(rnh_lock);
3077 senderr(EHOSTUNREACH);
3078 }
3079 VERIFY(gwrt != NULL);
3080 /*
3081 * Set gateway route; callee adds ref to gwrt;
3082 * gwrt has an extra ref from rtalloc1() for
3083 * this routine.
3084 */
3085 rt_set_gwroute(rt, rt_key(rt), gwrt);
3086 RT_UNLOCK(rt);
3087 lck_mtx_unlock(rnh_lock);
3088 /* Remember to release/free "rt" at the end */
3089 rtrele = rt;
3090 rt = gwrt;
3091 } else {
3092 RT_ADDREF_LOCKED(gwrt);
3093 RT_UNLOCK(gwrt);
3094 RT_UNLOCK(rt);
3095 /* Remember to release/free "rt" at the end */
3096 rtrele = rt;
3097 rt = gwrt;
3098 }
3099 VERIFY(rt == gwrt);
3100
3101 /*
3102 * This is an opportunity to revalidate the parent
3103 * route's gwroute, in case it now points to a dead
3104 * route entry. Parent route won't go away since the
3105 * clone (hint) holds a reference to it. rt == gwrt.
3106 */
3107 RT_LOCK_SPIN(hint);
3108 if ((hint->rt_flags & (RTF_WASCLONED | RTF_UP)) ==
3109 (RTF_WASCLONED | RTF_UP)) {
3110 struct rtentry *prt = hint->rt_parent;
3111 VERIFY(prt != NULL);
3112
3113 RT_CONVERT_LOCK(hint);
3114 RT_ADDREF(prt);
3115 RT_UNLOCK(hint);
3116 rt_revalidate_gwroute(prt, rt);
3117 RT_REMREF(prt);
3118 } else {
3119 RT_UNLOCK(hint);
3120 }
3121
3122 RT_LOCK_SPIN(rt);
3123 /* rt == gwrt; if it is now down, give up */
3124 if (!(rt->rt_flags & RTF_UP)) {
3125 RT_UNLOCK(rt);
3126 rtfree(rt);
3127 rt = NULL;
3128 /* "rtrele" == original "rt" */
3129 senderr(EHOSTUNREACH);
3130 }
3131 }
3132
3133 /* Become a regular mutex */
3134 RT_CONVERT_LOCK(rt);
3135 }
3136
3137 /*
3138 * Address resolution or Neighbor Unreachability Detection
3139 * for the next hop.
3140 * At this point, the destination of the packet must be a unicast
3141 * or an anycast address (i.e. not a multicast).
3142 */
3143
3144 /* Look up the neighbor cache for the nexthop */
3145 if (rt && (rt->rt_flags & RTF_LLINFO) != 0) {
3146 ln = rt->rt_llinfo;
3147 } else {
3148 struct sockaddr_in6 sin6;
3149 /*
3150 * Clear out Scope ID field in case it is set.
3151 */
3152 sin6 = *dst;
3153 sin6.sin6_scope_id = 0;
3154 /*
3155 * Since nd6_is_addr_neighbor() internally calls nd6_lookup(),
3156 * the condition below is not very efficient. But we believe
3157 * it is tolerable, because this should be a rare case.
3158 * Must drop rt_lock since nd6_is_addr_neighbor() calls
3159 * nd6_lookup() and acquires rnh_lock.
3160 */
3161 if (rt != NULL)
3162 RT_UNLOCK(rt);
3163 if (nd6_is_addr_neighbor(&sin6, ifp, 0)) {
3164 /* "rtrele" may have been used, so clean up "rt" now */
3165 if (rt != NULL) {
3166 /* Don't free "hint0" */
3167 if (rt == hint0)
3168 RT_REMREF(rt);
3169 else
3170 rtfree(rt);
3171 }
3172 /* Callee returns a locked route upon success */
3173 rt = nd6_lookup(&dst->sin6_addr, 1, ifp, 0);
3174 if (rt != NULL) {
3175 RT_LOCK_ASSERT_HELD(rt);
3176 ln = rt->rt_llinfo;
3177 }
3178 } else if (rt != NULL) {
3179 RT_LOCK(rt);
3180 }
3181 }
3182
3183 if (!ln || !rt) {
3184 if (rt != NULL)
3185 RT_UNLOCK(rt);
3186 lck_rw_lock_shared(nd_if_rwlock);
3187 ndi = ND_IFINFO(ifp);
3188 VERIFY(ndi != NULL && ndi->initialized);
3189 lck_mtx_lock(&ndi->lock);
3190 if ((ifp->if_flags & IFF_POINTOPOINT) == 0 &&
3191 !(ndi->flags & ND6_IFF_PERFORMNUD)) {
3192 lck_mtx_unlock(&ndi->lock);
3193 lck_rw_done(nd_if_rwlock);
3194 log(LOG_DEBUG,
3195 "nd6_output: can't allocate llinfo for %s "
3196 "(ln=%p, rt=%p)\n",
3197 ip6_sprintf(&dst->sin6_addr), ln, rt);
3198 senderr(EIO); /* XXX: good error? */
3199 }
3200 lck_mtx_unlock(&ndi->lock);
3201 lck_rw_done(nd_if_rwlock);
3202
3203 goto sendpkt; /* send anyway */
3204 }
3205
3206 getmicrotime(&timenow);
3207
3208 /* We don't have to do link-layer address resolution on a p2p link. */
3209 if ((ifp->if_flags & IFF_POINTOPOINT) != 0 &&
3210 ln->ln_state < ND6_LLINFO_REACHABLE) {
3211 ln->ln_state = ND6_LLINFO_STALE;
3212 ln->ln_expire = rt_expiry(rt, timenow.tv_sec, nd6_gctimer);
3213 }
3214
3215 /*
3216 * The first time we send a packet to a neighbor whose entry is
3217 * STALE, we have to change the state to DELAY and set a timer to
3218 * expire in DELAY_FIRST_PROBE_TIME seconds, to ensure that
3219 * neighbor unreachability detection is performed on expiration.
3220 * (RFC 2461 7.3.3)
3221 */
3222 if (ln->ln_state == ND6_LLINFO_STALE) {
3223 ln->ln_asked = 0;
3224 ln->ln_state = ND6_LLINFO_DELAY;
3225 ln->ln_expire = rt_expiry(rt, timenow.tv_sec, nd6_delay);
3226 }
3227
3228 /*
3229 * If the neighbor cache entry has a state other than INCOMPLETE
3230 * (i.e. its link-layer address is already resolved), just
3231 * send the packet.
3232 */
3233 if (ln->ln_state > ND6_LLINFO_INCOMPLETE) {
3234 RT_UNLOCK(rt);
3235 /*
3236 * Move this entry to the head of the queue so that it is
3237 * less likely for this entry to be a target of forced
3238 * garbage collection (see nd6_rtrequest()).
3239 */
3240 lck_mtx_lock(rnh_lock);
3241 RT_LOCK_SPIN(rt);
3242 if (ln->ln_flags & ND6_LNF_IN_USE) {
3243 LN_DEQUEUE(ln);
3244 LN_INSERTHEAD(ln);
3245 }
3246 RT_UNLOCK(rt);
3247 lck_mtx_unlock(rnh_lock);
3248 goto sendpkt;
3249 }
3250
3251 /*
3252 * There is a neighbor cache entry, but no ethernet address
3253 * response yet. Replace the held mbuf (if any) with this
3254 * latest one.
3255 *
3256 * This code conforms to the rate-limiting rule described in Section
3257 * 7.2.2 of RFC 2461, because the timer is set correctly after sending
3258 * an NS below.
3259 */
3260 if (ln->ln_state == ND6_LLINFO_NOSTATE)
3261 ln->ln_state = ND6_LLINFO_INCOMPLETE;
3262 if (ln->ln_hold)
3263 m_freem(ln->ln_hold);
3264 ln->ln_hold = m;
3265 if (ln->ln_expire && ln->ln_asked < nd6_mmaxtries &&
3266 ln->ln_expire < timenow.tv_sec) {
3267 ln->ln_asked++;
3268 lck_rw_lock_shared(nd_if_rwlock);
3269 ndi = ND_IFINFO(ifp);
3270 VERIFY(ndi != NULL && ndi->initialized);
3271 lck_mtx_lock(&ndi->lock);
3272 ln->ln_expire = timenow.tv_sec + ndi->retrans / 1000;
3273 lck_mtx_unlock(&ndi->lock);
3274 lck_rw_done(nd_if_rwlock);
3275 RT_UNLOCK(rt);
3276 /* We still have a reference on rt (for ln) */
3277 if (ip6_forwarding)
3278 nd6_prproxy_ns_output(ifp, NULL, &dst->sin6_addr, ln);
3279 else
3280 nd6_ns_output(ifp, NULL, &dst->sin6_addr, ln, 0);
3281 } else {
3282 RT_UNLOCK(rt);
3283 }
3284 /*
3285 * Move this entry to the head of the queue so that it is
3286 * less likely for this entry to be a target of forced
3287 * garbage collection (see nd6_rtrequest()).
3288 */
3289 lck_mtx_lock(rnh_lock);
3290 RT_LOCK_SPIN(rt);
3291 if (ln->ln_flags & ND6_LNF_IN_USE) {
3292 LN_DEQUEUE(ln);
3293 LN_INSERTHEAD(ln);
3294 }
3295 /* Clean up "rt" now while we can */
3296 if (rt == hint0) {
3297 RT_REMREF_LOCKED(rt);
3298 RT_UNLOCK(rt);
3299 } else {
3300 RT_UNLOCK(rt);
3301 rtfree_locked(rt);
3302 }
3303 rt = NULL; /* "rt" has been taken care of */
3304 lck_mtx_unlock(rnh_lock);
3305
3306 error = 0;
3307 goto release;
3308
3309 sendpkt:
3310 if (rt != NULL)
3311 RT_LOCK_ASSERT_NOTHELD(rt);
3312
3313 /* discard the packet if IPv6 operation is disabled on the interface */
3314 lck_rw_lock_shared(nd_if_rwlock);
3315 ndi = ND_IFINFO(ifp);
3316 VERIFY(ndi != NULL && ndi->initialized);
3317 /* test is done here without holding ndi lock, for performance */
3318 if (ndi->flags & ND6_IFF_IFDISABLED) {
3319 lck_rw_done(nd_if_rwlock);
3320 error = ENETDOWN; /* better error? */
3321 goto bad;
3322 }
3323 lck_rw_done(nd_if_rwlock);
3324
3325 if ((ifp->if_flags & IFF_LOOPBACK) != 0) {
3326 /* forwarding rules require the original scope_id */
3327 m->m_pkthdr.rcvif = origifp;
3328 error = dlil_output(origifp, PF_INET6, m, (caddr_t)rt,
3329 (struct sockaddr *)dst, 0, adv);
3330 goto release;
3331 } else {
3332 /* Do not allow loopback address to wind up on a wire */
3333 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
3334
3335 if ((IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src) ||
3336 IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst))) {
3337 ip6stat.ip6s_badscope++;
3338 /*
3339 * Do not simply drop the packet just like a
3340 * firewall -- we want the application to feel
3341 * the pain. Return ENETUNREACH like ip6_output
3342 * does in some similar cases. This can startle
3343 * the otherwise clueless process that specifies
3344 * loopback as the source address.
3345 */
3346 error = ENETUNREACH;
3347 goto bad;
3348 }
3349 }
3350
3351 if (rt != NULL) {
3352 RT_LOCK_SPIN(rt);
3353 /* Mark use timestamp */
3354 if (rt->rt_llinfo != NULL)
3355 nd6_llreach_use(rt->rt_llinfo);
3356 RT_UNLOCK(rt);
3357 }
3358
3359 if (hint && nstat_collect)
3360 nstat_route_tx(hint, 1, m->m_pkthdr.len, 0);
3361
3362 m->m_pkthdr.rcvif = NULL;
3363 error = dlil_output(ifp, PF_INET6, m, (caddr_t)rt,
3364 (struct sockaddr *)dst, 0, adv);
3365 goto release;
3366
3367 bad:
3368 if (m != NULL)
3369 m_freem(m);
3370
3371 release:
3372 /* Clean up "rt" unless it's already been done */
3373 if (rt != NULL) {
3374 RT_LOCK_SPIN(rt);
3375 if (rt == hint0) {
3376 RT_REMREF_LOCKED(rt);
3377 RT_UNLOCK(rt);
3378 } else {
3379 RT_UNLOCK(rt);
3380 rtfree(rt);
3381 }
3382 }
3383 /* And now clean up "rtrele" if there is any */
3384 if (rtrele != NULL) {
3385 RT_LOCK_SPIN(rtrele);
3386 if (rtrele == hint0) {
3387 RT_REMREF_LOCKED(rtrele);
3388 RT_UNLOCK(rtrele);
3389 } else {
3390 RT_UNLOCK(rtrele);
3391 rtfree(rtrele);
3392 }
3393 }
3394 return (error);
3395 }
3396 #undef senderr
3397
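/*
 * Return non-zero if the interface type keeps an ND6 neighbor cache;
 * callers such as nd6_output() and nd6_rtrequest() use this to skip
 * link-layer address resolution on links that do not need it.
 */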
3398 int
3399 nd6_need_cache(
3400 struct ifnet *ifp)
3401 {
3402 /*
3403 * XXX: we currently do not make a neighbor cache on any interface
3404 * type other than the ones listed in the switch below.
3405 *
3406 * RFC2893 says:
3407 * - unidirectional tunnels need no ND
3408 */
3409 switch (ifp->if_type) {
3410 case IFT_ARCNET:
3411 case IFT_ETHER:
3412 case IFT_FDDI:
3413 case IFT_IEEE1394:
3414 case IFT_L2VLAN:
3415 case IFT_IEEE8023ADLAG:
3416 #if IFT_IEEE80211
3417 case IFT_IEEE80211:
3418 #endif
3419 case IFT_GIF: /* XXX need more cases? */
3420 case IFT_PPP:
3421 #if IFT_TUNNEL
3422 case IFT_TUNNEL:
3423 #endif
3424 case IFT_BRIDGE:
3425 case IFT_CELLULAR:
3426 return(1);
3427 default:
3428 return(0);
3429 }
3430 }
3431
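/*
 * Store the link-layer destination address for "dst" into "desten":
 * multicast destinations are mapped directly according to the
 * interface type, unicast ones use the AF_LINK address cached in
 * rt_gateway. Returns 1 on success, 0 if the caller should free the
 * mbuf.
 */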
3432 int
3433 nd6_storelladdr(
3434 struct ifnet *ifp,
3435 struct rtentry *rt,
3436 struct mbuf *m,
3437 struct sockaddr *dst,
3438 u_char *desten)
3439 {
3440 int i;
3441 struct sockaddr_dl *sdl;
3442
3443 if (m->m_flags & M_MCAST) {
3444 switch (ifp->if_type) {
3445 case IFT_ETHER:
3446 case IFT_FDDI:
3447 case IFT_L2VLAN:
3448 case IFT_IEEE8023ADLAG:
3449 #if IFT_IEEE80211
3450 case IFT_IEEE80211:
3451 #endif
3452 case IFT_BRIDGE:
3453 ETHER_MAP_IPV6_MULTICAST(&SIN6(dst)->sin6_addr,
3454 desten);
3455 return(1);
3456 case IFT_IEEE1394:
3457 for (i = 0; i < ifp->if_addrlen; i++)
3458 desten[i] = ~0;
3459 return(1);
3460 case IFT_ARCNET:
3461 *desten = 0;
3462 return(1);
3463 default:
3464 return(0); /* caller will free mbuf */
3465 }
3466 }
3467
3468 if (rt == NULL) {
3469 /* this could happen if we could not allocate memory */
3470 return(0); /* caller will free mbuf */
3471 }
3472 RT_LOCK(rt);
3473 if (rt->rt_gateway->sa_family != AF_LINK) {
3474 printf("nd6_storelladdr: something odd happens\n");
3475 RT_UNLOCK(rt);
3476 return(0); /* caller will free mbuf */
3477 }
3478 sdl = SDL(rt->rt_gateway);
3479 if (sdl->sdl_alen == 0) {
3480 /* this should be impossible, but we bark here for debugging */
3481 printf("nd6_storelladdr: sdl_alen == 0\n");
3482 RT_UNLOCK(rt);
3483 return(0); /* caller will free mbuf */
3484 }
3485
3486 bcopy(LLADDR(sdl), desten, sdl->sdl_alen);
3487 RT_UNLOCK(rt);
3488 return(1);
3489 }
3490
3491 /*
3492 * This is the ND pre-output routine; care must be taken to ensure that
3493 * the "hint" route never gets freed via rtfree(), since the caller may
3494 * have stored it inside a struct route with a reference held for that
3495 * placeholder.
3496 */
3497 errno_t
3498 nd6_lookup_ipv6(ifnet_t ifp, const struct sockaddr_in6 *ip6_dest,
3499 struct sockaddr_dl *ll_dest, size_t ll_dest_len, route_t hint,
3500 mbuf_t packet)
3501 {
3502 route_t route = hint;
3503 errno_t result = 0;
3504 struct sockaddr_dl *sdl = NULL;
3505 size_t copy_len;
3506
3507 if (ip6_dest->sin6_family != AF_INET6)
3508 return (EAFNOSUPPORT);
3509
3510 if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
3511 return (ENETDOWN);
3512
3513 if (hint != NULL) {
3514 /*
3515 * Callee holds a reference on the route and returns
3516 * with the route entry locked, upon success.
3517 */
3518 result = route_to_gwroute((const struct sockaddr *)ip6_dest,
3519 hint, &route);
3520 if (result != 0)
3521 return (result);
3522 if (route != NULL)
3523 RT_LOCK_ASSERT_HELD(route);
3524 }
3525
3526 if ((packet->m_flags & M_MCAST) != 0) {
3527 if (route != NULL)
3528 RT_UNLOCK(route);
3529 result = dlil_resolve_multi(ifp,
3530 (const struct sockaddr*)ip6_dest,
3531 (struct sockaddr *)ll_dest, ll_dest_len);
3532 if (route != NULL)
3533 RT_LOCK(route);
3534 goto release;
3535 }
3536
3537 if (route == NULL) {
3538 /*
3539 * This could happen if we could not allocate memory or
3540 * if route_to_gwroute() didn't return a route.
3541 */
3542 result = ENOBUFS;
3543 goto release;
3544 }
3545
3546 if (route->rt_gateway->sa_family != AF_LINK) {
3547 printf("nd6_lookup_ipv6: gateway address not AF_LINK\n");
3548 result = EADDRNOTAVAIL;
3549 goto release;
3550 }
3551
3552 sdl = SDL(route->rt_gateway);
3553 if (sdl->sdl_alen == 0) {
3554 /* this should be impossible, but we bark here for debugging */
3555 printf("nd6_lookup_ipv6: sdl_alen == 0\n");
3556 result = EHOSTUNREACH;
3557 goto release;
3558 }
3559
3560 copy_len = sdl->sdl_len <= ll_dest_len ? sdl->sdl_len : ll_dest_len;
3561 bcopy(sdl, ll_dest, copy_len);
3562
3563 release:
3564 if (route != NULL) {
3565 if (route == hint) {
3566 RT_REMREF_LOCKED(route);
3567 RT_UNLOCK(route);
3568 } else {
3569 RT_UNLOCK(route);
3570 rtfree(route);
3571 }
3572 }
3573 return (result);
3574 }
3575
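/*
 * React to a change of the per-interface ND flags (SIOCSIFINFO_FLAGS);
 * only transitions of ND6_IFF_PROXY_PREFIXES are acted upon, by
 * enabling or disabling prefix proxying on the interface.
 */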
3576 int
3577 nd6_setifinfo(struct ifnet *ifp, u_int32_t before, u_int32_t after)
3578 {
3579 /*
3580 * We only care about ND6_IFF_PROXY_PREFIXES for now.
3581 */
3582 before &= ND6_IFF_PROXY_PREFIXES;
3583 after &= ND6_IFF_PROXY_PREFIXES;
3584
3585 if (before == after)
3586 return (0);
3587
3588 return (nd6_if_prproxy(ifp, ((int32_t)(after - before) > 0)));
3589 }
3590
3591 SYSCTL_DECL(_net_inet6_icmp6);
3592
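/*
 * Read-only sysctl handler exporting the default router list in a
 * 32-bit or 64-bit record layout, depending on the calling process.
 * A userland consumer would typically read it via sysctlbyname();
 * a rough sketch (illustrative only, error handling omitted):
 *
 *	size_t len = 0;
 *	sysctlbyname("net.inet6.icmp6.nd6_drlist", NULL, &len, NULL, 0);
 *	void *buf = malloc(len);
 *	sysctlbyname("net.inet6.icmp6.nd6_drlist", buf, &len, NULL, 0);
 */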
3593 static int
3594 nd6_sysctl_drlist SYSCTL_HANDLER_ARGS
3595 {
3596 #pragma unused(oidp, arg1, arg2)
3597 int error = 0;
3598 char buf[1024];
3599 struct nd_defrouter *dr;
3600 int p64 = proc_is64bit(req->p);
3601
3602 if (req->newptr)
3603 return (EPERM);
3604
3605 lck_mtx_lock(nd6_mutex);
3606 if (p64) {
3607 struct in6_defrouter_64 *d, *de;
3608
3609 for (dr = TAILQ_FIRST(&nd_defrouter);
3610 dr;
3611 dr = TAILQ_NEXT(dr, dr_entry)) {
3612 d = (struct in6_defrouter_64 *)(void *)buf;
3613 de = (struct in6_defrouter_64 *)
3614 (void *)(buf + sizeof (buf));
3615
3616 if (d + 1 <= de) {
3617 bzero(d, sizeof (*d));
3618 d->rtaddr.sin6_family = AF_INET6;
3619 d->rtaddr.sin6_len = sizeof (d->rtaddr);
3620 if (in6_recoverscope(&d->rtaddr, &dr->rtaddr,
3621 dr->ifp) != 0)
3622 log(LOG_ERR,
3623 "scope error in "
3624 "default router list (%s)\n",
3625 ip6_sprintf(&dr->rtaddr));
3626 d->flags = dr->flags;
3627 d->stateflags = dr->stateflags;
3628 d->stateflags &= ~NDDRF_PROCESSED;
3629 d->rtlifetime = dr->rtlifetime;
3630 d->expire = dr->expire;
3631 d->if_index = dr->ifp->if_index;
3632 } else {
3633 panic("buffer too short");
3634 }
3635 error = SYSCTL_OUT(req, buf, sizeof (*d));
3636 if (error)
3637 break;
3638 }
3639 } else {
3640 struct in6_defrouter_32 *d_32, *de_32;
3641
3642 for (dr = TAILQ_FIRST(&nd_defrouter);
3643 dr;
3644 dr = TAILQ_NEXT(dr, dr_entry)) {
3645 d_32 = (struct in6_defrouter_32 *)(void *)buf;
3646 de_32 = (struct in6_defrouter_32 *)
3647 (void *)(buf + sizeof (buf));
3648
3649 if (d_32 + 1 <= de_32) {
3650 bzero(d_32, sizeof (*d_32));
3651 d_32->rtaddr.sin6_family = AF_INET6;
3652 d_32->rtaddr.sin6_len = sizeof (d_32->rtaddr);
3653 if (in6_recoverscope(&d_32->rtaddr, &dr->rtaddr,
3654 dr->ifp) != 0)
3655 log(LOG_ERR,
3656 "scope error in "
3657 "default router list (%s)\n",
3658 ip6_sprintf(&dr->rtaddr));
3659 d_32->flags = dr->flags;
3660 d_32->stateflags = dr->stateflags;
3661 d_32->stateflags &= ~NDDRF_PROCESSED;
3662 d_32->rtlifetime = dr->rtlifetime;
3663 d_32->expire = dr->expire;
3664 d_32->if_index = dr->ifp->if_index;
3665 } else {
3666 panic("buffer too short");
3667 }
3668 error = SYSCTL_OUT(req, buf, sizeof (*d_32));
3669 if (error)
3670 break;
3671 }
3672 }
3673 lck_mtx_unlock(nd6_mutex);
3674 return (error);
3675 }
3676
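/*
 * Read-only sysctl handler exporting the prefix list; each in6_prefix
 * record is followed by the sockaddr_in6 addresses of its advertising
 * routers, so records are variable length.
 */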
3677 static int
3678 nd6_sysctl_prlist SYSCTL_HANDLER_ARGS
3679 {
3680 #pragma unused(oidp, arg1, arg2)
3681 int error = 0;
3682 char buf[1024];
3683 struct nd_prefix *pr;
3684 int p64 = proc_is64bit(req->p);
3685
3686 if (req->newptr)
3687 return (EPERM);
3688
3689 lck_mtx_lock(nd6_mutex);
3690 if (p64) {
3691 struct in6_prefix_64 *p, *pe;
3692
3693 for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) {
3694 u_short advrtrs = 0;
3695 size_t advance;
3696 struct sockaddr_in6 *sin6, *s6;
3697 struct nd_pfxrouter *pfr;
3698
3699 p = (struct in6_prefix_64 *)(void *)buf;
3700 pe = (struct in6_prefix_64 *)
3701 (void *)(buf + sizeof (buf));
3702
3703 if (p + 1 <= pe) {
3704 bzero(p, sizeof (*p));
3705 sin6 = (struct sockaddr_in6 *)(p + 1);
3706
3707 NDPR_LOCK(pr);
3708 p->prefix = pr->ndpr_prefix;
3709 if (in6_recoverscope(&p->prefix,
3710 &p->prefix.sin6_addr, pr->ndpr_ifp) != 0)
3711 log(LOG_ERR,
3712 "scope error in prefix list (%s)\n",
3713 ip6_sprintf(&p->prefix.sin6_addr));
3714 p->raflags = pr->ndpr_raf;
3715 p->prefixlen = pr->ndpr_plen;
3716 p->vltime = pr->ndpr_vltime;
3717 p->pltime = pr->ndpr_pltime;
3718 p->if_index = pr->ndpr_ifp->if_index;
3719 p->expire = pr->ndpr_expire;
3720 p->refcnt = pr->ndpr_addrcnt;
3721 p->flags = pr->ndpr_stateflags;
3722 p->origin = PR_ORIG_RA;
3723 advrtrs = 0;
3724 for (pfr = pr->ndpr_advrtrs.lh_first;
3725 pfr;
3726 pfr = pfr->pfr_next) {
3727 if ((void *)&sin6[advrtrs + 1] >
3728 (void *)pe) {
3729 advrtrs++;
3730 continue;
3731 }
3732 s6 = &sin6[advrtrs];
3733 bzero(s6, sizeof (*s6));
3734 s6->sin6_family = AF_INET6;
3735 s6->sin6_len = sizeof (*sin6);
3736 if (in6_recoverscope(s6,
3737 &pfr->router->rtaddr,
3738 pfr->router->ifp) != 0)
3739 log(LOG_ERR, "scope error in "
3740 "prefix list (%s)\n",
3741 ip6_sprintf(&pfr->router->
3742 rtaddr));
3743 advrtrs++;
3744 }
3745 p->advrtrs = advrtrs;
3746 NDPR_UNLOCK(pr);
3747 } else {
3748 panic("buffer too short");
3749 }
3750 advance = sizeof (*p) + sizeof (*sin6) * advrtrs;
3751 error = SYSCTL_OUT(req, buf, advance);
3752 if (error)
3753 break;
3754 }
3755 } else {
3756 struct in6_prefix_32 *p_32, *pe_32;
3757
3758 for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) {
3759 u_short advrtrs = 0;
3760 size_t advance;
3761 struct sockaddr_in6 *sin6, *s6;
3762 struct nd_pfxrouter *pfr;
3763
3764 p_32 = (struct in6_prefix_32 *)(void *)buf;
3765 pe_32 = (struct in6_prefix_32 *)
3766 (void *)(buf + sizeof (buf));
3767
3768 if (p_32 + 1 <= pe_32) {
3769 bzero(p_32, sizeof (*p_32));
3770 sin6 = (struct sockaddr_in6 *)(p_32 + 1);
3771
3772 NDPR_LOCK(pr);
3773 p_32->prefix = pr->ndpr_prefix;
3774 if (in6_recoverscope(&p_32->prefix,
3775 &p_32->prefix.sin6_addr, pr->ndpr_ifp) != 0)
3776 log(LOG_ERR, "scope error in prefix "
3777 "list (%s)\n", ip6_sprintf(&p_32->
3778 prefix.sin6_addr));
3779 p_32->raflags = pr->ndpr_raf;
3780 p_32->prefixlen = pr->ndpr_plen;
3781 p_32->vltime = pr->ndpr_vltime;
3782 p_32->pltime = pr->ndpr_pltime;
3783 p_32->if_index = pr->ndpr_ifp->if_index;
3784 p_32->expire = pr->ndpr_expire;
3785 p_32->refcnt = pr->ndpr_addrcnt;
3786 p_32->flags = pr->ndpr_stateflags;
3787 p_32->origin = PR_ORIG_RA;
3788 advrtrs = 0;
3789 for (pfr = pr->ndpr_advrtrs.lh_first;
3790 pfr;
3791 pfr = pfr->pfr_next) {
3792 if ((void *)&sin6[advrtrs + 1] >
3793 (void *)pe_32) {
3794 advrtrs++;
3795 continue;
3796 }
3797 s6 = &sin6[advrtrs];
3798 bzero(s6, sizeof (*s6));
3799 s6->sin6_family = AF_INET6;
3800 s6->sin6_len = sizeof (*sin6);
3801 if (in6_recoverscope(s6,
3802 &pfr->router->rtaddr,
3803 pfr->router->ifp) != 0)
3804 log(LOG_ERR, "scope error in "
3805 "prefix list (%s)\n",
3806 ip6_sprintf(&pfr->router->
3807 rtaddr));
3808 advrtrs++;
3809 }
3810 p_32->advrtrs = advrtrs;
3811 NDPR_UNLOCK(pr);
3812 } else {
3813 panic("buffer too short");
3814 }
3815 advance = sizeof (*p_32) + sizeof (*sin6) * advrtrs;
3816 error = SYSCTL_OUT(req, buf, advance);
3817 if (error)
3818 break;
3819 }
3820 }
3821 lck_mtx_unlock(nd6_mutex);
3822 return (error);
3823 }
3824 SYSCTL_PROC(_net_inet6_icmp6, ICMPV6CTL_ND6_DRLIST, nd6_drlist,
3825 CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, nd6_sysctl_drlist, "S,in6_defrouter","");
3826 SYSCTL_PROC(_net_inet6_icmp6, ICMPV6CTL_ND6_PRLIST, nd6_prlist,
3827 CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, nd6_sysctl_prlist, "S,in6_prefix","");
3828