]> git.saurik.com Git - apple/xnu.git/blob - bsd/netinet6/nd6.c
xnu-4570.71.2.tar.gz
[apple/xnu.git] / bsd / netinet6 / nd6.c
1 /*
2 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. Neither the name of the project nor the names of its contributors
42 * may be used to endorse or promote products derived from this software
43 * without specific prior written permission.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * SUCH DAMAGE.
56 */
57
58 /*
59 * XXX
60 * KAME 970409 note:
61 * BSD/OS version heavily modifies this code, related to llinfo.
62 * Since we don't have BSD/OS version of net/route.c in our hand,
63 * I left the code mostly as it was in 970310. -- itojun
64 */
65
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/malloc.h>
69 #include <sys/mbuf.h>
70 #include <sys/socket.h>
71 #include <sys/sockio.h>
72 #include <sys/time.h>
73 #include <sys/kernel.h>
74 #include <sys/sysctl.h>
75 #include <sys/errno.h>
76 #include <sys/syslog.h>
77 #include <sys/protosw.h>
78 #include <sys/proc.h>
79 #include <sys/mcache.h>
80
81 #include <dev/random/randomdev.h>
82
83 #include <kern/queue.h>
84 #include <kern/zalloc.h>
85
86 #include <net/if.h>
87 #include <net/if_dl.h>
88 #include <net/if_types.h>
89 #include <net/if_llreach.h>
90 #include <net/route.h>
91 #include <net/dlil.h>
92 #include <net/ntstat.h>
93 #include <net/net_osdep.h>
94 #include <net/nwk_wq.h>
95
96 #include <netinet/in.h>
97 #include <netinet/in_arp.h>
98 #include <netinet/if_ether.h>
99 #include <netinet6/in6_var.h>
100 #include <netinet/ip6.h>
101 #include <netinet6/ip6_var.h>
102 #include <netinet6/nd6.h>
103 #include <netinet6/scope6_var.h>
104 #include <netinet/icmp6.h>
105
106 #include "loop.h"
107
108 #define ND6_SLOWTIMER_INTERVAL (60 * 60) /* 1 hour */
109 #define ND6_RECALC_REACHTM_INTERVAL (60 * 120) /* 2 hours */
110
111 #define equal(a1, a2) (bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
112
113 /* timer values */
114 int nd6_prune = 1; /* walk list every 1 seconds */
115 int nd6_prune_lazy = 5; /* lazily walk list every 5 seconds */
116 int nd6_delay = 5; /* delay first probe time 5 second */
117 int nd6_umaxtries = 3; /* maximum unicast query */
118 int nd6_mmaxtries = 3; /* maximum multicast query */
119 int nd6_useloopback = 1; /* use loopback interface for local traffic */
120 int nd6_gctimer = (60 * 60 * 24); /* 1 day: garbage collection timer */
121
122 /* preventing too many loops in ND option parsing */
123 int nd6_maxndopt = 10; /* max # of ND options allowed */
124
125 int nd6_maxqueuelen = 1; /* max # of packets cached in unresolved ND entries */
126
127 #if ND6_DEBUG
128 int nd6_debug = 1;
129 #else
130 int nd6_debug = 0;
131 #endif
132
133 int nd6_optimistic_dad =
134 (ND6_OPTIMISTIC_DAD_LINKLOCAL|ND6_OPTIMISTIC_DAD_AUTOCONF|
135 ND6_OPTIMISTIC_DAD_TEMPORARY|ND6_OPTIMISTIC_DAD_DYNAMIC|
136 ND6_OPTIMISTIC_DAD_SECURED|ND6_OPTIMISTIC_DAD_MANUAL);
137
138 /* for debugging? */
139 static int nd6_inuse, nd6_allocated;
140
141 /*
142 * Synchronization notes:
143 *
144 * The global list of ND entries are stored in llinfo_nd6; an entry
145 * gets inserted into the list when the route is created and gets
146 * removed from the list when it is deleted; this is done as part
147 * of RTM_ADD/RTM_RESOLVE/RTM_DELETE in nd6_rtrequest().
148 *
149 * Because rnh_lock and rt_lock for the entry are held during those
150 * operations, the same locks (and thus lock ordering) must be used
151 * elsewhere to access the relevant data structure fields:
152 *
153 * ln_next, ln_prev, ln_rt
154 *
155 * - Routing lock (rnh_lock)
156 *
157 * ln_hold, ln_asked, ln_expire, ln_state, ln_router, ln_flags,
158 * ln_llreach, ln_lastused
159 *
160 * - Routing entry lock (rt_lock)
161 *
162 * Due to the dependency on rt_lock, llinfo_nd6 has the same lifetime
163 * as the route entry itself. When a route is deleted (RTM_DELETE),
164 * it is simply removed from the global list but the memory is not
165 * freed until the route itself is freed.
166 */
167 struct llinfo_nd6 llinfo_nd6 = {
168 .ln_next = &llinfo_nd6,
169 .ln_prev = &llinfo_nd6,
170 };
171
172 static lck_grp_attr_t *nd_if_lock_grp_attr = NULL;
173 static lck_grp_t *nd_if_lock_grp = NULL;
174 static lck_attr_t *nd_if_lock_attr = NULL;
175
176 /* Protected by nd6_mutex */
177 struct nd_drhead nd_defrouter;
178 struct nd_prhead nd_prefix = { 0 };
179
180 /*
181 * nd6_timeout() is scheduled on a demand basis. nd6_timeout_run is used
182 * to indicate whether or not a timeout has been scheduled. The rnh_lock
183 * mutex is used to protect this scheduling; it is a natural choice given
184 * the work done in the timer callback. Unfortunately, there are cases
185 * when nd6_timeout() needs to be scheduled while rnh_lock cannot be easily
186 * held, due to lock ordering. In those cases, we utilize a "demand" counter
187 * nd6_sched_timeout_want which can be atomically incremented without
188 * having to hold rnh_lock. On places where we acquire rnh_lock, such as
189 * nd6_rtrequest(), we check this counter and schedule the timer if it is
190 * non-zero. The increment happens on various places when we allocate
191 * new ND entries, default routers, prefixes and addresses.
192 */
193 static int nd6_timeout_run; /* nd6_timeout is scheduled to run */
194 static void nd6_timeout(void *);
195 int nd6_sched_timeout_want; /* demand count for timer to be sched */
196 static boolean_t nd6_fast_timer_on = FALSE;
197
198 /* Serialization variables for nd6_service(), protected by rnh_lock */
199 static boolean_t nd6_service_busy;
200 static void *nd6_service_wc = &nd6_service_busy;
201 static int nd6_service_waiters = 0;
202
203 int nd6_recalc_reachtm_interval = ND6_RECALC_REACHTM_INTERVAL;
204 static struct sockaddr_in6 all1_sa;
205
206 static int regen_tmpaddr(struct in6_ifaddr *);
207 extern lck_mtx_t *nd6_mutex;
208
209 static struct llinfo_nd6 *nd6_llinfo_alloc(int);
210 static void nd6_llinfo_free(void *);
211 static void nd6_llinfo_purge(struct rtentry *);
212 static void nd6_llinfo_get_ri(struct rtentry *, struct rt_reach_info *);
213 static void nd6_llinfo_get_iflri(struct rtentry *, struct ifnet_llreach_info *);
214 static void nd6_llinfo_refresh(struct rtentry *);
215 static uint64_t ln_getexpire(struct llinfo_nd6 *);
216
217 static void nd6_service(void *);
218 static void nd6_slowtimo(void *);
219 static int nd6_is_new_addr_neighbor(struct sockaddr_in6 *, struct ifnet *);
220 static int nd6_siocgdrlst(void *, int);
221 static int nd6_siocgprlst(void *, int);
222
223 static int nd6_sysctl_drlist SYSCTL_HANDLER_ARGS;
224 static int nd6_sysctl_prlist SYSCTL_HANDLER_ARGS;
225
226 /*
227 * Insertion and removal from llinfo_nd6 must be done with rnh_lock held.
228 */
229 #define LN_DEQUEUE(_ln) do { \
230 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); \
231 RT_LOCK_ASSERT_HELD((_ln)->ln_rt); \
232 (_ln)->ln_next->ln_prev = (_ln)->ln_prev; \
233 (_ln)->ln_prev->ln_next = (_ln)->ln_next; \
234 (_ln)->ln_prev = (_ln)->ln_next = NULL; \
235 (_ln)->ln_flags &= ~ND6_LNF_IN_USE; \
236 } while (0)
237
238 #define LN_INSERTHEAD(_ln) do { \
239 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); \
240 RT_LOCK_ASSERT_HELD((_ln)->ln_rt); \
241 (_ln)->ln_next = llinfo_nd6.ln_next; \
242 llinfo_nd6.ln_next = (_ln); \
243 (_ln)->ln_prev = &llinfo_nd6; \
244 (_ln)->ln_next->ln_prev = (_ln); \
245 (_ln)->ln_flags |= ND6_LNF_IN_USE; \
246 } while (0)
247
248 static struct zone *llinfo_nd6_zone;
249 #define LLINFO_ND6_ZONE_MAX 256 /* maximum elements in zone */
250 #define LLINFO_ND6_ZONE_NAME "llinfo_nd6" /* name for zone */
251
252 extern int tvtohz(struct timeval *);
253
254 static int nd6_init_done;
255
256 SYSCTL_DECL(_net_inet6_icmp6);
257
258 SYSCTL_PROC(_net_inet6_icmp6, ICMPV6CTL_ND6_DRLIST, nd6_drlist,
259 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
260 nd6_sysctl_drlist, "S,in6_defrouter", "");
261
262 SYSCTL_PROC(_net_inet6_icmp6, ICMPV6CTL_ND6_PRLIST, nd6_prlist,
263 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
264 nd6_sysctl_prlist, "S,in6_defrouter", "");
265
266 SYSCTL_DECL(_net_inet6_ip6);
267
268 static int ip6_maxchainsent = 0;
269 SYSCTL_INT(_net_inet6_ip6, OID_AUTO, maxchainsent,
270 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_maxchainsent, 0,
271 "use dlil_output_list");
272
void
nd6_init(void)
{
	int i;

	/* Must be called exactly once, during protocol bootstrap */
	VERIFY(!nd6_init_done);

	/* Pre-build the all-ones sockaddr_in6 (used as a host mask) */
	all1_sa.sin6_family = AF_INET6;
	all1_sa.sin6_len = sizeof (struct sockaddr_in6);
	for (i = 0; i < sizeof (all1_sa.sin6_addr); i++)
		all1_sa.sin6_addr.s6_addr[i] = 0xff;

	/* initialization of the default router list */
	TAILQ_INIT(&nd_defrouter);

	/* Lock group/attributes backing the per-interface nd_ifinfo locks */
	nd_if_lock_grp_attr = lck_grp_attr_alloc_init();
	nd_if_lock_grp = lck_grp_alloc_init("nd_if_lock", nd_if_lock_grp_attr);
	nd_if_lock_attr = lck_attr_alloc_init();

	/* Zone from which neighbor cache entries (llinfo_nd6) are carved */
	llinfo_nd6_zone = zinit(sizeof (struct llinfo_nd6),
	    LLINFO_ND6_ZONE_MAX * sizeof (struct llinfo_nd6), 0,
	    LLINFO_ND6_ZONE_NAME);
	if (llinfo_nd6_zone == NULL)
		panic("%s: failed allocating llinfo_nd6_zone", __func__);

	zone_change(llinfo_nd6_zone, Z_EXPAND, TRUE);
	zone_change(llinfo_nd6_zone, Z_CALLERACCT, FALSE);

	/* Bring up the NS/NA, router and prefix-proxy sub-modules */
	nd6_nbr_init();
	nd6_rtr_init();
	nd6_prproxy_init();

	nd6_init_done = 1;

	/* start timer */
	timeout(nd6_slowtimo, NULL, ND6_SLOWTIMER_INTERVAL * hz);
}
310
311 static struct llinfo_nd6 *
312 nd6_llinfo_alloc(int how)
313 {
314 struct llinfo_nd6 *ln;
315
316 ln = (how == M_WAITOK) ? zalloc(llinfo_nd6_zone) :
317 zalloc_noblock(llinfo_nd6_zone);
318 if (ln != NULL)
319 bzero(ln, sizeof (*ln));
320
321 return (ln);
322 }
323
324 static void
325 nd6_llinfo_free(void *arg)
326 {
327 struct llinfo_nd6 *ln = arg;
328
329 if (ln->ln_next != NULL || ln->ln_prev != NULL) {
330 panic("%s: trying to free %p when it is in use", __func__, ln);
331 /* NOTREACHED */
332 }
333
334 /* Just in case there's anything there, free it */
335 if (ln->ln_hold != NULL) {
336 m_freem_list(ln->ln_hold);
337 ln->ln_hold = NULL;
338 }
339
340 /* Purge any link-layer info caching */
341 VERIFY(ln->ln_rt->rt_llinfo == ln);
342 if (ln->ln_rt->rt_llinfo_purge != NULL)
343 ln->ln_rt->rt_llinfo_purge(ln->ln_rt);
344
345 zfree(llinfo_nd6_zone, ln);
346 }
347
348 static void
349 nd6_llinfo_purge(struct rtentry *rt)
350 {
351 struct llinfo_nd6 *ln = rt->rt_llinfo;
352
353 RT_LOCK_ASSERT_HELD(rt);
354 VERIFY(rt->rt_llinfo_purge == nd6_llinfo_purge && ln != NULL);
355
356 if (ln->ln_llreach != NULL) {
357 RT_CONVERT_LOCK(rt);
358 ifnet_llreach_free(ln->ln_llreach);
359 ln->ln_llreach = NULL;
360 }
361 ln->ln_lastused = 0;
362 }
363
364 static void
365 nd6_llinfo_get_ri(struct rtentry *rt, struct rt_reach_info *ri)
366 {
367 struct llinfo_nd6 *ln = rt->rt_llinfo;
368 struct if_llreach *lr = ln->ln_llreach;
369
370 if (lr == NULL) {
371 bzero(ri, sizeof (*ri));
372 ri->ri_rssi = IFNET_RSSI_UNKNOWN;
373 ri->ri_lqm = IFNET_LQM_THRESH_OFF;
374 ri->ri_npm = IFNET_NPM_THRESH_UNKNOWN;
375 } else {
376 IFLR_LOCK(lr);
377 /* Export to rt_reach_info structure */
378 ifnet_lr2ri(lr, ri);
379 /* Export ND6 send expiration (calendar) time */
380 ri->ri_snd_expire =
381 ifnet_llreach_up2calexp(lr, ln->ln_lastused);
382 IFLR_UNLOCK(lr);
383 }
384 }
385
386 static void
387 nd6_llinfo_get_iflri(struct rtentry *rt, struct ifnet_llreach_info *iflri)
388 {
389 struct llinfo_nd6 *ln = rt->rt_llinfo;
390 struct if_llreach *lr = ln->ln_llreach;
391
392 if (lr == NULL) {
393 bzero(iflri, sizeof (*iflri));
394 iflri->iflri_rssi = IFNET_RSSI_UNKNOWN;
395 iflri->iflri_lqm = IFNET_LQM_THRESH_OFF;
396 iflri->iflri_npm = IFNET_NPM_THRESH_UNKNOWN;
397 } else {
398 IFLR_LOCK(lr);
399 /* Export to ifnet_llreach_info structure */
400 ifnet_lr2iflri(lr, iflri);
401 /* Export ND6 send expiration (uptime) time */
402 iflri->iflri_snd_expire =
403 ifnet_llreach_up2upexp(lr, ln->ln_lastused);
404 IFLR_UNLOCK(lr);
405 }
406 }
407
408 static void
409 nd6_llinfo_refresh(struct rtentry *rt)
410 {
411 struct llinfo_nd6 *ln = rt->rt_llinfo;
412 uint64_t timenow = net_uptime();
413 /*
414 * Can't refresh permanent, static or entries that are
415 * not direct host entries
416 */
417 if (!ln || ln->ln_expire == 0 ||
418 (rt->rt_flags & RTF_STATIC) ||
419 !(rt->rt_flags & RTF_LLINFO)) {
420 return;
421 }
422
423 if ((ln->ln_state > ND6_LLINFO_INCOMPLETE) &&
424 (ln->ln_state < ND6_LLINFO_PROBE)) {
425 if (ln->ln_expire > timenow) {
426 ln_setexpire(ln, timenow);
427 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_PROBE);
428 }
429 }
430 return;
431 }
432
433 const char *
434 ndcache_state2str(short ndp_state)
435 {
436 const char *ndp_state_str = "UNKNOWN";
437 switch (ndp_state) {
438 case ND6_LLINFO_PURGE:
439 ndp_state_str = "ND6_LLINFO_PURGE";
440 break;
441 case ND6_LLINFO_NOSTATE:
442 ndp_state_str = "ND6_LLINFO_NOSTATE";
443 break;
444 case ND6_LLINFO_INCOMPLETE:
445 ndp_state_str = "ND6_LLINFO_INCOMPLETE";
446 break;
447 case ND6_LLINFO_REACHABLE:
448 ndp_state_str = "ND6_LLINFO_REACHABLE";
449 break;
450 case ND6_LLINFO_STALE:
451 ndp_state_str = "ND6_LLINFO_STALE";
452 break;
453 case ND6_LLINFO_DELAY:
454 ndp_state_str = "ND6_LLINFO_DELAY";
455 break;
456 case ND6_LLINFO_PROBE:
457 ndp_state_str = "ND6_LLINFO_PROBE";
458 break;
459 default:
460 /* Init'd to UNKNOWN */
461 break;
462 }
463 return ndp_state_str;
464 }
465
/*
 * Set the entry's expiration time; ln_getexpire() converts this
 * uptime-relative value to calendar time for export.
 */
void
ln_setexpire(struct llinfo_nd6 *ln, uint64_t expiry)
{
	ln->ln_expire = expiry;
}
471
472 static uint64_t
473 ln_getexpire(struct llinfo_nd6 *ln)
474 {
475 struct timeval caltime;
476 uint64_t expiry;
477
478 if (ln->ln_expire != 0) {
479 struct rtentry *rt = ln->ln_rt;
480
481 VERIFY(rt != NULL);
482 /* account for system time change */
483 getmicrotime(&caltime);
484
485 rt->base_calendartime +=
486 NET_CALCULATE_CLOCKSKEW(caltime,
487 rt->base_calendartime, net_uptime(), rt->base_uptime);
488
489 expiry = rt->base_calendartime +
490 ln->ln_expire - rt->base_uptime;
491 } else {
492 expiry = 0;
493 }
494 return (expiry);
495 }
496
/*
 * Reset an interface's ND parameters to protocol defaults.
 * Caller must hold the interface's nd_ifinfo lock.
 */
void
nd6_ifreset(struct ifnet *ifp)
{
	struct nd_ifinfo *ndi = ND_IFINFO(ifp);
	VERIFY(NULL != ndi);
	VERIFY(ndi->initialized);

	LCK_MTX_ASSERT(&ndi->lock, LCK_MTX_ASSERT_OWNED);
	ndi->linkmtu = ifp->if_mtu;
	ndi->chlim = IPV6_DEFHLIM;
	ndi->basereachable = REACHABLE_TIME;
	/* reachable time is randomized around basereachable */
	ndi->reachable = ND_COMPUTE_RTIME(ndi->basereachable);
	ndi->retrans = RETRANS_TIMER;
}
511
512 void
513 nd6_ifattach(struct ifnet *ifp)
514 {
515 struct nd_ifinfo *ndi = ND_IFINFO(ifp);
516
517 VERIFY(NULL != ndi);
518 if (!ndi->initialized) {
519 lck_mtx_init(&ndi->lock, nd_if_lock_grp, nd_if_lock_attr);
520 ndi->flags = ND6_IFF_PERFORMNUD;
521 ndi->flags |= ND6_IFF_DAD;
522 ndi->initialized = TRUE;
523 }
524
525 lck_mtx_lock(&ndi->lock);
526
527 if (!(ifp->if_flags & IFF_MULTICAST)) {
528 ndi->flags |= ND6_IFF_IFDISABLED;
529 }
530
531 nd6_ifreset(ifp);
532 lck_mtx_unlock(&ndi->lock);
533 nd6_setmtu(ifp);
534 return;
535 }
536
#if 0
/*
 * XXX Look more into this. Especially since we recycle ifnets and do delayed
 * cleanup
 *
 * NOTE(review): compiled out -- ifnet recycling/delayed cleanup appears
 * to stand in for an explicit detach; confirm before re-enabling.
 */
void
nd6_ifdetach(struct nd_ifinfo *nd)
{
	/* XXX destroy nd's lock? */
	FREE(nd, M_IP6NDP);
}
#endif
549
550 void
551 nd6_setmtu(struct ifnet *ifp)
552 {
553 struct nd_ifinfo *ndi = ND_IFINFO(ifp);
554 u_int32_t oldmaxmtu, maxmtu;
555
556 if ((NULL == ndi) || (FALSE == ndi->initialized)) {
557 return;
558 }
559
560 lck_mtx_lock(&ndi->lock);
561 oldmaxmtu = ndi->maxmtu;
562
563 /*
564 * The ND level maxmtu is somewhat redundant to the interface MTU
565 * and is an implementation artifact of KAME. Instead of hard-
566 * limiting the maxmtu based on the interface type here, we simply
567 * take the if_mtu value since SIOCSIFMTU would have taken care of
568 * the sanity checks related to the maximum MTU allowed for the
569 * interface (a value that is known only by the interface layer),
570 * by sending the request down via ifnet_ioctl(). The use of the
571 * ND level maxmtu and linkmtu are done via IN6_LINKMTU() which
572 * does further checking against if_mtu.
573 */
574 maxmtu = ndi->maxmtu = ifp->if_mtu;
575
576 /*
577 * Decreasing the interface MTU under IPV6 minimum MTU may cause
578 * undesirable situation. We thus notify the operator of the change
579 * explicitly. The check for oldmaxmtu is necessary to restrict the
580 * log to the case of changing the MTU, not initializing it.
581 */
582 if (oldmaxmtu >= IPV6_MMTU && ndi->maxmtu < IPV6_MMTU) {
583 log(LOG_NOTICE, "nd6_setmtu: "
584 "new link MTU on %s (%u) is too small for IPv6\n",
585 if_name(ifp), (uint32_t)ndi->maxmtu);
586 }
587 ndi->linkmtu = ifp->if_mtu;
588 lck_mtx_unlock(&ndi->lock);
589
590 /* also adjust in6_maxmtu if necessary. */
591 if (maxmtu > in6_maxmtu) {
592 in6_setmaxmtu();
593 }
594 }
595
596 void
597 nd6_option_init(void *opt, int icmp6len, union nd_opts *ndopts)
598 {
599 bzero(ndopts, sizeof (*ndopts));
600 ndopts->nd_opts_search = (struct nd_opt_hdr *)opt;
601 ndopts->nd_opts_last =
602 (struct nd_opt_hdr *)(((u_char *)opt) + icmp6len);
603
604 if (icmp6len == 0) {
605 ndopts->nd_opts_done = 1;
606 ndopts->nd_opts_search = NULL;
607 }
608 }
609
/*
 * Take one ND option.
 *
 * Returns the option at the cursor and advances ndopts to the next
 * one.  On any malformed option (zero length, or one that overruns
 * nd_opts_last) the cursor state is zeroed and NULL is returned, so
 * subsequent calls also fail.
 */
struct nd_opt_hdr *
nd6_option(union nd_opts *ndopts)
{
	struct nd_opt_hdr *nd_opt;
	int olen;

	if (!ndopts)
		panic("ndopts == NULL in nd6_option\n");
	if (!ndopts->nd_opts_last)
		panic("uninitialized ndopts in nd6_option\n");
	if (!ndopts->nd_opts_search)
		return (NULL);
	if (ndopts->nd_opts_done)
		return (NULL);

	nd_opt = ndopts->nd_opts_search;

	/* make sure nd_opt_len is inside the buffer */
	if ((caddr_t)&nd_opt->nd_opt_len >= (caddr_t)ndopts->nd_opts_last) {
		bzero(ndopts, sizeof (*ndopts));
		return (NULL);
	}

	/* option length field is expressed in units of 8 octets */
	olen = nd_opt->nd_opt_len << 3;
	if (olen == 0) {
		/*
		 * Message validation requires that all included
		 * options have a length that is greater than zero.
		 */
		bzero(ndopts, sizeof (*ndopts));
		return (NULL);
	}

	ndopts->nd_opts_search = (struct nd_opt_hdr *)((caddr_t)nd_opt + olen);
	if (ndopts->nd_opts_search > ndopts->nd_opts_last) {
		/* option overruns the end of buffer, invalid */
		bzero(ndopts, sizeof (*ndopts));
		return (NULL);
	} else if (ndopts->nd_opts_search == ndopts->nd_opts_last) {
		/* reached the end of options chain */
		ndopts->nd_opts_done = 1;
		ndopts->nd_opts_search = NULL;
	}
	return (nd_opt);
}
658
/*
 * Parse multiple ND options.
 * This function is much easier to use, for ND routines that do not need
 * multiple options of the same type.
 *
 * On success returns 0 with the recognized options recorded in
 * ndopts->nd_opt_array (first occurrence wins for most types; for
 * prefix information, nd_opts_pi_end also tracks the last one seen).
 * Returns -1 and zeroes ndopts if a malformed (zero-length) option
 * is encountered.  Parsing stops after nd6_maxndopt options.
 */
int
nd6_options(union nd_opts *ndopts)
{
	struct nd_opt_hdr *nd_opt;
	int i = 0;

	if (ndopts == NULL)
		panic("ndopts == NULL in nd6_options");
	if (ndopts->nd_opts_last == NULL)
		panic("uninitialized ndopts in nd6_options");
	if (ndopts->nd_opts_search == NULL)
		return (0);

	while (1) {
		nd_opt = nd6_option(ndopts);
		if (nd_opt == NULL && ndopts->nd_opts_last == NULL) {
			/*
			 * nd6_option() zeroed the cursor: a malformed
			 * (zero-length or overrunning) option was found.
			 * Message validation requires that all included
			 * options have a length that is greater than zero.
			 */
			icmp6stat.icp6s_nd_badopt++;
			bzero(ndopts, sizeof (*ndopts));
			return (-1);
		}

		/* NULL with intact cursor state: end of options */
		if (nd_opt == NULL)
			goto skip1;

		switch (nd_opt->nd_opt_type) {
		case ND_OPT_SOURCE_LINKADDR:
		case ND_OPT_TARGET_LINKADDR:
		case ND_OPT_MTU:
		case ND_OPT_REDIRECTED_HEADER:
		case ND_OPT_NONCE:
			/* Keep only the first occurrence of these types */
			if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) {
				nd6log((LOG_INFO,
				    "duplicated ND6 option found (type=%d)\n",
				    nd_opt->nd_opt_type));
				/* XXX bark? */
			} else {
				ndopts->nd_opt_array[nd_opt->nd_opt_type] =
				    nd_opt;
			}
			break;
		case ND_OPT_PREFIX_INFORMATION:
			/* Record first occurrence; always track the last */
			if (ndopts->nd_opt_array[nd_opt->nd_opt_type] == 0) {
				ndopts->nd_opt_array[nd_opt->nd_opt_type] =
				    nd_opt;
			}
			ndopts->nd_opts_pi_end =
			    (struct nd_opt_prefix_info *)nd_opt;
			break;
		case ND_OPT_RDNSS:
		case ND_OPT_DNSSL:
			/* ignore */
			break;
		default:
			/*
			 * Unknown options must be silently ignored,
			 * to accommodate future extension to the protocol.
			 */
			nd6log((LOG_DEBUG,
			    "nd6_options: unsupported option %d - "
			    "option ignored\n", nd_opt->nd_opt_type));
		}

skip1:
		i++;
		/* Bound the walk to guard against hostile option chains */
		if (i > nd6_maxndopt) {
			icmp6stat.icp6s_nd_toomanyopt++;
			nd6log((LOG_INFO, "too many loop in nd opt\n"));
			break;
		}

		if (ndopts->nd_opts_done)
			break;
	}

	return (0);
}
744
/*
 * Argument/accounting block passed to nd6_service(); the counters
 * summarize what a single service pass did.
 */
struct nd6svc_arg {
	int draining;		/* nonzero: purge eligible entries eagerly */
	uint32_t killed;	/* entries freed during this pass */
	uint32_t aging_lazy;	/* entries on the lazy (slow) timer schedule */
	uint32_t aging;		/* entries on the normal timer schedule */
	uint32_t sticky;	/* permanent/static entries left alone */
	uint32_t found;		/* total entries visited */
};
753
754 /*
755 * ND6 service routine to expire default route list and prefix list
756 */
757 static void
758 nd6_service(void *arg)
759 {
760 struct nd6svc_arg *ap = arg;
761 struct llinfo_nd6 *ln;
762 struct nd_defrouter *dr = NULL;
763 struct nd_prefix *pr = NULL;
764 struct ifnet *ifp = NULL;
765 struct in6_ifaddr *ia6, *nia6;
766 uint64_t timenow;
767 boolean_t send_nc_failure_kev = FALSE;
768 struct nd_drhead nd_defrouter_tmp;
769 struct nd_defrouter *ndr = NULL;
770 struct radix_node_head *rnh = rt_tables[AF_INET6];
771
772 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
773 /*
774 * Since we may drop rnh_lock and nd6_mutex below, we want
775 * to run this entire operation single threaded.
776 */
777 while (nd6_service_busy) {
778 nd6log2((LOG_DEBUG, "%s: %s is blocked by %d waiters\n",
779 __func__, ap->draining ? "drainer" : "timer",
780 nd6_service_waiters));
781 nd6_service_waiters++;
782 (void) msleep(nd6_service_wc, rnh_lock, (PZERO-1),
783 __func__, NULL);
784 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
785 }
786
787 /* We are busy now; tell everyone else to go away */
788 nd6_service_busy = TRUE;
789
790 net_update_uptime();
791 timenow = net_uptime();
792 again:
793 /*
794 * send_nc_failure_kev gets set when default router's IPv6 address
795 * can't be resolved.
796 * That can happen either:
797 * 1. When the entry has resolved once but can't be
798 * resolved later and the neighbor cache entry for gateway is deleted
799 * after max probe attempts.
800 *
801 * 2. When the entry is in ND6_LLINFO_INCOMPLETE but can not be resolved
802 * after max neighbor address resolution attempts.
803 *
804 * Both set send_nc_failure_kev to true. ifp is also set to the previous
805 * neighbor cache entry's route's ifp.
806 * Once we are done sending the notification, set send_nc_failure_kev
807 * to false to stop sending false notifications for non default router
808 * neighbors.
809 *
810 * We may to send more information like Gateway's IP that could not be
811 * resolved, however right now we do not install more than one default
812 * route per interface in the routing table.
813 */
814 if (send_nc_failure_kev && ifp != NULL &&
815 ifp->if_addrlen == IF_LLREACH_MAXLEN) {
816 struct kev_msg ev_msg;
817 struct kev_nd6_ndfailure nd6_ndfailure;
818 bzero(&ev_msg, sizeof(ev_msg));
819 bzero(&nd6_ndfailure, sizeof(nd6_ndfailure));
820 ev_msg.vendor_code = KEV_VENDOR_APPLE;
821 ev_msg.kev_class = KEV_NETWORK_CLASS;
822 ev_msg.kev_subclass = KEV_ND6_SUBCLASS;
823 ev_msg.event_code = KEV_ND6_NDFAILURE;
824
825 nd6_ndfailure.link_data.if_family = ifp->if_family;
826 nd6_ndfailure.link_data.if_unit = ifp->if_unit;
827 strlcpy(nd6_ndfailure.link_data.if_name,
828 ifp->if_name,
829 sizeof(nd6_ndfailure.link_data.if_name));
830 ev_msg.dv[0].data_ptr = &nd6_ndfailure;
831 ev_msg.dv[0].data_length =
832 sizeof(nd6_ndfailure);
833 dlil_post_complete_msg(NULL, &ev_msg);
834 }
835
836 send_nc_failure_kev = FALSE;
837 ifp = NULL;
838 /*
839 * The global list llinfo_nd6 is modified by nd6_request() and is
840 * therefore protected by rnh_lock. For obvious reasons, we cannot
841 * hold rnh_lock across calls that might lead to code paths which
842 * attempt to acquire rnh_lock, else we deadlock. Hence for such
843 * cases we drop rt_lock and rnh_lock, make the calls, and repeat the
844 * loop. To ensure that we don't process the same entry more than
845 * once in a single timeout, we mark the "already-seen" entries with
846 * ND6_LNF_TIMER_SKIP flag. At the end of the loop, we do a second
847 * pass thru the entries and clear the flag so they can be processed
848 * during the next timeout.
849 */
850 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
851
852 ln = llinfo_nd6.ln_next;
853 while (ln != NULL && ln != &llinfo_nd6) {
854 struct rtentry *rt;
855 struct sockaddr_in6 *dst;
856 struct llinfo_nd6 *next;
857 u_int32_t retrans, flags;
858 struct nd_ifinfo *ndi = NULL;
859 boolean_t is_router = FALSE;
860
861 /* ln_next/prev/rt is protected by rnh_lock */
862 next = ln->ln_next;
863 rt = ln->ln_rt;
864 RT_LOCK(rt);
865
866 /* We've seen this already; skip it */
867 if (ln->ln_flags & ND6_LNF_TIMER_SKIP) {
868 RT_UNLOCK(rt);
869 ln = next;
870 continue;
871 }
872 ap->found++;
873
874 /* rt->rt_ifp should never be NULL */
875 if ((ifp = rt->rt_ifp) == NULL) {
876 panic("%s: ln(%p) rt(%p) rt_ifp == NULL", __func__,
877 ln, rt);
878 /* NOTREACHED */
879 }
880
881 /* rt_llinfo must always be equal to ln */
882 if ((struct llinfo_nd6 *)rt->rt_llinfo != ln) {
883 panic("%s: rt_llinfo(%p) is not equal to ln(%p)",
884 __func__, rt->rt_llinfo, ln);
885 /* NOTREACHED */
886 }
887
888 /* rt_key should never be NULL */
889 dst = SIN6(rt_key(rt));
890 if (dst == NULL) {
891 panic("%s: rt(%p) key is NULL ln(%p)", __func__,
892 rt, ln);
893 /* NOTREACHED */
894 }
895
896 /* Set the flag in case we jump to "again" */
897 ln->ln_flags |= ND6_LNF_TIMER_SKIP;
898
899 if (ln->ln_expire == 0 || (rt->rt_flags & RTF_STATIC)) {
900 ap->sticky++;
901 } else if (ap->draining && (rt->rt_refcnt == 0)) {
902 /*
903 * If we are draining, immediately purge non-static
904 * entries without oustanding route refcnt.
905 */
906 if (ln->ln_state > ND6_LLINFO_INCOMPLETE)
907 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_STALE);
908 else
909 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_PURGE);
910 ln_setexpire(ln, timenow);
911 }
912
913 /*
914 * If the entry has not expired, skip it. Take note on the
915 * state, as entries that are in the STALE state are simply
916 * waiting to be garbage collected, in which case we can
917 * relax the callout scheduling (use nd6_prune_lazy).
918 */
919 if (ln->ln_expire > timenow) {
920 switch (ln->ln_state) {
921 case ND6_LLINFO_STALE:
922 ap->aging_lazy++;
923 break;
924 default:
925 ap->aging++;
926 break;
927 }
928 RT_UNLOCK(rt);
929 ln = next;
930 continue;
931 }
932
933 ndi = ND_IFINFO(ifp);
934 VERIFY(ndi->initialized);
935 retrans = ndi->retrans;
936 flags = ndi->flags;
937
938 RT_LOCK_ASSERT_HELD(rt);
939 is_router = (rt->rt_flags & RTF_ROUTER) ? TRUE : FALSE;
940
941 switch (ln->ln_state) {
942 case ND6_LLINFO_INCOMPLETE:
943 if (ln->ln_asked < nd6_mmaxtries) {
944 struct ifnet *exclifp = ln->ln_exclifp;
945 ln->ln_asked++;
946 ln_setexpire(ln, timenow + retrans / 1000);
947 RT_ADDREF_LOCKED(rt);
948 RT_UNLOCK(rt);
949 lck_mtx_unlock(rnh_lock);
950 if (ip6_forwarding) {
951 nd6_prproxy_ns_output(ifp, exclifp,
952 NULL, &dst->sin6_addr, ln);
953 } else {
954 nd6_ns_output(ifp, NULL,
955 &dst->sin6_addr, ln, NULL);
956 }
957 RT_REMREF(rt);
958 ap->aging++;
959 lck_mtx_lock(rnh_lock);
960 } else {
961 struct mbuf *m = ln->ln_hold;
962 ln->ln_hold = NULL;
963 send_nc_failure_kev = is_router;
964 if (m != NULL) {
965 RT_ADDREF_LOCKED(rt);
966 RT_UNLOCK(rt);
967 lck_mtx_unlock(rnh_lock);
968
969 struct mbuf *mnext;
970 while (m) {
971 mnext = m->m_nextpkt;
972 m->m_nextpkt = NULL;
973 m->m_pkthdr.rcvif = ifp;
974 icmp6_error_flag(m, ICMP6_DST_UNREACH,
975 ICMP6_DST_UNREACH_ADDR, 0, 0);
976 m = mnext;
977 }
978 } else {
979 RT_ADDREF_LOCKED(rt);
980 RT_UNLOCK(rt);
981 lck_mtx_unlock(rnh_lock);
982 }
983
984 /*
985 * Enqueue work item to invoke callback for
986 * this route entry
987 */
988 route_event_enqueue_nwk_wq_entry(rt, NULL,
989 ROUTE_LLENTRY_UNREACH, NULL, FALSE);
990 nd6_free(rt);
991 ap->killed++;
992 lck_mtx_lock(rnh_lock);
993 /*
994 * nd6_free above would flush out the routing table of
995 * any cloned routes with same next-hop.
996 * Walk the tree anyways as there could be static routes
997 * left.
998 *
999 * We also already have a reference to rt that gets freed right
1000 * after the block below executes. Don't need an extra reference
1001 * on rt here.
1002 */
1003 if (is_router) {
1004 struct route_event rt_ev;
1005 route_event_init(&rt_ev, rt, NULL, ROUTE_LLENTRY_UNREACH);
1006 (void) rnh->rnh_walktree(rnh, route_event_walktree, (void *)&rt_ev);
1007 }
1008 rtfree_locked(rt);
1009 }
1010 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
1011 goto again;
1012
1013 case ND6_LLINFO_REACHABLE:
1014 if (ln->ln_expire != 0) {
1015 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_STALE);
1016 ln_setexpire(ln, timenow + nd6_gctimer);
1017 ap->aging_lazy++;
1018 /*
1019 * Enqueue work item to invoke callback for
1020 * this route entry
1021 */
1022 route_event_enqueue_nwk_wq_entry(rt, NULL,
1023 ROUTE_LLENTRY_STALE, NULL, TRUE);
1024
1025 RT_ADDREF_LOCKED(rt);
1026 RT_UNLOCK(rt);
1027 if (is_router) {
1028 struct route_event rt_ev;
1029 route_event_init(&rt_ev, rt, NULL, ROUTE_LLENTRY_STALE);
1030 (void) rnh->rnh_walktree(rnh, route_event_walktree, (void *)&rt_ev);
1031 }
1032 rtfree_locked(rt);
1033 } else {
1034 RT_UNLOCK(rt);
1035 }
1036 break;
1037
1038 case ND6_LLINFO_STALE:
1039 case ND6_LLINFO_PURGE:
1040 /* Garbage Collection(RFC 4861 5.3) */
1041 if (ln->ln_expire != 0) {
1042 RT_ADDREF_LOCKED(rt);
1043 RT_UNLOCK(rt);
1044 lck_mtx_unlock(rnh_lock);
1045 nd6_free(rt);
1046 ap->killed++;
1047 lck_mtx_lock(rnh_lock);
1048 rtfree_locked(rt);
1049 goto again;
1050 } else {
1051 RT_UNLOCK(rt);
1052 }
1053 break;
1054
1055 case ND6_LLINFO_DELAY:
1056 if ((flags & ND6_IFF_PERFORMNUD) != 0) {
1057 /* We need NUD */
1058 ln->ln_asked = 1;
1059 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_PROBE);
1060 ln_setexpire(ln, timenow + retrans / 1000);
1061 RT_ADDREF_LOCKED(rt);
1062 RT_UNLOCK(rt);
1063 lck_mtx_unlock(rnh_lock);
1064 nd6_ns_output(ifp, &dst->sin6_addr,
1065 &dst->sin6_addr, ln, NULL);
1066 RT_REMREF(rt);
1067 ap->aging++;
1068 lck_mtx_lock(rnh_lock);
1069 goto again;
1070 }
1071 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_STALE); /* XXX */
1072 ln_setexpire(ln, timenow + nd6_gctimer);
1073 RT_UNLOCK(rt);
1074 ap->aging_lazy++;
1075 break;
1076
1077 case ND6_LLINFO_PROBE:
1078 if (ln->ln_asked < nd6_umaxtries) {
1079 ln->ln_asked++;
1080 ln_setexpire(ln, timenow + retrans / 1000);
1081 RT_ADDREF_LOCKED(rt);
1082 RT_UNLOCK(rt);
1083 lck_mtx_unlock(rnh_lock);
1084 nd6_ns_output(ifp, &dst->sin6_addr,
1085 &dst->sin6_addr, ln, NULL);
1086 RT_REMREF(rt);
1087 ap->aging++;
1088 lck_mtx_lock(rnh_lock);
1089 } else {
1090 is_router = (rt->rt_flags & RTF_ROUTER) ? TRUE : FALSE;
1091 send_nc_failure_kev = is_router;
1092 RT_ADDREF_LOCKED(rt);
1093 RT_UNLOCK(rt);
1094 lck_mtx_unlock(rnh_lock);
1095 nd6_free(rt);
1096 ap->killed++;
1097
1098 /*
1099 * Enqueue work item to invoke callback for
1100 * this route entry
1101 */
1102 route_event_enqueue_nwk_wq_entry(rt, NULL,
1103 ROUTE_LLENTRY_UNREACH, NULL, FALSE);
1104
1105 lck_mtx_lock(rnh_lock);
1106 /*
1107 * nd6_free above would flush out the routing table of
1108 * any cloned routes with same next-hop.
1109 * Walk the tree anyways as there could be static routes
1110 * left.
1111 *
1112 * We also already have a reference to rt that gets freed right
1113 * after the block below executes. Don't need an extra reference
1114 * on rt here.
1115 */
1116 if (is_router) {
1117 struct route_event rt_ev;
1118 route_event_init(&rt_ev, rt, NULL, ROUTE_LLENTRY_UNREACH);
1119 (void) rnh->rnh_walktree(rnh,
1120 route_event_walktree, (void *)&rt_ev);
1121 }
1122 rtfree_locked(rt);
1123 }
1124 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
1125 goto again;
1126
1127 default:
1128 RT_UNLOCK(rt);
1129 break;
1130 }
1131 ln = next;
1132 }
1133 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
1134
1135 /* Now clear the flag from all entries */
1136 ln = llinfo_nd6.ln_next;
1137 while (ln != NULL && ln != &llinfo_nd6) {
1138 struct rtentry *rt = ln->ln_rt;
1139 struct llinfo_nd6 *next = ln->ln_next;
1140
1141 RT_LOCK_SPIN(rt);
1142 if (ln->ln_flags & ND6_LNF_TIMER_SKIP)
1143 ln->ln_flags &= ~ND6_LNF_TIMER_SKIP;
1144 RT_UNLOCK(rt);
1145 ln = next;
1146 }
1147 lck_mtx_unlock(rnh_lock);
1148
1149 /* expire default router list */
1150 TAILQ_INIT(&nd_defrouter_tmp);
1151
1152 lck_mtx_lock(nd6_mutex);
1153 TAILQ_FOREACH_SAFE(dr, &nd_defrouter, dr_entry, ndr) {
1154 ap->found++;
1155 if (dr->expire != 0 && dr->expire < timenow) {
1156 VERIFY(dr->ifp != NULL);
1157 in6_ifstat_inc(dr->ifp, ifs6_defrtr_expiry_cnt);
1158 in6_event_enqueue_nwk_wq_entry(IN6_NDP_RTR_EXPIRY, dr->ifp,
1159 &dr->rtaddr, dr->rtlifetime);
1160 if (dr->ifp != NULL &&
1161 dr->ifp->if_type == IFT_CELLULAR) {
1162 /*
1163 * Some buggy cellular gateways may not send
1164 * periodic router advertisements.
1165 * Or they may send it with router lifetime
1166 * value that is less than the configured Max and Min
1167 * Router Advertisement interval.
1168 * To top that an idle device may not wake up
1169 * when periodic RA is received on cellular
1170 * interface.
1171 * We could send RS on every wake but RFC
1172 * 4861 precludes that.
1173 * The addresses are of infinite lifetimes
1174 * and are tied to the lifetime of the bearer,
1175 * so keeping the addresses and just getting rid of
1176 * the router does not help us anyways.
1177 * If there's network renumbering, a lifetime with
1178 * value 0 would remove the default router.
1179 * Also it will get deleted as part of purge when
1180 * the PDP context is torn down and configured again.
1181 * For that reason, do not expire the default router
1182 * learned on cellular interface. Ever.
1183 */
1184 dr->expire += dr->rtlifetime;
1185 nd6log2((LOG_DEBUG,
1186 "%s: Refreshing expired default router entry "
1187 "%s for interface %s\n", __func__,
1188 ip6_sprintf(&dr->rtaddr), if_name(dr->ifp)));
1189 } else {
1190 ap->killed++;
1191 /*
1192 * Remove the entry from default router list
1193 * and add it to the temp list.
1194 * nd_defrouter_tmp will be a local temporary
1195 * list as no one else can get the same
1196 * removed entry once it is removed from default
1197 * router list.
1198 * Remove the reference after calling defrtrlist_del
1199 */
1200 TAILQ_REMOVE(&nd_defrouter, dr, dr_entry);
1201 TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry);
1202 }
1203 } else {
1204 if (dr->expire == 0 || (dr->stateflags & NDDRF_STATIC))
1205 ap->sticky++;
1206 else
1207 ap->aging_lazy++;
1208 }
1209 }
1210
1211 /*
1212 * Keep the following separate from the above
1213 * iteration of nd_defrouter because it's not safe
1214 * to call defrtrlist_del while iterating global default
1215 * router list. Global list has to be traversed
1216 * while holding nd6_mutex throughout.
1217 *
1218 * The following call to defrtrlist_del should be
1219 * safe as we are iterating a local list of
1220 * default routers.
1221 */
1222 TAILQ_FOREACH_SAFE(dr, &nd_defrouter_tmp, dr_entry, ndr) {
1223 TAILQ_REMOVE(&nd_defrouter_tmp, dr, dr_entry);
1224 defrtrlist_del(dr);
1225 NDDR_REMREF(dr); /* remove list reference */
1226 }
1227 lck_mtx_unlock(nd6_mutex);
1228
1229 /*
1230 * expire interface addresses.
1231 * in the past the loop was inside prefix expiry processing.
 * However, from a stricter spec-conformance standpoint, we should
1233 * rather separate address lifetimes and prefix lifetimes.
1234 */
1235 addrloop:
1236 lck_rw_lock_exclusive(&in6_ifaddr_rwlock);
1237 for (ia6 = in6_ifaddrs; ia6; ia6 = nia6) {
1238 int oldflags = ia6->ia6_flags;
1239 ap->found++;
1240 nia6 = ia6->ia_next;
1241 IFA_LOCK(&ia6->ia_ifa);
1242 /*
1243 * Extra reference for ourselves; it's no-op if
1244 * we don't have to regenerate temporary address,
1245 * otherwise it protects the address from going
1246 * away since we drop in6_ifaddr_rwlock below.
1247 */
1248 IFA_ADDREF_LOCKED(&ia6->ia_ifa);
1249 /* check address lifetime */
1250 if (IFA6_IS_INVALID(ia6, timenow)) {
1251 /*
1252 * If the expiring address is temporary, try
1253 * regenerating a new one. This would be useful when
1254 * we suspended a laptop PC, then turned it on after a
1255 * period that could invalidate all temporary
1256 * addresses. Although we may have to restart the
1257 * loop (see below), it must be after purging the
1258 * address. Otherwise, we'd see an infinite loop of
1259 * regeneration.
1260 */
1261 if (ip6_use_tempaddr &&
1262 (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0) {
1263 /*
1264 * NOTE: We have to drop the lock here
1265 * because regen_tmpaddr() eventually calls
1266 * in6_update_ifa(), which must take the lock
1267 * and would otherwise cause a hang. This is
1268 * safe because the goto addrloop leads to a
1269 * re-evaluation of the in6_ifaddrs list
1270 */
1271 IFA_UNLOCK(&ia6->ia_ifa);
1272 lck_rw_done(&in6_ifaddr_rwlock);
1273 (void) regen_tmpaddr(ia6);
1274 } else {
1275 IFA_UNLOCK(&ia6->ia_ifa);
1276 lck_rw_done(&in6_ifaddr_rwlock);
1277 }
1278
1279 /*
1280 * Purging the address would have caused
1281 * in6_ifaddr_rwlock to be dropped and reacquired;
1282 * therefore search again from the beginning
1283 * of in6_ifaddrs list.
1284 */
1285 in6_purgeaddr(&ia6->ia_ifa);
1286 ap->killed++;
1287
1288 if ((ia6->ia6_flags & IN6_IFF_TEMPORARY) == 0) {
1289 in6_ifstat_inc(ia6->ia_ifa.ifa_ifp, ifs6_addr_expiry_cnt);
1290 in6_event_enqueue_nwk_wq_entry(IN6_NDP_ADDR_EXPIRY,
1291 ia6->ia_ifa.ifa_ifp, &ia6->ia_addr.sin6_addr,
1292 0);
1293 }
1294 /* Release extra reference taken above */
1295 IFA_REMREF(&ia6->ia_ifa);
1296 goto addrloop;
1297 }
1298 /*
1299 * The lazy timer runs every nd6_prune_lazy seconds with at
1300 * most "2 * nd6_prune_lazy - 1" leeway. We consider the worst
1301 * case here and make sure we schedule the regular timer if an
1302 * interface address is about to expire.
1303 */
1304 if (IFA6_IS_INVALID(ia6, timenow + 3 * nd6_prune_lazy))
1305 ap->aging++;
1306 else
1307 ap->aging_lazy++;
1308 IFA_LOCK_ASSERT_HELD(&ia6->ia_ifa);
1309 if (IFA6_IS_DEPRECATED(ia6, timenow)) {
1310 ia6->ia6_flags |= IN6_IFF_DEPRECATED;
1311
1312 if((oldflags & IN6_IFF_DEPRECATED) == 0) {
1313 /*
1314 * Only enqueue the Deprecated event when the address just
1315 * becomes deprecated.
1316 * Keep it limited to the stable address as it is common for
1317 * older temporary addresses to get deprecated while we generate
1318 * new ones.
1319 */
1320 if ((ia6->ia6_flags & IN6_IFF_TEMPORARY) == 0) {
1321 in6_event_enqueue_nwk_wq_entry(IN6_ADDR_MARKED_DEPRECATED,
1322 ia6->ia_ifa.ifa_ifp, &ia6->ia_addr.sin6_addr,
1323 0);
1324 }
1325 }
1326 /*
1327 * If a temporary address has just become deprecated,
1328 * regenerate a new one if possible.
1329 */
1330 if (ip6_use_tempaddr &&
1331 (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0 &&
1332 (oldflags & IN6_IFF_DEPRECATED) == 0) {
1333
1334 /* see NOTE above */
1335 IFA_UNLOCK(&ia6->ia_ifa);
1336 lck_rw_done(&in6_ifaddr_rwlock);
1337 if (regen_tmpaddr(ia6) == 0) {
1338 /*
1339 * A new temporary address is
1340 * generated.
1341 * XXX: this means the address chain
1342 * has changed while we are still in
1343 * the loop. Although the change
1344 * would not cause disaster (because
1345 * it's not a deletion, but an
1346 * addition,) we'd rather restart the
1347 * loop just for safety. Or does this
1348 * significantly reduce performance??
1349 */
1350 /* Release extra reference */
1351 IFA_REMREF(&ia6->ia_ifa);
1352 goto addrloop;
1353 }
1354 lck_rw_lock_exclusive(&in6_ifaddr_rwlock);
1355 } else {
1356 IFA_UNLOCK(&ia6->ia_ifa);
1357 }
1358 } else {
1359 /*
1360 * A new RA might have made a deprecated address
1361 * preferred.
1362 */
1363 ia6->ia6_flags &= ~IN6_IFF_DEPRECATED;
1364 IFA_UNLOCK(&ia6->ia_ifa);
1365 }
1366 LCK_RW_ASSERT(&in6_ifaddr_rwlock, LCK_RW_ASSERT_EXCLUSIVE);
1367 /* Release extra reference taken above */
1368 IFA_REMREF(&ia6->ia_ifa);
1369 }
1370 lck_rw_done(&in6_ifaddr_rwlock);
1371
1372 lck_mtx_lock(nd6_mutex);
1373 /* expire prefix list */
1374 pr = nd_prefix.lh_first;
1375 while (pr != NULL) {
1376 ap->found++;
1377 /*
1378 * check prefix lifetime.
1379 * since pltime is just for autoconf, pltime processing for
1380 * prefix is not necessary.
1381 */
1382 NDPR_LOCK(pr);
1383 if (pr->ndpr_stateflags & NDPRF_PROCESSED_SERVICE ||
1384 pr->ndpr_stateflags & NDPRF_DEFUNCT) {
1385 pr->ndpr_stateflags |= NDPRF_PROCESSED_SERVICE;
1386 NDPR_UNLOCK(pr);
1387 pr = pr->ndpr_next;
1388 continue;
1389 }
1390 if (pr->ndpr_expire != 0 && pr->ndpr_expire < timenow) {
1391 /*
1392 * address expiration and prefix expiration are
1393 * separate. NEVER perform in6_purgeaddr here.
1394 */
1395 pr->ndpr_stateflags |= NDPRF_PROCESSED_SERVICE;
1396 NDPR_ADDREF_LOCKED(pr);
1397 prelist_remove(pr);
1398 NDPR_UNLOCK(pr);
1399
1400 in6_ifstat_inc(pr->ndpr_ifp, ifs6_pfx_expiry_cnt);
1401 in6_event_enqueue_nwk_wq_entry(IN6_NDP_PFX_EXPIRY,
1402 pr->ndpr_ifp, &pr->ndpr_prefix.sin6_addr,
1403 0);
1404 NDPR_REMREF(pr);
1405 pfxlist_onlink_check();
1406 pr = nd_prefix.lh_first;
1407 ap->killed++;
1408 } else {
1409 if (pr->ndpr_expire == 0 ||
1410 (pr->ndpr_stateflags & NDPRF_STATIC))
1411 ap->sticky++;
1412 else
1413 ap->aging_lazy++;
1414 pr->ndpr_stateflags |= NDPRF_PROCESSED_SERVICE;
1415 NDPR_UNLOCK(pr);
1416 pr = pr->ndpr_next;
1417 }
1418 }
1419 LIST_FOREACH(pr, &nd_prefix, ndpr_entry) {
1420 NDPR_LOCK(pr);
1421 pr->ndpr_stateflags &= ~NDPRF_PROCESSED_SERVICE;
1422 NDPR_UNLOCK(pr);
1423 }
1424 lck_mtx_unlock(nd6_mutex);
1425
1426 lck_mtx_lock(rnh_lock);
1427 /* We're done; let others enter */
1428 nd6_service_busy = FALSE;
1429 if (nd6_service_waiters > 0) {
1430 nd6_service_waiters = 0;
1431 wakeup(nd6_service_wc);
1432 }
1433 }
1434
1435
/*
 * Set by nd6_drain() under rnh_lock; consumed and cleared by
 * nd6_timeout(), which turns it into a draining pass of nd6_service().
 */
static int nd6_need_draining = 0;
1437
/*
 * Request that the ND6 service routine drain (garbage-collect) the
 * neighbor cache.  The actual work is done by nd6_timeout(), which
 * consumes nd6_need_draining; here we only set the flag and make sure
 * a service timer is scheduled, all under rnh_lock.
 */
void
nd6_drain(void *arg)
{
#pragma unused(arg)
	nd6log2((LOG_DEBUG, "%s: draining ND6 entries\n", __func__));

	lck_mtx_lock(rnh_lock);
	/* Flag is consumed (and cleared) by nd6_timeout() */
	nd6_need_draining = 1;
	/* NULL timeval requests the default interval; needs rnh_lock held */
	nd6_sched_timeout(NULL, NULL);
	lck_mtx_unlock(rnh_lock);
}
1449
/*
 * We use the ``arg'' variable to decide whether or not the timer we're
 * running is the fast timer. We do this to reset the nd6_fast_timer_on
 * variable so that later we don't end up ignoring a ``fast timer''
 * request if the 5 second timer is running (see nd6_sched_timeout).
 *
 * Runs nd6_service() once and re-arms the timer if the service pass
 * reported any aging entries (or a reschedule was explicitly requested
 * via nd6_sched_timeout_want).
 */
static void
nd6_timeout(void *arg)
{
	struct nd6svc_arg sarg;
	uint32_t buf;

	lck_mtx_lock(rnh_lock);
	bzero(&sarg, sizeof (sarg));
	/* Consume any pending drain request posted by nd6_drain() */
	if (nd6_need_draining != 0) {
		nd6_need_draining = 0;
		sarg.draining = 1;
	}
	nd6_service(&sarg);
	nd6log2((LOG_DEBUG, "%s: found %u, aging_lazy %u, aging %u, "
	    "sticky %u, killed %u\n", __func__, sarg.found, sarg.aging_lazy,
	    sarg.aging, sarg.sticky, sarg.killed));
	/* re-arm the timer if there's work to do */
	nd6_timeout_run--;
	VERIFY(nd6_timeout_run >= 0 && nd6_timeout_run < 2);
	if (arg == &nd6_fast_timer_on)
		nd6_fast_timer_on = FALSE;
	if (sarg.aging_lazy > 0 || sarg.aging > 0 || nd6_sched_timeout_want) {
		struct timeval atv, ltv, *leeway;
		int lazy = nd6_prune_lazy;

		if (sarg.aging > 0 || lazy < 1) {
			/*
			 * Entries need active aging: use the fast interval
			 * (nd6_prune) with no leeway.
			 */
			atv.tv_usec = 0;
			atv.tv_sec = nd6_prune;
			leeway = NULL;
		} else {
			/*
			 * Only lazy work pending: use the longer interval
			 * with a randomized leeway so that wakeups can be
			 * coalesced by the timer subsystem.
			 */
			VERIFY(lazy >= 1);
			atv.tv_usec = 0;
			atv.tv_sec = MAX(nd6_prune, lazy);
			ltv.tv_usec = 0;
			read_frandom(&buf, sizeof(buf));
			ltv.tv_sec = MAX(buf % lazy, 1) * 2;
			leeway = &ltv;
		}
		nd6_sched_timeout(&atv, leeway);
	} else if (nd6_debug) {
		nd6log2((LOG_DEBUG, "%s: not rescheduling timer\n", __func__));
	}
	lck_mtx_unlock(rnh_lock);
}
1500
/*
 * Schedule the ND6 service timer (nd6_timeout).
 *
 * atv is the requested interval; NULL means the default of
 * MAX(nd6_prune, 1) seconds with no leeway.  ltv is an optional leeway
 * used for timer coalescing.  A timer armed without leeway is the
 * "fast timer" (tracked via nd6_fast_timer_on); a fast-timer request
 * may still be honored while a single leeway-based timer is pending.
 * Must be called with rnh_lock held.
 */
void
nd6_sched_timeout(struct timeval *atv, struct timeval *ltv)
{
	struct timeval tv;

	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
	if (atv == NULL) {
		/* Caller asked for defaults: fixed interval, no leeway */
		tv.tv_usec = 0;
		tv.tv_sec = MAX(nd6_prune, 1);
		atv = &tv;
		ltv = NULL;	/* ignore leeway */
	}
	/* see comments on top of this file */
	if (nd6_timeout_run == 0) {
		/* No timer pending: arm one now */
		if (ltv == NULL) {
			nd6log2((LOG_DEBUG, "%s: timer scheduled in "
			    "T+%llus.%lluu (demand %d)\n", __func__,
			    (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec,
			    nd6_sched_timeout_want));
			nd6_fast_timer_on = TRUE;
			timeout(nd6_timeout, &nd6_fast_timer_on, tvtohz(atv));
		} else {
			nd6log2((LOG_DEBUG, "%s: timer scheduled in "
			    "T+%llus.%lluu with %llus.%lluu leeway "
			    "(demand %d)\n", __func__, (uint64_t)atv->tv_sec,
			    (uint64_t)atv->tv_usec, (uint64_t)ltv->tv_sec,
			    (uint64_t)ltv->tv_usec, nd6_sched_timeout_want));
			nd6_fast_timer_on = FALSE;
			timeout_with_leeway(nd6_timeout, NULL,
			    tvtohz(atv), tvtohz(ltv));
		}
		nd6_timeout_run++;
		nd6_sched_timeout_want = 0;
	} else if (nd6_timeout_run == 1 && ltv == NULL &&
	    nd6_fast_timer_on == FALSE) {
		/*
		 * Only a lazy (leeway) timer is pending and the caller
		 * wants a fast one: arm the fast timer alongside it.
		 */
		nd6log2((LOG_DEBUG, "%s: fast timer scheduled in "
		    "T+%llus.%lluu (demand %d)\n", __func__,
		    (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec,
		    nd6_sched_timeout_want));
		nd6_fast_timer_on = TRUE;
		nd6_sched_timeout_want = 0;
		nd6_timeout_run++;
		timeout(nd6_timeout, &nd6_fast_timer_on, tvtohz(atv));
	} else {
		/* Enough timers already pending; just log the decision */
		if (ltv == NULL) {
			nd6log2((LOG_DEBUG, "%s: not scheduling timer: "
			    "timers %d, fast_timer %d, T+%llus.%lluu\n",
			    __func__, nd6_timeout_run, nd6_fast_timer_on,
			    (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec));
		} else {
			nd6log2((LOG_DEBUG, "%s: not scheduling timer: "
			    "timers %d, fast_timer %d, T+%llus.%lluu "
			    "with %llus.%lluu leeway\n", __func__,
			    nd6_timeout_run, nd6_fast_timer_on,
			    (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec,
			    (uint64_t)ltv->tv_sec, (uint64_t)ltv->tv_usec));
		}
	}
}
1560
1561 /*
1562 * ND6 router advertisement kernel notification
1563 */
1564 void
1565 nd6_post_msg(u_int32_t code, struct nd_prefix_list *prefix_list,
1566 u_int32_t list_length, u_int32_t mtu)
1567 {
1568 struct kev_msg ev_msg;
1569 struct kev_nd6_ra_data nd6_ra_msg_data;
1570 struct nd_prefix_list *itr = prefix_list;
1571
1572 bzero(&ev_msg, sizeof (struct kev_msg));
1573 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1574 ev_msg.kev_class = KEV_NETWORK_CLASS;
1575 ev_msg.kev_subclass = KEV_ND6_SUBCLASS;
1576 ev_msg.event_code = code;
1577
1578 bzero(&nd6_ra_msg_data, sizeof (nd6_ra_msg_data));
1579
1580 if (mtu > 0 && mtu >= IPV6_MMTU) {
1581 nd6_ra_msg_data.mtu = mtu;
1582 nd6_ra_msg_data.flags |= KEV_ND6_DATA_VALID_MTU;
1583 }
1584
1585 if (list_length > 0 && prefix_list != NULL) {
1586 nd6_ra_msg_data.list_length = list_length;
1587 nd6_ra_msg_data.flags |= KEV_ND6_DATA_VALID_PREFIX;
1588 }
1589
1590 while (itr != NULL && nd6_ra_msg_data.list_index < list_length) {
1591 bcopy(&itr->pr.ndpr_prefix, &nd6_ra_msg_data.prefix.prefix,
1592 sizeof (nd6_ra_msg_data.prefix.prefix));
1593 nd6_ra_msg_data.prefix.raflags = itr->pr.ndpr_raf;
1594 nd6_ra_msg_data.prefix.prefixlen = itr->pr.ndpr_plen;
1595 nd6_ra_msg_data.prefix.origin = PR_ORIG_RA;
1596 nd6_ra_msg_data.prefix.vltime = itr->pr.ndpr_vltime;
1597 nd6_ra_msg_data.prefix.pltime = itr->pr.ndpr_pltime;
1598 nd6_ra_msg_data.prefix.expire = ndpr_getexpire(&itr->pr);
1599 nd6_ra_msg_data.prefix.flags = itr->pr.ndpr_stateflags;
1600 nd6_ra_msg_data.prefix.refcnt = itr->pr.ndpr_addrcnt;
1601 nd6_ra_msg_data.prefix.if_index = itr->pr.ndpr_ifp->if_index;
1602
1603 /* send the message up */
1604 ev_msg.dv[0].data_ptr = &nd6_ra_msg_data;
1605 ev_msg.dv[0].data_length = sizeof (nd6_ra_msg_data);
1606 ev_msg.dv[1].data_length = 0;
1607 dlil_post_complete_msg(NULL, &ev_msg);
1608
1609 /* clean up for the next prefix */
1610 bzero(&nd6_ra_msg_data.prefix, sizeof (nd6_ra_msg_data.prefix));
1611 itr = itr->next;
1612 nd6_ra_msg_data.list_index++;
1613 }
1614 }
1615
1616 /*
1617 * Regenerate deprecated/invalidated temporary address
1618 */
1619 static int
1620 regen_tmpaddr(struct in6_ifaddr *ia6)
1621 {
1622 struct ifaddr *ifa;
1623 struct ifnet *ifp;
1624 struct in6_ifaddr *public_ifa6 = NULL;
1625 uint64_t timenow = net_uptime();
1626
1627 ifp = ia6->ia_ifa.ifa_ifp;
1628 ifnet_lock_shared(ifp);
1629 TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
1630 struct in6_ifaddr *it6;
1631
1632 IFA_LOCK(ifa);
1633 if (ifa->ifa_addr->sa_family != AF_INET6) {
1634 IFA_UNLOCK(ifa);
1635 continue;
1636 }
1637 it6 = (struct in6_ifaddr *)ifa;
1638
1639 /* ignore no autoconf addresses. */
1640 if ((it6->ia6_flags & IN6_IFF_AUTOCONF) == 0) {
1641 IFA_UNLOCK(ifa);
1642 continue;
1643 }
1644 /* ignore autoconf addresses with different prefixes. */
1645 if (it6->ia6_ndpr == NULL || it6->ia6_ndpr != ia6->ia6_ndpr) {
1646 IFA_UNLOCK(ifa);
1647 continue;
1648 }
1649 /*
1650 * Now we are looking at an autoconf address with the same
1651 * prefix as ours. If the address is temporary and is still
1652 * preferred, do not create another one. It would be rare, but
1653 * could happen, for example, when we resume a laptop PC after
1654 * a long period.
1655 */
1656 if ((it6->ia6_flags & IN6_IFF_TEMPORARY) != 0 &&
1657 !IFA6_IS_DEPRECATED(it6, timenow)) {
1658 IFA_UNLOCK(ifa);
1659 if (public_ifa6 != NULL)
1660 IFA_REMREF(&public_ifa6->ia_ifa);
1661 public_ifa6 = NULL;
1662 break;
1663 }
1664
1665 /*
1666 * This is a public autoconf address that has the same prefix
1667 * as ours. If it is preferred, keep it. We can't break the
1668 * loop here, because there may be a still-preferred temporary
1669 * address with the prefix.
1670 */
1671 if (!IFA6_IS_DEPRECATED(it6, timenow)) {
1672 IFA_ADDREF_LOCKED(ifa); /* for public_ifa6 */
1673 IFA_UNLOCK(ifa);
1674 if (public_ifa6 != NULL)
1675 IFA_REMREF(&public_ifa6->ia_ifa);
1676 public_ifa6 = it6;
1677 } else {
1678 IFA_UNLOCK(ifa);
1679 }
1680 }
1681 ifnet_lock_done(ifp);
1682
1683 if (public_ifa6 != NULL) {
1684 int e;
1685
1686 if ((e = in6_tmpifadd(public_ifa6, 0)) != 0) {
1687 log(LOG_NOTICE, "regen_tmpaddr: failed to create a new"
1688 " tmp addr,errno=%d\n", e);
1689 IFA_REMREF(&public_ifa6->ia_ifa);
1690 return (-1);
1691 }
1692 IFA_REMREF(&public_ifa6->ia_ifa);
1693 return (0);
1694 }
1695
1696 return (-1);
1697 }
1698
/*
 * Nuke neighbor cache/prefix/default router management table, right before
 * ifp goes away.
 */
void
nd6_purge(struct ifnet *ifp)
{
	struct llinfo_nd6 *ln;
	struct nd_defrouter *dr, *ndr;
	struct nd_prefix *pr, *npr;
	boolean_t removed;
	struct nd_drhead nd_defrouter_tmp;

	TAILQ_INIT(&nd_defrouter_tmp);

	/* Nuke default router list entries toward ifp */
	lck_mtx_lock(nd6_mutex);
	TAILQ_FOREACH_SAFE(dr, &nd_defrouter, dr_entry, ndr) {
		if (dr->ifp != ifp)
			continue;
		/*
		 * Remove the entry from default router list
		 * and add it to the temp list.
		 * nd_defrouter_tmp will be a local temporary
		 * list as no one else can get the same
		 * removed entry once it is removed from default
		 * router list.
		 * Remove the reference after calling defrtrlist_del.
		 *
		 * The uninstalled entries have to be iterated first
		 * when we call defrtrlist_del.
		 * This is to ensure that we don't end up calling
		 * default router selection when there are other
		 * uninstalled candidate default routers on
		 * the interface.
		 * If we don't respect that order, we may end
		 * up missing out on some entries.
		 *
		 * For that reason, installed ones must be inserted
		 * at the tail and uninstalled ones at the head.
		 */
		TAILQ_REMOVE(&nd_defrouter, dr, dr_entry);

		if (dr->stateflags & NDDRF_INSTALLED)
			TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry);
		else
			TAILQ_INSERT_HEAD(&nd_defrouter_tmp, dr, dr_entry);
	}

	/*
	 * The following call to defrtrlist_del should be
	 * safe as we are iterating a local list of
	 * default routers.
	 *
	 * We don't really need nd6_mutex here but keeping
	 * it as it is to avoid changing assertions held in
	 * the functions in the call-path.
	 */
	TAILQ_FOREACH_SAFE(dr, &nd_defrouter_tmp, dr_entry, ndr) {
		TAILQ_REMOVE(&nd_defrouter_tmp, dr, dr_entry);
		defrtrlist_del(dr);
		NDDR_REMREF(dr);	/* remove list reference */
	}

	/* Nuke prefix list entries toward ifp */
	removed = FALSE;
	for (pr = nd_prefix.lh_first; pr; pr = npr) {
		NDPR_LOCK(pr);
		npr = pr->ndpr_next;
		if (pr->ndpr_ifp == ifp &&
		    !(pr->ndpr_stateflags & NDPRF_DEFUNCT)) {
			/*
			 * Because if_detach() does *not* release prefixes
			 * while purging addresses the reference count will
			 * still be above zero. We therefore reset it to
			 * make sure that the prefix really gets purged.
			 */
			pr->ndpr_addrcnt = 0;

			/*
			 * Previously, pr->ndpr_addr is removed as well,
			 * but I strongly believe we don't have to do it.
			 * nd6_purge() is only called from in6_ifdetach(),
			 * which removes all the associated interface addresses
			 * by itself.
			 * (jinmei@kame.net 20010129)
			 */
			NDPR_ADDREF_LOCKED(pr);
			prelist_remove(pr);
			NDPR_UNLOCK(pr);
			NDPR_REMREF(pr);
			removed = TRUE;
			/*
			 * prelist_remove() may have changed the list;
			 * restart from the head to stay safe.
			 */
			npr = nd_prefix.lh_first;
		} else {
			NDPR_UNLOCK(pr);
		}
	}
	if (removed)
		pfxlist_onlink_check();
	lck_mtx_unlock(nd6_mutex);

	/* cancel default outgoing interface setting */
	if (nd6_defifindex == ifp->if_index) {
		nd6_setdefaultiface(0);
	}

	/*
	 * Perform default router selection even when we are a router,
	 * if Scoped Routing is enabled.
	 */
	lck_mtx_lock(nd6_mutex);
	/* refresh default router list */
	defrouter_select(ifp);
	lck_mtx_unlock(nd6_mutex);

	/*
	 * Nuke neighbor cache entries for the ifp.
	 * Note that rt->rt_ifp may not be the same as ifp,
	 * due to KAME goto ours hack. See RTM_RESOLVE case in
	 * nd6_rtrequest(), and ip6_input().
	 */
again:
	lck_mtx_lock(rnh_lock);
	ln = llinfo_nd6.ln_next;
	while (ln != NULL && ln != &llinfo_nd6) {
		struct rtentry *rt;
		struct llinfo_nd6 *nln;

		nln = ln->ln_next;
		rt = ln->ln_rt;
		RT_LOCK(rt);
		if (rt->rt_gateway != NULL &&
		    rt->rt_gateway->sa_family == AF_LINK &&
		    SDL(rt->rt_gateway)->sdl_index == ifp->if_index) {
			RT_ADDREF_LOCKED(rt);
			RT_UNLOCK(rt);
			lck_mtx_unlock(rnh_lock);
			/*
			 * See comments on nd6_service() for reasons why
			 * this loop is repeated; we bite the costs of
			 * going thru the same llinfo_nd6 more than once
			 * here, since this purge happens during detach,
			 * and that unlike the timer case, it's possible
			 * there's more than one purges happening at the
			 * same time (thus a flag wouldn't buy anything).
			 */
			nd6_free(rt);
			RT_REMREF(rt);
			LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
			goto again;
		} else {
			RT_UNLOCK(rt);
		}
		ln = nln;
	}
	lck_mtx_unlock(rnh_lock);
}
1856
/*
 * Look up (and optionally create) the neighbor cache entry for addr6.
 *
 * Upon success, the returned route will be locked and the caller is
 * responsible for releasing the reference and doing RT_UNLOCK(rt).
 * This routine does not require rnh_lock to be held by the caller,
 * although it needs to be indicated of such a case in order to call
 * the correct variant of the relevant routing routines.
 */
struct rtentry *
nd6_lookup(struct in6_addr *addr6, int create, struct ifnet *ifp, int rt_locked)
{
	struct rtentry *rt;
	struct sockaddr_in6 sin6;
	unsigned int ifscope;

	/* Build a sockaddr_in6 for the routing lookup */
	bzero(&sin6, sizeof (sin6));
	sin6.sin6_len = sizeof (struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_addr = *addr6;

	ifscope = (ifp != NULL) ? ifp->if_index : IFSCOPE_NONE;
	if (rt_locked) {
		LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
		rt = rtalloc1_scoped_locked(SA(&sin6), create, 0, ifscope);
	} else {
		rt = rtalloc1_scoped(SA(&sin6), create, 0, ifscope);
	}

	if (rt != NULL) {
		RT_LOCK(rt);
		if ((rt->rt_flags & RTF_LLINFO) == 0) {
			/*
			 * This is the case for the default route.
			 * If we want to create a neighbor cache for the
			 * address, we should free the route for the
			 * destination and allocate an interface route.
			 */
			if (create) {
				RT_UNLOCK(rt);
				if (rt_locked)
					rtfree_locked(rt);
				else
					rtfree(rt);
				rt = NULL;
			}
		}
	}
	if (rt == NULL) {
		if (create && ifp) {
			struct ifaddr *ifa;
			u_int32_t ifa_flags;
			int e;

			/*
			 * If no route is available and create is set,
			 * we allocate a host route for the destination
			 * and treat it like an interface route.
			 * This hack is necessary for a neighbor which can't
			 * be covered by our own prefix.
			 */
			ifa = ifaof_ifpforaddr(SA(&sin6), ifp);
			if (ifa == NULL)
				return (NULL);

			/*
			 * Create a new route. RTF_LLINFO is necessary
			 * to create a Neighbor Cache entry for the
			 * destination in nd6_rtrequest which will be
			 * called in rtrequest via ifa->ifa_rtrequest.
			 */
			if (!rt_locked)
				lck_mtx_lock(rnh_lock);
			IFA_LOCK_SPIN(ifa);
			ifa_flags = ifa->ifa_flags;
			IFA_UNLOCK(ifa);
			if ((e = rtrequest_scoped_locked(RTM_ADD,
			    SA(&sin6), ifa->ifa_addr, SA(&all1_sa),
			    (ifa_flags | RTF_HOST | RTF_LLINFO) &
			    ~RTF_CLONING, &rt, ifscope)) != 0) {
				if (e != EEXIST)
					log(LOG_ERR, "%s: failed to add route "
					    "for a neighbor(%s), errno=%d\n",
					    __func__, ip6_sprintf(addr6), e);
			}
			if (!rt_locked)
				lck_mtx_unlock(rnh_lock);
			IFA_REMREF(ifa);
			if (rt == NULL)
				return (NULL);

			RT_LOCK(rt);
			if (rt->rt_llinfo) {
				struct llinfo_nd6 *ln = rt->rt_llinfo;
				struct nd_ifinfo *ndi = ND_IFINFO(rt->rt_ifp);

				VERIFY((NULL != ndi) && (TRUE == ndi->initialized));
				/*
				 * For interfaces that do not perform NUD,
				 * neighbor cache entries must always be marked
				 * reachable with no expiry.
				 */
				if (ndi->flags & ND6_IFF_PERFORMNUD) {
					ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_NOSTATE);
				} else {
					ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_REACHABLE);
					ln_setexpire(ln, 0);
				}
			}
		} else {
			return (NULL);
		}
	}
	RT_LOCK_ASSERT_HELD(rt);
	/*
	 * Validation for the entry.
	 * Note that the check for rt_llinfo is necessary because a cloned
	 * route from a parent route that has the L flag (e.g. the default
	 * route to a p2p interface) may have the flag, too, while the
	 * destination is not actually a neighbor.
	 * XXX: we can't use rt->rt_ifp to check for the interface, since
	 * it might be the loopback interface if the entry is for our
	 * own address on a non-loopback interface. Instead, we should
	 * use rt->rt_ifa->ifa_ifp, which would specify the REAL
	 * interface.
	 * Note also that ifa_ifp and ifp may differ when we connect two
	 * interfaces to a same link, install a link prefix to an interface,
	 * and try to install a neighbor cache on an interface that does not
	 * have a route to the prefix.
	 *
	 * If the address is from a proxied prefix, the ifa_ifp and ifp might
	 * not match, because nd6_na_input() could have modified the ifp
	 * of the route to point to the interface where the NA arrived on,
	 * hence the test for RTF_PROXY.
	 */
	if ((rt->rt_flags & RTF_GATEWAY) || (rt->rt_flags & RTF_LLINFO) == 0 ||
	    rt->rt_gateway->sa_family != AF_LINK || rt->rt_llinfo == NULL ||
	    (ifp && rt->rt_ifa->ifa_ifp != ifp &&
	    !(rt->rt_flags & RTF_PROXY))) {
		RT_REMREF_LOCKED(rt);
		RT_UNLOCK(rt);
		if (create) {
			log(LOG_DEBUG, "%s: failed to lookup %s "
			    "(if = %s)\n", __func__, ip6_sprintf(addr6),
			    ifp ? if_name(ifp) : "unspec");
			/* xxx more logs... kazu */
		}
		return (NULL);
	}
	/*
	 * Caller needs to release reference and call RT_UNLOCK(rt).
	 */
	return (rt);
}
2009
2010 /*
2011 * Test whether a given IPv6 address is a neighbor or not, ignoring
2012 * the actual neighbor cache. The neighbor cache is ignored in order
2013 * to not reenter the routing code from within itself.
2014 */
2015 static int
2016 nd6_is_new_addr_neighbor(struct sockaddr_in6 *addr, struct ifnet *ifp)
2017 {
2018 struct nd_prefix *pr;
2019 struct ifaddr *dstaddr;
2020
2021 LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED);
2022
2023 /*
2024 * A link-local address is always a neighbor.
2025 * XXX: a link does not necessarily specify a single interface.
2026 */
2027 if (IN6_IS_ADDR_LINKLOCAL(&addr->sin6_addr)) {
2028 struct sockaddr_in6 sin6_copy;
2029 u_int32_t zone;
2030
2031 /*
2032 * We need sin6_copy since sa6_recoverscope() may modify the
2033 * content (XXX).
2034 */
2035 sin6_copy = *addr;
2036 if (sa6_recoverscope(&sin6_copy, FALSE))
2037 return (0); /* XXX: should be impossible */
2038 if (in6_setscope(&sin6_copy.sin6_addr, ifp, &zone))
2039 return (0);
2040 if (sin6_copy.sin6_scope_id == zone)
2041 return (1);
2042 else
2043 return (0);
2044 }
2045
2046 /*
2047 * If the address matches one of our addresses,
2048 * it should be a neighbor.
2049 * If the address matches one of our on-link prefixes, it should be a
2050 * neighbor.
2051 */
2052 for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) {
2053 NDPR_LOCK(pr);
2054 if (pr->ndpr_ifp != ifp) {
2055 NDPR_UNLOCK(pr);
2056 continue;
2057 }
2058 if (!(pr->ndpr_stateflags & NDPRF_ONLINK)) {
2059 NDPR_UNLOCK(pr);
2060 continue;
2061 }
2062 if (IN6_ARE_MASKED_ADDR_EQUAL(&pr->ndpr_prefix.sin6_addr,
2063 &addr->sin6_addr, &pr->ndpr_mask)) {
2064 NDPR_UNLOCK(pr);
2065 return (1);
2066 }
2067 NDPR_UNLOCK(pr);
2068 }
2069
2070 /*
2071 * If the address is assigned on the node of the other side of
2072 * a p2p interface, the address should be a neighbor.
2073 */
2074 dstaddr = ifa_ifwithdstaddr(SA(addr));
2075 if (dstaddr != NULL) {
2076 if (dstaddr->ifa_ifp == ifp) {
2077 IFA_REMREF(dstaddr);
2078 return (1);
2079 }
2080 IFA_REMREF(dstaddr);
2081 dstaddr = NULL;
2082 }
2083
2084 return (0);
2085 }
2086
2087
2088 /*
2089 * Detect if a given IPv6 address identifies a neighbor on a given link.
2090 * XXX: should take care of the destination of a p2p link?
2091 */
/*
 * Return 1 if 'addr' is a neighbor on 'ifp', 0 otherwise.  First checks
 * configuration (link-local scope, on-link prefixes, p2p destinations)
 * via nd6_is_new_addr_neighbor() under nd6_mutex, then falls back to a
 * neighbor-cache lookup with create=0.
 *
 * 'rt_locked' is passed through to nd6_lookup() (nonzero when the caller
 * already holds rnh_lock).  Precondition: nd6_mutex NOT held (asserted);
 * it is taken and released locally.
 */
2092 int
2093 nd6_is_addr_neighbor(struct sockaddr_in6 *addr, struct ifnet *ifp,
2094 int rt_locked)
2095 {
2096 struct rtentry *rt;
2097 
2098 LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_NOTOWNED);
2099 lck_mtx_lock(nd6_mutex);
2100 if (nd6_is_new_addr_neighbor(addr, ifp)) {
2101 lck_mtx_unlock(nd6_mutex);
2102 return (1);
2103 }
2104 lck_mtx_unlock(nd6_mutex);
2105 
2106 /*
2107 * Even if the address matches none of our addresses, it might be
2108 * in the neighbor cache.
2109 */
/*
 * nd6_lookup() returns the route locked and with an extra reference;
 * release both before returning.
 */
2110 if ((rt = nd6_lookup(&addr->sin6_addr, 0, ifp, rt_locked)) != NULL) {
2111 RT_LOCK_ASSERT_HELD(rt);
2112 RT_REMREF_LOCKED(rt);
2113 RT_UNLOCK(rt);
2114 return (1);
2115 }
2116 
2117 return (0);
2118 }
2119
2120 /*
2121 * Free an nd6 llinfo entry.
2122 * Since the function would cause significant changes in the kernel, DO NOT
2123 * make it global, unless you have a strong reason for the change, and are sure
2124 * that the change is safe.
2125 */
/*
 * Tear down a neighbor-cache route entry: mark it condemned, perform
 * default-router cleanup if the neighbor was (or is listed as) a router,
 * then delete the route via rtrequest(RTM_DELETE) and drop the extra
 * reference taken here.
 *
 * Lock protocol: enters with neither rnh_lock nor rt's lock held
 * (asserted); takes nd6_mutex for the defrouter work and drops it before
 * the final rtrequest()/rtfree().
 */
2126 void
2127 nd6_free(struct rtentry *rt)
2128 {
2129 struct llinfo_nd6 *ln;
2130 struct in6_addr in6;
2131 struct nd_defrouter *dr;
2132 
2133 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
2134 RT_LOCK_ASSERT_NOTHELD(rt);
2135 lck_mtx_lock(nd6_mutex);
2136 
2137 RT_LOCK(rt);
/* Extra ref keeps rt alive across the unlocked windows below. */
2138 RT_ADDREF_LOCKED(rt); /* Extra ref */
2139 ln = rt->rt_llinfo;
2140 in6 = SIN6(rt_key(rt))->sin6_addr;
2141 
2142 /*
2143 * Prevent another thread from modifying rt_key, rt_gateway
2144 * via rt_setgate() after the rt_lock is dropped by marking
2145 * the route as defunct.
2146 */
2147 rt->rt_flags |= RTF_CONDEMNED;
2148 
2149 /*
2150 * We used to have pfctlinput(PRC_HOSTDEAD) here. Even though it is
2151 * not harmful, it was not really necessary. Perform default router
2152 * selection even when we are a router, if Scoped Routing is enabled.
2153 */
/* defrouter_lookup() returns a referenced entry (released below). */
2154 dr = defrouter_lookup(&SIN6(rt_key(rt))->sin6_addr, rt->rt_ifp);
2155 
2156 if ((ln && ln->ln_router) || dr) {
2157 /*
2158 * rt6_flush must be called whether or not the neighbor
2159 * is in the Default Router List.
2160 * See a corresponding comment in nd6_na_input().
2161 */
2162 RT_UNLOCK(rt);
2163 lck_mtx_unlock(nd6_mutex);
/*
 * NOTE(review): rt->rt_ifp is read here after RT_UNLOCK; the
 * extra ref keeps rt valid, and RTF_CONDEMNED was set above —
 * presumably that is what keeps rt_ifp stable.  Confirm against
 * the rtentry locking model.
 */
2164 rt6_flush(&in6, rt->rt_ifp);
2165 lck_mtx_lock(nd6_mutex);
2166 } else {
2167 RT_UNLOCK(rt);
2168 }
2169 
2170 if (dr) {
2171 NDDR_REMREF(dr);
2172 /*
2173 * Unreachablity of a router might affect the default
2174 * router selection and on-link detection of advertised
2175 * prefixes.
2176 */
2177 
2178 /*
2179 * Temporarily fake the state to choose a new default
2180 * router and to perform on-link determination of
2181 * prefixes correctly.
2182 * Below the state will be set correctly,
2183 * or the entry itself will be deleted.
2184 */
2185 RT_LOCK_SPIN(rt);
/*
 * NOTE(review): 'ln' may be NULL here — the guard above accepts
 * (ln == NULL && dr != NULL).  Whether this dereferences ln
 * depends on the ND6_CACHE_STATE_TRANSITION macro; in practice
 * nd6 routes appear to always carry llinfo.  Worth confirming.
 */
2186 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_INCOMPLETE);
2187 
2188 /*
2189 * Since defrouter_select() does not affect the
2190 * on-link determination and MIP6 needs the check
2191 * before the default router selection, we perform
2192 * the check now.
2193 */
2194 RT_UNLOCK(rt);
2195 pfxlist_onlink_check();
2196 
2197 /*
2198 * refresh default router list
2199 */
2200 defrouter_select(rt->rt_ifp);
2201 }
2202 RT_LOCK_ASSERT_NOTHELD(rt);
2203 lck_mtx_unlock(nd6_mutex);
2204 /*
2205 * Detach the route from the routing tree and the list of neighbor
2206 * caches, and disable the route entry not to be used in already
2207 * cached routes.
2208 */
2209 (void) rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt), 0, NULL);
2210 
2211 /* Extra ref held above; now free it */
2212 rtfree(rt);
2213 }
2214
/*
 * Routing-table callback for ND6 routes, invoked from rtrequest() with
 * rnh_lock and the route lock held (both asserted).  Handles:
 *   RTM_ADD     — set an AF_LINK gateway on cloning/on-link routes;
 *   RTM_RESOLVE — create and initialize the llinfo_nd6 neighbor-cache
 *                 entry (falls through from RTM_ADD), including loopback
 *                 rerouting for our own addresses and solicited-node
 *                 multicast join for proxy-ND (RTF_ANNOUNCE) entries;
 *   RTM_DELETE  — unchain the llinfo entry and leave the multicast group.
 * The llinfo itself is freed later with the route, not here.
 */
2215 void
2216 nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa)
2217 {
2218 #pragma unused(sa)
2219 struct sockaddr *gate = rt->rt_gateway;
2220 struct llinfo_nd6 *ln = rt->rt_llinfo;
2221 static struct sockaddr_dl null_sdl =
2222 { .sdl_len = sizeof (null_sdl), .sdl_family = AF_LINK };
2223 struct ifnet *ifp = rt->rt_ifp;
2224 struct ifaddr *ifa;
2225 uint64_t timenow;
2226 char buf[MAX_IPv6_STR_LEN];
2227 struct nd_ifinfo *ndi = ND_IFINFO(rt->rt_ifp);
2228 
2229 VERIFY((NULL != ndi) && (TRUE == ndi->initialized));
2230 VERIFY(nd6_init_done);
2231 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
2232 RT_LOCK_ASSERT_HELD(rt);
2233 
2234 /*
2235 * We have rnh_lock held, see if we need to schedule the timer;
2236 * we might do this again below during RTM_RESOLVE, but doing it
2237 * now handles all other cases.
2238 */
2239 if (nd6_sched_timeout_want)
2240 nd6_sched_timeout(NULL, NULL);
2241 
/* Gatewayed routes are not neighbors; nothing to cache. */
2242 if (rt->rt_flags & RTF_GATEWAY)
2243 return;
2244 
2245 if (!nd6_need_cache(ifp) && !(rt->rt_flags & RTF_HOST)) {
2246 /*
2247 * This is probably an interface direct route for a link
2248 * which does not need neighbor caches (e.g. fe80::%lo0/64).
2249 * We do not need special treatment below for such a route.
2250 * Moreover, the RTF_LLINFO flag which would be set below
2251 * would annoy the ndp(8) command.
2252 */
2253 return;
2254 }
2255 
2256 if (req == RTM_RESOLVE) {
2257 int no_nd_cache;
2258 
2259 if (!nd6_need_cache(ifp)) { /* stf case */
2260 no_nd_cache = 1;
2261 } else {
2262 struct sockaddr_in6 sin6;
2263 
2264 rtkey_to_sa6(rt, &sin6);
2265 /*
2266 * nd6_is_addr_neighbor() may call nd6_lookup(),
2267 * therefore we drop rt_lock to avoid deadlock
2268 * during the lookup.
2269 */
2270 RT_ADDREF_LOCKED(rt);
2271 RT_UNLOCK(rt);
2272 no_nd_cache = !nd6_is_addr_neighbor(&sin6, ifp, 1);
2273 RT_LOCK(rt);
2274 RT_REMREF_LOCKED(rt);
2275 }
2276 
2277 /*
2278 * FreeBSD and BSD/OS often make a cloned host route based
2279 * on a less-specific route (e.g. the default route).
2280 * If the less specific route does not have a "gateway"
2281 * (this is the case when the route just goes to a p2p or an
2282 * stf interface), we'll mistakenly make a neighbor cache for
2283 * the host route, and will see strange neighbor solicitation
2284 * for the corresponding destination. In order to avoid the
2285 * confusion, we check if the destination of the route is
2286 * a neighbor in terms of neighbor discovery, and stop the
2287 * process if not. Additionally, we remove the LLINFO flag
2288 * so that ndp(8) will not try to get the neighbor information
2289 * of the destination.
2290 */
2291 if (no_nd_cache) {
2292 rt->rt_flags &= ~RTF_LLINFO;
2293 return;
2294 }
2295 }
2296 
2297 timenow = net_uptime();
2298 
2299 switch (req) {
2300 case RTM_ADD:
2301 /*
2302 * There is no backward compatibility :)
2303 *
2304 * if ((rt->rt_flags & RTF_HOST) == 0 &&
2305 * SIN(rt_mask(rt))->sin_addr.s_addr != 0xffffffff)
2306 * rt->rt_flags |= RTF_CLONING;
2307 */
2308 if ((rt->rt_flags & RTF_CLONING) ||
2309 ((rt->rt_flags & RTF_LLINFO) && ln == NULL)) {
2310 /*
2311 * Case 1: This route should come from a route to
2312 * interface (RTF_CLONING case) or the route should be
2313 * treated as on-link but is currently not
2314 * (RTF_LLINFO && ln == NULL case).
2315 */
/*
 * Install an empty AF_LINK sockaddr as the gateway; it is
 * filled with the neighbor's lladdr when resolution completes.
 */
2316 if (rt_setgate(rt, rt_key(rt), SA(&null_sdl)) == 0) {
2317 gate = rt->rt_gateway;
2318 SDL(gate)->sdl_type = ifp->if_type;
2319 SDL(gate)->sdl_index = ifp->if_index;
2320 /*
2321 * In case we're called before 1.0 sec.
2322 * has elapsed.
2323 */
2324 if (ln != NULL) {
2325 ln_setexpire(ln,
2326 (ifp->if_eflags & IFEF_IPV6_ND6ALT)
2327 ? 0 : MAX(timenow, 1));
2328 }
2329 }
2330 if (rt->rt_flags & RTF_CLONING)
2331 break;
2332 }
2333 /*
2334 * In IPv4 code, we try to annonuce new RTF_ANNOUNCE entry here.
2335 * We don't do that here since llinfo is not ready yet.
2336 *
2337 * There are also couple of other things to be discussed:
2338 * - unsolicited NA code needs improvement beforehand
2339 * - RFC4861 says we MAY send multicast unsolicited NA
2340 * (7.2.6 paragraph 4), however, it also says that we
2341 * SHOULD provide a mechanism to prevent multicast NA storm.
2342 * we don't have anything like it right now.
2343 * note that the mechanism needs a mutual agreement
2344 * between proxies, which means that we need to implement
2345 * a new protocol, or a new kludge.
2346 * - from RFC4861 6.2.4, host MUST NOT send an unsolicited RA.
2347 * we need to check ip6forwarding before sending it.
2348 * (or should we allow proxy ND configuration only for
2349 * routers? there's no mention about proxy ND from hosts)
2350 */
2351 /* FALLTHROUGH */
2352 case RTM_RESOLVE:
2353 if (!(ifp->if_flags & (IFF_POINTOPOINT | IFF_LOOPBACK))) {
2354 /*
2355 * Address resolution isn't necessary for a point to
2356 * point link, so we can skip this test for a p2p link.
2357 */
2358 if (gate->sa_family != AF_LINK ||
2359 gate->sa_len < sizeof (null_sdl)) {
2360 /* Don't complain in case of RTM_ADD */
2361 if (req == RTM_RESOLVE) {
2362 log(LOG_ERR, "%s: route to %s has bad "
2363 "gateway address (sa_family %u "
2364 "sa_len %u) on %s\n", __func__,
2365 inet_ntop(AF_INET6,
2366 &SIN6(rt_key(rt))->sin6_addr, buf,
2367 sizeof (buf)), gate->sa_family,
2368 gate->sa_len, if_name(ifp));
2369 }
2370 break;
2371 }
2372 SDL(gate)->sdl_type = ifp->if_type;
2373 SDL(gate)->sdl_index = ifp->if_index;
2374 }
2375 if (ln != NULL)
2376 break; /* This happens on a route change */
2377 /*
2378 * Case 2: This route may come from cloning, or a manual route
2379 * add with a LL address.
2380 */
2381 rt->rt_llinfo = ln = nd6_llinfo_alloc(M_WAITOK);
2382 if (ln == NULL)
2383 break;
2384 
2385 nd6_allocated++;
/* Wire up the per-route llinfo accessor/maintenance callbacks. */
2386 rt->rt_llinfo_get_ri = nd6_llinfo_get_ri;
2387 rt->rt_llinfo_get_iflri = nd6_llinfo_get_iflri;
2388 rt->rt_llinfo_purge = nd6_llinfo_purge;
2389 rt->rt_llinfo_free = nd6_llinfo_free;
2390 rt->rt_llinfo_refresh = nd6_llinfo_refresh;
2391 rt->rt_flags |= RTF_LLINFO;
2392 ln->ln_rt = rt;
2393 /* this is required for "ndp" command. - shin */
2394 /*
2395 * For interface's that do not perform NUD
2396 * neighbor cache entries must always be marked
2397 * reachable with no expiry
2398 */
2399 if ((req == RTM_ADD) ||
2400 !(ndi->flags & ND6_IFF_PERFORMNUD)) {
2401 /*
2402 * gate should have some valid AF_LINK entry,
2403 * and ln->ln_expire should have some lifetime
2404 * which is specified by ndp command.
2405 */
2406 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_REACHABLE);
2407 ln_setexpire(ln, 0);
2408 } else {
2409 /*
2410 * When req == RTM_RESOLVE, rt is created and
2411 * initialized in rtrequest(), so rt_expire is 0.
2412 */
2413 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_NOSTATE);
2414 /* In case we're called before 1.0 sec. has elapsed */
2415 ln_setexpire(ln, (ifp->if_eflags & IFEF_IPV6_ND6ALT) ?
2416 0 : MAX(timenow, 1));
2417 }
2418 LN_INSERTHEAD(ln);
2419 nd6_inuse++;
2420 
2421 /* We have at least one entry; arm the timer if not already */
2422 nd6_sched_timeout(NULL, NULL);
2423 
2424 /*
2425 * If we have too many cache entries, initiate immediate
2426 * purging for some "less recently used" entries. Note that
2427 * we cannot directly call nd6_free() here because it would
2428 * cause re-entering rtable related routines triggering an LOR
2429 * problem.
2430 */
2431 if (ip6_neighborgcthresh > 0 &&
2432 nd6_inuse >= ip6_neighborgcthresh) {
2433 int i;
2434 
/*
 * Expire at most 10 LRU entries (tail of llinfo_nd6) by
 * setting their expiry to "now"; the timer does the freeing.
 */
2435 for (i = 0; i < 10 && llinfo_nd6.ln_prev != ln; i++) {
2436 struct llinfo_nd6 *ln_end = llinfo_nd6.ln_prev;
2437 struct rtentry *rt_end = ln_end->ln_rt;
2438 
2439 /* Move this entry to the head */
2440 RT_LOCK(rt_end);
2441 LN_DEQUEUE(ln_end);
2442 LN_INSERTHEAD(ln_end);
2443 
2444 if (ln_end->ln_expire == 0) {
2445 RT_UNLOCK(rt_end);
2446 continue;
2447 }
2448 if (ln_end->ln_state > ND6_LLINFO_INCOMPLETE)
2449 ND6_CACHE_STATE_TRANSITION(ln_end, ND6_LLINFO_STALE);
2450 else
2451 ND6_CACHE_STATE_TRANSITION(ln_end, ND6_LLINFO_PURGE);
2452 ln_setexpire(ln_end, timenow);
2453 RT_UNLOCK(rt_end);
2454 }
2455 }
2456 
2457 /*
2458 * check if rt_key(rt) is one of my address assigned
2459 * to the interface.
2460 */
2461 ifa = (struct ifaddr *)in6ifa_ifpwithaddr(rt->rt_ifp,
2462 &SIN6(rt_key(rt))->sin6_addr);
2463 if (ifa != NULL) {
/*
 * Route to one of our own addresses: mark permanently
 * reachable, copy our own mac into the gateway, and point
 * the route at the loopback interface.
 */
2464 caddr_t macp = nd6_ifptomac(ifp);
2465 ln_setexpire(ln, 0);
2466 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_REACHABLE);
2467 if (macp != NULL) {
2468 Bcopy(macp, LLADDR(SDL(gate)), ifp->if_addrlen);
2469 SDL(gate)->sdl_alen = ifp->if_addrlen;
2470 }
2471 if (nd6_useloopback) {
2472 if (rt->rt_ifp != lo_ifp) {
2473 /*
2474 * Purge any link-layer info caching.
2475 */
2476 if (rt->rt_llinfo_purge != NULL)
2477 rt->rt_llinfo_purge(rt);
2478 
2479 /*
2480 * Adjust route ref count for the
2481 * interfaces.
2482 */
2483 if (rt->rt_if_ref_fn != NULL) {
2484 rt->rt_if_ref_fn(lo_ifp, 1);
2485 rt->rt_if_ref_fn(rt->rt_ifp,
2486 -1);
2487 }
2488 }
2489 rt->rt_ifp = lo_ifp;
2490 /*
2491 * If rmx_mtu is not locked, update it
2492 * to the MTU used by the new interface.
2493 */
2494 if (!(rt->rt_rmx.rmx_locks & RTV_MTU))
2495 rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
2496 /*
2497 * Make sure rt_ifa be equal to the ifaddr
2498 * corresponding to the address.
2499 * We need this because when we refer
2500 * rt_ifa->ia6_flags in ip6_input, we assume
2501 * that the rt_ifa points to the address instead
2502 * of the loopback address.
2503 */
2504 if (ifa != rt->rt_ifa) {
2505 rtsetifa(rt, ifa);
2506 }
2507 }
2508 IFA_REMREF(ifa);
2509 } else if (rt->rt_flags & RTF_ANNOUNCE) {
2510 ln_setexpire(ln, 0);
2511 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_REACHABLE);
2512 
2513 /* join solicited node multicast for proxy ND */
2514 if (ifp->if_flags & IFF_MULTICAST) {
2515 struct in6_addr llsol;
2516 struct in6_multi *in6m;
2517 int error;
2518 
/* Build ff02::1:ffXX:XXXX from the low 24 bits of the target. */
2519 llsol = SIN6(rt_key(rt))->sin6_addr;
2520 llsol.s6_addr32[0] = IPV6_ADDR_INT32_MLL;
2521 llsol.s6_addr32[1] = 0;
2522 llsol.s6_addr32[2] = htonl(1);
2523 llsol.s6_addr8[12] = 0xff;
2524 if (in6_setscope(&llsol, ifp, NULL))
2525 break;
2526 error = in6_mc_join(ifp, &llsol,
2527 NULL, &in6m, 0);
2528 if (error) {
2529 nd6log((LOG_ERR, "%s: failed to join "
2530 "%s (errno=%d)\n", if_name(ifp),
2531 ip6_sprintf(&llsol), error));
2532 } else {
2533 IN6M_REMREF(in6m);
2534 }
2535 }
2536 }
2537 break;
2538 
2539 case RTM_DELETE:
2540 if (ln == NULL)
2541 break;
2542 /* leave from solicited node multicast for proxy ND */
2543 if ((rt->rt_flags & RTF_ANNOUNCE) &&
2544 (ifp->if_flags & IFF_MULTICAST)) {
2545 struct in6_addr llsol;
2546 struct in6_multi *in6m;
2547 
2548 llsol = SIN6(rt_key(rt))->sin6_addr;
2549 llsol.s6_addr32[0] = IPV6_ADDR_INT32_MLL;
2550 llsol.s6_addr32[1] = 0;
2551 llsol.s6_addr32[2] = htonl(1);
2552 llsol.s6_addr8[12] = 0xff;
2553 if (in6_setscope(&llsol, ifp, NULL) == 0) {
2554 in6_multihead_lock_shared();
2555 IN6_LOOKUP_MULTI(&llsol, ifp, in6m);
2556 in6_multihead_lock_done();
2557 if (in6m != NULL) {
2558 in6_mc_leave(in6m, NULL);
2559 IN6M_REMREF(in6m);
2560 }
2561 }
2562 }
2563 nd6_inuse--;
2564 /*
2565 * Unchain it but defer the actual freeing until the route
2566 * itself is to be freed. rt->rt_llinfo still points to
2567 * llinfo_nd6, and likewise, ln->ln_rt stil points to this
2568 * route entry, except that RTF_LLINFO is now cleared.
2569 */
2570 if (ln->ln_flags & ND6_LNF_IN_USE)
2571 LN_DEQUEUE(ln);
2572 
2573 /*
2574 * Purge any link-layer info caching.
2575 */
2576 if (rt->rt_llinfo_purge != NULL)
2577 rt->rt_llinfo_purge(rt);
2578 
2579 rt->rt_flags &= ~RTF_LLINFO;
/* Drop any packets queued awaiting resolution of this neighbor. */
2580 if (ln->ln_hold != NULL) {
2581 m_freem_list(ln->ln_hold);
2582 ln->ln_hold = NULL;
2583 }
2584 }
2585 }
2586
/*
 * Handler for the obsolete SIOCGDRLST_IN6_{32,64} ioctls: copy up to
 * DRLSTSIZ entries of the global default-router list into the caller's
 * in6_drlist_{32,64} buffer at 'data'.  'data_is_64' selects the layout.
 * Returns 0, or ENOMEM if the temporary buffer cannot be allocated.
 *
 * Precondition: caller holds nd6_mutex (asserted) to keep the list
 * stable during the walk.  The two branches are deliberately parallel
 * copies differing only in the user-visible struct layout.
 */
2587 static int
2588 nd6_siocgdrlst(void *data, int data_is_64)
2589 {
2590 struct in6_drlist_32 *drl_32;
2591 struct nd_defrouter *dr;
2592 int i = 0;
2593 
2594 LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED);
2595 
2596 dr = TAILQ_FIRST(&nd_defrouter);
2597 
2598 /* XXX Handle mapped defrouter entries */
2599 /* For 64-bit process */
2600 if (data_is_64) {
2601 struct in6_drlist_64 *drl_64;
2602 
/* Staged in a zeroed kernel buffer, then copied out in one bcopy. */
2603 drl_64 = _MALLOC(sizeof (*drl_64), M_TEMP, M_WAITOK|M_ZERO);
2604 if (drl_64 == NULL)
2605 return (ENOMEM);
2606 
2607 /* preserve the interface name */
2608 bcopy(data, drl_64, sizeof (drl_64->ifname));
2609 
2610 while (dr && i < DRLSTSIZ) {
2611 drl_64->defrouter[i].rtaddr = dr->rtaddr;
2612 if (IN6_IS_ADDR_LINKLOCAL(
2613 &drl_64->defrouter[i].rtaddr)) {
2614 /* XXX: need to this hack for KAME stack */
/* Clear the KAME embedded scope id before exposing to userland. */
2615 drl_64->defrouter[i].rtaddr.s6_addr16[1] = 0;
2616 } else {
2617 log(LOG_ERR,
2618 "default router list contains a "
2619 "non-linklocal address(%s)\n",
2620 ip6_sprintf(&drl_64->defrouter[i].rtaddr));
2621 }
2622 drl_64->defrouter[i].flags = dr->flags;
2623 drl_64->defrouter[i].rtlifetime = dr->rtlifetime;
2624 drl_64->defrouter[i].expire = nddr_getexpire(dr);
2625 drl_64->defrouter[i].if_index = dr->ifp->if_index;
2626 i++;
2627 dr = TAILQ_NEXT(dr, dr_entry);
2628 }
2629 bcopy(drl_64, data, sizeof (*drl_64));
2630 _FREE(drl_64, M_TEMP);
2631 return (0);
2632 }
2633 
2634 /* For 32-bit process */
2635 drl_32 = _MALLOC(sizeof (*drl_32), M_TEMP, M_WAITOK|M_ZERO);
2636 if (drl_32 == NULL)
2637 return (ENOMEM);
2638 
2639 /* preserve the interface name */
2640 bcopy(data, drl_32, sizeof (drl_32->ifname));
2641 
2642 while (dr != NULL && i < DRLSTSIZ) {
2643 drl_32->defrouter[i].rtaddr = dr->rtaddr;
2644 if (IN6_IS_ADDR_LINKLOCAL(&drl_32->defrouter[i].rtaddr)) {
2645 /* XXX: need to this hack for KAME stack */
2646 drl_32->defrouter[i].rtaddr.s6_addr16[1] = 0;
2647 } else {
2648 log(LOG_ERR,
2649 "default router list contains a "
2650 "non-linklocal address(%s)\n",
2651 ip6_sprintf(&drl_32->defrouter[i].rtaddr));
2652 }
2653 drl_32->defrouter[i].flags = dr->flags;
2654 drl_32->defrouter[i].rtlifetime = dr->rtlifetime;
2655 drl_32->defrouter[i].expire = nddr_getexpire(dr);
2656 drl_32->defrouter[i].if_index = dr->ifp->if_index;
2657 i++;
2658 dr = TAILQ_NEXT(dr, dr_entry);
2659 }
2660 bcopy(drl_32, data, sizeof (*drl_32));
2661 _FREE(drl_32, M_TEMP);
2662 return (0);
2663 }
2664
2665 /*
2666 * XXX meaning of fields, especialy "raflags", is very
2667 * differnet between RA prefix list and RR/static prefix list.
2668 * how about separating ioctls into two?
2669 */
/*
 * Handler for the obsolete SIOCGPRLST_IN6_{32,64} ioctls: copy up to
 * PRLSTSIZ prefixes (each with up to DRLSTSIZ advertising routers) from
 * the global prefix list into the caller's in6_prlist_{32,64} buffer at
 * 'data'.  'data_is_64' selects the layout.  Returns 0 or ENOMEM.
 *
 * Precondition: caller holds nd6_mutex (asserted); each prefix is
 * additionally locked with NDPR_LOCK while its fields are read.  As in
 * nd6_siocgdrlst(), the 32/64 branches are parallel copies.
 */
2670 static int
2671 nd6_siocgprlst(void *data, int data_is_64)
2672 {
2673 struct in6_prlist_32 *prl_32;
2674 struct nd_prefix *pr;
2675 int i = 0;
2676 
2677 LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED);
2678 
2679 pr = nd_prefix.lh_first;
2680 
2681 /* XXX Handle mapped defrouter entries */
2682 /* For 64-bit process */
2683 if (data_is_64) {
2684 struct in6_prlist_64 *prl_64;
2685 
2686 prl_64 = _MALLOC(sizeof (*prl_64), M_TEMP, M_WAITOK|M_ZERO);
2687 if (prl_64 == NULL)
2688 return (ENOMEM);
2689 
2690 /* preserve the interface name */
2691 bcopy(data, prl_64, sizeof (prl_64->ifname));
2692 
2693 while (pr && i < PRLSTSIZ) {
2694 struct nd_pfxrouter *pfr;
2695 int j;
2696 
2697 NDPR_LOCK(pr);
2698 (void) in6_embedscope(&prl_64->prefix[i].prefix,
2699 &pr->ndpr_prefix, NULL, NULL, NULL);
2700 prl_64->prefix[i].raflags = pr->ndpr_raf;
2701 prl_64->prefix[i].prefixlen = pr->ndpr_plen;
2702 prl_64->prefix[i].vltime = pr->ndpr_vltime;
2703 prl_64->prefix[i].pltime = pr->ndpr_pltime;
2704 prl_64->prefix[i].if_index = pr->ndpr_ifp->if_index;
2705 prl_64->prefix[i].expire = ndpr_getexpire(pr);
2706 
2707 pfr = pr->ndpr_advrtrs.lh_first;
2708 j = 0;
/*
 * Copy out at most DRLSTSIZ advertising routers, but keep
 * counting past the cap so 'advrtrs' reports the true total.
 */
2709 while (pfr) {
2710 if (j < DRLSTSIZ) {
2711 #define RTRADDR prl_64->prefix[i].advrtr[j]
2712 RTRADDR = pfr->router->rtaddr;
2713 if (IN6_IS_ADDR_LINKLOCAL(&RTRADDR)) {
2714 /* XXX: hack for KAME */
2715 RTRADDR.s6_addr16[1] = 0;
2716 } else {
2717 log(LOG_ERR,
2718 "a router(%s) advertises "
2719 "a prefix with "
2720 "non-link local address\n",
2721 ip6_sprintf(&RTRADDR));
2722 }
2723 #undef RTRADDR
2724 }
2725 j++;
2726 pfr = pfr->pfr_next;
2727 }
2728 prl_64->prefix[i].advrtrs = j;
2729 prl_64->prefix[i].origin = PR_ORIG_RA;
2730 NDPR_UNLOCK(pr);
2731 
2732 i++;
2733 pr = pr->ndpr_next;
2734 }
2735 bcopy(prl_64, data, sizeof (*prl_64));
2736 _FREE(prl_64, M_TEMP);
2737 return (0);
2738 }
2739 
2740 /* For 32-bit process */
2741 prl_32 = _MALLOC(sizeof (*prl_32), M_TEMP, M_WAITOK|M_ZERO);
2742 if (prl_32 == NULL)
2743 return (ENOMEM);
2744 
2745 /* preserve the interface name */
2746 bcopy(data, prl_32, sizeof (prl_32->ifname));
2747 
2748 while (pr && i < PRLSTSIZ) {
2749 struct nd_pfxrouter *pfr;
2750 int j;
2751 
2752 NDPR_LOCK(pr);
2753 (void) in6_embedscope(&prl_32->prefix[i].prefix,
2754 &pr->ndpr_prefix, NULL, NULL, NULL);
2755 prl_32->prefix[i].raflags = pr->ndpr_raf;
2756 prl_32->prefix[i].prefixlen = pr->ndpr_plen;
2757 prl_32->prefix[i].vltime = pr->ndpr_vltime;
2758 prl_32->prefix[i].pltime = pr->ndpr_pltime;
2759 prl_32->prefix[i].if_index = pr->ndpr_ifp->if_index;
2760 prl_32->prefix[i].expire = ndpr_getexpire(pr);
2761 
2762 pfr = pr->ndpr_advrtrs.lh_first;
2763 j = 0;
2764 while (pfr) {
2765 if (j < DRLSTSIZ) {
2766 #define RTRADDR prl_32->prefix[i].advrtr[j]
2767 RTRADDR = pfr->router->rtaddr;
2768 if (IN6_IS_ADDR_LINKLOCAL(&RTRADDR)) {
2769 /* XXX: hack for KAME */
2770 RTRADDR.s6_addr16[1] = 0;
2771 } else {
2772 log(LOG_ERR,
2773 "a router(%s) advertises "
2774 "a prefix with "
2775 "non-link local address\n",
2776 ip6_sprintf(&RTRADDR));
2777 }
2778 #undef RTRADDR
2779 }
2780 j++;
2781 pfr = pfr->pfr_next;
2782 }
2783 prl_32->prefix[i].advrtrs = j;
2784 prl_32->prefix[i].origin = PR_ORIG_RA;
2785 NDPR_UNLOCK(pr);
2786 
2787 i++;
2788 pr = pr->ndpr_next;
2789 }
2790 bcopy(prl_32, data, sizeof (*prl_32));
2791 _FREE(prl_32, M_TEMP);
2792 return (0);
2793 }
2794
/*
 * ND6 ioctl dispatcher.  'data' points at the per-command request
 * structure; 'ifp' is the target interface (must be non-NULL, verified).
 * Returns 0 or an errno.  Commands fall into four groups: list dumps
 * (DRLST/PRLST, obsolete), per-interface ND info get/set, flush
 * operations (neighbor/prefix/router), and neighbor/default-interface/
 * CGA queries.  Unhandled commands fall through and return 0.
 */
2795 int
2796 nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp)
2797 {
2798 struct nd_defrouter *dr;
2799 struct nd_prefix *pr;
2800 struct rtentry *rt;
2801 int error = 0;
2802 
2803 VERIFY(ifp != NULL);
2804 
2805 switch (cmd) {
2806 case SIOCGDRLST_IN6_32: /* struct in6_drlist_32 */
2807 case SIOCGDRLST_IN6_64: /* struct in6_drlist_64 */
2808 /*
2809 * obsolete API, use sysctl under net.inet6.icmp6
2810 */
2811 lck_mtx_lock(nd6_mutex);
2812 error = nd6_siocgdrlst(data, cmd == SIOCGDRLST_IN6_64);
2813 lck_mtx_unlock(nd6_mutex);
2814 break;
2815 
2816 case SIOCGPRLST_IN6_32: /* struct in6_prlist_32 */
2817 case SIOCGPRLST_IN6_64: /* struct in6_prlist_64 */
2818 /*
2819 * obsolete API, use sysctl under net.inet6.icmp6
2820 */
2821 lck_mtx_lock(nd6_mutex);
2822 error = nd6_siocgprlst(data, cmd == SIOCGPRLST_IN6_64);
2823 lck_mtx_unlock(nd6_mutex);
2824 break;
2825 
2826 case OSIOCGIFINFO_IN6: /* struct in6_ondireq */
2827 case SIOCGIFINFO_IN6: { /* struct in6_ondireq */
2828 u_int32_t linkmtu;
2829 struct in6_ondireq *ondi = (struct in6_ondireq *)(void *)data;
2830 struct nd_ifinfo *ndi;
2831 /*
2832 * SIOCGIFINFO_IN6 ioctl is encoded with in6_ondireq
2833 * instead of in6_ndireq, so we treat it as such.
2834 */
2835 ndi = ND_IFINFO(ifp);
2836 if ((NULL == ndi) || (FALSE == ndi->initialized)){
2837 error = EINVAL;
2838 break;
2839 }
/* Snapshot the per-interface ND parameters under ndi->lock. */
2840 lck_mtx_lock(&ndi->lock);
2841 linkmtu = IN6_LINKMTU(ifp);
2842 bcopy(&linkmtu, &ondi->ndi.linkmtu, sizeof (linkmtu));
2843 bcopy(&ndi->maxmtu, &ondi->ndi.maxmtu,
2844 sizeof (u_int32_t));
2845 bcopy(&ndi->basereachable, &ondi->ndi.basereachable,
2846 sizeof (u_int32_t));
2847 bcopy(&ndi->reachable, &ondi->ndi.reachable,
2848 sizeof (u_int32_t));
2849 bcopy(&ndi->retrans, &ondi->ndi.retrans,
2850 sizeof (u_int32_t));
2851 bcopy(&ndi->flags, &ondi->ndi.flags,
2852 sizeof (u_int32_t));
2853 bcopy(&ndi->recalctm, &ondi->ndi.recalctm,
2854 sizeof (int));
2855 ondi->ndi.chlim = ndi->chlim;
2856 ondi->ndi.receivedra = 0;
2857 lck_mtx_unlock(&ndi->lock);
2858 break;
2859 }
2860 
2861 case SIOCSIFINFO_FLAGS: { /* struct in6_ndireq */
2862 /*
2863 * XXX BSD has a bunch of checks here to ensure
2864 * that interface disabled flag is not reset if
2865 * link local address has failed DAD.
2866 * Investigate that part.
2867 */
2868 struct in6_ndireq *cndi = (struct in6_ndireq *)(void *)data;
2869 u_int32_t oflags, flags;
2870 struct nd_ifinfo *ndi = ND_IFINFO(ifp);
2871 
2872 /* XXX: almost all other fields of cndi->ndi is unused */
2873 if ((NULL == ndi) || !ndi->initialized) {
2874 error = EINVAL;
2875 break;
2876 }
2877 
2878 lck_mtx_lock(&ndi->lock);
2879 oflags = ndi->flags;
2880 bcopy(&cndi->ndi.flags, &(ndi->flags), sizeof (flags));
2881 flags = ndi->flags;
2882 lck_mtx_unlock(&ndi->lock);
2883 
/* Only act when the flag word actually changed. */
2884 if (oflags == flags) {
2885 break;
2886 }
2887 
2888 error = nd6_setifinfo(ifp, oflags, flags);
2889 break;
2890 }
2891 
2892 case SIOCSNDFLUSH_IN6: /* struct in6_ifreq */
2893 /* flush default router list */
2894 /*
2895 * xxx sumikawa: should not delete route if default
2896 * route equals to the top of default router list
2897 */
2898 lck_mtx_lock(nd6_mutex);
2899 defrouter_reset();
2900 defrouter_select(ifp);
2901 lck_mtx_unlock(nd6_mutex);
2902 /* xxx sumikawa: flush prefix list */
2903 break;
2904 
2905 case SIOCSPFXFLUSH_IN6: { /* struct in6_ifreq */
2906 /* flush all the prefix advertised by routers */
2907 struct nd_prefix *next = NULL;
2908 
2909 lck_mtx_lock(nd6_mutex);
2910 for (pr = nd_prefix.lh_first; pr; pr = next) {
2911 struct in6_ifaddr *ia = NULL;
2912 bool iterate_pfxlist_again = false;
2913 
2914 next = pr->ndpr_next;
2915 
2916 NDPR_LOCK(pr);
/* Link-local prefixes are never flushed here. */
2917 if (IN6_IS_ADDR_LINKLOCAL(&pr->ndpr_prefix.sin6_addr)) {
2918 NDPR_UNLOCK(pr);
2919 continue; /* XXX */
2920 }
/* lo_ifp acts as a wildcard: flush prefixes on every interface. */
2921 if (ifp != lo_ifp && pr->ndpr_ifp != ifp) {
2922 NDPR_UNLOCK(pr);
2923 continue;
2924 }
2925 /* do we really have to remove addresses as well? */
2926 NDPR_ADDREF_LOCKED(pr);
2927 NDPR_UNLOCK(pr);
2928 lck_rw_lock_exclusive(&in6_ifaddr_rwlock);
2929 ia = in6_ifaddrs;
/* Purge autoconf addresses derived from this prefix. */
2930 while (ia != NULL) {
2931 IFA_LOCK(&ia->ia_ifa);
2932 if ((ia->ia6_flags & IN6_IFF_AUTOCONF) == 0) {
2933 IFA_UNLOCK(&ia->ia_ifa);
2934 ia = ia->ia_next;
2935 continue;
2936 }
2937 
2938 if (ia->ia6_ndpr == pr) {
2939 IFA_ADDREF_LOCKED(&ia->ia_ifa);
2940 IFA_UNLOCK(&ia->ia_ifa);
2941 lck_rw_done(&in6_ifaddr_rwlock);
2942 lck_mtx_unlock(nd6_mutex);
2943 in6_purgeaddr(&ia->ia_ifa);
2944 IFA_REMREF(&ia->ia_ifa);
2945 lck_mtx_lock(nd6_mutex);
2946 lck_rw_lock_exclusive(
2947 &in6_ifaddr_rwlock);
2948 /*
2949 * Purging the address caused
2950 * in6_ifaddr_rwlock to be
2951 * dropped and
2952 * reacquired; therefore search again
2953 * from the beginning of in6_ifaddrs.
2954 * The same applies for the prefix list.
2955 */
2956 ia = in6_ifaddrs;
2957 iterate_pfxlist_again = true;
2958 continue;
2959 }
2960 IFA_UNLOCK(&ia->ia_ifa);
2961 ia = ia->ia_next;
2962 }
2963 lck_rw_done(&in6_ifaddr_rwlock);
2964 NDPR_LOCK(pr);
2965 prelist_remove(pr);
2966 NDPR_UNLOCK(pr);
2967 pfxlist_onlink_check();
2968 NDPR_REMREF(pr);
2969 if (iterate_pfxlist_again) {
2970 next = nd_prefix.lh_first;
2971 }
2972 }
2973 lck_mtx_unlock(nd6_mutex);
2974 break;
2975 }
2976 
2977 case SIOCSRTRFLUSH_IN6: { /* struct in6_ifreq */
2978 /* flush all the default routers */
2979 struct nd_defrouter *next;
2980 struct nd_drhead nd_defrouter_tmp;
2981 
2982 TAILQ_INIT(&nd_defrouter_tmp);
2983 lck_mtx_lock(nd6_mutex);
2984 if ((dr = TAILQ_FIRST(&nd_defrouter)) != NULL) {
2985 /*
2986 * The first entry of the list may be stored in
2987 * the routing table, so we'll delete it later.
2988 */
2989 for (dr = TAILQ_NEXT(dr, dr_entry); dr; dr = next) {
2990 next = TAILQ_NEXT(dr, dr_entry);
/* lo_ifp again acts as a wildcard matching all interfaces. */
2991 if (ifp == lo_ifp || dr->ifp == ifp) {
2992 /*
2993 * Remove the entry from default router list
2994 * and add it to the temp list.
2995 * nd_defrouter_tmp will be a local temporary
2996 * list as no one else can get the same
2997 * removed entry once it is removed from default
2998 * router list.
2999 * Remove the reference after calling defrtrlist_de
3000 */
3001 TAILQ_REMOVE(&nd_defrouter, dr, dr_entry);
3002 TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry);
3003 }
3004 }
3005 
/* Now handle the deferred first entry. */
3006 dr = TAILQ_FIRST(&nd_defrouter);
3007 if (ifp == lo_ifp ||
3008 dr->ifp == ifp) {
3009 TAILQ_REMOVE(&nd_defrouter, dr, dr_entry);
3010 TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry);
3011 }
3012 }
3013 
3014 /*
3015 * Keep the following separate from the above iteration of
3016 * nd_defrouter because it's not safe to call
3017 * defrtrlist_del while iterating global default
3018 * router list. Global list has to be traversed
3019 * while holding nd6_mutex throughout.
3020 *
3021 * The following call to defrtrlist_del should be
3022 * safe as we are iterating a local list of
3023 * default routers.
3024 */
3025 TAILQ_FOREACH_SAFE(dr, &nd_defrouter_tmp, dr_entry, next) {
3026 TAILQ_REMOVE(&nd_defrouter_tmp, dr, dr_entry);
3027 defrtrlist_del(dr);
3028 NDDR_REMREF(dr); /* remove list reference */
3029 }
3030 lck_mtx_unlock(nd6_mutex);
3031 break;
3032 }
3033 
3034 case SIOCGNBRINFO_IN6_32: { /* struct in6_nbrinfo_32 */
3035 struct llinfo_nd6 *ln;
3036 struct in6_nbrinfo_32 nbi_32;
3037 struct in6_addr nb_addr; /* make local for safety */
3038 
3039 bcopy(data, &nbi_32, sizeof (nbi_32));
3040 nb_addr = nbi_32.addr;
3041 /*
3042 * XXX: KAME specific hack for scoped addresses
3043 * XXXX: for other scopes than link-local?
3044 */
3045 if (IN6_IS_ADDR_LINKLOCAL(&nbi_32.addr) ||
3046 IN6_IS_ADDR_MC_LINKLOCAL(&nbi_32.addr)) {
3047 u_int16_t *idp =
3048 (u_int16_t *)(void *)&nb_addr.s6_addr[2];
3049 
/* Embed the interface index as the KAME scope id if unset. */
3050 if (*idp == 0)
3051 *idp = htons(ifp->if_index);
3052 }
3053 
3054 /* Callee returns a locked route upon success */
3055 if ((rt = nd6_lookup(&nb_addr, 0, ifp, 0)) == NULL) {
3056 error = EINVAL;
3057 break;
3058 }
3059 RT_LOCK_ASSERT_HELD(rt);
/* nd6_lookup() guarantees rt_llinfo != NULL on success. */
3060 ln = rt->rt_llinfo;
3061 nbi_32.state = ln->ln_state;
3062 nbi_32.asked = ln->ln_asked;
3063 nbi_32.isrouter = ln->ln_router;
3064 nbi_32.expire = ln_getexpire(ln);
3065 RT_REMREF_LOCKED(rt);
3066 RT_UNLOCK(rt);
3067 bcopy(&nbi_32, data, sizeof (nbi_32));
3068 break;
3069 }
3070 
3071 case SIOCGNBRINFO_IN6_64: { /* struct in6_nbrinfo_64 */
3072 struct llinfo_nd6 *ln;
3073 struct in6_nbrinfo_64 nbi_64;
3074 struct in6_addr nb_addr; /* make local for safety */
3075 
3076 bcopy(data, &nbi_64, sizeof (nbi_64));
3077 nb_addr = nbi_64.addr;
3078 /*
3079 * XXX: KAME specific hack for scoped addresses
3080 * XXXX: for other scopes than link-local?
3081 */
3082 if (IN6_IS_ADDR_LINKLOCAL(&nbi_64.addr) ||
3083 IN6_IS_ADDR_MC_LINKLOCAL(&nbi_64.addr)) {
3084 u_int16_t *idp =
3085 (u_int16_t *)(void *)&nb_addr.s6_addr[2];
3086 
3087 if (*idp == 0)
3088 *idp = htons(ifp->if_index);
3089 }
3090 
3091 /* Callee returns a locked route upon success */
3092 if ((rt = nd6_lookup(&nb_addr, 0, ifp, 0)) == NULL) {
3093 error = EINVAL;
3094 break;
3095 }
3096 RT_LOCK_ASSERT_HELD(rt);
3097 ln = rt->rt_llinfo;
3098 nbi_64.state = ln->ln_state;
3099 nbi_64.asked = ln->ln_asked;
3100 nbi_64.isrouter = ln->ln_router;
3101 nbi_64.expire = ln_getexpire(ln);
3102 RT_REMREF_LOCKED(rt);
3103 RT_UNLOCK(rt);
3104 bcopy(&nbi_64, data, sizeof (nbi_64));
3105 break;
3106 }
3107 
3108 case SIOCGDEFIFACE_IN6_32: /* struct in6_ndifreq_32 */
3109 case SIOCGDEFIFACE_IN6_64: { /* struct in6_ndifreq_64 */
3110 struct in6_ndifreq_64 *ndif_64 =
3111 (struct in6_ndifreq_64 *)(void *)data;
3112 struct in6_ndifreq_32 *ndif_32 =
3113 (struct in6_ndifreq_32 *)(void *)data;
3114 
3115 if (cmd == SIOCGDEFIFACE_IN6_64) {
3116 u_int64_t j = nd6_defifindex;
3117 bcopy(&j, &ndif_64->ifindex, sizeof (j));
3118 } else {
3119 bcopy(&nd6_defifindex, &ndif_32->ifindex,
3120 sizeof (u_int32_t));
3121 }
3122 break;
3123 }
3124 
3125 case SIOCSDEFIFACE_IN6_32: /* struct in6_ndifreq_32 */
3126 case SIOCSDEFIFACE_IN6_64: { /* struct in6_ndifreq_64 */
3127 struct in6_ndifreq_64 *ndif_64 =
3128 (struct in6_ndifreq_64 *)(void *)data;
3129 struct in6_ndifreq_32 *ndif_32 =
3130 (struct in6_ndifreq_32 *)(void *)data;
3131 u_int32_t idx;
3132 
3133 if (cmd == SIOCSDEFIFACE_IN6_64) {
3134 u_int64_t j;
3135 bcopy(&ndif_64->ifindex, &j, sizeof (j));
3136 idx = (u_int32_t)j;
3137 } else {
3138 bcopy(&ndif_32->ifindex, &idx, sizeof (idx));
3139 }
3140 
3141 error = nd6_setdefaultiface(idx);
3142 return (error);
3143 /* NOTREACHED */
3144 }
3145 case SIOCGIFCGAPREP_IN6:
3146 case SIOCSIFCGAPREP_IN6:
3147 {
3148 struct in6_cgareq *p_cgareq =
3149 (struct in6_cgareq *)(void *)data;
3150 struct nd_ifinfo *ndi = ND_IFINFO(ifp);
3151 
3152 struct in6_cga_modifier *req_cga_mod =
3153 &(p_cgareq->cgar_cgaprep.cga_modifier);
3154 struct in6_cga_modifier *ndi_cga_mod = NULL;
3155 
3156 if ((NULL == ndi) || !ndi->initialized) {
3157 error = EINVAL;
3158 break;
3159 }
3160 
3161 lck_mtx_lock(&ndi->lock);
3162 ndi_cga_mod = &(ndi->local_cga_modifier);
3163 
/* Set copies request -> interface; get copies interface -> request. */
3164 if (cmd == SIOCSIFCGAPREP_IN6) {
3165 bcopy(req_cga_mod, ndi_cga_mod, sizeof(*ndi_cga_mod));
3166 ndi->cga_initialized = TRUE;
3167 } else
3168 bcopy(ndi_cga_mod, req_cga_mod, sizeof(*req_cga_mod));
3169 
3170 lck_mtx_unlock(&ndi->lock);
3171 return (error);
3172 /* NOTREACHED */
3173 }
3174 }
3175 return (error);
3176 }
3177
3178 /*
3179 * Create neighbor cache entry and cache link-layer address,
3180 * on reception of inbound ND6 packets. (RS/RA/NS/redirect)
3181 */
3182 void
3183 nd6_cache_lladdr(struct ifnet *ifp, struct in6_addr *from, char *lladdr,
3184 int lladdrlen, int type, int code)
3185 {
3186 #pragma unused(lladdrlen)
3187 struct rtentry *rt = NULL;
3188 struct llinfo_nd6 *ln = NULL;
3189 int is_newentry;
3190 struct sockaddr_dl *sdl = NULL;
3191 int do_update;
3192 int olladdr;
3193 int llchange;
3194 int newstate = 0;
3195 uint64_t timenow;
3196 boolean_t sched_timeout = FALSE;
3197 struct nd_ifinfo *ndi = NULL;
3198
3199 if (ifp == NULL)
3200 panic("ifp == NULL in nd6_cache_lladdr");
3201 if (from == NULL)
3202 panic("from == NULL in nd6_cache_lladdr");
3203
3204 /* nothing must be updated for unspecified address */
3205 if (IN6_IS_ADDR_UNSPECIFIED(from))
3206 return;
3207
3208 /*
3209 * Validation about ifp->if_addrlen and lladdrlen must be done in
3210 * the caller.
3211 */
3212 timenow = net_uptime();
3213
3214 rt = nd6_lookup(from, 0, ifp, 0);
3215 if (rt == NULL) {
3216 if ((rt = nd6_lookup(from, 1, ifp, 0)) == NULL)
3217 return;
3218 RT_LOCK_ASSERT_HELD(rt);
3219 is_newentry = 1;
3220 } else {
3221 RT_LOCK_ASSERT_HELD(rt);
3222 /* do nothing if static ndp is set */
3223 if (rt->rt_flags & RTF_STATIC) {
3224 RT_REMREF_LOCKED(rt);
3225 RT_UNLOCK(rt);
3226 return;
3227 }
3228 is_newentry = 0;
3229 }
3230
3231 if (rt == NULL)
3232 return;
3233 if ((rt->rt_flags & (RTF_GATEWAY | RTF_LLINFO)) != RTF_LLINFO) {
3234 fail:
3235 RT_UNLOCK(rt);
3236 nd6_free(rt);
3237 rtfree(rt);
3238 return;
3239 }
3240 ln = (struct llinfo_nd6 *)rt->rt_llinfo;
3241 if (ln == NULL)
3242 goto fail;
3243 if (rt->rt_gateway == NULL)
3244 goto fail;
3245 if (rt->rt_gateway->sa_family != AF_LINK)
3246 goto fail;
3247 sdl = SDL(rt->rt_gateway);
3248
3249 olladdr = (sdl->sdl_alen) ? 1 : 0;
3250 if (olladdr && lladdr) {
3251 if (bcmp(lladdr, LLADDR(sdl), ifp->if_addrlen))
3252 llchange = 1;
3253 else
3254 llchange = 0;
3255 } else
3256 llchange = 0;
3257
3258 /*
3259 * newentry olladdr lladdr llchange (*=record)
3260 * 0 n n -- (1)
3261 * 0 y n -- (2)
3262 * 0 n y -- (3) * STALE
3263 * 0 y y n (4) *
3264 * 0 y y y (5) * STALE
3265 * 1 -- n -- (6) NOSTATE(= PASSIVE)
3266 * 1 -- y -- (7) * STALE
3267 */
3268
3269 if (lladdr != NULL) { /* (3-5) and (7) */
3270 /*
3271 * Record source link-layer address
3272 * XXX is it dependent to ifp->if_type?
3273 */
3274 sdl->sdl_alen = ifp->if_addrlen;
3275 bcopy(lladdr, LLADDR(sdl), ifp->if_addrlen);
3276
3277 /* cache the gateway (sender HW) address */
3278 nd6_llreach_alloc(rt, ifp, LLADDR(sdl), sdl->sdl_alen, FALSE);
3279 }
3280
3281 if (is_newentry == 0) {
3282 if ((!olladdr && lladdr != NULL) || /* (3) */
3283 (olladdr && lladdr != NULL && llchange)) { /* (5) */
3284 do_update = 1;
3285 newstate = ND6_LLINFO_STALE;
3286 } else /* (1-2,4) */
3287 do_update = 0;
3288 } else {
3289 do_update = 1;
3290 if (lladdr == NULL) /* (6) */
3291 newstate = ND6_LLINFO_NOSTATE;
3292 else /* (7) */
3293 newstate = ND6_LLINFO_STALE;
3294 }
3295
3296 /*
3297 * For interface's that do not perform NUD
3298 * neighbor cache entres must always be marked
3299 * reachable with no expiry
3300 */
3301 ndi = ND_IFINFO(ifp);
3302 VERIFY((NULL != ndi) && (TRUE == ndi->initialized));
3303
3304 if (ndi && !(ndi->flags & ND6_IFF_PERFORMNUD)) {
3305 newstate = ND6_LLINFO_REACHABLE;
3306 ln_setexpire(ln, 0);
3307 }
3308
3309 if (do_update) {
3310 /*
3311 * Update the state of the neighbor cache.
3312 */
3313 ND6_CACHE_STATE_TRANSITION(ln, newstate);
3314
3315 if ((ln->ln_state == ND6_LLINFO_STALE) ||
3316 (ln->ln_state == ND6_LLINFO_REACHABLE)) {
3317 struct mbuf *m = ln->ln_hold;
3318 /*
3319 * XXX: since nd6_output() below will cause
3320 * state tansition to DELAY and reset the timer,
3321 * we must set the timer now, although it is actually
3322 * meaningless.
3323 */
3324 if (ln->ln_state == ND6_LLINFO_STALE)
3325 ln_setexpire(ln, timenow + nd6_gctimer);
3326
3327 ln->ln_hold = NULL;
3328 if (m != NULL) {
3329 struct sockaddr_in6 sin6;
3330
3331 rtkey_to_sa6(rt, &sin6);
3332 /*
3333 * we assume ifp is not a p2p here, so just
3334 * set the 2nd argument as the 1st one.
3335 */
3336 RT_UNLOCK(rt);
3337 nd6_output_list(ifp, ifp, m, &sin6, rt, NULL);
3338 RT_LOCK(rt);
3339 }
3340 } else if (ln->ln_state == ND6_LLINFO_INCOMPLETE) {
3341 /* probe right away */
3342 ln_setexpire(ln, timenow);
3343 sched_timeout = TRUE;
3344 }
3345 }
3346
3347 /*
3348 * ICMP6 type dependent behavior.
3349 *
3350 * NS: clear IsRouter if new entry
3351 * RS: clear IsRouter
3352 * RA: set IsRouter if there's lladdr
3353 * redir: clear IsRouter if new entry
3354 *
3355 * RA case, (1):
3356 * The spec says that we must set IsRouter in the following cases:
3357 * - If lladdr exist, set IsRouter. This means (1-5).
3358 * - If it is old entry (!newentry), set IsRouter. This means (7).
3359 * So, based on the spec, in (1-5) and (7) cases we must set IsRouter.
3360 * A quetion arises for (1) case. (1) case has no lladdr in the
3361 * neighbor cache, this is similar to (6).
3362 * This case is rare but we figured that we MUST NOT set IsRouter.
3363 *
3364 * newentry olladdr lladdr llchange NS RS RA redir
3365 * D R
3366 * 0 n n -- (1) c ? s
3367 * 0 y n -- (2) c s s
3368 * 0 n y -- (3) c s s
3369 * 0 y y n (4) c s s
3370 * 0 y y y (5) c s s
3371 * 1 -- n -- (6) c c c s
3372 * 1 -- y -- (7) c c s c s
3373 *
3374 * (c=clear s=set)
3375 */
3376 switch (type & 0xff) {
3377 case ND_NEIGHBOR_SOLICIT:
3378 /*
3379 * New entry must have is_router flag cleared.
3380 */
3381 if (is_newentry) /* (6-7) */
3382 ln->ln_router = 0;
3383 break;
3384 case ND_REDIRECT:
3385 /*
3386 * If the ICMP message is a Redirect to a better router, always
3387 * set the is_router flag. Otherwise, if the entry is newly
3388 * created, then clear the flag. [RFC 4861, sec 8.3]
3389 */
3390 if (code == ND_REDIRECT_ROUTER)
3391 ln->ln_router = 1;
3392 else if (is_newentry) /* (6-7) */
3393 ln->ln_router = 0;
3394 break;
3395 case ND_ROUTER_SOLICIT:
3396 /*
3397 * is_router flag must always be cleared.
3398 */
3399 ln->ln_router = 0;
3400 break;
3401 case ND_ROUTER_ADVERT:
3402 /*
3403 * Mark an entry with lladdr as a router.
3404 */
3405 if ((!is_newentry && (olladdr || lladdr)) || /* (2-5) */
3406 (is_newentry && lladdr)) { /* (7) */
3407 ln->ln_router = 1;
3408 }
3409 break;
3410 }
3411
3412 if (do_update) {
3413 int route_ev_code = 0;
3414
3415 if (llchange)
3416 route_ev_code = ROUTE_LLENTRY_CHANGED;
3417 else
3418 route_ev_code = ROUTE_LLENTRY_RESOLVED;
3419
3420 /* Enqueue work item to invoke callback for this route entry */
3421 route_event_enqueue_nwk_wq_entry(rt, NULL, route_ev_code, NULL, TRUE);
3422
3423 if (ln->ln_router || (rt->rt_flags & RTF_ROUTER)) {
3424 struct radix_node_head *rnh = NULL;
3425 struct route_event rt_ev;
3426 route_event_init(&rt_ev, rt, NULL, llchange ? ROUTE_LLENTRY_CHANGED :
3427 ROUTE_LLENTRY_RESOLVED);
3428 /*
3429 * We already have a valid reference on rt.
3430 * The function frees that before returning.
3431 * We therefore don't need an extra reference here
3432 */
3433 RT_UNLOCK(rt);
3434 lck_mtx_lock(rnh_lock);
3435
3436 rnh = rt_tables[AF_INET6];
3437 if (rnh != NULL)
3438 (void) rnh->rnh_walktree(rnh, route_event_walktree,
3439 (void *)&rt_ev);
3440 lck_mtx_unlock(rnh_lock);
3441 RT_LOCK(rt);
3442 }
3443 }
3444
3445 /*
3446 * When the link-layer address of a router changes, select the
3447 * best router again. In particular, when the neighbor entry is newly
3448 * created, it might affect the selection policy.
3449 * Question: can we restrict the first condition to the "is_newentry"
3450 * case?
3451 *
3452 * Note: Perform default router selection even when we are a router,
3453 * if Scoped Routing is enabled.
3454 */
3455 if (do_update && ln->ln_router) {
3456 RT_REMREF_LOCKED(rt);
3457 RT_UNLOCK(rt);
3458 lck_mtx_lock(nd6_mutex);
3459 defrouter_select(ifp);
3460 lck_mtx_unlock(nd6_mutex);
3461 } else {
3462 RT_REMREF_LOCKED(rt);
3463 RT_UNLOCK(rt);
3464 }
3465 if (sched_timeout) {
3466 lck_mtx_lock(rnh_lock);
3467 nd6_sched_timeout(NULL, NULL);
3468 lck_mtx_unlock(rnh_lock);
3469 }
3470 }
3471
3472 static void
3473 nd6_slowtimo(void *arg)
3474 {
3475 #pragma unused(arg)
3476 struct nd_ifinfo *nd6if = NULL;
3477 struct ifnet *ifp = NULL;
3478
3479 ifnet_head_lock_shared();
3480 for (ifp = ifnet_head.tqh_first; ifp;
3481 ifp = ifp->if_link.tqe_next) {
3482 nd6if = ND_IFINFO(ifp);
3483 if ((NULL == nd6if) || (FALSE == nd6if->initialized)) {
3484 continue;
3485 }
3486
3487 lck_mtx_lock(&nd6if->lock);
3488 if (nd6if->basereachable && /* already initialized */
3489 (nd6if->recalctm -= ND6_SLOWTIMER_INTERVAL) <= 0) {
3490 /*
3491 * Since reachable time rarely changes by router
3492 * advertisements, we SHOULD insure that a new random
3493 * value gets recomputed at least once every few hours.
3494 * (RFC 4861, 6.3.4)
3495 */
3496 nd6if->recalctm = nd6_recalc_reachtm_interval;
3497 nd6if->reachable =
3498 ND_COMPUTE_RTIME(nd6if->basereachable);
3499 }
3500 lck_mtx_unlock(&nd6if->lock);
3501 }
3502 ifnet_head_done();
3503 timeout(nd6_slowtimo, NULL, ND6_SLOWTIMER_INTERVAL * hz);
3504 }
3505
/*
 * Single-packet front end for nd6_output_list(); the mbuf "m0" is
 * simply treated as a chain of length one.  All locking and reference
 * rules are those of nd6_output_list().
 */
int
nd6_output(struct ifnet *ifp, struct ifnet *origifp, struct mbuf *m0,
    struct sockaddr_in6 *dst, struct rtentry *hint0, struct flowadv *adv)
{
	return (nd6_output_list(ifp, origifp, m0, dst, hint0, adv));
}
3512
3513 /*
3514 * nd6_output_list()
3515 *
3516 * Assumption: route determination for first packet can be correctly applied to
3517 * all packets in the chain.
3518 */
3519 #define senderr(e) { error = (e); goto bad; }
3520 int
3521 nd6_output_list(struct ifnet *ifp, struct ifnet *origifp, struct mbuf *m0,
3522 struct sockaddr_in6 *dst, struct rtentry *hint0, struct flowadv *adv)
3523 {
3524 struct rtentry *rt = hint0, *hint = hint0;
3525 struct llinfo_nd6 *ln = NULL;
3526 int error = 0;
3527 uint64_t timenow;
3528 struct rtentry *rtrele = NULL;
3529 struct nd_ifinfo *ndi = NULL;
3530
3531 if (rt != NULL) {
3532 RT_LOCK_SPIN(rt);
3533 RT_ADDREF_LOCKED(rt);
3534 }
3535
3536 if (IN6_IS_ADDR_MULTICAST(&dst->sin6_addr) || !nd6_need_cache(ifp)) {
3537 if (rt != NULL)
3538 RT_UNLOCK(rt);
3539 goto sendpkt;
3540 }
3541
3542 /*
3543 * Next hop determination. Because we may involve the gateway route
3544 * in addition to the original route, locking is rather complicated.
3545 * The general concept is that regardless of whether the route points
3546 * to the original route or to the gateway route, this routine takes
3547 * an extra reference on such a route. This extra reference will be
3548 * released at the end.
3549 *
3550 * Care must be taken to ensure that the "hint0" route never gets freed
3551 * via rtfree(), since the caller may have stored it inside a struct
3552 * route with a reference held for that placeholder.
3553 *
3554 * This logic is similar to, though not exactly the same as the one
3555 * used by route_to_gwroute().
3556 */
3557 if (rt != NULL) {
3558 /*
3559 * We have a reference to "rt" by now (or below via rtalloc1),
3560 * which will either be released or freed at the end of this
3561 * routine.
3562 */
3563 RT_LOCK_ASSERT_HELD(rt);
3564 if (!(rt->rt_flags & RTF_UP)) {
3565 RT_REMREF_LOCKED(rt);
3566 RT_UNLOCK(rt);
3567 if ((hint = rt = rtalloc1_scoped(SA(dst), 1, 0,
3568 ifp->if_index)) != NULL) {
3569 RT_LOCK_SPIN(rt);
3570 if (rt->rt_ifp != ifp) {
3571 /* XXX: loop care? */
3572 RT_UNLOCK(rt);
3573 error = nd6_output_list(ifp, origifp, m0,
3574 dst, rt, adv);
3575 rtfree(rt);
3576 return (error);
3577 }
3578 } else {
3579 senderr(EHOSTUNREACH);
3580 }
3581 }
3582
3583 if (rt->rt_flags & RTF_GATEWAY) {
3584 struct rtentry *gwrt;
3585 struct in6_ifaddr *ia6 = NULL;
3586 struct sockaddr_in6 gw6;
3587
3588 rtgw_to_sa6(rt, &gw6);
3589 /*
3590 * Must drop rt_lock since nd6_is_addr_neighbor()
3591 * calls nd6_lookup() and acquires rnh_lock.
3592 */
3593 RT_UNLOCK(rt);
3594
3595 /*
3596 * We skip link-layer address resolution and NUD
3597 * if the gateway is not a neighbor from ND point
3598 * of view, regardless of the value of nd_ifinfo.flags.
3599 * The second condition is a bit tricky; we skip
3600 * if the gateway is our own address, which is
3601 * sometimes used to install a route to a p2p link.
3602 */
3603 if (!nd6_is_addr_neighbor(&gw6, ifp, 0) ||
3604 (ia6 = in6ifa_ifpwithaddr(ifp, &gw6.sin6_addr))) {
3605 /*
3606 * We allow this kind of tricky route only
3607 * when the outgoing interface is p2p.
3608 * XXX: we may need a more generic rule here.
3609 */
3610 if (ia6 != NULL)
3611 IFA_REMREF(&ia6->ia_ifa);
3612 if ((ifp->if_flags & IFF_POINTOPOINT) == 0)
3613 senderr(EHOSTUNREACH);
3614 goto sendpkt;
3615 }
3616
3617 RT_LOCK_SPIN(rt);
3618 gw6 = *(SIN6(rt->rt_gateway));
3619
3620 /* If hint is now down, give up */
3621 if (!(rt->rt_flags & RTF_UP)) {
3622 RT_UNLOCK(rt);
3623 senderr(EHOSTUNREACH);
3624 }
3625
3626 /* If there's no gateway route, look it up */
3627 if ((gwrt = rt->rt_gwroute) == NULL) {
3628 RT_UNLOCK(rt);
3629 goto lookup;
3630 }
3631 /* Become a regular mutex */
3632 RT_CONVERT_LOCK(rt);
3633
3634 /*
3635 * Take gwrt's lock while holding route's lock;
3636 * this is okay since gwrt never points back
3637 * to rt, so no lock ordering issues.
3638 */
3639 RT_LOCK_SPIN(gwrt);
3640 if (!(gwrt->rt_flags & RTF_UP)) {
3641 rt->rt_gwroute = NULL;
3642 RT_UNLOCK(gwrt);
3643 RT_UNLOCK(rt);
3644 rtfree(gwrt);
3645 lookup:
3646 lck_mtx_lock(rnh_lock);
3647 gwrt = rtalloc1_scoped_locked(SA(&gw6), 1, 0,
3648 ifp->if_index);
3649
3650 RT_LOCK(rt);
3651 /*
3652 * Bail out if the route is down, no route
3653 * to gateway, circular route, or if the
3654 * gateway portion of "rt" has changed.
3655 */
3656 if (!(rt->rt_flags & RTF_UP) ||
3657 gwrt == NULL || gwrt == rt ||
3658 !equal(SA(&gw6), rt->rt_gateway)) {
3659 if (gwrt == rt) {
3660 RT_REMREF_LOCKED(gwrt);
3661 gwrt = NULL;
3662 }
3663 RT_UNLOCK(rt);
3664 if (gwrt != NULL)
3665 rtfree_locked(gwrt);
3666 lck_mtx_unlock(rnh_lock);
3667 senderr(EHOSTUNREACH);
3668 }
3669 VERIFY(gwrt != NULL);
3670 /*
3671 * Set gateway route; callee adds ref to gwrt;
3672 * gwrt has an extra ref from rtalloc1() for
3673 * this routine.
3674 */
3675 rt_set_gwroute(rt, rt_key(rt), gwrt);
3676 RT_UNLOCK(rt);
3677 lck_mtx_unlock(rnh_lock);
3678 /* Remember to release/free "rt" at the end */
3679 rtrele = rt;
3680 rt = gwrt;
3681 } else {
3682 RT_ADDREF_LOCKED(gwrt);
3683 RT_UNLOCK(gwrt);
3684 RT_UNLOCK(rt);
3685 /* Remember to release/free "rt" at the end */
3686 rtrele = rt;
3687 rt = gwrt;
3688 }
3689 VERIFY(rt == gwrt);
3690
3691 /*
3692 * This is an opportunity to revalidate the parent
3693 * route's gwroute, in case it now points to a dead
3694 * route entry. Parent route won't go away since the
3695 * clone (hint) holds a reference to it. rt == gwrt.
3696 */
3697 RT_LOCK_SPIN(hint);
3698 if ((hint->rt_flags & (RTF_WASCLONED | RTF_UP)) ==
3699 (RTF_WASCLONED | RTF_UP)) {
3700 struct rtentry *prt = hint->rt_parent;
3701 VERIFY(prt != NULL);
3702
3703 RT_CONVERT_LOCK(hint);
3704 RT_ADDREF(prt);
3705 RT_UNLOCK(hint);
3706 rt_revalidate_gwroute(prt, rt);
3707 RT_REMREF(prt);
3708 } else {
3709 RT_UNLOCK(hint);
3710 }
3711
3712 RT_LOCK_SPIN(rt);
3713 /* rt == gwrt; if it is now down, give up */
3714 if (!(rt->rt_flags & RTF_UP)) {
3715 RT_UNLOCK(rt);
3716 rtfree(rt);
3717 rt = NULL;
3718 /* "rtrele" == original "rt" */
3719 senderr(EHOSTUNREACH);
3720 }
3721 }
3722
3723 /* Become a regular mutex */
3724 RT_CONVERT_LOCK(rt);
3725 }
3726
3727 /*
3728 * Address resolution or Neighbor Unreachability Detection
3729 * for the next hop.
3730 * At this point, the destination of the packet must be a unicast
3731 * or an anycast address(i.e. not a multicast).
3732 */
3733
3734 /* Look up the neighbor cache for the nexthop */
3735 if (rt && (rt->rt_flags & RTF_LLINFO) != 0) {
3736 ln = rt->rt_llinfo;
3737 } else {
3738 struct sockaddr_in6 sin6;
3739 /*
3740 * Clear out Scope ID field in case it is set.
3741 */
3742 sin6 = *dst;
3743 sin6.sin6_scope_id = 0;
3744 /*
3745 * Since nd6_is_addr_neighbor() internally calls nd6_lookup(),
3746 * the condition below is not very efficient. But we believe
3747 * it is tolerable, because this should be a rare case.
3748 * Must drop rt_lock since nd6_is_addr_neighbor() calls
3749 * nd6_lookup() and acquires rnh_lock.
3750 */
3751 if (rt != NULL)
3752 RT_UNLOCK(rt);
3753 if (nd6_is_addr_neighbor(&sin6, ifp, 0)) {
3754 /* "rtrele" may have been used, so clean up "rt" now */
3755 if (rt != NULL) {
3756 /* Don't free "hint0" */
3757 if (rt == hint0)
3758 RT_REMREF(rt);
3759 else
3760 rtfree(rt);
3761 }
3762 /* Callee returns a locked route upon success */
3763 rt = nd6_lookup(&dst->sin6_addr, 1, ifp, 0);
3764 if (rt != NULL) {
3765 RT_LOCK_ASSERT_HELD(rt);
3766 ln = rt->rt_llinfo;
3767 }
3768 } else if (rt != NULL) {
3769 RT_LOCK(rt);
3770 }
3771 }
3772
3773 if (!ln || !rt) {
3774 if (rt != NULL) {
3775 RT_UNLOCK(rt);
3776 }
3777 ndi = ND_IFINFO(ifp);
3778 VERIFY(ndi != NULL && ndi->initialized);
3779 lck_mtx_lock(&ndi->lock);
3780 if ((ifp->if_flags & IFF_POINTOPOINT) == 0 &&
3781 !(ndi->flags & ND6_IFF_PERFORMNUD)) {
3782 lck_mtx_unlock(&ndi->lock);
3783 log(LOG_DEBUG,
3784 "nd6_output: can't allocate llinfo for %s "
3785 "(ln=0x%llx, rt=0x%llx)\n",
3786 ip6_sprintf(&dst->sin6_addr),
3787 (uint64_t)VM_KERNEL_ADDRPERM(ln),
3788 (uint64_t)VM_KERNEL_ADDRPERM(rt));
3789 senderr(EIO); /* XXX: good error? */
3790 }
3791 lck_mtx_unlock(&ndi->lock);
3792
3793 goto sendpkt; /* send anyway */
3794 }
3795
3796 net_update_uptime();
3797 timenow = net_uptime();
3798
3799 /* We don't have to do link-layer address resolution on a p2p link. */
3800 if ((ifp->if_flags & IFF_POINTOPOINT) != 0 &&
3801 ln->ln_state < ND6_LLINFO_REACHABLE) {
3802 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_STALE);
3803 ln_setexpire(ln, timenow + nd6_gctimer);
3804 }
3805
3806 /*
3807 * The first time we send a packet to a neighbor whose entry is
3808 * STALE, we have to change the state to DELAY and a sets a timer to
3809 * expire in DELAY_FIRST_PROBE_TIME seconds to ensure do
3810 * neighbor unreachability detection on expiration.
3811 * (RFC 4861 7.3.3)
3812 */
3813 if (ln->ln_state == ND6_LLINFO_STALE) {
3814 ln->ln_asked = 0;
3815 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_DELAY);
3816 ln_setexpire(ln, timenow + nd6_delay);
3817 /* N.B.: we will re-arm the timer below. */
3818 _CASSERT(ND6_LLINFO_DELAY > ND6_LLINFO_INCOMPLETE);
3819 }
3820
3821 /*
3822 * If the neighbor cache entry has a state other than INCOMPLETE
3823 * (i.e. its link-layer address is already resolved), just
3824 * send the packet.
3825 */
3826 if (ln->ln_state > ND6_LLINFO_INCOMPLETE) {
3827 RT_UNLOCK(rt);
3828 /*
3829 * Move this entry to the head of the queue so that it is
3830 * less likely for this entry to be a target of forced
3831 * garbage collection (see nd6_rtrequest()). Do this only
3832 * if the entry is non-permanent (as permanent ones will
3833 * never be purged), and if the number of active entries
3834 * is at least half of the threshold.
3835 */
3836 if (ln->ln_state == ND6_LLINFO_DELAY ||
3837 (ln->ln_expire != 0 && ip6_neighborgcthresh > 0 &&
3838 nd6_inuse >= (ip6_neighborgcthresh >> 1))) {
3839 lck_mtx_lock(rnh_lock);
3840 if (ln->ln_state == ND6_LLINFO_DELAY)
3841 nd6_sched_timeout(NULL, NULL);
3842 if (ln->ln_expire != 0 && ip6_neighborgcthresh > 0 &&
3843 nd6_inuse >= (ip6_neighborgcthresh >> 1)) {
3844 RT_LOCK_SPIN(rt);
3845 if (ln->ln_flags & ND6_LNF_IN_USE) {
3846 LN_DEQUEUE(ln);
3847 LN_INSERTHEAD(ln);
3848 }
3849 RT_UNLOCK(rt);
3850 }
3851 lck_mtx_unlock(rnh_lock);
3852 }
3853 goto sendpkt;
3854 }
3855
3856 /*
3857 * If this is a prefix proxy route, record the inbound interface
3858 * so that it can be excluded from the list of interfaces eligible
3859 * for forwarding the proxied NS in nd6_prproxy_ns_output().
3860 */
3861 if (rt->rt_flags & RTF_PROXY)
3862 ln->ln_exclifp = ((origifp == ifp) ? NULL : origifp);
3863
3864 /*
3865 * There is a neighbor cache entry, but no ethernet address
3866 * response yet. Replace the held mbuf (if any) with this
3867 * latest one.
3868 *
3869 * This code conforms to the rate-limiting rule described in Section
3870 * 7.2.2 of RFC 4861, because the timer is set correctly after sending
3871 * an NS below.
3872 */
3873 if (ln->ln_state == ND6_LLINFO_NOSTATE)
3874 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_INCOMPLETE);
3875 if (ln->ln_hold)
3876 m_freem_list(ln->ln_hold);
3877 ln->ln_hold = m0;
3878 if (!ND6_LLINFO_PERMANENT(ln) && ln->ln_asked == 0) {
3879 ln->ln_asked++;
3880 ndi = ND_IFINFO(ifp);
3881 VERIFY(ndi != NULL && ndi->initialized);
3882 lck_mtx_lock(&ndi->lock);
3883 ln_setexpire(ln, timenow + ndi->retrans / 1000);
3884 lck_mtx_unlock(&ndi->lock);
3885 RT_UNLOCK(rt);
3886 /* We still have a reference on rt (for ln) */
3887 if (ip6_forwarding)
3888 nd6_prproxy_ns_output(ifp, origifp, NULL,
3889 &dst->sin6_addr, ln);
3890 else
3891 nd6_ns_output(ifp, NULL, &dst->sin6_addr, ln, NULL);
3892 lck_mtx_lock(rnh_lock);
3893 nd6_sched_timeout(NULL, NULL);
3894 lck_mtx_unlock(rnh_lock);
3895 } else {
3896 RT_UNLOCK(rt);
3897 }
3898 /*
3899 * Move this entry to the head of the queue so that it is
3900 * less likely for this entry to be a target of forced
3901 * garbage collection (see nd6_rtrequest()). Do this only
3902 * if the entry is non-permanent (as permanent ones will
3903 * never be purged), and if the number of active entries
3904 * is at least half of the threshold.
3905 */
3906 if (ln->ln_expire != 0 && ip6_neighborgcthresh > 0 &&
3907 nd6_inuse >= (ip6_neighborgcthresh >> 1)) {
3908 lck_mtx_lock(rnh_lock);
3909 RT_LOCK_SPIN(rt);
3910 if (ln->ln_flags & ND6_LNF_IN_USE) {
3911 LN_DEQUEUE(ln);
3912 LN_INSERTHEAD(ln);
3913 }
3914 /* Clean up "rt" now while we can */
3915 if (rt == hint0) {
3916 RT_REMREF_LOCKED(rt);
3917 RT_UNLOCK(rt);
3918 } else {
3919 RT_UNLOCK(rt);
3920 rtfree_locked(rt);
3921 }
3922 rt = NULL; /* "rt" has been taken care of */
3923 lck_mtx_unlock(rnh_lock);
3924 }
3925 error = 0;
3926 goto release;
3927
3928 sendpkt:
3929 if (rt != NULL)
3930 RT_LOCK_ASSERT_NOTHELD(rt);
3931
3932 /* discard the packet if IPv6 operation is disabled on the interface */
3933 if (ifp->if_eflags & IFEF_IPV6_DISABLED) {
3934 error = ENETDOWN; /* better error? */
3935 goto bad;
3936 }
3937
3938 if (ifp->if_flags & IFF_LOOPBACK) {
3939 /* forwarding rules require the original scope_id */
3940 m0->m_pkthdr.rcvif = origifp;
3941 error = dlil_output(origifp, PF_INET6, m0, (caddr_t)rt,
3942 SA(dst), 0, adv);
3943 goto release;
3944 } else {
3945 /* Do not allow loopback address to wind up on a wire */
3946 struct ip6_hdr *ip6 = mtod(m0, struct ip6_hdr *);
3947
3948 if ((IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src) ||
3949 IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst))) {
3950 ip6stat.ip6s_badscope++;
3951 error = EADDRNOTAVAIL;
3952 goto bad;
3953 }
3954 }
3955
3956 if (rt != NULL) {
3957 RT_LOCK_SPIN(rt);
3958 /* Mark use timestamp */
3959 if (rt->rt_llinfo != NULL)
3960 nd6_llreach_use(rt->rt_llinfo);
3961 RT_UNLOCK(rt);
3962 }
3963
3964 struct mbuf *mcur = m0;
3965 uint32_t pktcnt = 0;
3966
3967 while (mcur) {
3968 if (hint != NULL && nstat_collect) {
3969 int scnt;
3970
3971 if ((mcur->m_pkthdr.csum_flags & CSUM_TSO_IPV6) &&
3972 (mcur->m_pkthdr.tso_segsz > 0))
3973 scnt = mcur->m_pkthdr.len / mcur->m_pkthdr.tso_segsz;
3974 else
3975 scnt = 1;
3976
3977 nstat_route_tx(hint, scnt, mcur->m_pkthdr.len, 0);
3978 }
3979 pktcnt++;
3980
3981 mcur->m_pkthdr.rcvif = NULL;
3982 mcur = mcur->m_nextpkt;
3983 }
3984 if (pktcnt > ip6_maxchainsent)
3985 ip6_maxchainsent = pktcnt;
3986 error = dlil_output(ifp, PF_INET6, m0, (caddr_t)rt, SA(dst), 0, adv);
3987 goto release;
3988
3989 bad:
3990 if (m0 != NULL)
3991 m_freem_list(m0);
3992
3993 release:
3994 /* Clean up "rt" unless it's already been done */
3995 if (rt != NULL) {
3996 RT_LOCK_SPIN(rt);
3997 if (rt == hint0) {
3998 RT_REMREF_LOCKED(rt);
3999 RT_UNLOCK(rt);
4000 } else {
4001 RT_UNLOCK(rt);
4002 rtfree(rt);
4003 }
4004 }
4005 /* And now clean up "rtrele" if there is any */
4006 if (rtrele != NULL) {
4007 RT_LOCK_SPIN(rtrele);
4008 if (rtrele == hint0) {
4009 RT_REMREF_LOCKED(rtrele);
4010 RT_UNLOCK(rtrele);
4011 } else {
4012 RT_UNLOCK(rtrele);
4013 rtfree(rtrele);
4014 }
4015 }
4016 return (error);
4017 }
4018 #undef senderr
4019
4020 int
4021 nd6_need_cache(struct ifnet *ifp)
4022 {
4023 /*
4024 * XXX: we currently do not make neighbor cache on any interface
4025 * other than ARCnet, Ethernet, FDDI and GIF.
4026 *
4027 * RFC2893 says:
4028 * - unidirectional tunnels needs no ND
4029 */
4030 switch (ifp->if_type) {
4031 case IFT_ARCNET:
4032 case IFT_ETHER:
4033 case IFT_FDDI:
4034 case IFT_IEEE1394:
4035 case IFT_L2VLAN:
4036 case IFT_IEEE8023ADLAG:
4037 #if IFT_IEEE80211
4038 case IFT_IEEE80211:
4039 #endif
4040 case IFT_GIF: /* XXX need more cases? */
4041 case IFT_PPP:
4042 #if IFT_TUNNEL
4043 case IFT_TUNNEL:
4044 #endif
4045 case IFT_BRIDGE:
4046 case IFT_CELLULAR:
4047 return (1);
4048 default:
4049 return (0);
4050 }
4051 }
4052
4053 int
4054 nd6_storelladdr(struct ifnet *ifp, struct rtentry *rt, struct mbuf *m,
4055 struct sockaddr *dst, u_char *desten)
4056 {
4057 int i;
4058 struct sockaddr_dl *sdl;
4059
4060 if (m->m_flags & M_MCAST) {
4061 switch (ifp->if_type) {
4062 case IFT_ETHER:
4063 case IFT_FDDI:
4064 case IFT_L2VLAN:
4065 case IFT_IEEE8023ADLAG:
4066 #if IFT_IEEE80211
4067 case IFT_IEEE80211:
4068 #endif
4069 case IFT_BRIDGE:
4070 ETHER_MAP_IPV6_MULTICAST(&SIN6(dst)->sin6_addr, desten);
4071 return (1);
4072 case IFT_IEEE1394:
4073 for (i = 0; i < ifp->if_addrlen; i++)
4074 desten[i] = ~0;
4075 return (1);
4076 case IFT_ARCNET:
4077 *desten = 0;
4078 return (1);
4079 default:
4080 return (0); /* caller will free mbuf */
4081 }
4082 }
4083
4084 if (rt == NULL) {
4085 /* this could happen, if we could not allocate memory */
4086 return (0); /* caller will free mbuf */
4087 }
4088 RT_LOCK(rt);
4089 if (rt->rt_gateway->sa_family != AF_LINK) {
4090 printf("nd6_storelladdr: something odd happens\n");
4091 RT_UNLOCK(rt);
4092 return (0); /* caller will free mbuf */
4093 }
4094 sdl = SDL(rt->rt_gateway);
4095 if (sdl->sdl_alen == 0) {
4096 /* this should be impossible, but we bark here for debugging */
4097 printf("nd6_storelladdr: sdl_alen == 0\n");
4098 RT_UNLOCK(rt);
4099 return (0); /* caller will free mbuf */
4100 }
4101
4102 bcopy(LLADDR(sdl), desten, sdl->sdl_alen);
4103 RT_UNLOCK(rt);
4104 return (1);
4105 }
4106
4107 /*
4108 * This is the ND pre-output routine; care must be taken to ensure that
4109 * the "hint" route never gets freed via rtfree(), since the caller may
4110 * have stored it inside a struct route with a reference held for that
4111 * placeholder.
4112 */
4113 errno_t
4114 nd6_lookup_ipv6(ifnet_t ifp, const struct sockaddr_in6 *ip6_dest,
4115 struct sockaddr_dl *ll_dest, size_t ll_dest_len, route_t hint,
4116 mbuf_t packet)
4117 {
4118 route_t route = hint;
4119 errno_t result = 0;
4120 struct sockaddr_dl *sdl = NULL;
4121 size_t copy_len;
4122
4123 if (ifp == NULL || ip6_dest == NULL)
4124 return (EINVAL);
4125
4126 if (ip6_dest->sin6_family != AF_INET6)
4127 return (EAFNOSUPPORT);
4128
4129 if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
4130 return (ENETDOWN);
4131
4132 if (hint != NULL) {
4133 /*
4134 * Callee holds a reference on the route and returns
4135 * with the route entry locked, upon success.
4136 */
4137 result = route_to_gwroute((const struct sockaddr *)ip6_dest,
4138 hint, &route);
4139 if (result != 0)
4140 return (result);
4141 if (route != NULL)
4142 RT_LOCK_ASSERT_HELD(route);
4143 }
4144
4145 if ((packet != NULL && (packet->m_flags & M_MCAST) != 0) ||
4146 ((ifp->if_flags & IFF_MULTICAST) &&
4147 IN6_IS_ADDR_MULTICAST(&ip6_dest->sin6_addr))) {
4148 if (route != NULL)
4149 RT_UNLOCK(route);
4150 result = dlil_resolve_multi(ifp,
4151 (const struct sockaddr *)ip6_dest,
4152 SA(ll_dest), ll_dest_len);
4153 if (route != NULL)
4154 RT_LOCK(route);
4155 goto release;
4156 } else if (route == NULL) {
4157 /*
4158 * rdar://24596652
4159 * For unicast, lookup existing ND6 entries but
4160 * do not trigger a resolution
4161 */
4162 lck_mtx_lock(rnh_lock);
4163 route = rt_lookup(TRUE,
4164 __DECONST(struct sockaddr *, ip6_dest), NULL,
4165 rt_tables[AF_INET6], ifp->if_index);
4166 lck_mtx_unlock(rnh_lock);
4167
4168 if (route != NULL) {
4169 RT_LOCK(route);
4170 }
4171 }
4172
4173 if (route == NULL) {
4174 /*
4175 * This could happen, if we could not allocate memory or
4176 * if route_to_gwroute() didn't return a route.
4177 */
4178 result = ENOBUFS;
4179 goto release;
4180 }
4181
4182 if (route->rt_gateway->sa_family != AF_LINK) {
4183 printf("%s: route %s on %s%d gateway address not AF_LINK\n",
4184 __func__, ip6_sprintf(&ip6_dest->sin6_addr),
4185 route->rt_ifp->if_name, route->rt_ifp->if_unit);
4186 result = EADDRNOTAVAIL;
4187 goto release;
4188 }
4189
4190 sdl = SDL(route->rt_gateway);
4191 if (sdl->sdl_alen == 0) {
4192 /* this should be impossible, but we bark here for debugging */
4193 printf("%s: route %s on %s%d sdl_alen == 0\n", __func__,
4194 ip6_sprintf(&ip6_dest->sin6_addr), route->rt_ifp->if_name,
4195 route->rt_ifp->if_unit);
4196 result = EHOSTUNREACH;
4197 goto release;
4198 }
4199
4200 copy_len = sdl->sdl_len <= ll_dest_len ? sdl->sdl_len : ll_dest_len;
4201 bcopy(sdl, ll_dest, copy_len);
4202
4203 release:
4204 if (route != NULL) {
4205 if (route == hint) {
4206 RT_REMREF_LOCKED(route);
4207 RT_UNLOCK(route);
4208 } else {
4209 RT_UNLOCK(route);
4210 rtfree(route);
4211 }
4212 }
4213 return (result);
4214 }
4215
#if (DEVELOPMENT || DEBUG)

static int sysctl_nd6_lookup_ipv6 SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_inet6_icmp6, OID_AUTO, nd6_lookup_ipv6,
    CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
    sysctl_nd6_lookup_ipv6, "S", "");

/*
 * Debug-only sysctl: resolve an IPv6 destination on a named interface to
 * its link-layer address via nd6_lookup_ipv6().  The caller must be root
 * and must supply a struct nd6_lookup_ipv6_args as both the new (input)
 * and old (output) data; the resolved sockaddr_dl is copied back out in
 * the same structure.  Returns 0 or an errno.
 */
static int
sysctl_nd6_lookup_ipv6 SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	struct nd6_lookup_ipv6_args nd6_lookup_ipv6_args;
	ifnet_t ifp = NULL;

	/*
	 * Only root can lookup MAC addresses
	 */
	error = proc_suser(current_proc());
	if (error != 0) {
		printf("%s: proc_suser() error %d\n",
		    __func__, error);
		goto done;
	}
	/* Size probe: report how much "old" data a real request returns */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(struct nd6_lookup_ipv6_args);
	}
	if (req->newptr == USER_ADDR_NULL) {
		goto done;
	}
	if (req->oldlen != sizeof(struct nd6_lookup_ipv6_args) ||
	    req->newlen != sizeof(struct nd6_lookup_ipv6_args)) {
		error = EINVAL;
		printf("%s: bad req, error %d\n",
		    __func__, error);
		goto done;
	}
	error = SYSCTL_IN(req, &nd6_lookup_ipv6_args,
	    sizeof(struct nd6_lookup_ipv6_args));
	if (error != 0) {
		printf("%s: SYSCTL_IN() error %d\n",
		    __func__, error);
		goto done;
	}
	/* Make sure to terminate the string */
	nd6_lookup_ipv6_args.ifname[IFNAMSIZ - 1] = 0;

	/*
	 * Bound the user-supplied link-layer buffer length by the actual
	 * in-struct storage; otherwise nd6_lookup_ipv6() could be induced
	 * to bcopy() past the end of ll_dest_ (kernel memory overwrite).
	 */
	if (nd6_lookup_ipv6_args.ll_dest_len >
	    sizeof(nd6_lookup_ipv6_args.ll_dest_)) {
		error = EINVAL;
		printf("%s: bad ll_dest_len, error %d\n",
		    __func__, error);
		goto done;
	}

	error = ifnet_find_by_name(nd6_lookup_ipv6_args.ifname, &ifp);
	if (error != 0) {
		printf("%s: ifnet_find_by_name() error %d\n",
		    __func__, error);
		goto done;
	}

	error = nd6_lookup_ipv6(ifp, &nd6_lookup_ipv6_args.ip6_dest,
	    &nd6_lookup_ipv6_args.ll_dest_._sdl,
	    nd6_lookup_ipv6_args.ll_dest_len, NULL, NULL);
	if (error != 0) {
		printf("%s: nd6_lookup_ipv6() error %d\n",
		    __func__, error);
		goto done;
	}

	error = SYSCTL_OUT(req, &nd6_lookup_ipv6_args,
	    sizeof(struct nd6_lookup_ipv6_args));
	if (error != 0) {
		printf("%s: SYSCTL_OUT() error %d\n",
		    __func__, error);
		goto done;
	}
done:
	/*
	 * ifnet_find_by_name() returns the interface with a reference
	 * held; drop it on every exit path to avoid leaking it.
	 */
	if (ifp != NULL)
		ifnet_release(ifp);
	return (error);
}

#endif /* (DEVELOPMENT || DEBUG) */
4291
4292 int
4293 nd6_setifinfo(struct ifnet *ifp, u_int32_t before, u_int32_t after)
4294 {
4295 uint32_t b, a;
4296 int err = 0;
4297
4298 /*
4299 * Handle ND6_IFF_IFDISABLED
4300 */
4301 if ((before & ND6_IFF_IFDISABLED) ||
4302 (after & ND6_IFF_IFDISABLED)) {
4303 b = (before & ND6_IFF_IFDISABLED);
4304 a = (after & ND6_IFF_IFDISABLED);
4305
4306 if (b != a && (err = nd6_if_disable(ifp,
4307 ((int32_t)(a - b) > 0))) != 0)
4308 goto done;
4309 }
4310
4311 /*
4312 * Handle ND6_IFF_PROXY_PREFIXES
4313 */
4314 if ((before & ND6_IFF_PROXY_PREFIXES) ||
4315 (after & ND6_IFF_PROXY_PREFIXES)) {
4316 b = (before & ND6_IFF_PROXY_PREFIXES);
4317 a = (after & ND6_IFF_PROXY_PREFIXES);
4318
4319 if (b != a && (err = nd6_if_prproxy(ifp,
4320 ((int32_t)(a - b) > 0))) != 0)
4321 goto done;
4322 }
4323 done:
4324 return (err);
4325 }
4326
4327 /*
4328 * Enable/disable IPv6 on an interface, called as part of
4329 * setting/clearing ND6_IFF_IFDISABLED, or during DAD failure.
4330 */
4331 int
4332 nd6_if_disable(struct ifnet *ifp, boolean_t enable)
4333 {
4334 ifnet_lock_shared(ifp);
4335 if (enable)
4336 ifp->if_eflags |= IFEF_IPV6_DISABLED;
4337 else
4338 ifp->if_eflags &= ~IFEF_IPV6_DISABLED;
4339 ifnet_lock_done(ifp);
4340
4341 return (0);
4342 }
4343
4344 static int
4345 nd6_sysctl_drlist SYSCTL_HANDLER_ARGS
4346 {
4347 #pragma unused(oidp, arg1, arg2)
4348 char pbuf[MAX_IPv6_STR_LEN];
4349 struct nd_defrouter *dr;
4350 int error = 0;
4351
4352 if (req->newptr != USER_ADDR_NULL)
4353 return (EPERM);
4354
4355 /* XXX Handle mapped defrouter entries */
4356 lck_mtx_lock(nd6_mutex);
4357 if (proc_is64bit(req->p)) {
4358 struct in6_defrouter_64 d;
4359
4360 bzero(&d, sizeof (d));
4361 d.rtaddr.sin6_family = AF_INET6;
4362 d.rtaddr.sin6_len = sizeof (d.rtaddr);
4363
4364 TAILQ_FOREACH(dr, &nd_defrouter, dr_entry) {
4365 d.rtaddr.sin6_addr = dr->rtaddr;
4366 if (in6_recoverscope(&d.rtaddr,
4367 &dr->rtaddr, dr->ifp) != 0)
4368 log(LOG_ERR, "scope error in default router "
4369 "list (%s)\n", inet_ntop(AF_INET6,
4370 &dr->rtaddr, pbuf, sizeof (pbuf)));
4371 d.flags = dr->flags;
4372 d.stateflags = dr->stateflags;
4373 d.rtlifetime = dr->rtlifetime;
4374 d.expire = nddr_getexpire(dr);
4375 d.if_index = dr->ifp->if_index;
4376 error = SYSCTL_OUT(req, &d, sizeof (d));
4377 if (error != 0)
4378 break;
4379 }
4380 } else {
4381 struct in6_defrouter_32 d;
4382
4383 bzero(&d, sizeof (d));
4384 d.rtaddr.sin6_family = AF_INET6;
4385 d.rtaddr.sin6_len = sizeof (d.rtaddr);
4386
4387 TAILQ_FOREACH(dr, &nd_defrouter, dr_entry) {
4388 d.rtaddr.sin6_addr = dr->rtaddr;
4389 if (in6_recoverscope(&d.rtaddr,
4390 &dr->rtaddr, dr->ifp) != 0)
4391 log(LOG_ERR, "scope error in default router "
4392 "list (%s)\n", inet_ntop(AF_INET6,
4393 &dr->rtaddr, pbuf, sizeof (pbuf)));
4394 d.flags = dr->flags;
4395 d.stateflags = dr->stateflags;
4396 d.rtlifetime = dr->rtlifetime;
4397 d.expire = nddr_getexpire(dr);
4398 d.if_index = dr->ifp->if_index;
4399 error = SYSCTL_OUT(req, &d, sizeof (d));
4400 if (error != 0)
4401 break;
4402 }
4403 }
4404 lck_mtx_unlock(nd6_mutex);
4405 return (error);
4406 }
4407
/*
 * Read-only sysctl handler: dump the NDP prefix list to userland.  For
 * each prefix it emits one in6_prefix_{32,64} record (layout chosen by
 * the requesting process's ABI) whose advrtrs field counts the
 * advertising routers, followed by that many sockaddr_in6 records, one
 * per advertising router.  Holds nd6_mutex across the whole walk and
 * NDPR_LOCK across each prefix's record plus its router records.
 * Writes are rejected with EPERM.
 */
static int
nd6_sysctl_prlist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	char pbuf[MAX_IPv6_STR_LEN];
	struct nd_pfxrouter *pfr;
	struct sockaddr_in6 s6;
	struct nd_prefix *pr;
	int error = 0;

	if (req->newptr != USER_ADDR_NULL)
		return (EPERM);

	/* Template sockaddr reused for every advertising-router record */
	bzero(&s6, sizeof (s6));
	s6.sin6_family = AF_INET6;
	s6.sin6_len = sizeof (s6);

	/* XXX Handle mapped defrouter entries */
	lck_mtx_lock(nd6_mutex);
	if (proc_is64bit(req->p)) {
		struct in6_prefix_64 p;

		bzero(&p, sizeof (p));
		p.origin = PR_ORIG_RA;

		LIST_FOREACH(pr, &nd_prefix, ndpr_entry) {
			/*
			 * NDPR_LOCK stays held until both the prefix record
			 * and all of its router records have been copied
			 * out; every early break below must unlock first.
			 */
			NDPR_LOCK(pr);
			p.prefix = pr->ndpr_prefix;
			if (in6_recoverscope(&p.prefix,
			    &pr->ndpr_prefix.sin6_addr, pr->ndpr_ifp) != 0)
				log(LOG_ERR, "scope error in "
				    "prefix list (%s)\n", inet_ntop(AF_INET6,
				    &p.prefix.sin6_addr, pbuf, sizeof (pbuf)));
			p.raflags = pr->ndpr_raf;
			p.prefixlen = pr->ndpr_plen;
			p.vltime = pr->ndpr_vltime;
			p.pltime = pr->ndpr_pltime;
			p.if_index = pr->ndpr_ifp->if_index;
			p.expire = ndpr_getexpire(pr);
			p.refcnt = pr->ndpr_addrcnt;
			p.flags = pr->ndpr_stateflags;
			/* Count advertising routers so userland knows how
			 * many sockaddr records follow this prefix record */
			p.advrtrs = 0;
			LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry)
				p.advrtrs++;
			error = SYSCTL_OUT(req, &p, sizeof (p));
			if (error != 0) {
				NDPR_UNLOCK(pr);
				break;
			}
			/* Second pass: emit one sockaddr per router */
			LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry) {
				s6.sin6_addr = pfr->router->rtaddr;
				if (in6_recoverscope(&s6, &pfr->router->rtaddr,
				    pfr->router->ifp) != 0)
					log(LOG_ERR,
					    "scope error in prefix list (%s)\n",
					    inet_ntop(AF_INET6, &s6.sin6_addr,
					    pbuf, sizeof (pbuf)));
				error = SYSCTL_OUT(req, &s6, sizeof (s6));
				if (error != 0)
					break;
			}
			NDPR_UNLOCK(pr);
			/* Propagate a copy-out failure from the inner loop */
			if (error != 0)
				break;
		}
	} else {
		/* Same walk as above, 32-bit record layout */
		struct in6_prefix_32 p;

		bzero(&p, sizeof (p));
		p.origin = PR_ORIG_RA;

		LIST_FOREACH(pr, &nd_prefix, ndpr_entry) {
			NDPR_LOCK(pr);
			p.prefix = pr->ndpr_prefix;
			if (in6_recoverscope(&p.prefix,
			    &pr->ndpr_prefix.sin6_addr, pr->ndpr_ifp) != 0)
				log(LOG_ERR,
				    "scope error in prefix list (%s)\n",
				    inet_ntop(AF_INET6, &p.prefix.sin6_addr,
				    pbuf, sizeof (pbuf)));
			p.raflags = pr->ndpr_raf;
			p.prefixlen = pr->ndpr_plen;
			p.vltime = pr->ndpr_vltime;
			p.pltime = pr->ndpr_pltime;
			p.if_index = pr->ndpr_ifp->if_index;
			p.expire = ndpr_getexpire(pr);
			p.refcnt = pr->ndpr_addrcnt;
			p.flags = pr->ndpr_stateflags;
			p.advrtrs = 0;
			LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry)
				p.advrtrs++;
			error = SYSCTL_OUT(req, &p, sizeof (p));
			if (error != 0) {
				NDPR_UNLOCK(pr);
				break;
			}
			LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry) {
				s6.sin6_addr = pfr->router->rtaddr;
				if (in6_recoverscope(&s6, &pfr->router->rtaddr,
				    pfr->router->ifp) != 0)
					log(LOG_ERR,
					    "scope error in prefix list (%s)\n",
					    inet_ntop(AF_INET6, &s6.sin6_addr,
					    pbuf, sizeof (pbuf)));
				error = SYSCTL_OUT(req, &s6, sizeof (s6));
				if (error != 0)
					break;
			}
			NDPR_UNLOCK(pr);
			if (error != 0)
				break;
		}
	}
	lck_mtx_unlock(nd6_mutex);

	return (error);
}
4525
4526 void
4527 in6_ifaddr_set_dadprogress(struct in6_ifaddr *ia)
4528 {
4529 struct ifnet* ifp = ia->ia_ifp;
4530 uint32_t flags = IN6_IFF_TENTATIVE;
4531 uint32_t optdad = nd6_optimistic_dad;
4532 struct nd_ifinfo *ndi = NULL;
4533
4534 ndi = ND_IFINFO(ifp);
4535 VERIFY((NULL != ndi) && (TRUE == ndi->initialized));
4536 if (!(ndi->flags & ND6_IFF_DAD))
4537 return;
4538
4539 if (optdad) {
4540 if ((ifp->if_eflags & IFEF_IPV6_ROUTER) != 0) {
4541 optdad = 0;
4542 } else {
4543 lck_mtx_lock(&ndi->lock);
4544 if ((ndi->flags & ND6_IFF_REPLICATED) != 0) {
4545 optdad = 0;
4546 }
4547 lck_mtx_unlock(&ndi->lock);
4548 }
4549 }
4550
4551 if (optdad) {
4552 if ((optdad & ND6_OPTIMISTIC_DAD_LINKLOCAL) &&
4553 IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr))
4554 flags = IN6_IFF_OPTIMISTIC;
4555 else if ((optdad & ND6_OPTIMISTIC_DAD_AUTOCONF) &&
4556 (ia->ia6_flags & IN6_IFF_AUTOCONF)) {
4557 if (ia->ia6_flags & IN6_IFF_TEMPORARY) {
4558 if (optdad & ND6_OPTIMISTIC_DAD_TEMPORARY)
4559 flags = IN6_IFF_OPTIMISTIC;
4560 } else if (ia->ia6_flags & IN6_IFF_SECURED) {
4561 if (optdad & ND6_OPTIMISTIC_DAD_SECURED)
4562 flags = IN6_IFF_OPTIMISTIC;
4563 } else {
4564 /*
4565 * Keeping the behavior for temp and CGA
4566 * SLAAC addresses to have a knob for optimistic
4567 * DAD.
4568 * Other than that if ND6_OPTIMISTIC_DAD_AUTOCONF
4569 * is set, we should default to optimistic
4570 * DAD.
4571 * For now this means SLAAC addresses with interface
4572 * identifier derived from modified EUI-64 bit
4573 * identifiers.
4574 */
4575 flags = IN6_IFF_OPTIMISTIC;
4576 }
4577 } else if ((optdad & ND6_OPTIMISTIC_DAD_DYNAMIC) &&
4578 (ia->ia6_flags & IN6_IFF_DYNAMIC)) {
4579 if (ia->ia6_flags & IN6_IFF_TEMPORARY) {
4580 if (optdad & ND6_OPTIMISTIC_DAD_TEMPORARY)
4581 flags = IN6_IFF_OPTIMISTIC;
4582 } else {
4583 flags = IN6_IFF_OPTIMISTIC;
4584 }
4585 } else if ((optdad & ND6_OPTIMISTIC_DAD_MANUAL) &&
4586 (ia->ia6_flags & IN6_IFF_OPTIMISTIC)) {
4587 /*
4588 * rdar://17483438
4589 * Bypass tentative for address assignments
4590 * not covered above (e.g. manual) upon request
4591 */
4592 if (!IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr) &&
4593 !(ia->ia6_flags & IN6_IFF_AUTOCONF) &&
4594 !(ia->ia6_flags & IN6_IFF_DYNAMIC))
4595 flags = IN6_IFF_OPTIMISTIC;
4596 }
4597 }
4598
4599 ia->ia6_flags &= ~(IN6_IFF_DUPLICATED | IN6_IFF_DADPROGRESS);
4600 ia->ia6_flags |= flags;
4601
4602 nd6log2((LOG_DEBUG, "%s - %s ifp %s ia6_flags 0x%x\n",
4603 __func__,
4604 ip6_sprintf(&ia->ia_addr.sin6_addr),
4605 if_name(ia->ia_ifp),
4606 ia->ia6_flags));
4607 }
4608