/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * XXX
 * KAME 970409 note:
 * BSD/OS version heavily modifies this code, related to llinfo.
 * Since we don't have BSD/OS version of net/route.c in our hand,
 * I left the code mostly as it was in 970310. -- itojun
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/mcache.h>

#include <dev/random/randomdev.h>

#include <kern/queue.h>
#include <kern/zalloc.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_llreach.h>
#include <net/route.h>
#include <net/dlil.h>
#include <net/ntstat.h>
#include <net/net_osdep.h>
#include <net/nwk_wq.h>

#include <netinet/in.h>
#include <netinet/in_arp.h>
#include <netinet/if_ether.h>
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet6/scope6_var.h>
#include <netinet/icmp6.h>

#include <os/log.h>

#include "loop.h"

#define ND6_SLOWTIMER_INTERVAL          (60 * 60)  /* 1 hour */
#define ND6_RECALC_REACHTM_INTERVAL     (60 * 120) /* 2 hours */

#define equal(a1, a2) (bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)

/* timer values */
int nd6_prune = 1;              /* walk list every 1 second */
int nd6_prune_lazy = 5;         /* lazily walk list every 5 seconds */
int nd6_delay = 5;              /* delay first probe by 5 seconds */
int nd6_umaxtries = 3;          /* maximum unicast query */
int nd6_mmaxtries = 3;          /* maximum multicast query */
int nd6_useloopback = 1;        /* use loopback interface for local traffic */
int nd6_gctimer = (60 * 60 * 24); /* 1 day: garbage collection timer */

/* preventing too many loops in ND option parsing */
int nd6_maxndopt = 10;          /* max # of ND options allowed */

int nd6_maxqueuelen = 1; /* max # of packets cached in unresolved ND entries */

#if ND6_DEBUG
int nd6_debug = 1;
#else
int nd6_debug = 0;
#endif

int nd6_optimistic_dad = ND6_OPTIMISTIC_DAD_DEFAULT;

/* for debugging? */
static int nd6_inuse, nd6_allocated;

/*
 * Synchronization notes:
 *
 * The global list of ND entries is stored in llinfo_nd6; an entry
 * gets inserted into the list when the route is created and gets
 * removed from the list when it is deleted; this is done as part
 * of RTM_ADD/RTM_RESOLVE/RTM_DELETE in nd6_rtrequest().
 *
 * Because rnh_lock and rt_lock for the entry are held during those
 * operations, the same locks (and thus lock ordering) must be used
 * elsewhere to access the relevant data structure fields:
 *
 * ln_next, ln_prev, ln_rt
 *
 * - Routing lock (rnh_lock)
 *
 * ln_hold, ln_asked, ln_expire, ln_state, ln_router, ln_flags,
 * ln_llreach, ln_lastused
 *
 * - Routing entry lock (rt_lock)
 *
 * Due to the dependency on rt_lock, llinfo_nd6 has the same lifetime
 * as the route entry itself.  When a route is deleted (RTM_DELETE),
 * it is simply removed from the global list but the memory is not
 * freed until the route itself is freed.
 */
struct llinfo_nd6 llinfo_nd6 = {
    .ln_next = &llinfo_nd6,
    .ln_prev = &llinfo_nd6,
};

static lck_grp_attr_t *nd_if_lock_grp_attr = NULL;
static lck_grp_t *nd_if_lock_grp = NULL;
static lck_attr_t *nd_if_lock_attr = NULL;

/* Protected by nd6_mutex */
struct nd_drhead nd_defrouter_list;
struct nd_prhead nd_prefix = { .lh_first = 0 };
struct nd_rtihead nd_rti_list;
/*
 * nd6_timeout() is scheduled on a demand basis.  nd6_timeout_run is used
 * to indicate whether or not a timeout has been scheduled.  The rnh_lock
 * mutex is used to protect this scheduling; it is a natural choice given
 * the work done in the timer callback.  Unfortunately, there are cases
 * when nd6_timeout() needs to be scheduled while rnh_lock cannot be easily
 * held, due to lock ordering.  In those cases, we utilize a "demand" counter
 * nd6_sched_timeout_want which can be atomically incremented without
 * having to hold rnh_lock.  In places where we acquire rnh_lock, such as
 * nd6_rtrequest(), we check this counter and schedule the timer if it is
 * non-zero.  The increment happens in various places when we allocate
 * new ND entries, default routers, prefixes and addresses.
 */
static int nd6_timeout_run; /* nd6_timeout is scheduled to run */
static void nd6_timeout(void *);
int nd6_sched_timeout_want; /* demand count for timer to be sched */
static boolean_t nd6_fast_timer_on = FALSE;

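/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): the demand-counter pattern described above in its simplest
 * form.  A producer that cannot take rnh_lock records demand; a later
 * code path that already holds rnh_lock notices it and arms the timer.
 * The exact increment primitive is an assumption here; the comment
 * above only states that the counter can be bumped without rnh_lock.
 *
 *	// Producer side (rnh_lock may not be held):
 *	nd6_sched_timeout_want++;	// e.g. via an atomic increment
 *
 *	// Consumer side, e.g. in nd6_rtrequest(), rnh_lock held:
 *	if (nd6_sched_timeout_want != 0) {
 *		nd6_sched_timeout(NULL, NULL);	// clears the demand count
 *	}
 */
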
/* Serialization variables for nd6_service(), protected by rnh_lock */
static boolean_t nd6_service_busy;
static void *nd6_service_wc = &nd6_service_busy;
static int nd6_service_waiters = 0;

int nd6_recalc_reachtm_interval = ND6_RECALC_REACHTM_INTERVAL;
static struct sockaddr_in6 all1_sa;

static int regen_tmpaddr(struct in6_ifaddr *);
extern lck_mtx_t *nd6_mutex;

static struct llinfo_nd6 *nd6_llinfo_alloc(zalloc_flags_t);
static void nd6_llinfo_free(void *);
static void nd6_llinfo_purge(struct rtentry *);
static void nd6_llinfo_get_ri(struct rtentry *, struct rt_reach_info *);
static void nd6_llinfo_get_iflri(struct rtentry *, struct ifnet_llreach_info *);
static void nd6_llinfo_refresh(struct rtentry *);
static uint64_t ln_getexpire(struct llinfo_nd6 *);

static void nd6_service(void *);
static void nd6_slowtimo(void *);
static int nd6_is_new_addr_neighbor(struct sockaddr_in6 *, struct ifnet *);
static int nd6_siocgdrlst(void *, int);
static int nd6_siocgprlst(void *, int);

static void nd6_router_select_rti_entries(struct ifnet *);
static void nd6_purge_interface_default_routers(struct ifnet *);
static void nd6_purge_interface_rti_entries(struct ifnet *);
static void nd6_purge_interface_prefixes(struct ifnet *);
static void nd6_purge_interface_llinfo(struct ifnet *);

static int nd6_sysctl_drlist SYSCTL_HANDLER_ARGS;
static int nd6_sysctl_prlist SYSCTL_HANDLER_ARGS;

/*
 * Insertion and removal from llinfo_nd6 must be done with rnh_lock held.
 */
#define LN_DEQUEUE(_ln) do {                                            \
    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);                     \
    RT_LOCK_ASSERT_HELD((_ln)->ln_rt);                                  \
    (_ln)->ln_next->ln_prev = (_ln)->ln_prev;                           \
    (_ln)->ln_prev->ln_next = (_ln)->ln_next;                           \
    (_ln)->ln_prev = (_ln)->ln_next = NULL;                             \
    (_ln)->ln_flags &= ~ND6_LNF_IN_USE;                                 \
} while (0)

#define LN_INSERTHEAD(_ln) do {                                         \
    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);                     \
    RT_LOCK_ASSERT_HELD((_ln)->ln_rt);                                  \
    (_ln)->ln_next = llinfo_nd6.ln_next;                                \
    llinfo_nd6.ln_next = (_ln);                                         \
    (_ln)->ln_prev = &llinfo_nd6;                                       \
    (_ln)->ln_next->ln_prev = (_ln);                                    \
    (_ln)->ln_flags |= ND6_LNF_IN_USE;                                  \
} while (0)

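/*
 * Illustrative usage sketch (added for exposition, not from the
 * original file): moving a cache entry to the head of llinfo_nd6
 * must follow the lock order asserted by the macros above, taking
 * rnh_lock before the entry's rt_lock:
 *
 *	lck_mtx_lock(rnh_lock);
 *	RT_LOCK(rt);
 *	struct llinfo_nd6 *ln = rt->rt_llinfo;
 *	if (ln->ln_flags & ND6_LNF_IN_USE) {
 *		LN_DEQUEUE(ln);		// unlink; clears ND6_LNF_IN_USE
 *	}
 *	LN_INSERTHEAD(ln);		// relink at head; sets ND6_LNF_IN_USE
 *	RT_UNLOCK(rt);
 *	lck_mtx_unlock(rnh_lock);
 */
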
static ZONE_DECLARE(llinfo_nd6_zone, "llinfo_nd6",
    sizeof(struct llinfo_nd6), ZC_ZFREE_CLEARMEM);

extern int tvtohz(struct timeval *);

static int nd6_init_done;

SYSCTL_DECL(_net_inet6_icmp6);

SYSCTL_PROC(_net_inet6_icmp6, ICMPV6CTL_ND6_DRLIST, nd6_drlist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    nd6_sysctl_drlist, "S,in6_defrouter", "");

SYSCTL_PROC(_net_inet6_icmp6, ICMPV6CTL_ND6_PRLIST, nd6_prlist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    nd6_sysctl_prlist, "S,in6_defrouter", "");

SYSCTL_DECL(_net_inet6_ip6);

static int ip6_maxchainsent = 0;
SYSCTL_INT(_net_inet6_ip6, OID_AUTO, maxchainsent,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_maxchainsent, 0,
    "use dlil_output_list");

SYSCTL_DECL(_net_inet6_icmp6);
int nd6_process_rti = ND6_PROCESS_RTI_DEFAULT;

SYSCTL_INT(_net_inet6_icmp6, OID_AUTO, nd6_process_rti, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nd6_process_rti, 0,
    "Enable/disable processing of Route Information Option in the "
    "IPv6 Router Advertisement.");

void
nd6_init(void)
{
    int i;

    VERIFY(!nd6_init_done);

    all1_sa.sin6_family = AF_INET6;
    all1_sa.sin6_len = sizeof(struct sockaddr_in6);
    for (i = 0; i < sizeof(all1_sa.sin6_addr); i++) {
        all1_sa.sin6_addr.s6_addr[i] = 0xff;
    }

    /* initialization of the default router list */
    TAILQ_INIT(&nd_defrouter_list);
    TAILQ_INIT(&nd_rti_list);

    nd_if_lock_grp_attr = lck_grp_attr_alloc_init();
    nd_if_lock_grp = lck_grp_alloc_init("nd_if_lock", nd_if_lock_grp_attr);
    nd_if_lock_attr = lck_attr_alloc_init();

    nd6_nbr_init();
    nd6_rtr_init();

    nd6_init_done = 1;

    /* start timer */
    timeout(nd6_slowtimo, NULL, ND6_SLOWTIMER_INTERVAL * hz);
}

static struct llinfo_nd6 *
nd6_llinfo_alloc(zalloc_flags_t how)
{
    return zalloc_flags(llinfo_nd6_zone, how | Z_ZERO);
}

static void
nd6_llinfo_free(void *arg)
{
    struct llinfo_nd6 *ln = arg;

    if (ln->ln_next != NULL || ln->ln_prev != NULL) {
        panic("%s: trying to free %p when it is in use", __func__, ln);
        /* NOTREACHED */
    }

    /* Just in case there's anything there, free it */
    if (ln->ln_hold != NULL) {
        m_freem_list(ln->ln_hold);
        ln->ln_hold = NULL;
    }

    /* Purge any link-layer info caching */
    VERIFY(ln->ln_rt->rt_llinfo == ln);
    if (ln->ln_rt->rt_llinfo_purge != NULL) {
        ln->ln_rt->rt_llinfo_purge(ln->ln_rt);
    }

    zfree(llinfo_nd6_zone, ln);
}

static void
nd6_llinfo_purge(struct rtentry *rt)
{
    struct llinfo_nd6 *ln = rt->rt_llinfo;

    RT_LOCK_ASSERT_HELD(rt);
    VERIFY(rt->rt_llinfo_purge == nd6_llinfo_purge && ln != NULL);

    if (ln->ln_llreach != NULL) {
        RT_CONVERT_LOCK(rt);
        ifnet_llreach_free(ln->ln_llreach);
        ln->ln_llreach = NULL;
    }
    ln->ln_lastused = 0;
}

static void
nd6_llinfo_get_ri(struct rtentry *rt, struct rt_reach_info *ri)
{
    struct llinfo_nd6 *ln = rt->rt_llinfo;
    struct if_llreach *lr = ln->ln_llreach;

    if (lr == NULL) {
        bzero(ri, sizeof(*ri));
        ri->ri_rssi = IFNET_RSSI_UNKNOWN;
        ri->ri_lqm = IFNET_LQM_THRESH_OFF;
        ri->ri_npm = IFNET_NPM_THRESH_UNKNOWN;
    } else {
        IFLR_LOCK(lr);
        /* Export to rt_reach_info structure */
        ifnet_lr2ri(lr, ri);
        /* Export ND6 send expiration (calendar) time */
        ri->ri_snd_expire =
            ifnet_llreach_up2calexp(lr, ln->ln_lastused);
        IFLR_UNLOCK(lr);
    }
}

static void
nd6_llinfo_get_iflri(struct rtentry *rt, struct ifnet_llreach_info *iflri)
{
    struct llinfo_nd6 *ln = rt->rt_llinfo;
    struct if_llreach *lr = ln->ln_llreach;

    if (lr == NULL) {
        bzero(iflri, sizeof(*iflri));
        iflri->iflri_rssi = IFNET_RSSI_UNKNOWN;
        iflri->iflri_lqm = IFNET_LQM_THRESH_OFF;
        iflri->iflri_npm = IFNET_NPM_THRESH_UNKNOWN;
    } else {
        IFLR_LOCK(lr);
        /* Export to ifnet_llreach_info structure */
        ifnet_lr2iflri(lr, iflri);
        /* Export ND6 send expiration (uptime) time */
        iflri->iflri_snd_expire =
            ifnet_llreach_up2upexp(lr, ln->ln_lastused);
        IFLR_UNLOCK(lr);
    }
}

static void
nd6_llinfo_refresh(struct rtentry *rt)
{
    struct llinfo_nd6 *ln = rt->rt_llinfo;
    uint64_t timenow = net_uptime();
    /*
     * Can't refresh permanent or static entries, or entries that
     * are not direct host entries.
     */
    if (!ln || ln->ln_expire == 0 ||
        (rt->rt_flags & RTF_STATIC) ||
        !(rt->rt_flags & RTF_LLINFO)) {
        return;
    }

    if ((ln->ln_state > ND6_LLINFO_INCOMPLETE) &&
        (ln->ln_state < ND6_LLINFO_PROBE)) {
        if (ln->ln_expire > timenow) {
            ln_setexpire(ln, timenow);
            ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_PROBE);
        }
    }
    return;
}

const char *
ndcache_state2str(short ndp_state)
{
    const char *ndp_state_str = "UNKNOWN";
    switch (ndp_state) {
    case ND6_LLINFO_PURGE:
        ndp_state_str = "ND6_LLINFO_PURGE";
        break;
    case ND6_LLINFO_NOSTATE:
        ndp_state_str = "ND6_LLINFO_NOSTATE";
        break;
    case ND6_LLINFO_INCOMPLETE:
        ndp_state_str = "ND6_LLINFO_INCOMPLETE";
        break;
    case ND6_LLINFO_REACHABLE:
        ndp_state_str = "ND6_LLINFO_REACHABLE";
        break;
    case ND6_LLINFO_STALE:
        ndp_state_str = "ND6_LLINFO_STALE";
        break;
    case ND6_LLINFO_DELAY:
        ndp_state_str = "ND6_LLINFO_DELAY";
        break;
    case ND6_LLINFO_PROBE:
        ndp_state_str = "ND6_LLINFO_PROBE";
        break;
    default:
        /* Init'd to UNKNOWN */
        break;
    }
    return ndp_state_str;
}

void
ln_setexpire(struct llinfo_nd6 *ln, uint64_t expiry)
{
    ln->ln_expire = expiry;
}

static uint64_t
ln_getexpire(struct llinfo_nd6 *ln)
{
    struct timeval caltime;
    uint64_t expiry;

    if (ln->ln_expire != 0) {
        struct rtentry *rt = ln->ln_rt;

        VERIFY(rt != NULL);
        /* account for system time change */
        getmicrotime(&caltime);

        rt->base_calendartime +=
            NET_CALCULATE_CLOCKSKEW(caltime,
            rt->base_calendartime, net_uptime(), rt->base_uptime);

        expiry = rt->base_calendartime +
            ln->ln_expire - rt->base_uptime;
    } else {
        expiry = 0;
    }
    return expiry;
}

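/*
 * Worked example (added for illustration; the numbers are
 * hypothetical): ln_expire is kept on the uptime clock, so
 * ln_getexpire() exports it as calendar time using the route's
 * snapshot of both clocks.  With base_uptime = 100,
 * base_calendartime = 1600000000 and ln_expire = 160:
 *
 *	expiry = 1600000000 + 160 - 100 = 1600000060
 *
 * i.e. 60 calendar seconds after the snapshot, after
 * base_calendartime has first been corrected by
 * NET_CALCULATE_CLOCKSKEW() in case the wall clock was changed.
 */
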
void
nd6_ifreset(struct ifnet *ifp)
{
    struct nd_ifinfo *ndi = ND_IFINFO(ifp);
    VERIFY(NULL != ndi);
    VERIFY(ndi->initialized);

    LCK_MTX_ASSERT(&ndi->lock, LCK_MTX_ASSERT_OWNED);
    ndi->linkmtu = ifp->if_mtu;
    ndi->chlim = IPV6_DEFHLIM;
    ndi->basereachable = REACHABLE_TIME;
    ndi->reachable = ND_COMPUTE_RTIME(ndi->basereachable);
    ndi->retrans = RETRANS_TIMER;
}

void
nd6_ifattach(struct ifnet *ifp)
{
    struct nd_ifinfo *ndi = ND_IFINFO(ifp);

    VERIFY(NULL != ndi);
    if (!ndi->initialized) {
        lck_mtx_init(&ndi->lock, nd_if_lock_grp, nd_if_lock_attr);
        ndi->flags = ND6_IFF_PERFORMNUD;
        ndi->flags |= ND6_IFF_DAD;
        ndi->initialized = TRUE;
    }

    lck_mtx_lock(&ndi->lock);

    if (!(ifp->if_flags & IFF_MULTICAST)) {
        ndi->flags |= ND6_IFF_IFDISABLED;
    }

    nd6_ifreset(ifp);
    lck_mtx_unlock(&ndi->lock);
    nd6_setmtu(ifp);

    nd6log0(info,
        "Reinit'd ND information for interface %s\n",
        if_name(ifp));
    return;
}

#if 0
/*
 * XXX Look more into this. Especially since we recycle ifnets and do delayed
 * cleanup
 */
void
nd6_ifdetach(struct nd_ifinfo *nd)
{
    /* XXX destroy nd's lock? */
    FREE(nd, M_IP6NDP);
}
#endif

void
nd6_setmtu(struct ifnet *ifp)
{
    struct nd_ifinfo *ndi = ND_IFINFO(ifp);
    u_int32_t oldmaxmtu, maxmtu;

    if ((NULL == ndi) || (FALSE == ndi->initialized)) {
        return;
    }

    lck_mtx_lock(&ndi->lock);
    oldmaxmtu = ndi->maxmtu;

    /*
     * The ND level maxmtu is somewhat redundant to the interface MTU
     * and is an implementation artifact of KAME.  Instead of hard-
     * limiting the maxmtu based on the interface type here, we simply
     * take the if_mtu value since SIOCSIFMTU would have taken care of
     * the sanity checks related to the maximum MTU allowed for the
     * interface (a value that is known only by the interface layer),
     * by sending the request down via ifnet_ioctl().  The use of the
     * ND level maxmtu and linkmtu are done via IN6_LINKMTU() which
     * does further checking against if_mtu.
     */
    maxmtu = ndi->maxmtu = ifp->if_mtu;

    /*
     * Decreasing the interface MTU under the IPv6 minimum MTU may
     * cause an undesirable situation.  We thus notify the operator of
     * the change explicitly.  The check for oldmaxmtu is necessary to
     * restrict the log to the case of changing the MTU, not
     * initializing it.
     */
    if (oldmaxmtu >= IPV6_MMTU && ndi->maxmtu < IPV6_MMTU) {
        log(LOG_NOTICE, "nd6_setmtu: "
            "new link MTU on %s (%u) is too small for IPv6\n",
            if_name(ifp), (uint32_t)ndi->maxmtu);
    }
    ndi->linkmtu = ifp->if_mtu;
    lck_mtx_unlock(&ndi->lock);

    /* also adjust in6_maxmtu if necessary. */
    if (maxmtu > in6_maxmtu) {
        in6_setmaxmtu();
    }
}

void
nd6_option_init(void *opt, int icmp6len, union nd_opts *ndopts)
{
    bzero(ndopts, sizeof(*ndopts));
    ndopts->nd_opts_search = (struct nd_opt_hdr *)opt;
    ndopts->nd_opts_last =
        (struct nd_opt_hdr *)(((u_char *)opt) + icmp6len);

    if (icmp6len == 0) {
        ndopts->nd_opts_done = 1;
        ndopts->nd_opts_search = NULL;
    }
}

/*
 * Take one ND option.
 */
struct nd_opt_hdr *
nd6_option(union nd_opts *ndopts)
{
    struct nd_opt_hdr *nd_opt;
    int olen;

    if (!ndopts) {
        panic("ndopts == NULL in nd6_option\n");
    }
    if (!ndopts->nd_opts_last) {
        panic("uninitialized ndopts in nd6_option\n");
    }
    if (!ndopts->nd_opts_search) {
        return NULL;
    }
    if (ndopts->nd_opts_done) {
        return NULL;
    }

    nd_opt = ndopts->nd_opts_search;

    /* make sure nd_opt_len is inside the buffer */
    if ((caddr_t)&nd_opt->nd_opt_len >= (caddr_t)ndopts->nd_opts_last) {
        bzero(ndopts, sizeof(*ndopts));
        return NULL;
    }

    olen = nd_opt->nd_opt_len << 3;
    if (olen == 0) {
        /*
         * Message validation requires that all included
         * options have a length that is greater than zero.
         */
        bzero(ndopts, sizeof(*ndopts));
        return NULL;
    }

    ndopts->nd_opts_search = (struct nd_opt_hdr *)((caddr_t)nd_opt + olen);
    if (ndopts->nd_opts_search > ndopts->nd_opts_last) {
        /* option overruns the end of buffer, invalid */
        bzero(ndopts, sizeof(*ndopts));
        return NULL;
    } else if (ndopts->nd_opts_search == ndopts->nd_opts_last) {
        /* reached the end of options chain */
        ndopts->nd_opts_done = 1;
        ndopts->nd_opts_search = NULL;
    }
    return nd_opt;
}

/*
 * Parse multiple ND options.
 * This function is much easier to use for ND routines that do not need
 * multiple options of the same type.
 */
int
nd6_options(union nd_opts *ndopts)
{
    struct nd_opt_hdr *nd_opt;
    int i = 0;

    if (ndopts == NULL) {
        panic("ndopts == NULL in nd6_options");
    }
    if (ndopts->nd_opts_last == NULL) {
        panic("uninitialized ndopts in nd6_options");
    }
    if (ndopts->nd_opts_search == NULL) {
        return 0;
    }

    while (1) {
        nd_opt = nd6_option(ndopts);
        if (nd_opt == NULL && ndopts->nd_opts_last == NULL) {
            /*
             * Message validation requires that all included
             * options have a length that is greater than zero.
             */
            icmp6stat.icp6s_nd_badopt++;
            bzero(ndopts, sizeof(*ndopts));
            return -1;
        }

        if (nd_opt == NULL) {
            goto skip1;
        }

        switch (nd_opt->nd_opt_type) {
        case ND_OPT_SOURCE_LINKADDR:
        case ND_OPT_TARGET_LINKADDR:
        case ND_OPT_MTU:
        case ND_OPT_REDIRECTED_HEADER:
        case ND_OPT_NONCE:
            if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) {
                nd6log(error,
                    "duplicated ND6 option found (type=%d)\n",
                    nd_opt->nd_opt_type);
                /* XXX bark? */
            } else {
                ndopts->nd_opt_array[nd_opt->nd_opt_type] =
                    nd_opt;
            }
            break;
        case ND_OPT_PREFIX_INFORMATION:
            if (ndopts->nd_opt_array[nd_opt->nd_opt_type] == 0) {
                ndopts->nd_opt_array[nd_opt->nd_opt_type] =
                    nd_opt;
            }
            ndopts->nd_opts_pi_end =
                (struct nd_opt_prefix_info *)nd_opt;
            break;
        case ND_OPT_RDNSS:
        case ND_OPT_DNSSL:
        case ND_OPT_CAPTIVE_PORTAL:
            /* ignore */
            break;
        case ND_OPT_ROUTE_INFO:
            if (nd6_process_rti) {
                if (ndopts->nd_opt_array[nd_opt->nd_opt_type] == 0) {
                    ndopts->nd_opt_array[nd_opt->nd_opt_type]
                        = nd_opt;
                }
                ndopts->nd_opts_rti_end =
                    (struct nd_opt_route_info *)nd_opt;
                break;
            }
            OS_FALLTHROUGH;
        default:
            /*
             * Unknown options must be silently ignored,
             * to accommodate future extension to the protocol.
             */
            nd6log(debug,
                "nd6_options: unsupported option %d - "
                "option ignored\n", nd_opt->nd_opt_type);
        }

skip1:
        i++;
        if (i > nd6_maxndopt) {
            icmp6stat.icp6s_nd_toomanyopt++;
            nd6log(info, "too many loop in nd opt\n");
            break;
        }

        if (ndopts->nd_opts_done) {
            break;
        }
    }

    return 0;
}

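/*
 * Hedged usage sketch (added for exposition; not part of this file):
 * callers such as the NS/NA/RA input paths first initialize a cursor
 * over the ICMPv6 option block, then parse everything in one pass and
 * pick out the options they care about.  nd_opt_start and icmp6len
 * stand in for the option pointer and length the real callers derive
 * from the received packet:
 *
 *	union nd_opts ndopts;
 *
 *	nd6_option_init(nd_opt_start, icmp6len, &ndopts);
 *	if (nd6_options(&ndopts) < 0) {
 *		// malformed option block; treat the message as bad
 *		return;
 *	}
 *	if (ndopts.nd_opt_array[ND_OPT_SOURCE_LINKADDR] != NULL) {
 *		// a source link-layer address option was present
 *	}
 */
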
struct nd6svc_arg {
    int draining;
    uint32_t killed;
    uint32_t aging_lazy;
    uint32_t aging;
    uint32_t sticky;
    uint32_t found;
};


static void
nd6_service_neighbor_cache(struct nd6svc_arg *ap, uint64_t timenow)
{
    struct llinfo_nd6 *ln;
    struct ifnet *ifp = NULL;
    boolean_t send_nc_failure_kev = FALSE;
    struct radix_node_head *rnh = rt_tables[AF_INET6];

    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
again:
    /*
     * send_nc_failure_kev gets set when a default router's IPv6 address
     * can't be resolved.
     * That can happen either:
     * 1. When the entry has resolved once, but can't be
     * resolved later and the neighbor cache entry for the gateway is
     * deleted after the maximum number of probe attempts.
     *
     * 2. When the entry is in ND6_LLINFO_INCOMPLETE but cannot be
     * resolved after the maximum number of neighbor address resolution
     * attempts.
     *
     * Both set send_nc_failure_kev to true.  ifp is also set to the
     * previous neighbor cache entry's route's ifp.
     * Once we are done sending the notification, set send_nc_failure_kev
     * to false to stop sending false notifications for non-default-router
     * neighbors.
     *
     * We may want to send more information, like the gateway's IP that
     * could not be resolved; however, right now we do not install more
     * than one default route per interface in the routing table.
     */
    if (send_nc_failure_kev && ifp != NULL &&
        ifp->if_addrlen == IF_LLREACH_MAXLEN) {
        struct kev_msg ev_msg;
        struct kev_nd6_ndfailure nd6_ndfailure;
        bzero(&ev_msg, sizeof(ev_msg));
        bzero(&nd6_ndfailure, sizeof(nd6_ndfailure));
        ev_msg.vendor_code = KEV_VENDOR_APPLE;
        ev_msg.kev_class = KEV_NETWORK_CLASS;
        ev_msg.kev_subclass = KEV_ND6_SUBCLASS;
        ev_msg.event_code = KEV_ND6_NDFAILURE;

        nd6_ndfailure.link_data.if_family = ifp->if_family;
        nd6_ndfailure.link_data.if_unit = ifp->if_unit;
        strlcpy(nd6_ndfailure.link_data.if_name,
            ifp->if_name,
            sizeof(nd6_ndfailure.link_data.if_name));
        ev_msg.dv[0].data_ptr = &nd6_ndfailure;
        ev_msg.dv[0].data_length =
            sizeof(nd6_ndfailure);
        dlil_post_complete_msg(NULL, &ev_msg);
    }

    send_nc_failure_kev = FALSE;
    ifp = NULL;
    /*
     * The global list llinfo_nd6 is modified by nd6_rtrequest() and is
     * therefore protected by rnh_lock.  For obvious reasons, we cannot
     * hold rnh_lock across calls that might lead to code paths which
     * attempt to acquire rnh_lock, else we deadlock.  Hence for such
     * cases we drop rt_lock and rnh_lock, make the calls, and repeat the
     * loop.  To ensure that we don't process the same entry more than
     * once in a single timeout, we mark the "already-seen" entries with
     * the ND6_LNF_TIMER_SKIP flag.  At the end of the loop, we do a
     * second pass through the entries and clear the flag so they can be
     * processed during the next timeout.
     */
    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

    ln = llinfo_nd6.ln_next;
    while (ln != NULL && ln != &llinfo_nd6) {
        struct rtentry *rt;
        struct sockaddr_in6 *dst;
        struct llinfo_nd6 *next;
        u_int32_t retrans, flags;
        struct nd_ifinfo *ndi = NULL;
        boolean_t is_router = FALSE;

        /* ln_next/prev/rt is protected by rnh_lock */
        next = ln->ln_next;
        rt = ln->ln_rt;
        RT_LOCK(rt);

        /* We've seen this already; skip it */
        if (ln->ln_flags & ND6_LNF_TIMER_SKIP) {
            RT_UNLOCK(rt);
            ln = next;
            continue;
        }
        ap->found++;

        /* rt->rt_ifp should never be NULL */
        if ((ifp = rt->rt_ifp) == NULL) {
            panic("%s: ln(%p) rt(%p) rt_ifp == NULL", __func__,
                ln, rt);
            /* NOTREACHED */
        }

        /* rt_llinfo must always be equal to ln */
        if ((struct llinfo_nd6 *)rt->rt_llinfo != ln) {
            panic("%s: rt_llinfo(%p) is not equal to ln(%p)",
                __func__, rt->rt_llinfo, ln);
            /* NOTREACHED */
        }

        /* rt_key should never be NULL */
        dst = SIN6(rt_key(rt));
        if (dst == NULL) {
            panic("%s: rt(%p) key is NULL ln(%p)", __func__,
                rt, ln);
            /* NOTREACHED */
        }

        /* Set the flag in case we jump to "again" */
        ln->ln_flags |= ND6_LNF_TIMER_SKIP;

        if (ln->ln_expire == 0 || (rt->rt_flags & RTF_STATIC)) {
            ap->sticky++;
        } else if (ap->draining && (rt->rt_refcnt == 0)) {
            /*
             * If we are draining, immediately purge non-static
             * entries without outstanding route refcnt.
             */
            if (ln->ln_state > ND6_LLINFO_INCOMPLETE) {
                ND6_CACHE_STATE_TRANSITION(ln, (short)ND6_LLINFO_STALE);
            } else {
                ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_PURGE);
            }
            ln_setexpire(ln, timenow);
        }

        /*
         * If the entry has not expired, skip it.  Take note of the
         * state, as entries that are in the STALE state are simply
         * waiting to be garbage collected, in which case we can
         * relax the callout scheduling (use nd6_prune_lazy).
         */
        if (ln->ln_expire > timenow) {
            switch (ln->ln_state) {
            case ND6_LLINFO_STALE:
                ap->aging_lazy++;
                break;
            default:
                ap->aging++;
                break;
            }
            RT_UNLOCK(rt);
            ln = next;
            continue;
        }

        ndi = ND_IFINFO(ifp);
        VERIFY(ndi->initialized);
        retrans = ndi->retrans;
        flags = ndi->flags;

        RT_LOCK_ASSERT_HELD(rt);
        is_router = (rt->rt_flags & RTF_ROUTER) ? TRUE : FALSE;

        switch (ln->ln_state) {
        case ND6_LLINFO_INCOMPLETE:
            if (ln->ln_asked < nd6_mmaxtries) {
                struct ifnet *exclifp = ln->ln_exclifp;
                ln->ln_asked++;
                ln_setexpire(ln, timenow + retrans / 1000);
                RT_ADDREF_LOCKED(rt);
                RT_UNLOCK(rt);
                lck_mtx_unlock(rnh_lock);
                if (ip6_forwarding) {
                    nd6_prproxy_ns_output(ifp, exclifp,
                        NULL, &dst->sin6_addr, ln);
                } else {
                    nd6_ns_output(ifp, NULL,
                        &dst->sin6_addr, ln, NULL);
                }
                RT_REMREF(rt);
                ap->aging++;
                lck_mtx_lock(rnh_lock);
            } else {
                struct mbuf *m = ln->ln_hold;
                ln->ln_hold = NULL;
                send_nc_failure_kev = is_router;
                if (m != NULL) {
                    RT_ADDREF_LOCKED(rt);
                    RT_UNLOCK(rt);
                    lck_mtx_unlock(rnh_lock);

                    struct mbuf *mnext;
                    while (m) {
                        mnext = m->m_nextpkt;
                        m->m_nextpkt = NULL;
                        m->m_pkthdr.rcvif = ifp;
                        icmp6_error_flag(m, ICMP6_DST_UNREACH,
                            ICMP6_DST_UNREACH_ADDR, 0, 0);
                        m = mnext;
                    }
                } else {
                    RT_ADDREF_LOCKED(rt);
                    RT_UNLOCK(rt);
                    lck_mtx_unlock(rnh_lock);
                }

                /*
                 * Enqueue work item to invoke callback for
                 * this route entry
                 */
                route_event_enqueue_nwk_wq_entry(rt, NULL,
                    ROUTE_LLENTRY_UNREACH, NULL, FALSE);
                nd6_free(rt);
                ap->killed++;
                lck_mtx_lock(rnh_lock);
                /*
                 * nd6_free above would flush out the routing table of
                 * any cloned routes with the same next-hop.
                 * Walk the tree anyway, as there could be static routes
                 * left.
                 *
                 * We also already have a reference to rt that gets freed
                 * right after the block below executes.  Don't need an
                 * extra reference on rt here.
                 */
                if (is_router) {
                    struct route_event rt_ev;
                    route_event_init(&rt_ev, rt, NULL, ROUTE_LLENTRY_UNREACH);
                    (void) rnh->rnh_walktree(rnh, route_event_walktree, (void *)&rt_ev);
                }
                rtfree_locked(rt);
            }
            LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
            goto again;

        case ND6_LLINFO_REACHABLE:
            if (ln->ln_expire != 0) {
                ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_STALE);
                ln_setexpire(ln, timenow + nd6_gctimer);
                ap->aging_lazy++;
                /*
                 * Enqueue work item to invoke callback for
                 * this route entry
                 */
                route_event_enqueue_nwk_wq_entry(rt, NULL,
                    ROUTE_LLENTRY_STALE, NULL, TRUE);

                RT_ADDREF_LOCKED(rt);
                RT_UNLOCK(rt);
                if (is_router) {
                    struct route_event rt_ev;
                    route_event_init(&rt_ev, rt, NULL, ROUTE_LLENTRY_STALE);
                    (void) rnh->rnh_walktree(rnh, route_event_walktree, (void *)&rt_ev);
                }
                rtfree_locked(rt);
            } else {
                RT_UNLOCK(rt);
            }
            break;

        case ND6_LLINFO_STALE:
        case ND6_LLINFO_PURGE:
            /* Garbage Collection (RFC 4861 5.3) */
            if (ln->ln_expire != 0) {
                RT_ADDREF_LOCKED(rt);
                RT_UNLOCK(rt);
                lck_mtx_unlock(rnh_lock);
                nd6_free(rt);
                ap->killed++;
                lck_mtx_lock(rnh_lock);
                rtfree_locked(rt);
                goto again;
            } else {
                RT_UNLOCK(rt);
            }
            break;

        case ND6_LLINFO_DELAY:
            if ((flags & ND6_IFF_PERFORMNUD) != 0) {
                /* We need NUD */
                ln->ln_asked = 1;
                ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_PROBE);
                ln_setexpire(ln, timenow + retrans / 1000);
                RT_ADDREF_LOCKED(rt);
                RT_UNLOCK(rt);
                lck_mtx_unlock(rnh_lock);
                nd6_ns_output(ifp, &dst->sin6_addr,
                    &dst->sin6_addr, ln, NULL);
                RT_REMREF(rt);
                ap->aging++;
                lck_mtx_lock(rnh_lock);
                goto again;
            }
            ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_STALE); /* XXX */
            ln_setexpire(ln, timenow + nd6_gctimer);
            RT_UNLOCK(rt);
            ap->aging_lazy++;
            break;

        case ND6_LLINFO_PROBE:
            if (ln->ln_asked < nd6_umaxtries) {
                ln->ln_asked++;
                ln_setexpire(ln, timenow + retrans / 1000);
                RT_ADDREF_LOCKED(rt);
                RT_UNLOCK(rt);
                lck_mtx_unlock(rnh_lock);
                nd6_ns_output(ifp, &dst->sin6_addr,
                    &dst->sin6_addr, ln, NULL);
                RT_REMREF(rt);
                ap->aging++;
                lck_mtx_lock(rnh_lock);
            } else {
                is_router = (rt->rt_flags & RTF_ROUTER) ? TRUE : FALSE;
                send_nc_failure_kev = is_router;
                RT_ADDREF_LOCKED(rt);
                RT_UNLOCK(rt);
                lck_mtx_unlock(rnh_lock);
                nd6_free(rt);
                ap->killed++;

                /*
                 * Enqueue work item to invoke callback for
                 * this route entry
                 */
                route_event_enqueue_nwk_wq_entry(rt, NULL,
                    ROUTE_LLENTRY_UNREACH, NULL, FALSE);

                lck_mtx_lock(rnh_lock);
                /*
                 * nd6_free above would flush out the routing table of
                 * any cloned routes with the same next-hop.
                 * Walk the tree anyway, as there could be static routes
                 * left.
                 *
                 * We also already have a reference to rt that gets freed
                 * right after the block below executes.  Don't need an
                 * extra reference on rt here.
                 */
                if (is_router) {
                    struct route_event rt_ev;
                    route_event_init(&rt_ev, rt, NULL, ROUTE_LLENTRY_UNREACH);
                    (void) rnh->rnh_walktree(rnh,
                        route_event_walktree, (void *)&rt_ev);
                }
                rtfree_locked(rt);
            }
            LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
            goto again;

        default:
            RT_UNLOCK(rt);
            break;
        }
        ln = next;
    }
    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

    /* Now clear the flag from all entries */
    ln = llinfo_nd6.ln_next;
    while (ln != NULL && ln != &llinfo_nd6) {
        struct rtentry *rt = ln->ln_rt;
        struct llinfo_nd6 *next = ln->ln_next;

        RT_LOCK_SPIN(rt);
        if (ln->ln_flags & ND6_LNF_TIMER_SKIP) {
            ln->ln_flags &= ~ND6_LNF_TIMER_SKIP;
        }
        RT_UNLOCK(rt);
        ln = next;
    }
}

static void
nd6_service_expired_default_router(struct nd6svc_arg *ap, uint64_t timenow)
{
    struct nd_defrouter *dr = NULL;
    struct nd_defrouter *ndr = NULL;
    struct nd_drhead nd_defrouter_tmp;
    /* expire default router list */
    TAILQ_INIT(&nd_defrouter_tmp);

    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
    lck_mtx_lock(nd6_mutex);

    TAILQ_FOREACH_SAFE(dr, &nd_defrouter_list, dr_entry, ndr) {
        ap->found++;
        if (dr->expire != 0 && dr->expire < timenow) {
            VERIFY(dr->ifp != NULL);
            in6_ifstat_inc(dr->ifp, ifs6_defrtr_expiry_cnt);
            if ((dr->stateflags & NDDRF_INELIGIBLE) == 0) {
                in6_event_enqueue_nwk_wq_entry(IN6_NDP_RTR_EXPIRY, dr->ifp,
                    &dr->rtaddr, dr->rtlifetime);
            }
            if (dr->ifp != NULL &&
                dr->ifp->if_type == IFT_CELLULAR) {
                /*
                 * Some buggy cellular gateways may not send
                 * periodic router advertisements.
                 * Or they may send them with a router lifetime
                 * value that is less than the configured Max and
                 * Min Router Advertisement intervals.
                 * To top that off, an idle device may not wake up
                 * when a periodic RA is received on the cellular
                 * interface.
                 * We could send an RS on every wake, but RFC
                 * 4861 precludes that.
                 * The addresses have infinite lifetimes
                 * and are tied to the lifetime of the bearer,
                 * so keeping the addresses and just getting rid of
                 * the router does not help us anyway.
                 * If there's network renumbering, a lifetime with
                 * value 0 would remove the default router.
                 * Also, it will get deleted as part of the purge
                 * when the PDP context is torn down and configured
                 * again.
                 * For that reason, do not expire the default router
                 * learned on a cellular interface. Ever.
                 */
                dr->expire += dr->rtlifetime;
                nd6log2(debug,
                    "%s: Refreshing expired default router entry "
                    "%s for interface %s\n", __func__,
                    ip6_sprintf(&dr->rtaddr), if_name(dr->ifp));
            } else {
                ap->killed++;
                /*
                 * Remove the entry from the default router list
                 * and add it to the temp list.
                 * nd_defrouter_tmp will be a local temporary
                 * list as no one else can get the same
                 * removed entry once it is removed from the default
                 * router list.
                 * Remove the reference after calling defrtrlist_del.
                 */
                TAILQ_REMOVE(&nd_defrouter_list, dr, dr_entry);
                TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry);
            }
        } else {
            if (dr->expire == 0 || (dr->stateflags & NDDRF_STATIC)) {
                ap->sticky++;
            } else {
                ap->aging_lazy++;
            }
        }
    }

    /*
     * Keep the following separate from the above
     * iteration of nd_defrouter because it's not safe
     * to call defrtrlist_del while iterating the global default
     * router list.  The global list has to be traversed
     * while holding nd6_mutex throughout.
     *
     * The following call to defrtrlist_del should be
     * safe as we are iterating a local list of
     * default routers.
     */
    TAILQ_FOREACH_SAFE(dr, &nd_defrouter_tmp, dr_entry, ndr) {
        TAILQ_REMOVE(&nd_defrouter_tmp, dr, dr_entry);
        defrtrlist_del(dr, NULL);
        NDDR_REMREF(dr);        /* remove list reference */
    }

    /* XXX TBD: Also iterate through RTI router lists */
    /*
     * Also check if default router selection needs to be triggered
     * for the default interface, to avoid an issue with co-existence
     * of static un-scoped default route configuration and default
     * router discovery/selection.
     */
    if (trigger_v6_defrtr_select) {
        defrouter_select(NULL, NULL);
        trigger_v6_defrtr_select = FALSE;
    }
    lck_mtx_unlock(nd6_mutex);
}

static void
nd6_service_expired_route_info(struct nd6svc_arg *ap, uint64_t timenow)
{
    struct nd_route_info *rti = NULL;
    struct nd_route_info *rti_next = NULL;

    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
    lck_mtx_lock(nd6_mutex);
    nd6_rti_list_wait(__func__);

    TAILQ_FOREACH_SAFE(rti, &nd_rti_list, nd_rti_entry, rti_next) {
        struct nd_defrouter *dr = NULL;
        struct nd_defrouter *ndr = NULL;
        struct nd_route_info rti_tmp = {};

        rti_tmp.nd_rti_prefix = rti->nd_rti_prefix;
        rti_tmp.nd_rti_prefixlen = rti->nd_rti_prefixlen;
        TAILQ_INIT(&rti_tmp.nd_rti_router_list);

        TAILQ_FOREACH_SAFE(dr, &rti->nd_rti_router_list, dr_entry, ndr) {
            ap->found++;
            if (dr->expire != 0 && dr->expire < timenow) {
                VERIFY(dr->ifp != NULL);
                if (dr->ifp != NULL &&
                    dr->ifp->if_type == IFT_CELLULAR) {
                    /*
                     * Don't expire these routes over cellular.
                     * XXX Should we change this for non-default routes?
                     */
                    dr->expire += dr->rtlifetime;
                    nd6log2(debug,
                        "%s: Refreshing expired default router entry "
                        "%s for interface %s\n", __func__,
                        ip6_sprintf(&dr->rtaddr), if_name(dr->ifp));
                } else {
                    ap->killed++;
                    /*
                     * Remove the entry from the rti entry's router
                     * list and add it to the temp list.
                     * Remove the reference after calling
                     * defrtrlist_del.
                     */
                    TAILQ_REMOVE(&rti->nd_rti_router_list, dr, dr_entry);
                    TAILQ_INSERT_TAIL(&rti_tmp.nd_rti_router_list, dr, dr_entry);
                }
            } else {
                if (dr->expire == 0 || (dr->stateflags & NDDRF_STATIC)) {
                    ap->sticky++;
                } else {
                    ap->aging_lazy++;
                }
            }
        }

        /*
         * Keep the following separate from the above
         * iteration of nd_defrouter because it's not safe
         * to call defrtrlist_del while iterating the global default
         * router list.  The global list has to be traversed
         * while holding nd6_mutex throughout.
         *
         * The following call to defrtrlist_del should be
         * safe as we are iterating a local list of
         * default routers.
         */
        TAILQ_FOREACH_SAFE(dr, &rti_tmp.nd_rti_router_list, dr_entry, ndr) {
            TAILQ_REMOVE(&rti_tmp.nd_rti_router_list, dr, dr_entry);
            defrtrlist_del(dr, &rti->nd_rti_router_list);
            NDDR_REMREF(dr);        /* remove list reference */
        }

        /*
         * The above may have removed an entry from the default router
         * list.  If it did and the list is now empty, remove the rti
         * as well.
         */
        if (TAILQ_EMPTY(&rti->nd_rti_router_list)) {
            TAILQ_REMOVE(&nd_rti_list, rti, nd_rti_entry);
            ndrti_free(rti);
        }
    }

    LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED);
    nd6_rti_list_signal_done();
    lck_mtx_unlock(nd6_mutex);
}


/*
 * @function nd6_handle_duplicated_ip6_addr
 *
 * @brief
 * Handle a duplicated IPv6 secured non-temporary address
 *
 * @discussion
 * If the collision count hasn't been exceeded, removes the old
 * conflicting IPv6 address, increments the collision count,
 * and allocates a new address.
 *
 * Returns TRUE if the old address was removed, and the locks
 * (in6_ifaddr_rwlock, ia6->ia_ifa) were unlocked.
 */
static boolean_t
nd6_handle_duplicated_ip6_addr(struct in6_ifaddr *ia6)
{
    uint8_t collision_count;
    int error = 0;
    struct in6_ifaddr *new_ia6;
    struct nd_prefix *pr;
    struct ifnet *ifp;

    LCK_RW_ASSERT(&in6_ifaddr_rwlock, LCK_RW_ASSERT_EXCLUSIVE);
    IFA_LOCK_ASSERT_HELD(&ia6->ia_ifa);

    /* don't retry too many times */
    collision_count = ia6->ia6_cga_collision_count;
    if (collision_count >= ip6_cga_conflict_retries) {
        return FALSE;
    }

    /* need the prefix to allocate a new address */
    pr = ia6->ia6_ndpr;
    if (pr == NULL) {
        return FALSE;
    }
    NDPR_ADDREF(pr);
    ifp = pr->ndpr_ifp;
    log(LOG_DEBUG,
        "%s: %s duplicated (collision count %d)\n",
        ifp->if_xname, ip6_sprintf(&ia6->ia_addr.sin6_addr),
        collision_count);

    /* remove the old address */
    IFA_UNLOCK(&ia6->ia_ifa);
    lck_rw_done(&in6_ifaddr_rwlock);
    in6_purgeaddr(&ia6->ia_ifa);

    /* allocate a new address with new collision count */
    collision_count++;
    new_ia6 = in6_pfx_newpersistaddr(pr, 1, &error, FALSE, collision_count);
    if (new_ia6 != NULL) {
        log(LOG_DEBUG,
            "%s: %s new (collision count %d)\n",
            ifp->if_xname, ip6_sprintf(&new_ia6->ia_addr.sin6_addr),
            collision_count);
        IFA_LOCK(&new_ia6->ia_ifa);
        NDPR_LOCK(pr);
        new_ia6->ia6_ndpr = pr;
        NDPR_ADDREF(pr);        /* for addr reference */
        pr->ndpr_addrcnt++;
        VERIFY(pr->ndpr_addrcnt != 0);
        NDPR_UNLOCK(pr);
        IFA_UNLOCK(&new_ia6->ia_ifa);
        IFA_REMREF(&new_ia6->ia_ifa);
    } else {
        log(LOG_ERR, "%s: in6_pfx_newpersistaddr failed %d\n",
            __func__, error);
    }

    /* release extra prefix reference */
    NDPR_REMREF(pr);
    return TRUE;
}

static boolean_t
secured_address_is_duplicated(int flags)
{
#define _IN6_IFF_DUPLICATED_AUTOCONF_SECURED \
    (IN6_IFF_DUPLICATED | IN6_IFF_AUTOCONF | IN6_IFF_SECURED)
    return (flags & _IN6_IFF_DUPLICATED_AUTOCONF_SECURED) ==
        _IN6_IFF_DUPLICATED_AUTOCONF_SECURED;
}

static void
nd6_service_ip6_addr(struct nd6svc_arg *ap, uint64_t timenow)
{
    struct in6_ifaddr *ia6 = NULL;
    struct in6_ifaddr *nia6 = NULL;
    /*
     * Expire interface addresses.
     * In the past, the loop was inside prefix expiry processing.
     * However, from a stricter spec-conformance standpoint, we should
     * rather separate address lifetimes and prefix lifetimes.
     */

addrloop:
    lck_rw_lock_exclusive(&in6_ifaddr_rwlock);

    TAILQ_FOREACH_SAFE(ia6, &in6_ifaddrhead, ia6_link, nia6) {
        int oldflags = ia6->ia6_flags;
        ap->found++;
        IFA_LOCK(&ia6->ia_ifa);
        /*
         * Extra reference for ourselves; it's a no-op if
         * we don't have to regenerate a temporary address;
         * otherwise it protects the address from going
         * away since we drop in6_ifaddr_rwlock below.
         */
        IFA_ADDREF_LOCKED(&ia6->ia_ifa);

        /* check for duplicated secured address */
        if (secured_address_is_duplicated(ia6->ia6_flags) &&
            nd6_handle_duplicated_ip6_addr(ia6)) {
            /*
             * nd6_handle_duplicated_ip6_addr() unlocked
             * (in6_ifaddr_rwlock, ia6->ia_ifa) already.
             * Still need to release the extra reference on
             * ia6->ia_ifa taken above.
             */
            IFA_REMREF(&ia6->ia_ifa);
            goto addrloop;
        }

        /* check address lifetime */
        if (IFA6_IS_INVALID(ia6, timenow)) {
            /*
             * If the expiring address is temporary, try
             * regenerating a new one.  This would be useful when
             * we suspended a laptop PC, then turned it on after a
             * period that could invalidate all temporary
             * addresses.  Although we may have to restart the
             * loop (see below), it must be after purging the
             * address.  Otherwise, we'd see an infinite loop of
             * regeneration.
             */
            if (ip6_use_tempaddr &&
                (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0) {
                /*
                 * NOTE: We have to drop the lock here
                 * because regen_tmpaddr() eventually calls
                 * in6_update_ifa(), which must take the lock
                 * and would otherwise cause a hang.  This is
                 * safe because the goto addrloop leads to a
                 * re-evaluation of the in6_ifaddrs list.
                 */
                IFA_UNLOCK(&ia6->ia_ifa);
                lck_rw_done(&in6_ifaddr_rwlock);
                (void) regen_tmpaddr(ia6);
            } else {
                IFA_UNLOCK(&ia6->ia_ifa);
                lck_rw_done(&in6_ifaddr_rwlock);
            }

            /*
             * Purging the address would have caused
             * in6_ifaddr_rwlock to be dropped and reacquired;
             * therefore search again from the beginning
             * of the in6_ifaddrs list.
             */
            in6_purgeaddr(&ia6->ia_ifa);
            ap->killed++;

            if ((ia6->ia6_flags & IN6_IFF_TEMPORARY) == 0) {
                in6_ifstat_inc(ia6->ia_ifa.ifa_ifp, ifs6_addr_expiry_cnt);
                in6_event_enqueue_nwk_wq_entry(IN6_NDP_ADDR_EXPIRY,
                    ia6->ia_ifa.ifa_ifp, &ia6->ia_addr.sin6_addr,
                    0);
            }
            /* Release extra reference taken above */
            IFA_REMREF(&ia6->ia_ifa);
            goto addrloop;
        }
        /*
         * The lazy timer runs every nd6_prune_lazy seconds with at
         * most "2 * nd6_prune_lazy - 1" leeway.  We consider the worst
         * case here and make sure we schedule the regular timer if an
         * interface address is about to expire.
         */
        if (IFA6_IS_INVALID(ia6, timenow + 3 * nd6_prune_lazy)) {
            ap->aging++;
        } else {
            ap->aging_lazy++;
        }
        IFA_LOCK_ASSERT_HELD(&ia6->ia_ifa);
        if (IFA6_IS_DEPRECATED(ia6, timenow)) {
            ia6->ia6_flags |= IN6_IFF_DEPRECATED;

            if ((oldflags & IN6_IFF_DEPRECATED) == 0) {
                /*
                 * Only enqueue the Deprecated event when the address
                 * just becomes deprecated.
                 * Keep it limited to the stable address as it is
                 * common for older temporary addresses to get
                 * deprecated while we generate new ones.
                 */
                if ((ia6->ia6_flags & IN6_IFF_TEMPORARY) == 0) {
                    in6_event_enqueue_nwk_wq_entry(IN6_ADDR_MARKED_DEPRECATED,
                        ia6->ia_ifa.ifa_ifp, &ia6->ia_addr.sin6_addr,
                        0);
                }
            }
            /*
             * If a temporary address has just become deprecated,
             * regenerate a new one if possible.
             */
            if (ip6_use_tempaddr &&
                (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0 &&
                (oldflags & IN6_IFF_DEPRECATED) == 0) {
                /* see NOTE above */
                IFA_UNLOCK(&ia6->ia_ifa);
                lck_rw_done(&in6_ifaddr_rwlock);
                if (regen_tmpaddr(ia6) == 0) {
                    /*
                     * A new temporary address is
                     * generated.
                     * XXX: this means the address chain
                     * has changed while we are still in
                     * the loop.  Although the change
                     * would not cause disaster (because
                     * it's not a deletion but an
                     * addition), we'd rather restart the
                     * loop just for safety.  Or does this
                     * significantly reduce performance??
                     */
                    /* Release extra reference */
                    IFA_REMREF(&ia6->ia_ifa);
                    goto addrloop;
                }
                lck_rw_lock_exclusive(&in6_ifaddr_rwlock);
            } else {
                IFA_UNLOCK(&ia6->ia_ifa);
            }
        } else {
            /*
             * A new RA might have made a deprecated address
             * preferred.
             */
            ia6->ia6_flags &= ~IN6_IFF_DEPRECATED;
            IFA_UNLOCK(&ia6->ia_ifa);
        }
        LCK_RW_ASSERT(&in6_ifaddr_rwlock, LCK_RW_ASSERT_EXCLUSIVE);
        /* Release extra reference taken above */
        IFA_REMREF(&ia6->ia_ifa);
    }
    lck_rw_done(&in6_ifaddr_rwlock);
}

static void
nd6_service_expired_prefix(struct nd6svc_arg *ap, uint64_t timenow)
{
    struct nd_prefix *pr = NULL;

    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
    lck_mtx_lock(nd6_mutex);
    /* expire prefix list */
    pr = nd_prefix.lh_first;
    while (pr != NULL) {
        ap->found++;
        /*
         * check prefix lifetime.
         * since pltime is just for autoconf, pltime processing for
         * prefix is not necessary.
         */
        NDPR_LOCK(pr);
        if (pr->ndpr_stateflags & NDPRF_PROCESSED_SERVICE ||
            pr->ndpr_stateflags & NDPRF_DEFUNCT) {
            pr->ndpr_stateflags |= NDPRF_PROCESSED_SERVICE;
            NDPR_UNLOCK(pr);
            pr = pr->ndpr_next;
            continue;
        }
        if (pr->ndpr_expire != 0 && pr->ndpr_expire < timenow) {
            /*
             * address expiration and prefix expiration are
             * separate.  NEVER perform in6_purgeaddr here.
             */
            pr->ndpr_stateflags |= NDPRF_PROCESSED_SERVICE;
            NDPR_ADDREF(pr);
            prelist_remove(pr);
            NDPR_UNLOCK(pr);

            in6_ifstat_inc(pr->ndpr_ifp, ifs6_pfx_expiry_cnt);
            in6_event_enqueue_nwk_wq_entry(IN6_NDP_PFX_EXPIRY,
                pr->ndpr_ifp, &pr->ndpr_prefix.sin6_addr,
                0);
            NDPR_REMREF(pr);
            pfxlist_onlink_check();
            pr = nd_prefix.lh_first;
            ap->killed++;
        } else {
            if (pr->ndpr_expire == 0 ||
                (pr->ndpr_stateflags & NDPRF_STATIC)) {
                ap->sticky++;
            } else {
                ap->aging_lazy++;
            }
            pr->ndpr_stateflags |= NDPRF_PROCESSED_SERVICE;
            NDPR_UNLOCK(pr);
            pr = pr->ndpr_next;
        }
    }
    LIST_FOREACH(pr, &nd_prefix, ndpr_entry) {
        NDPR_LOCK(pr);
        pr->ndpr_stateflags &= ~NDPRF_PROCESSED_SERVICE;
        NDPR_UNLOCK(pr);
    }
    lck_mtx_unlock(nd6_mutex);
}


/*
 * ND6 service routine to expire the default route list and prefix list
 */
static void
nd6_service(void *arg)
{
    struct nd6svc_arg *ap = arg;
    uint64_t timenow;

    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
    /*
     * Since we may drop rnh_lock and nd6_mutex below, we want
     * to run this entire operation single-threaded.
     */
    while (nd6_service_busy) {
        nd6log2(debug, "%s: %s is blocked by %d waiters\n",
            __func__, ap->draining ? "drainer" : "timer",
            nd6_service_waiters);
        nd6_service_waiters++;
        (void) msleep(nd6_service_wc, rnh_lock, (PZERO - 1),
            __func__, NULL);
        LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
    }

    /* We are busy now; tell everyone else to go away */
    nd6_service_busy = TRUE;
    net_update_uptime();
    timenow = net_uptime();

    /* Iterate and service neighbor cache entries */
    nd6_service_neighbor_cache(ap, timenow);

    /*
     * There is a lock ordering requirement, and rnh_lock
     * has to be released before acquiring nd6_mutex.
     */
    lck_mtx_unlock(rnh_lock);

    /* Iterate and service expired default routers */
    nd6_service_expired_default_router(ap, timenow);
    /* Iterate and service expired route information entries */
    nd6_service_expired_route_info(ap, timenow);

    /* Iterate and service expired/duplicated IPv6 addresses */
    nd6_service_ip6_addr(ap, timenow);

    /* Iterate and service expired IPv6 prefixes */
    nd6_service_expired_prefix(ap, timenow);

    lck_mtx_lock(rnh_lock);
    /* We're done; let others enter */
    nd6_service_busy = FALSE;
    if (nd6_service_waiters > 0) {
        nd6_service_waiters = 0;
        wakeup(nd6_service_wc);
    }
}

static int nd6_need_draining = 0;

void
nd6_drain(void *arg)
{
#pragma unused(arg)
    nd6log2(debug, "%s: draining ND6 entries\n", __func__);

    lck_mtx_lock(rnh_lock);
    nd6_need_draining = 1;
    nd6_sched_timeout(NULL, NULL);
    lck_mtx_unlock(rnh_lock);
}

/*
 * We use the ``arg'' variable to decide whether or not the timer we're
 * running is the fast timer.  We do this to reset the nd6_fast_timer_on
 * variable so that later we don't end up ignoring a ``fast timer''
 * request if the 5 second timer is running (see nd6_sched_timeout).
 */
static void
nd6_timeout(void *arg)
{
    struct nd6svc_arg sarg;
    uint32_t buf;

    lck_mtx_lock(rnh_lock);
    bzero(&sarg, sizeof(sarg));
    if (nd6_need_draining != 0) {
        nd6_need_draining = 0;
        sarg.draining = 1;
    }
    nd6_service(&sarg);
    nd6log2(debug, "%s: found %u, aging_lazy %u, aging %u, "
        "sticky %u, killed %u\n", __func__, sarg.found, sarg.aging_lazy,
        sarg.aging, sarg.sticky, sarg.killed);
    /* re-arm the timer if there's work to do */
    nd6_timeout_run--;
    VERIFY(nd6_timeout_run >= 0 && nd6_timeout_run < 2);
    if (arg == &nd6_fast_timer_on) {
        nd6_fast_timer_on = FALSE;
    }
    if (sarg.aging_lazy > 0 || sarg.aging > 0 || nd6_sched_timeout_want) {
        struct timeval atv, ltv, *leeway;
        int lazy = nd6_prune_lazy;

        if (sarg.aging > 0 || lazy < 1) {
            atv.tv_usec = 0;
            atv.tv_sec = nd6_prune;
            leeway = NULL;
        } else {
            VERIFY(lazy >= 1);
            atv.tv_usec = 0;
            atv.tv_sec = MAX(nd6_prune, lazy);
            ltv.tv_usec = 0;
            read_frandom(&buf, sizeof(buf));
            ltv.tv_sec = MAX(buf % lazy, 1) * 2;
            leeway = &ltv;
        }
        nd6_sched_timeout(&atv, leeway);
    } else if (nd6_debug) {
        nd6log2(debug, "%s: not rescheduling timer\n", __func__);
    }
    lck_mtx_unlock(rnh_lock);
}

void
nd6_sched_timeout(struct timeval *atv, struct timeval *ltv)
{
    struct timeval tv;

    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
    if (atv == NULL) {
        tv.tv_usec = 0;
        tv.tv_sec = MAX(nd6_prune, 1);
        atv = &tv;
        ltv = NULL;     /* ignore leeway */
    }
    /* see comments on top of this file */
    if (nd6_timeout_run == 0) {
        if (ltv == NULL) {
            nd6log2(debug, "%s: timer scheduled in "
                "T+%llus.%lluu (demand %d)\n", __func__,
                (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec,
                nd6_sched_timeout_want);
            nd6_fast_timer_on = TRUE;
            timeout(nd6_timeout, &nd6_fast_timer_on, tvtohz(atv));
        } else {
            nd6log2(debug, "%s: timer scheduled in "
                "T+%llus.%lluu with %llus.%lluu leeway "
                "(demand %d)\n", __func__, (uint64_t)atv->tv_sec,
                (uint64_t)atv->tv_usec, (uint64_t)ltv->tv_sec,
                (uint64_t)ltv->tv_usec, nd6_sched_timeout_want);
            nd6_fast_timer_on = FALSE;
            timeout_with_leeway(nd6_timeout, NULL,
                tvtohz(atv), tvtohz(ltv));
        }
        nd6_timeout_run++;
        nd6_sched_timeout_want = 0;
    } else if (nd6_timeout_run == 1 && ltv == NULL &&
        nd6_fast_timer_on == FALSE) {
        nd6log2(debug, "%s: fast timer scheduled in "
            "T+%llus.%lluu (demand %d)\n", __func__,
            (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec,
            nd6_sched_timeout_want);
        nd6_fast_timer_on = TRUE;
        nd6_sched_timeout_want = 0;
        nd6_timeout_run++;
        timeout(nd6_timeout, &nd6_fast_timer_on, tvtohz(atv));
    } else {
        if (ltv == NULL) {
            nd6log2(debug, "%s: not scheduling timer: "
                "timers %d, fast_timer %d, T+%llus.%lluu\n",
                __func__, nd6_timeout_run, nd6_fast_timer_on,
                (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
        } else {
            nd6log2(debug, "%s: not scheduling timer: "
                "timers %d, fast_timer %d, T+%llus.%lluu "
                "with %llus.%lluu leeway\n", __func__,
                nd6_timeout_run, nd6_fast_timer_on,
                (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec,
                (uint64_t)ltv->tv_sec, (uint64_t)ltv->tv_usec);
        }
    }
}

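/*
 * Usage note (added for exposition): callers that just want the
 * default cadence pass NULL for both arguments, as nd6_drain() above
 * does; that arms the fast timer at MAX(nd6_prune, 1) seconds with no
 * leeway:
 *
 *	lck_mtx_lock(rnh_lock);
 *	nd6_sched_timeout(NULL, NULL);
 *	lck_mtx_unlock(rnh_lock);
 */
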
1832/*
1833 * ND6 router advertisement kernel notification
1834 */
1835void
1836nd6_post_msg(u_int32_t code, struct nd_prefix_list *prefix_list,
1837 u_int32_t list_length, u_int32_t mtu)
1838{
1839 struct kev_msg ev_msg;
1840 struct kev_nd6_ra_data nd6_ra_msg_data;
1841 struct nd_prefix_list *itr = prefix_list;
1842
1843 bzero(&ev_msg, sizeof(struct kev_msg));
1844 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1845 ev_msg.kev_class = KEV_NETWORK_CLASS;
1846 ev_msg.kev_subclass = KEV_ND6_SUBCLASS;
1847 ev_msg.event_code = code;
1848
1849 bzero(&nd6_ra_msg_data, sizeof(nd6_ra_msg_data));
1850
1851 if (mtu > 0 && mtu >= IPV6_MMTU) {
1852 nd6_ra_msg_data.mtu = mtu;
1853 nd6_ra_msg_data.flags |= KEV_ND6_DATA_VALID_MTU;
1854 }
1855
1856 if (list_length > 0 && prefix_list != NULL) {
1857 nd6_ra_msg_data.list_length = list_length;
1858 nd6_ra_msg_data.flags |= KEV_ND6_DATA_VALID_PREFIX;
1859 }
1860
1861 while (itr != NULL && nd6_ra_msg_data.list_index < list_length) {
1862 bcopy(&itr->pr.ndpr_prefix, &nd6_ra_msg_data.prefix.prefix,
1863 sizeof(nd6_ra_msg_data.prefix.prefix));
1864 nd6_ra_msg_data.prefix.raflags = itr->pr.ndpr_raf;
1865 nd6_ra_msg_data.prefix.prefixlen = itr->pr.ndpr_plen;
1866 nd6_ra_msg_data.prefix.origin = PR_ORIG_RA;
1867 nd6_ra_msg_data.prefix.vltime = itr->pr.ndpr_vltime;
1868 nd6_ra_msg_data.prefix.pltime = itr->pr.ndpr_pltime;
1869 nd6_ra_msg_data.prefix.expire = ndpr_getexpire(&itr->pr);
1870 nd6_ra_msg_data.prefix.flags = itr->pr.ndpr_stateflags;
1871 nd6_ra_msg_data.prefix.refcnt = itr->pr.ndpr_addrcnt;
1872 nd6_ra_msg_data.prefix.if_index = itr->pr.ndpr_ifp->if_index;
1873
1874 /* send the message up */
1875 ev_msg.dv[0].data_ptr = &nd6_ra_msg_data;
1876 ev_msg.dv[0].data_length = sizeof(nd6_ra_msg_data);
1877 ev_msg.dv[1].data_length = 0;
1878 dlil_post_complete_msg(NULL, &ev_msg);
1879
1880 /* clean up for the next prefix */
1881 bzero(&nd6_ra_msg_data.prefix, sizeof(nd6_ra_msg_data.prefix));
1882 itr = itr->next;
1883 nd6_ra_msg_data.list_index++;
1884 }
1885}
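/*
 * Consumer-side sketch (hypothetical userland code, using the generic
 * kernel event socket API from <sys/kern_event.h> rather than anything
 * defined in this file): a process can watch for the RA events posted
 * above by filtering on the same vendor/class/subclass triple.
 *
 *	int s = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);
 *	struct kev_request req = {
 *		.vendor_code  = KEV_VENDOR_APPLE,
 *		.kev_class    = KEV_NETWORK_CLASS,
 *		.kev_subclass = KEV_ND6_SUBCLASS,
 *	};
 *	ioctl(s, SIOCSKEVFILTER, &req);
 *
 * Each read() on the socket then yields a struct kern_event_msg whose
 * payload is one kev_nd6_ra_data, i.e. one prefix per message as built
 * by the loop above.
 */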
1886
1887/*
1888 * Regenerate deprecated/invalidated temporary address
1889 */
1890static int
1891regen_tmpaddr(struct in6_ifaddr *ia6)
1892{
1893 struct ifaddr *ifa;
1894 struct ifnet *ifp;
1895 struct in6_ifaddr *public_ifa6 = NULL;
1896 uint64_t timenow = net_uptime();
1897
1898 ifp = ia6->ia_ifa.ifa_ifp;
1899 ifnet_lock_shared(ifp);
1900 TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
1901 struct in6_ifaddr *it6;
1902
1903 IFA_LOCK(ifa);
1904 if (ifa->ifa_addr->sa_family != AF_INET6) {
1905 IFA_UNLOCK(ifa);
1906 continue;
1907 }
1908 it6 = (struct in6_ifaddr *)ifa;
1909
1910		/* ignore non-autoconf addresses. */
1911 if ((it6->ia6_flags & IN6_IFF_AUTOCONF) == 0) {
1912 IFA_UNLOCK(ifa);
1913 continue;
1914 }
1915 /* ignore autoconf addresses with different prefixes. */
1916 if (it6->ia6_ndpr == NULL || it6->ia6_ndpr != ia6->ia6_ndpr) {
1917 IFA_UNLOCK(ifa);
1918 continue;
1919 }
1920 /*
1921 * Now we are looking at an autoconf address with the same
1922 * prefix as ours. If the address is temporary and is still
1923 * preferred, do not create another one. It would be rare, but
1924 * could happen, for example, when we resume a laptop PC after
1925 * a long period.
1926 */
1927 if ((it6->ia6_flags & IN6_IFF_TEMPORARY) != 0 &&
1928 !IFA6_IS_DEPRECATED(it6, timenow)) {
1929 IFA_UNLOCK(ifa);
1930 if (public_ifa6 != NULL) {
1931 IFA_REMREF(&public_ifa6->ia_ifa);
1932 }
1933 public_ifa6 = NULL;
1934 break;
1935 }
1936
1937 /*
1938 * This is a public autoconf address that has the same prefix
1939 * as ours. If it is preferred, keep it. We can't break the
1940 * loop here, because there may be a still-preferred temporary
1941 * address with the prefix.
1942 */
1943 if (!IFA6_IS_DEPRECATED(it6, timenow)) {
1944 IFA_ADDREF_LOCKED(ifa); /* for public_ifa6 */
1945 IFA_UNLOCK(ifa);
1946 if (public_ifa6 != NULL) {
1947 IFA_REMREF(&public_ifa6->ia_ifa);
1948 }
1949 public_ifa6 = it6;
1950 } else {
1951 IFA_UNLOCK(ifa);
1952 }
1953 }
1954 ifnet_lock_done(ifp);
1955
1956 if (public_ifa6 != NULL) {
1957 int e;
1958
1959 if ((e = in6_tmpifadd(public_ifa6, 0)) != 0) {
1960			log(LOG_NOTICE, "regen_tmpaddr: failed to create a new"
1961			    " tmp addr, errno=%d\n", e);
1962 IFA_REMREF(&public_ifa6->ia_ifa);
1963 return -1;
1964 }
1965 IFA_REMREF(&public_ifa6->ia_ifa);
1966 return 0;
1967 }
1968
1969 return -1;
1970}
1971
1972static void
1973nd6_purge_interface_default_routers(struct ifnet *ifp)
1974{
1975 struct nd_defrouter *dr = NULL;
1976 struct nd_defrouter *ndr = NULL;
1977 struct nd_drhead nd_defrouter_tmp = {};
1978
1979 TAILQ_INIT(&nd_defrouter_tmp);
1980
1981 LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED);
1982
1983 TAILQ_FOREACH_SAFE(dr, &nd_defrouter_list, dr_entry, ndr) {
1984 if (dr->ifp != ifp) {
1985 continue;
1986 }
1987 /*
1988		 * Remove the entry from the default router list
1989		 * and add it to the temp list.
1990		 * nd_defrouter_tmp will be a local temporary
1991		 * list, as no one else can get at the removed
1992		 * entry once it has been removed from the
1993		 * default router list.
1994 * Remove the reference after calling defrtrlist_del.
1995 *
1996 * The uninstalled entries have to be iterated first
1997 * when we call defrtrlist_del.
1998 * This is to ensure that we don't end up calling
1999 * default router selection when there are other
2000 * uninstalled candidate default routers on
2001 * the interface.
2002 * If we don't respect that order, we may end
2003 * up missing out on some entries.
2004 *
2005 * For that reason, installed ones must be inserted
2006		 * at the tail and uninstalled ones at the head.
2007 */
2008 TAILQ_REMOVE(&nd_defrouter_list, dr, dr_entry);
2009
2010 if (dr->stateflags & NDDRF_INSTALLED) {
2011 TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry);
2012 } else {
2013 TAILQ_INSERT_HEAD(&nd_defrouter_tmp, dr, dr_entry);
2014 }
2015 }
2016
2017 /*
2018 * The following call to defrtrlist_del should be
2019 * safe as we are iterating a local list of
2020 * default routers.
2021 *
2022	 * We don't really need nd6_mutex here, but we keep
2023	 * it as it is to avoid changing the assertions held
2024	 * in the functions along the call-path.
2025 */
2026 TAILQ_FOREACH_SAFE(dr, &nd_defrouter_tmp, dr_entry, ndr) {
2027 TAILQ_REMOVE(&nd_defrouter_tmp, dr, dr_entry);
2028 defrtrlist_del(dr, NULL);
2029 NDDR_REMREF(dr); /* remove list reference */
2030 }
2031}
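/*
 * Worked example of the head/tail discipline above (a sketch, not code
 * from the original): with routers A (installed), B (uninstalled) and
 * C (installed) on ifp, nd_defrouter_tmp ends up ordered B, A, C.
 * defrtrlist_del() therefore consumes the uninstalled B before any
 * installed entry, so router selection is never run while uninstalled
 * candidates still linger on the interface.
 */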
2032
2033static void
2034nd6_purge_interface_prefixes(struct ifnet *ifp)
2035{
2036 boolean_t removed = FALSE;
2037 struct nd_prefix *pr = NULL;
2038 struct nd_prefix *npr = NULL;
2039
2040 LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED);
2041
2042 /* Nuke prefix list entries toward ifp */
2043 for (pr = nd_prefix.lh_first; pr; pr = npr) {
2044 NDPR_LOCK(pr);
2045 npr = pr->ndpr_next;
2046 if (pr->ndpr_ifp == ifp &&
2047 !(pr->ndpr_stateflags & NDPRF_DEFUNCT)) {
2048 /*
2049 * Because if_detach() does *not* release prefixes
2050			 * while purging addresses, the reference count will
2051 * still be above zero. We therefore reset it to
2052 * make sure that the prefix really gets purged.
2053 */
2054 pr->ndpr_addrcnt = 0;
2055
2056 /*
2057 * Previously, pr->ndpr_addr is removed as well,
2058 * but I strongly believe we don't have to do it.
2059 * nd6_purge() is only called from in6_ifdetach(),
2060 * which removes all the associated interface addresses
2061 * by itself.
2062 * (jinmei@kame.net 20010129)
2063 */
2064 NDPR_ADDREF(pr);
2065 prelist_remove(pr);
2066 NDPR_UNLOCK(pr);
2067 NDPR_REMREF(pr);
2068 removed = TRUE;
2069 npr = nd_prefix.lh_first;
2070 } else {
2071 NDPR_UNLOCK(pr);
2072 }
2073 }
2074 if (removed) {
2075 pfxlist_onlink_check();
2076 }
2077}
2078
2079static void
2080nd6_router_select_rti_entries(struct ifnet *ifp)
2081{
2082 struct nd_route_info *rti = NULL;
2083 struct nd_route_info *rti_next = NULL;
2084
2085 nd6_rti_list_wait(__func__);
2086
2087 TAILQ_FOREACH_SAFE(rti, &nd_rti_list, nd_rti_entry, rti_next) {
2088 defrouter_select(ifp, &rti->nd_rti_router_list);
2089 }
2090
2091 nd6_rti_list_signal_done();
2092}
2093
2094static void
2095nd6_purge_interface_rti_entries(struct ifnet *ifp)
2096{
2097 struct nd_route_info *rti = NULL;
2098 struct nd_route_info *rti_next = NULL;
2099
2100 nd6_rti_list_wait(__func__);
2101
2102 TAILQ_FOREACH_SAFE(rti, &nd_rti_list, nd_rti_entry, rti_next) {
2103 struct nd_route_info rti_tmp = {};
2104 struct nd_defrouter *dr = NULL;
2105 struct nd_defrouter *ndr = NULL;
2106
2107 rti_tmp.nd_rti_prefix = rti->nd_rti_prefix;
2108 rti_tmp.nd_rti_prefixlen = rti->nd_rti_prefixlen;
2109 TAILQ_INIT(&rti_tmp.nd_rti_router_list);
2110
2111 TAILQ_FOREACH_SAFE(dr, &rti->nd_rti_router_list, dr_entry, ndr) {
2112 /*
2113 * If ifp is provided, skip the entries that don't match.
2114 * Else it is treated as a purge.
2115 */
2116 if (ifp != NULL && dr->ifp != ifp) {
2117 continue;
2118 }
2119
2120 /*
2121 * Remove the entry from rti's router list
2122 * and add it to the temp list.
2123 * Remove the reference after calling defrtrlist_del.
2124 *
2125 * The uninstalled entries have to be iterated first
2126 * when we call defrtrlist_del.
2127 * This is to ensure that we don't end up calling
2128 * router selection when there are other
2129 * uninstalled candidate default routers on
2130 * the interface.
2131 * If we don't respect that order, we may end
2132 * up missing out on some entries.
2133 *
2134 * For that reason, installed ones must be inserted
2135			 * at the tail and uninstalled ones at the head.
2136 */
2137
2138 TAILQ_REMOVE(&rti->nd_rti_router_list, dr, dr_entry);
2139 if (dr->stateflags & NDDRF_INSTALLED) {
2140 TAILQ_INSERT_TAIL(&rti_tmp.nd_rti_router_list, dr, dr_entry);
2141 } else {
2142 TAILQ_INSERT_HEAD(&rti_tmp.nd_rti_router_list, dr, dr_entry);
2143 }
2144 }
2145
2146 /*
2147 * The following call to defrtrlist_del should be
2148 * safe as we are iterating a local list of
2149 * routers.
2150 *
2151		 * We don't really need nd6_mutex here, but we keep
2152		 * it as it is to avoid changing the assertions held
2153		 * in the functions along the call-path.
2154 */
2155 TAILQ_FOREACH_SAFE(dr, &rti_tmp.nd_rti_router_list, dr_entry, ndr) {
2156 TAILQ_REMOVE(&rti_tmp.nd_rti_router_list, dr, dr_entry);
2157 defrtrlist_del(dr, &rti->nd_rti_router_list);
2158 NDDR_REMREF(dr); /* remove list reference */
2159 }
2160 /*
2161		 * The above may have removed an entry from the default router list.
2162 * If it did and the list is now empty, remove the rti as well.
2163 */
2164 if (TAILQ_EMPTY(&rti->nd_rti_router_list)) {
2165 TAILQ_REMOVE(&nd_rti_list, rti, nd_rti_entry);
2166 ndrti_free(rti);
2167 }
2168 }
2169
2170 nd6_rti_list_signal_done();
2171}
2172
2173static void
2174nd6_purge_interface_llinfo(struct ifnet *ifp)
2175{
2176 struct llinfo_nd6 *ln = NULL;
2177 /* Note that rt->rt_ifp may not be the same as ifp,
2178 * due to KAME goto ours hack. See RTM_RESOLVE case in
2179 * nd6_rtrequest(), and ip6_input().
2180 */
2181again:
2182 lck_mtx_lock(rnh_lock);
2183 ln = llinfo_nd6.ln_next;
2184 while (ln != NULL && ln != &llinfo_nd6) {
2185 struct rtentry *rt;
2186 struct llinfo_nd6 *nln;
2187
2188 nln = ln->ln_next;
2189 rt = ln->ln_rt;
2190 RT_LOCK(rt);
2191 if (rt->rt_gateway != NULL &&
2192 rt->rt_gateway->sa_family == AF_LINK &&
2193 SDL(rt->rt_gateway)->sdl_index == ifp->if_index) {
2194 RT_ADDREF_LOCKED(rt);
2195 RT_UNLOCK(rt);
2196 lck_mtx_unlock(rnh_lock);
2197 /*
2198 * See comments on nd6_service() for reasons why
2199			 * this loop is repeated; we bite the cost of
2200			 * going thru the same llinfo_nd6 more than once
2201			 * here, since this purge happens during detach,
2202			 * and, unlike the timer case, more than one
2203			 * purge may be happening at the same time
2204			 * (thus a flag wouldn't buy anything).
2205 */
2206 nd6_free(rt);
2207 RT_REMREF(rt);
2208 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
2209 goto again;
2210 } else {
2211 RT_UNLOCK(rt);
2212 }
2213 ln = nln;
2214 }
2215 lck_mtx_unlock(rnh_lock);
2216}
2217
2218/*
2219 * Nuke neighbor cache/prefix/default router management table, right before
2220 * ifp goes away.
2221 */
2222void
2223nd6_purge(struct ifnet *ifp)
2224{
2225 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
2226 lck_mtx_lock(nd6_mutex);
2227
2228 /* Nuke default router list entries toward ifp */
2229 nd6_purge_interface_default_routers(ifp);
2230
2231 /* Nuke prefix list entries toward ifp */
2232 nd6_purge_interface_prefixes(ifp);
2233
2234 /* Nuke route info option entries toward ifp */
2235 nd6_purge_interface_rti_entries(ifp);
2236
2237 lck_mtx_unlock(nd6_mutex);
2238
2239 /* cancel default outgoing interface setting */
2240 if (nd6_defifindex == ifp->if_index) {
2241 nd6_setdefaultiface(0);
2242 }
2243
2244 /*
2245 * Perform default router selection even when we are a router,
2246 * if Scoped Routing is enabled.
2247	 * XXX: Should really no longer be needed since defrouter_select
2248	 * was changed to work per interface.
2249 */
2250 lck_mtx_lock(nd6_mutex);
2251 /* refresh default router list */
2252 defrouter_select(ifp, NULL);
2253 lck_mtx_unlock(nd6_mutex);
2254
2255 /* Nuke neighbor cache entries for the ifp. */
2256 nd6_purge_interface_llinfo(ifp);
2257}
2258
2259/*
2260 * Upon success, the returned route will be locked and the caller is
2261 * responsible for releasing the reference and doing RT_UNLOCK(rt).
2262 * This routine does not require rnh_lock to be held by the caller,
2263 * although the caller must indicate whether it is held, so that the
2264 * correct variant of the relevant routing routines gets called.
2265 */
2266struct rtentry *
2267nd6_lookup(struct in6_addr *addr6, int create, struct ifnet *ifp, int rt_locked)
2268{
2269 struct rtentry *rt;
2270 struct sockaddr_in6 sin6;
2271 unsigned int ifscope;
2272
2273 bzero(&sin6, sizeof(sin6));
2274 sin6.sin6_len = sizeof(struct sockaddr_in6);
2275 sin6.sin6_family = AF_INET6;
2276 sin6.sin6_addr = *addr6;
2277
2278 ifscope = (ifp != NULL) ? ifp->if_index : IFSCOPE_NONE;
2279 if (rt_locked) {
2280 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
2281 rt = rtalloc1_scoped_locked(SA(&sin6), create, 0, ifscope);
2282 } else {
2283 rt = rtalloc1_scoped(SA(&sin6), create, 0, ifscope);
2284 }
2285
2286 if (rt != NULL) {
2287 RT_LOCK(rt);
2288 if ((rt->rt_flags & RTF_LLINFO) == 0) {
2289 /*
2290 * This is the case for the default route.
2291 * If we want to create a neighbor cache for the
2292 * address, we should free the route for the
2293 * destination and allocate an interface route.
2294 */
2295 if (create) {
2296 RT_UNLOCK(rt);
2297 if (rt_locked) {
2298 rtfree_locked(rt);
2299 } else {
2300 rtfree(rt);
2301 }
2302 rt = NULL;
2303 }
2304 }
2305 }
2306 if (rt == NULL) {
2307 if (create && ifp) {
2308 struct ifaddr *ifa;
2309 u_int32_t ifa_flags;
2310 int e;
2311
2312 /*
2313 * If no route is available and create is set,
2314 * we allocate a host route for the destination
2315 * and treat it like an interface route.
2316 * This hack is necessary for a neighbor which can't
2317 * be covered by our own prefix.
2318 */
2319 ifa = ifaof_ifpforaddr(SA(&sin6), ifp);
2320 if (ifa == NULL) {
2321 return NULL;
2322 }
2323
2324 /*
2325 * Create a new route. RTF_LLINFO is necessary
2326 * to create a Neighbor Cache entry for the
2327 * destination in nd6_rtrequest which will be
2328 * called in rtrequest via ifa->ifa_rtrequest.
2329 */
2330 if (!rt_locked) {
2331 lck_mtx_lock(rnh_lock);
2332 }
2333 IFA_LOCK_SPIN(ifa);
2334 ifa_flags = ifa->ifa_flags;
2335 IFA_UNLOCK(ifa);
2336 if ((e = rtrequest_scoped_locked(RTM_ADD,
2337 SA(&sin6), ifa->ifa_addr, SA(&all1_sa),
2338 (ifa_flags | RTF_HOST | RTF_LLINFO) &
2339 ~RTF_CLONING, &rt, ifscope)) != 0) {
2340 if (e != EEXIST) {
2341 log(LOG_ERR, "%s: failed to add route "
2342 "for a neighbor(%s), errno=%d\n",
2343 __func__, ip6_sprintf(addr6), e);
2344 }
2345 }
2346 if (!rt_locked) {
2347 lck_mtx_unlock(rnh_lock);
2348 }
2349 IFA_REMREF(ifa);
2350 if (rt == NULL) {
2351 return NULL;
2352 }
2353
2354 RT_LOCK(rt);
2355 if (rt->rt_llinfo) {
2356 struct llinfo_nd6 *ln = rt->rt_llinfo;
2357 struct nd_ifinfo *ndi = ND_IFINFO(rt->rt_ifp);
2358
2359 VERIFY((NULL != ndi) && (TRUE == ndi->initialized));
2360 /*
2361			 * For interfaces that do not perform NUD,
2362			 * neighbor cache entries must always be marked
2363			 * reachable with no expiry
2364 */
2365 if (ndi->flags & ND6_IFF_PERFORMNUD) {
2366 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_NOSTATE);
2367 } else {
2368 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_REACHABLE);
2369 ln_setexpire(ln, 0);
2370 }
2371 }
2372 } else {
2373 return NULL;
2374 }
2375 }
2376 RT_LOCK_ASSERT_HELD(rt);
2377 /*
2378 * Validation for the entry.
2379 * Note that the check for rt_llinfo is necessary because a cloned
2380 * route from a parent route that has the L flag (e.g. the default
2381 * route to a p2p interface) may have the flag, too, while the
2382 * destination is not actually a neighbor.
2383 * XXX: we can't use rt->rt_ifp to check for the interface, since
2384 * it might be the loopback interface if the entry is for our
2385 * own address on a non-loopback interface. Instead, we should
2386 * use rt->rt_ifa->ifa_ifp, which would specify the REAL
2387 * interface.
2388 * Note also that ifa_ifp and ifp may differ when we connect two
2389 * interfaces to a same link, install a link prefix to an interface,
2390 * and try to install a neighbor cache on an interface that does not
2391 * have a route to the prefix.
2392 *
2393 * If the address is from a proxied prefix, the ifa_ifp and ifp might
2394 * not match, because nd6_na_input() could have modified the ifp
2395 * of the route to point to the interface where the NA arrived on,
2396 * hence the test for RTF_PROXY.
2397 */
2398 if ((rt->rt_flags & RTF_GATEWAY) || (rt->rt_flags & RTF_LLINFO) == 0 ||
2399 rt->rt_gateway->sa_family != AF_LINK || rt->rt_llinfo == NULL ||
2400 (ifp && rt->rt_ifa->ifa_ifp != ifp &&
2401 !(rt->rt_flags & RTF_PROXY))) {
2402 RT_REMREF_LOCKED(rt);
2403 RT_UNLOCK(rt);
2404 if (create) {
2405 log(LOG_DEBUG, "%s: failed to lookup %s "
2406 "(if = %s)\n", __func__, ip6_sprintf(addr6),
2407 ifp ? if_name(ifp) : "unspec");
2408 /* xxx more logs... kazu */
2409 }
2410 return NULL;
2411 }
2412 /*
2413 * Caller needs to release reference and call RT_UNLOCK(rt).
2414 */
2415 return rt;
2416}
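/*
 * Caller pattern sketch (mirrors nd6_is_addr_neighbor() below): on
 * success the route comes back locked with an extra reference, and the
 * caller must drop both when done.
 *
 *	if ((rt = nd6_lookup(&addr6, 0, ifp, 0)) != NULL) {
 *		RT_LOCK_ASSERT_HELD(rt);
 *		(... examine rt->rt_llinfo, rt->rt_gateway, etc ...)
 *		RT_REMREF_LOCKED(rt);
 *		RT_UNLOCK(rt);
 *	}
 */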
2417
2418/*
2419 * Test whether a given IPv6 address is a neighbor or not, ignoring
2420 * the actual neighbor cache. The neighbor cache is ignored in order
2421 * to not reenter the routing code from within itself.
2422 */
2423static int
2424nd6_is_new_addr_neighbor(struct sockaddr_in6 *addr, struct ifnet *ifp)
2425{
2426 struct nd_prefix *pr;
2427 struct ifaddr *dstaddr;
2428
2429 LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED);
2430
2431 /*
2432 * A link-local address is always a neighbor.
2433 * XXX: a link does not necessarily specify a single interface.
2434 */
2435 if (IN6_IS_ADDR_LINKLOCAL(&addr->sin6_addr)) {
2436 struct sockaddr_in6 sin6_copy;
2437 u_int32_t zone;
2438
2439 /*
2440 * We need sin6_copy since sa6_recoverscope() may modify the
2441 * content (XXX).
2442 */
2443 sin6_copy = *addr;
2444 if (sa6_recoverscope(&sin6_copy, FALSE)) {
2445 return 0; /* XXX: should be impossible */
2446 }
2447 if (in6_setscope(&sin6_copy.sin6_addr, ifp, &zone)) {
2448 return 0;
2449 }
2450 if (sin6_copy.sin6_scope_id == zone) {
2451 return 1;
2452 } else {
2453 return 0;
2454 }
2455 }
2456
2457 /*
2458 * If the address matches one of our addresses,
2459 * it should be a neighbor.
2460 * If the address matches one of our on-link prefixes, it should be a
2461 * neighbor.
2462 */
2463 for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) {
2464 NDPR_LOCK(pr);
2465 if (pr->ndpr_ifp != ifp) {
2466 NDPR_UNLOCK(pr);
2467 continue;
2468 }
2469 if (!(pr->ndpr_stateflags & NDPRF_ONLINK)) {
2470 NDPR_UNLOCK(pr);
2471 continue;
2472 }
2473 if (IN6_ARE_MASKED_ADDR_EQUAL(&pr->ndpr_prefix.sin6_addr,
2474 &addr->sin6_addr, &pr->ndpr_mask)) {
2475 NDPR_UNLOCK(pr);
2476 return 1;
2477 }
2478 NDPR_UNLOCK(pr);
2479 }
2480
2481 /*
2482 * If the address is assigned on the node of the other side of
2483 * a p2p interface, the address should be a neighbor.
2484 */
2485 dstaddr = ifa_ifwithdstaddr(SA(addr));
2486 if (dstaddr != NULL) {
2487 if (dstaddr->ifa_ifp == ifp) {
2488 IFA_REMREF(dstaddr);
2489 return 1;
2490 }
2491 IFA_REMREF(dstaddr);
2492 dstaddr = NULL;
2493 }
2494
2495 return 0;
2496}
2497
2498
2499/*
2500 * Detect if a given IPv6 address identifies a neighbor on a given link.
2501 * XXX: should take care of the destination of a p2p link?
2502 */
2503int
2504nd6_is_addr_neighbor(struct sockaddr_in6 *addr, struct ifnet *ifp,
2505 int rt_locked)
2506{
2507 struct rtentry *rt;
2508
2509 LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_NOTOWNED);
2510 lck_mtx_lock(nd6_mutex);
2511 if (nd6_is_new_addr_neighbor(addr, ifp)) {
2512 lck_mtx_unlock(nd6_mutex);
2513 return 1;
2514 }
2515 lck_mtx_unlock(nd6_mutex);
2516
2517 /*
2518 * Even if the address matches none of our addresses, it might be
2519 * in the neighbor cache.
2520 */
2521 if ((rt = nd6_lookup(&addr->sin6_addr, 0, ifp, rt_locked)) != NULL) {
2522 RT_LOCK_ASSERT_HELD(rt);
2523 RT_REMREF_LOCKED(rt);
2524 RT_UNLOCK(rt);
2525 return 1;
2526 }
2527
2528 return 0;
2529}
2530
2531/*
2532 * Free an nd6 llinfo entry.
2533 * Since the function would cause significant changes in the kernel, DO NOT
2534 * make it global, unless you have a strong reason for the change, and are sure
2535 * that the change is safe.
2536 */
2537void
2538nd6_free(struct rtentry *rt)
2539{
2540 struct llinfo_nd6 *ln = NULL;
2541 struct in6_addr in6 = {};
2542 struct nd_defrouter *dr = NULL;
2543
2544 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
2545 RT_LOCK_ASSERT_NOTHELD(rt);
2546 lck_mtx_lock(nd6_mutex);
2547
2548 RT_LOCK(rt);
2549 RT_ADDREF_LOCKED(rt); /* Extra ref */
2550 ln = rt->rt_llinfo;
2551 in6 = SIN6(rt_key(rt))->sin6_addr;
2552
2553 /*
2554 * Prevent another thread from modifying rt_key, rt_gateway
2555 * via rt_setgate() after the rt_lock is dropped by marking
2556 * the route as defunct.
2557 */
2558 rt->rt_flags |= RTF_CONDEMNED;
2559
2560 /*
2561 * We used to have pfctlinput(PRC_HOSTDEAD) here. Even though it is
2562 * not harmful, it was not really necessary. Perform default router
2563 * selection even when we are a router, if Scoped Routing is enabled.
2564 */
2565	/* XXX TBD Handle lists in route information option as well */
2566 dr = defrouter_lookup(NULL, &SIN6(rt_key(rt))->sin6_addr, rt->rt_ifp);
2567
2568 if ((ln && ln->ln_router) || dr) {
2569 /*
2570 * rt6_flush must be called whether or not the neighbor
2571 * is in the Default Router List.
2572 * See a corresponding comment in nd6_na_input().
2573 */
2574 RT_UNLOCK(rt);
2575 lck_mtx_unlock(nd6_mutex);
2576 rt6_flush(&in6, rt->rt_ifp);
2577 lck_mtx_lock(nd6_mutex);
2578 } else {
2579 RT_UNLOCK(rt);
2580 }
2581
2582 if (dr) {
2583 NDDR_REMREF(dr);
2584 /*
2585		 * Unreachability of a router might affect the default
2586 * router selection and on-link detection of advertised
2587 * prefixes.
2588 */
2589
2590 /*
2591 * Temporarily fake the state to choose a new default
2592 * router and to perform on-link determination of
2593 * prefixes correctly.
2594 * Below the state will be set correctly,
2595 * or the entry itself will be deleted.
2596 */
2597 RT_LOCK_SPIN(rt);
2598 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_INCOMPLETE);
2599
2600 /*
2601 * Since defrouter_select() does not affect the
2602 * on-link determination and MIP6 needs the check
2603 * before the default router selection, we perform
2604 * the check now.
2605 */
2606 RT_UNLOCK(rt);
2607 pfxlist_onlink_check();
2608
2609 /*
2610 * refresh default router list
2611 */
2612 defrouter_select(rt->rt_ifp, NULL);
2613
2614 /* Loop through all RTI's as well and trigger router selection. */
2615 nd6_router_select_rti_entries(rt->rt_ifp);
2616 }
2617 RT_LOCK_ASSERT_NOTHELD(rt);
2618 lck_mtx_unlock(nd6_mutex);
2619 /*
2620 * Detach the route from the routing tree and the list of neighbor
2621	 * caches, and disable the route entry so that it is not used by
2622	 * already cached routes.
2623 */
2624 (void) rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt), 0, NULL);
2625
2626 /* Extra ref held above; now free it */
2627 rtfree(rt);
2628}
2629
2630void
2631nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa)
2632{
2633#pragma unused(sa)
2634 struct sockaddr *gate = rt->rt_gateway;
2635 struct llinfo_nd6 *ln = rt->rt_llinfo;
2636 static struct sockaddr_dl null_sdl =
2637 { .sdl_len = sizeof(null_sdl), .sdl_family = AF_LINK };
2638 struct ifnet *ifp = rt->rt_ifp;
2639 struct ifaddr *ifa;
2640 uint64_t timenow;
2641 char buf[MAX_IPv6_STR_LEN];
2642 struct nd_ifinfo *ndi = ND_IFINFO(rt->rt_ifp);
2643
2644 VERIFY((NULL != ndi) && (TRUE == ndi->initialized));
2645 VERIFY(nd6_init_done);
2646 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
2647 RT_LOCK_ASSERT_HELD(rt);
2648
2649 /*
2650 * We have rnh_lock held, see if we need to schedule the timer;
2651 * we might do this again below during RTM_RESOLVE, but doing it
2652 * now handles all other cases.
2653 */
2654 if (nd6_sched_timeout_want) {
2655 nd6_sched_timeout(NULL, NULL);
2656 }
2657
2658 if (rt->rt_flags & RTF_GATEWAY) {
2659 return;
2660 }
2661
2662 if (!nd6_need_cache(ifp) && !(rt->rt_flags & RTF_HOST)) {
2663 /*
2664 * This is probably an interface direct route for a link
2665 * which does not need neighbor caches (e.g. fe80::%lo0/64).
2666 * We do not need special treatment below for such a route.
2667 * Moreover, the RTF_LLINFO flag which would be set below
2668 * would annoy the ndp(8) command.
2669 */
2670 return;
2671 }
2672
2673 if (req == RTM_RESOLVE) {
2674 int no_nd_cache;
2675
2676 if (!nd6_need_cache(ifp)) { /* stf case */
2677 no_nd_cache = 1;
2678 } else {
2679 struct sockaddr_in6 sin6;
2680
2681 rtkey_to_sa6(rt, &sin6);
2682 /*
2683 * nd6_is_addr_neighbor() may call nd6_lookup(),
2684 * therefore we drop rt_lock to avoid deadlock
2685 * during the lookup.
2686 */
2687 RT_ADDREF_LOCKED(rt);
2688 RT_UNLOCK(rt);
2689 no_nd_cache = !nd6_is_addr_neighbor(&sin6, ifp, 1);
2690 RT_LOCK(rt);
2691 RT_REMREF_LOCKED(rt);
2692 }
2693
2694 /*
2695 * FreeBSD and BSD/OS often make a cloned host route based
2696 * on a less-specific route (e.g. the default route).
2697 * If the less specific route does not have a "gateway"
2698 * (this is the case when the route just goes to a p2p or an
2699 * stf interface), we'll mistakenly make a neighbor cache for
2700 * the host route, and will see strange neighbor solicitation
2701 * for the corresponding destination. In order to avoid the
2702 * confusion, we check if the destination of the route is
2703 * a neighbor in terms of neighbor discovery, and stop the
2704 * process if not. Additionally, we remove the LLINFO flag
2705 * so that ndp(8) will not try to get the neighbor information
2706 * of the destination.
2707 */
2708 if (no_nd_cache) {
2709 rt->rt_flags &= ~RTF_LLINFO;
2710 return;
2711 }
2712 }
2713
2714 timenow = net_uptime();
2715
2716 switch (req) {
2717 case RTM_ADD:
2718 /*
2719 * There is no backward compatibility :)
2720 *
2721 * if ((rt->rt_flags & RTF_HOST) == 0 &&
2722 * SIN(rt_mask(rt))->sin_addr.s_addr != 0xffffffff)
2723 * rt->rt_flags |= RTF_CLONING;
2724 */
2725 if ((rt->rt_flags & RTF_CLONING) ||
2726 ((rt->rt_flags & RTF_LLINFO) && ln == NULL)) {
2727 /*
2728			 * Case 1: This route should come from a route to an
2729			 * interface (RTF_CLONING case), or the route should be
2730 * treated as on-link but is currently not
2731 * (RTF_LLINFO && ln == NULL case).
2732 */
2733 if (rt_setgate(rt, rt_key(rt), SA(&null_sdl)) == 0) {
2734 gate = rt->rt_gateway;
2735 SDL(gate)->sdl_type = ifp->if_type;
2736 SDL(gate)->sdl_index = ifp->if_index;
2737 /*
2738 * In case we're called before 1.0 sec.
2739 * has elapsed.
2740 */
2741 if (ln != NULL) {
2742 ln_setexpire(ln,
2743 (ifp->if_eflags & IFEF_IPV6_ND6ALT)
2744 ? 0 : MAX(timenow, 1));
2745 }
2746 }
2747 if (rt->rt_flags & RTF_CLONING) {
2748 break;
2749 }
2750 }
2751 /*
2752		 * In IPv4 code, we try to announce a new RTF_ANNOUNCE entry here.
2753		 * We don't do that here since llinfo is not ready yet.
2754		 *
2755		 * There are also a couple of other things to be discussed:
2756 * - unsolicited NA code needs improvement beforehand
2757 * - RFC4861 says we MAY send multicast unsolicited NA
2758 * (7.2.6 paragraph 4), however, it also says that we
2759 * SHOULD provide a mechanism to prevent multicast NA storm.
2760 * we don't have anything like it right now.
2761 * note that the mechanism needs a mutual agreement
2762 * between proxies, which means that we need to implement
2763 * a new protocol, or a new kludge.
2764 * - from RFC4861 6.2.4, host MUST NOT send an unsolicited RA.
2765 * we need to check ip6forwarding before sending it.
2766 * (or should we allow proxy ND configuration only for
2767 * routers? there's no mention about proxy ND from hosts)
2768 */
2769 OS_FALLTHROUGH;
2770 case RTM_RESOLVE:
2771 if (!(ifp->if_flags & (IFF_POINTOPOINT | IFF_LOOPBACK))) {
2772 /*
2773 * Address resolution isn't necessary for a point to
2774 * point link, so we can skip this test for a p2p link.
2775 */
2776 if (gate->sa_family != AF_LINK ||
2777 gate->sa_len < sizeof(null_sdl)) {
2778 /* Don't complain in case of RTM_ADD */
2779 if (req == RTM_RESOLVE) {
2780 log(LOG_ERR, "%s: route to %s has bad "
2781 "gateway address (sa_family %u "
2782 "sa_len %u) on %s\n", __func__,
2783 inet_ntop(AF_INET6,
2784 &SIN6(rt_key(rt))->sin6_addr, buf,
2785 sizeof(buf)), gate->sa_family,
2786 gate->sa_len, if_name(ifp));
2787 }
2788 break;
2789 }
2790 SDL(gate)->sdl_type = ifp->if_type;
2791 SDL(gate)->sdl_index = ifp->if_index;
2792 }
2793 if (ln != NULL) {
2794 break; /* This happens on a route change */
2795 }
2796 /*
2797 * Case 2: This route may come from cloning, or a manual route
2798 * add with a LL address.
2799 */
2800 rt->rt_llinfo = ln = nd6_llinfo_alloc(Z_WAITOK);
2801
2802 nd6_allocated++;
2803 rt->rt_llinfo_get_ri = nd6_llinfo_get_ri;
2804 rt->rt_llinfo_get_iflri = nd6_llinfo_get_iflri;
2805 rt->rt_llinfo_purge = nd6_llinfo_purge;
2806 rt->rt_llinfo_free = nd6_llinfo_free;
2807 rt->rt_llinfo_refresh = nd6_llinfo_refresh;
2808 rt->rt_flags |= RTF_LLINFO;
2809 ln->ln_rt = rt;
2810 /* this is required for "ndp" command. - shin */
2811 /*
2812		 * For interfaces that do not perform NUD,
2813 * neighbor cache entries must always be marked
2814 * reachable with no expiry
2815 */
2816 if ((req == RTM_ADD) ||
2817 !(ndi->flags & ND6_IFF_PERFORMNUD)) {
2818 /*
2819 * gate should have some valid AF_LINK entry,
2820 * and ln->ln_expire should have some lifetime
2821 * which is specified by ndp command.
2822 */
2823 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_REACHABLE);
2824 ln_setexpire(ln, 0);
2825 } else {
2826 /*
2827 * When req == RTM_RESOLVE, rt is created and
2828 * initialized in rtrequest(), so rt_expire is 0.
2829 */
2830 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_NOSTATE);
2831 /* In case we're called before 1.0 sec. has elapsed */
2832 ln_setexpire(ln, (ifp->if_eflags & IFEF_IPV6_ND6ALT) ?
2833 0 : MAX(timenow, 1));
2834 }
2835 LN_INSERTHEAD(ln);
2836 nd6_inuse++;
2837
2838 /* We have at least one entry; arm the timer if not already */
2839 nd6_sched_timeout(NULL, NULL);
2840
2841 /*
2842 * If we have too many cache entries, initiate immediate
2843 * purging for some "less recently used" entries. Note that
2844 * we cannot directly call nd6_free() here because it would
2845		 * cause re-entry into rtable-related routines, triggering an LOR
2846 * problem.
2847 */
2848 if (ip6_neighborgcthresh > 0 &&
2849 nd6_inuse >= ip6_neighborgcthresh) {
2850 int i;
2851
2852 for (i = 0; i < 10 && llinfo_nd6.ln_prev != ln; i++) {
2853 struct llinfo_nd6 *ln_end = llinfo_nd6.ln_prev;
2854 struct rtentry *rt_end = ln_end->ln_rt;
2855
2856 /* Move this entry to the head */
2857 RT_LOCK(rt_end);
2858 LN_DEQUEUE(ln_end);
2859 LN_INSERTHEAD(ln_end);
2860
2861 if (ln_end->ln_expire == 0) {
2862 RT_UNLOCK(rt_end);
2863 continue;
2864 }
2865 if (ln_end->ln_state > ND6_LLINFO_INCOMPLETE) {
2866 ND6_CACHE_STATE_TRANSITION(ln_end, ND6_LLINFO_STALE);
2867 } else {
2868 ND6_CACHE_STATE_TRANSITION(ln_end, ND6_LLINFO_PURGE);
2869 }
2870 ln_setexpire(ln_end, timenow);
2871 RT_UNLOCK(rt_end);
2872 }
2873 }
2874
2875 /*
2876		 * check if rt_key(rt) is one of my addresses assigned
2877 * to the interface.
2878 */
2879 ifa = (struct ifaddr *)in6ifa_ifpwithaddr(rt->rt_ifp,
2880 &SIN6(rt_key(rt))->sin6_addr);
2881 if (ifa != NULL) {
2882 caddr_t macp = nd6_ifptomac(ifp);
2883 ln_setexpire(ln, 0);
2884 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_REACHABLE);
2885 if (macp != NULL) {
2886 Bcopy(macp, LLADDR(SDL(gate)), ifp->if_addrlen);
2887 SDL(gate)->sdl_alen = ifp->if_addrlen;
2888 }
2889 if (nd6_useloopback) {
2890 if (rt->rt_ifp != lo_ifp) {
2891 /*
2892 * Purge any link-layer info caching.
2893 */
2894 if (rt->rt_llinfo_purge != NULL) {
2895 rt->rt_llinfo_purge(rt);
2896 }
2897
2898 /*
2899 * Adjust route ref count for the
2900 * interfaces.
2901 */
2902 if (rt->rt_if_ref_fn != NULL) {
2903 rt->rt_if_ref_fn(lo_ifp, 1);
2904 rt->rt_if_ref_fn(rt->rt_ifp,
2905 -1);
2906 }
2907 }
2908 rt->rt_ifp = lo_ifp;
2909 /*
2910 * If rmx_mtu is not locked, update it
2911 * to the MTU used by the new interface.
2912 */
2913 if (!(rt->rt_rmx.rmx_locks & RTV_MTU)) {
2914 rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
2915 }
2916 /*
2917			 * Make sure rt_ifa is equal to the ifaddr
2918			 * corresponding to the address.
2919			 * We need this because when we refer to
2920			 * rt_ifa->ia6_flags in ip6_input, we assume
2921 * that the rt_ifa points to the address instead
2922 * of the loopback address.
2923 */
2924 if (ifa != rt->rt_ifa) {
2925 rtsetifa(rt, ifa);
2926 }
2927 }
2928 IFA_REMREF(ifa);
2929 } else if (rt->rt_flags & RTF_ANNOUNCE) {
2930 ln_setexpire(ln, 0);
2931 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_REACHABLE);
2932
2933 /* join solicited node multicast for proxy ND */
2934 if (ifp->if_flags & IFF_MULTICAST) {
2935 struct in6_addr llsol;
2936 struct in6_multi *in6m;
2937 int error;
2938
2939 llsol = SIN6(rt_key(rt))->sin6_addr;
2940 llsol.s6_addr32[0] = IPV6_ADDR_INT32_MLL;
2941 llsol.s6_addr32[1] = 0;
2942 llsol.s6_addr32[2] = htonl(1);
2943 llsol.s6_addr8[12] = 0xff;
2944 if (in6_setscope(&llsol, ifp, NULL)) {
2945 break;
2946 }
2947 error = in6_mc_join(ifp, &llsol,
2948 NULL, &in6m, 0);
2949 if (error) {
2950 nd6log(error, "%s: failed to join "
2951 "%s (errno=%d)\n", if_name(ifp),
2952 ip6_sprintf(&llsol), error);
2953 } else {
2954 IN6M_REMREF(in6m);
2955 }
2956 }
2957 }
2958 break;
2959
2960 case RTM_DELETE:
2961 if (ln == NULL) {
2962 break;
2963 }
2964		/* leave the solicited node multicast group for proxy ND */
2965 if ((rt->rt_flags & RTF_ANNOUNCE) &&
2966 (ifp->if_flags & IFF_MULTICAST)) {
2967 struct in6_addr llsol;
2968 struct in6_multi *in6m;
2969
2970 llsol = SIN6(rt_key(rt))->sin6_addr;
2971 llsol.s6_addr32[0] = IPV6_ADDR_INT32_MLL;
2972 llsol.s6_addr32[1] = 0;
2973 llsol.s6_addr32[2] = htonl(1);
2974 llsol.s6_addr8[12] = 0xff;
2975 if (in6_setscope(&llsol, ifp, NULL) == 0) {
2976 in6_multihead_lock_shared();
2977 IN6_LOOKUP_MULTI(&llsol, ifp, in6m);
2978 in6_multihead_lock_done();
2979 if (in6m != NULL) {
2980 in6_mc_leave(in6m, NULL);
2981 IN6M_REMREF(in6m);
2982 }
2983 }
2984 }
2985 nd6_inuse--;
2986 /*
2987 * Unchain it but defer the actual freeing until the route
2988 * itself is to be freed. rt->rt_llinfo still points to
2989		 * llinfo_nd6, and likewise, ln->ln_rt still points to this
2990 * route entry, except that RTF_LLINFO is now cleared.
2991 */
2992 if (ln->ln_flags & ND6_LNF_IN_USE) {
2993 LN_DEQUEUE(ln);
2994 }
2995
2996 /*
2997 * Purge any link-layer info caching.
2998 */
2999 if (rt->rt_llinfo_purge != NULL) {
3000 rt->rt_llinfo_purge(rt);
3001 }
3002
3003 rt->rt_flags &= ~RTF_LLINFO;
3004 if (ln->ln_hold != NULL) {
3005 m_freem_list(ln->ln_hold);
3006 ln->ln_hold = NULL;
3007 }
3008 }
3009}
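/*
 * Invocation sketch (pieced together from the comments above, nothing
 * new): nd6_rtrequest() is not called directly; it runs as the
 * ifa->ifa_rtrequest hook whenever a route on an ND-capable interface
 * is added, resolved or deleted, e.g. via the RTM_ADD issued from
 * nd6_lookup():
 *
 *	rtrequest_scoped_locked(RTM_ADD, SA(&sin6), ifa->ifa_addr,
 *	    SA(&all1_sa), (ifa_flags | RTF_HOST | RTF_LLINFO) &
 *	    ~RTF_CLONING, &rt, ifscope);
 *
 * which lands here with req == RTM_ADD and allocates the llinfo_nd6
 * for the new neighbor cache entry.
 */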
3010
3011static int
3012nd6_siocgdrlst(void *data, int data_is_64)
3013{
3014 struct in6_drlist_32 *drl_32;
3015 struct nd_defrouter *dr;
3016 int i = 0;
3017
3018 LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED);
3019
3020 dr = TAILQ_FIRST(&nd_defrouter_list);
3021
3022 /* XXX Handle mapped defrouter entries */
3023 /* For 64-bit process */
3024 if (data_is_64) {
3025 struct in6_drlist_64 *drl_64;
3026
3027 drl_64 = _MALLOC(sizeof(*drl_64), M_TEMP, M_WAITOK | M_ZERO);
3028 if (drl_64 == NULL) {
3029 return ENOMEM;
3030 }
3031
3032 /* preserve the interface name */
3033 bcopy(data, drl_64, sizeof(drl_64->ifname));
3034
3035 while (dr && i < DRLSTSIZ) {
3036 drl_64->defrouter[i].rtaddr = dr->rtaddr;
3037 if (IN6_IS_ADDR_LINKLOCAL(
3038 &drl_64->defrouter[i].rtaddr)) {
3039				/* XXX: we need this hack for the KAME stack */
3040 drl_64->defrouter[i].rtaddr.s6_addr16[1] = 0;
3041 } else {
3042 log(LOG_ERR,
3043 "default router list contains a "
3044 "non-linklocal address(%s)\n",
3045 ip6_sprintf(&drl_64->defrouter[i].rtaddr));
3046 }
3047 drl_64->defrouter[i].flags = dr->flags;
3048 drl_64->defrouter[i].rtlifetime = (u_short)dr->rtlifetime;
3049 drl_64->defrouter[i].expire = (u_long)nddr_getexpire(dr);
3050 drl_64->defrouter[i].if_index = dr->ifp->if_index;
3051 i++;
3052 dr = TAILQ_NEXT(dr, dr_entry);
3053 }
3054 bcopy(drl_64, data, sizeof(*drl_64));
3055 _FREE(drl_64, M_TEMP);
3056 return 0;
3057 }
3058
3059 /* For 32-bit process */
3060 drl_32 = _MALLOC(sizeof(*drl_32), M_TEMP, M_WAITOK | M_ZERO);
3061 if (drl_32 == NULL) {
3062 return ENOMEM;
3063 }
3064
3065 /* preserve the interface name */
3066 bcopy(data, drl_32, sizeof(drl_32->ifname));
3067
3068 while (dr != NULL && i < DRLSTSIZ) {
3069 drl_32->defrouter[i].rtaddr = dr->rtaddr;
3070 if (IN6_IS_ADDR_LINKLOCAL(&drl_32->defrouter[i].rtaddr)) {
3071			/* XXX: we need this hack for the KAME stack */
3072 drl_32->defrouter[i].rtaddr.s6_addr16[1] = 0;
3073 } else {
3074 log(LOG_ERR,
3075 "default router list contains a "
3076 "non-linklocal address(%s)\n",
3077 ip6_sprintf(&drl_32->defrouter[i].rtaddr));
3078 }
3079 drl_32->defrouter[i].flags = dr->flags;
3080 drl_32->defrouter[i].rtlifetime = (u_short)dr->rtlifetime;
3081 drl_32->defrouter[i].expire = (u_int32_t)nddr_getexpire(dr);
3082 drl_32->defrouter[i].if_index = dr->ifp->if_index;
3083 i++;
3084 dr = TAILQ_NEXT(dr, dr_entry);
3085 }
3086 bcopy(drl_32, data, sizeof(*drl_32));
3087 _FREE(drl_32, M_TEMP);
3088 return 0;
3089}
3090
3091/*
3092 * XXX meaning of fields, especially "raflags", is very
3093 * different between the RA prefix list and the RR/static prefix list.
3094 * How about separating the ioctls into two?
3095 */
3096static int
3097nd6_siocgprlst(void *data, int data_is_64)
3098{
3099 struct in6_prlist_32 *prl_32;
3100 struct nd_prefix *pr;
3101 int i = 0;
3102
3103 LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED);
3104
3105 pr = nd_prefix.lh_first;
3106
3107	/* XXX Handle mapped prefix entries */
3108 /* For 64-bit process */
3109 if (data_is_64) {
3110 struct in6_prlist_64 *prl_64;
3111
3112 prl_64 = _MALLOC(sizeof(*prl_64), M_TEMP, M_WAITOK | M_ZERO);
3113 if (prl_64 == NULL) {
3114 return ENOMEM;
3115 }
3116
3117 /* preserve the interface name */
3118 bcopy(data, prl_64, sizeof(prl_64->ifname));
3119
3120 while (pr && i < PRLSTSIZ) {
3121 struct nd_pfxrouter *pfr;
3122 int j;
3123
3124 NDPR_LOCK(pr);
3125 (void) in6_embedscope(&prl_64->prefix[i].prefix,
3126 &pr->ndpr_prefix, NULL, NULL, NULL);
3127 prl_64->prefix[i].raflags = pr->ndpr_raf;
3128 prl_64->prefix[i].prefixlen = pr->ndpr_plen;
3129 prl_64->prefix[i].vltime = pr->ndpr_vltime;
3130 prl_64->prefix[i].pltime = pr->ndpr_pltime;
3131 prl_64->prefix[i].if_index = pr->ndpr_ifp->if_index;
3132 prl_64->prefix[i].expire = (u_long)ndpr_getexpire(pr);
3133
3134 pfr = pr->ndpr_advrtrs.lh_first;
3135 j = 0;
3136 while (pfr) {
3137 if (j < DRLSTSIZ) {
3138#define RTRADDR prl_64->prefix[i].advrtr[j]
3139 RTRADDR = pfr->router->rtaddr;
3140 if (IN6_IS_ADDR_LINKLOCAL(&RTRADDR)) {
3141 /* XXX: hack for KAME */
3142 RTRADDR.s6_addr16[1] = 0;
3143 } else {
3144 log(LOG_ERR,
3145 "a router(%s) advertises "
3146 "a prefix with "
3147 "non-link local address\n",
3148 ip6_sprintf(&RTRADDR));
3149 }
3150#undef RTRADDR
3151 }
3152 j++;
3153 pfr = pfr->pfr_next;
3154 }
3155 ASSERT(j <= USHRT_MAX);
3156 prl_64->prefix[i].advrtrs = (u_short)j;
3157 prl_64->prefix[i].origin = PR_ORIG_RA;
3158 NDPR_UNLOCK(pr);
3159
3160 i++;
3161 pr = pr->ndpr_next;
3162 }
3163 bcopy(prl_64, data, sizeof(*prl_64));
3164 _FREE(prl_64, M_TEMP);
3165 return 0;
3166 }
3167
3168 /* For 32-bit process */
3169 prl_32 = _MALLOC(sizeof(*prl_32), M_TEMP, M_WAITOK | M_ZERO);
3170 if (prl_32 == NULL) {
3171 return ENOMEM;
3172 }
3173
3174 /* preserve the interface name */
3175 bcopy(data, prl_32, sizeof(prl_32->ifname));
3176
3177 while (pr && i < PRLSTSIZ) {
3178 struct nd_pfxrouter *pfr;
3179 int j;
3180
3181 NDPR_LOCK(pr);
3182 (void) in6_embedscope(&prl_32->prefix[i].prefix,
3183 &pr->ndpr_prefix, NULL, NULL, NULL);
3184 prl_32->prefix[i].raflags = pr->ndpr_raf;
3185 prl_32->prefix[i].prefixlen = pr->ndpr_plen;
3186 prl_32->prefix[i].vltime = pr->ndpr_vltime;
3187 prl_32->prefix[i].pltime = pr->ndpr_pltime;
3188 prl_32->prefix[i].if_index = pr->ndpr_ifp->if_index;
3189 prl_32->prefix[i].expire = (u_int32_t)ndpr_getexpire(pr);
3190
3191 pfr = pr->ndpr_advrtrs.lh_first;
3192 j = 0;
3193 while (pfr) {
3194 if (j < DRLSTSIZ) {
3195#define RTRADDR prl_32->prefix[i].advrtr[j]
3196 RTRADDR = pfr->router->rtaddr;
3197 if (IN6_IS_ADDR_LINKLOCAL(&RTRADDR)) {
3198 /* XXX: hack for KAME */
3199 RTRADDR.s6_addr16[1] = 0;
3200 } else {
3201 log(LOG_ERR,
3202 "a router(%s) advertises "
3203 "a prefix with "
3204 "non-link local address\n",
3205 ip6_sprintf(&RTRADDR));
3206 }
3207#undef RTRADDR
3208 }
3209 j++;
3210 pfr = pfr->pfr_next;
3211 }
3212 ASSERT(j <= USHRT_MAX);
3213 prl_32->prefix[i].advrtrs = (u_short)j;
3214 prl_32->prefix[i].origin = PR_ORIG_RA;
3215 NDPR_UNLOCK(pr);
3216
3217 i++;
3218 pr = pr->ndpr_next;
3219 }
3220 bcopy(prl_32, data, sizeof(*prl_32));
3221 _FREE(prl_32, M_TEMP);
3222 return 0;
3223}
3224
3225int
3226nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp)
3227{
3228 struct nd_defrouter *dr;
3229 struct nd_prefix *pr;
3230 struct rtentry *rt;
3231 int error = 0;
3232
3233 VERIFY(ifp != NULL);
3234
3235 switch (cmd) {
3236 case SIOCGDRLST_IN6_32: /* struct in6_drlist_32 */
3237 case SIOCGDRLST_IN6_64: /* struct in6_drlist_64 */
3238 /*
3239 * obsolete API, use sysctl under net.inet6.icmp6
3240 */
3241 lck_mtx_lock(nd6_mutex);
3242 error = nd6_siocgdrlst(data, cmd == SIOCGDRLST_IN6_64);
3243 lck_mtx_unlock(nd6_mutex);
3244 break;
3245
3246 case SIOCGPRLST_IN6_32: /* struct in6_prlist_32 */
3247 case SIOCGPRLST_IN6_64: /* struct in6_prlist_64 */
3248 /*
3249 * obsolete API, use sysctl under net.inet6.icmp6
3250 */
3251 lck_mtx_lock(nd6_mutex);
3252 error = nd6_siocgprlst(data, cmd == SIOCGPRLST_IN6_64);
3253 lck_mtx_unlock(nd6_mutex);
3254 break;
3255
3256 case OSIOCGIFINFO_IN6: /* struct in6_ondireq */
3257 case SIOCGIFINFO_IN6: { /* struct in6_ondireq */
3258 u_int32_t linkmtu;
3259 struct in6_ondireq *ondi = (struct in6_ondireq *)(void *)data;
3260 struct nd_ifinfo *ndi;
3261 /*
3262 * SIOCGIFINFO_IN6 ioctl is encoded with in6_ondireq
3263 * instead of in6_ndireq, so we treat it as such.
3264 */
3265 ndi = ND_IFINFO(ifp);
3266 if ((NULL == ndi) || (FALSE == ndi->initialized)) {
3267 error = EINVAL;
3268 break;
3269 }
3270 lck_mtx_lock(&ndi->lock);
3271 linkmtu = IN6_LINKMTU(ifp);
3272 bcopy(&linkmtu, &ondi->ndi.linkmtu, sizeof(linkmtu));
3273 bcopy(&ndi->maxmtu, &ondi->ndi.maxmtu,
3274 sizeof(u_int32_t));
3275 bcopy(&ndi->basereachable, &ondi->ndi.basereachable,
3276 sizeof(u_int32_t));
3277 bcopy(&ndi->reachable, &ondi->ndi.reachable,
3278 sizeof(u_int32_t));
3279 bcopy(&ndi->retrans, &ondi->ndi.retrans,
3280 sizeof(u_int32_t));
3281 bcopy(&ndi->flags, &ondi->ndi.flags,
3282 sizeof(u_int32_t));
3283 bcopy(&ndi->recalctm, &ondi->ndi.recalctm,
3284 sizeof(int));
3285 ondi->ndi.chlim = ndi->chlim;
3286 /*
3287 * The below truncation is fine as we mostly use it for
3288		 * debugging purposes.
3289 */
3290 ondi->ndi.receivedra = (uint8_t)ndi->ndefrouters;
3291 ondi->ndi.collision_count = (uint8_t)ndi->cga_collision_count;
3292 lck_mtx_unlock(&ndi->lock);
3293 break;
3294 }
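	/*
	 * Userland sketch (hypothetical caller; how an ndp(8)-style tool
	 * would be expected to issue this ioctl, with the in6_ondireq
	 * layout assumed from <netinet6/in6_var.h>):
	 *
	 *	struct in6_ondireq ondi;
	 *	memset(&ondi, 0, sizeof(ondi));
	 *	strlcpy(ondi.ifname, "en0", sizeof(ondi.ifname));
	 *	int s = socket(AF_INET6, SOCK_DGRAM, 0);
	 *	if (ioctl(s, SIOCGIFINFO_IN6, &ondi) == 0)
	 *		printf("linkmtu %u chlim %u\n",
	 *		    ondi.ndi.linkmtu, ondi.ndi.chlim);
	 */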
3295
3296 case SIOCSIFINFO_FLAGS: { /* struct in6_ndireq */
3297 /*
3298 * XXX BSD has a bunch of checks here to ensure
3299		 * that the interface-disabled flag is not reset if
3300		 * the link-local address has failed DAD.
3301 * Investigate that part.
3302 */
3303 struct in6_ndireq *cndi = (struct in6_ndireq *)(void *)data;
3304 u_int32_t oflags, flags;
3305 struct nd_ifinfo *ndi = ND_IFINFO(ifp);
3306
3307		/* XXX: almost all other fields of cndi->ndi are unused */
3308 if ((NULL == ndi) || !ndi->initialized) {
3309 error = EINVAL;
3310 break;
3311 }
3312
3313 lck_mtx_lock(&ndi->lock);
3314 oflags = ndi->flags;
3315 bcopy(&cndi->ndi.flags, &(ndi->flags), sizeof(flags));
3316 flags = ndi->flags;
3317 lck_mtx_unlock(&ndi->lock);
3318
3319 if (oflags == flags) {
3320 break;
3321 }
3322
3323 error = nd6_setifinfo(ifp, oflags, flags);
3324 break;
3325 }
3326
3327 case SIOCSNDFLUSH_IN6: /* struct in6_ifreq */
3328 /* flush default router list */
3329 /*
3330		 * xxx sumikawa: should not delete the route if the default
3331		 * route equals the top of the default router list
3332		 *
3333		 * XXX TODO: Needs to be done for RTI as well.
3334		 * This is a very specific flush command used by ndp for default routers.
3335 */
3336 lck_mtx_lock(nd6_mutex);
3337 defrouter_reset();
3338 defrouter_select(ifp, NULL);
3339 lck_mtx_unlock(nd6_mutex);
3340 /* xxx sumikawa: flush prefix list */
3341 break;
3342
3343 case SIOCSPFXFLUSH_IN6: { /* struct in6_ifreq */
3344		/* flush all the prefixes advertised by routers */
3345 struct nd_prefix *next = NULL;
3346
3347 lck_mtx_lock(nd6_mutex);
3348 for (pr = nd_prefix.lh_first; pr; pr = next) {
3349 struct in6_ifaddr *ia = NULL;
3350 bool iterate_pfxlist_again = false;
3351
3352 next = pr->ndpr_next;
3353
3354 NDPR_LOCK(pr);
3355 if (IN6_IS_ADDR_LINKLOCAL(&pr->ndpr_prefix.sin6_addr)) {
3356 NDPR_UNLOCK(pr);
3357 continue; /* XXX */
3358 }
3359 if (ifp != lo_ifp && pr->ndpr_ifp != ifp) {
3360 NDPR_UNLOCK(pr);
3361 continue;
3362 }
3363 /* do we really have to remove addresses as well? */
3364 NDPR_ADDREF(pr);
3365 NDPR_UNLOCK(pr);
3366 lck_rw_lock_exclusive(&in6_ifaddr_rwlock);
3367			bool from_beginning = true;
3368			while (from_beginning) {
3369				from_beginning = false;
3370 TAILQ_FOREACH(ia, &in6_ifaddrhead, ia6_link) {
3371 IFA_LOCK(&ia->ia_ifa);
3372 if ((ia->ia6_flags & IN6_IFF_AUTOCONF) == 0) {
3373 IFA_UNLOCK(&ia->ia_ifa);
3374 continue;
3375 }
3376
3377 if (ia->ia6_ndpr == pr) {
3378 IFA_ADDREF_LOCKED(&ia->ia_ifa);
3379 IFA_UNLOCK(&ia->ia_ifa);
3380 lck_rw_done(&in6_ifaddr_rwlock);
3381 lck_mtx_unlock(nd6_mutex);
3382 in6_purgeaddr(&ia->ia_ifa);
3383 IFA_REMREF(&ia->ia_ifa);
3384 lck_mtx_lock(nd6_mutex);
3385 lck_rw_lock_exclusive(
3386 &in6_ifaddr_rwlock);
3387					 * Purging the address caused
3388					 * in6_ifaddr_rwlock to be dropped
3389					 * and reacquired; therefore search
3390					 * again from the beginning of
3391					 * in6_ifaddrs. The same applies
3392					 * to the prefix list.
3393 * The same applies for the prefix list.
3394 */
3395 iterate_pfxlist_again = true;
3396					from_beginning = true;
3397 break;
3398 }
3399 IFA_UNLOCK(&ia->ia_ifa);
3400 }
3401 }
3402 lck_rw_done(&in6_ifaddr_rwlock);
3403 NDPR_LOCK(pr);
3404 prelist_remove(pr);
3405 NDPR_UNLOCK(pr);
3406 pfxlist_onlink_check();
3407 NDPR_REMREF(pr);
3408 if (iterate_pfxlist_again) {
3409 next = nd_prefix.lh_first;
3410 }
3411 }
3412 lck_mtx_unlock(nd6_mutex);
3413 break;
3414 }
3415
3416 case SIOCSRTRFLUSH_IN6: { /* struct in6_ifreq */
3417 /* flush all the default routers */
3418 struct nd_defrouter *next;
3419 struct nd_drhead nd_defrouter_tmp;
3420
3421 TAILQ_INIT(&nd_defrouter_tmp);
3422 lck_mtx_lock(nd6_mutex);
3423 if ((dr = TAILQ_FIRST(&nd_defrouter_list)) != NULL) {
3424 /*
3425 * The first entry of the list may be stored in
3426 * the routing table, so we'll delete it later.
3427 */
3428 for (dr = TAILQ_NEXT(dr, dr_entry); dr; dr = next) {
3429 next = TAILQ_NEXT(dr, dr_entry);
3430 if (ifp == lo_ifp || dr->ifp == ifp) {
3431 /*
3432 * Remove the entry from default router list
3433 * and add it to the temp list.
3434 * nd_defrouter_tmp will be a local temporary
3435 * list as no one else can get the same
3436 * removed entry once it is removed from default
3437 * router list.
3438					 * Remove the reference after calling defrtrlist_del.
3439 */
3440 TAILQ_REMOVE(&nd_defrouter_list, dr, dr_entry);
3441 TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry);
3442 }
3443 }
3444
3445 dr = TAILQ_FIRST(&nd_defrouter_list);
3446 if (ifp == lo_ifp ||
3447 dr->ifp == ifp) {
3448 TAILQ_REMOVE(&nd_defrouter_list, dr, dr_entry);
3449 TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry);
3450 }
3451 }
3452
3453 /*
3454 * Keep the following separate from the above iteration of
3455 * nd_defrouter because it's not safe to call
3456		 * defrtrlist_del while iterating the global default
3457		 * router list. The global list has to be traversed
3458 * while holding nd6_mutex throughout.
3459 *
3460 * The following call to defrtrlist_del should be
3461 * safe as we are iterating a local list of
3462 * default routers.
3463 */
3464 TAILQ_FOREACH_SAFE(dr, &nd_defrouter_tmp, dr_entry, next) {
3465 TAILQ_REMOVE(&nd_defrouter_tmp, dr, dr_entry);
3466 defrtrlist_del(dr, NULL);
3467 NDDR_REMREF(dr); /* remove list reference */
3468 }
3469
3470 /* For now flush RTI routes here as well to avoid any regressions */
3471 nd6_purge_interface_rti_entries((ifp == lo_ifp) ? NULL : ifp);
3472
3473 lck_mtx_unlock(nd6_mutex);
3474 break;
3475 }
3476
3477 case SIOCGNBRINFO_IN6_32: { /* struct in6_nbrinfo_32 */
3478 struct llinfo_nd6 *ln;
3479 struct in6_nbrinfo_32 nbi_32;
3480 struct in6_addr nb_addr; /* make local for safety */
3481
3482 bcopy(data, &nbi_32, sizeof(nbi_32));
3483 nb_addr = nbi_32.addr;
3484 /*
3485 * XXX: KAME specific hack for scoped addresses
3486 * XXXX: for other scopes than link-local?
3487 */
3488 if (IN6_IS_ADDR_LINKLOCAL(&nbi_32.addr) ||
3489 IN6_IS_ADDR_MC_LINKLOCAL(&nbi_32.addr)) {
3490 u_int16_t *idp =
3491 (u_int16_t *)(void *)&nb_addr.s6_addr[2];
3492
3493 if (*idp == 0) {
3494 *idp = htons(ifp->if_index);
3495 }
3496 }
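		/*
		 * Worked example of the embedding above (a sketch):
		 * querying fe80::1 on an interface with if_index 4
		 * rewrites bytes 2-3 of the local copy to htons(4),
		 * yielding the kernel-internal form fe80:4::1 that
		 * nd6_lookup() expects for scoped addresses.
		 */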
3497
3498 /* Callee returns a locked route upon success */
3499 if ((rt = nd6_lookup(&nb_addr, 0, ifp, 0)) == NULL) {
3500 error = EINVAL;
3501 break;
3502 }
3503 RT_LOCK_ASSERT_HELD(rt);
3504 ln = rt->rt_llinfo;
3505 nbi_32.state = ln->ln_state;
3506 nbi_32.asked = ln->ln_asked;
3507 nbi_32.isrouter = ln->ln_router;
3508 nbi_32.expire = (int)ln_getexpire(ln);
3509 RT_REMREF_LOCKED(rt);
3510 RT_UNLOCK(rt);
3511 bcopy(&nbi_32, data, sizeof(nbi_32));
3512 break;
3513 }
3514
3515 case SIOCGNBRINFO_IN6_64: { /* struct in6_nbrinfo_64 */
3516 struct llinfo_nd6 *ln;
3517 struct in6_nbrinfo_64 nbi_64;
3518 struct in6_addr nb_addr; /* make local for safety */
3519
3520 bcopy(data, &nbi_64, sizeof(nbi_64));
3521 nb_addr = nbi_64.addr;
3522 /*
3523 * XXX: KAME specific hack for scoped addresses
3524 * XXXX: for other scopes than link-local?
3525 */
3526 if (IN6_IS_ADDR_LINKLOCAL(&nbi_64.addr) ||
3527 IN6_IS_ADDR_MC_LINKLOCAL(&nbi_64.addr)) {
3528 u_int16_t *idp =
3529 (u_int16_t *)(void *)&nb_addr.s6_addr[2];
3530
3531 if (*idp == 0) {
3532 *idp = htons(ifp->if_index);
3533 }
3534 }
3535
3536 /* Callee returns a locked route upon success */
3537 if ((rt = nd6_lookup(&nb_addr, 0, ifp, 0)) == NULL) {
3538 error = EINVAL;
3539 break;
3540 }
3541 RT_LOCK_ASSERT_HELD(rt);
3542 ln = rt->rt_llinfo;
3543 nbi_64.state = ln->ln_state;
3544 nbi_64.asked = ln->ln_asked;
3545 nbi_64.isrouter = ln->ln_router;
3546 nbi_64.expire = (int)ln_getexpire(ln);
3547 RT_REMREF_LOCKED(rt);
3548 RT_UNLOCK(rt);
3549 bcopy(&nbi_64, data, sizeof(nbi_64));
3550 break;
3551 }
3552
3553 case SIOCGDEFIFACE_IN6_32: /* struct in6_ndifreq_32 */
3554 case SIOCGDEFIFACE_IN6_64: { /* struct in6_ndifreq_64 */
3555 struct in6_ndifreq_64 *ndif_64 =
3556 (struct in6_ndifreq_64 *)(void *)data;
3557 struct in6_ndifreq_32 *ndif_32 =
3558 (struct in6_ndifreq_32 *)(void *)data;
3559
3560 if (cmd == SIOCGDEFIFACE_IN6_64) {
3561 u_int64_t j = nd6_defifindex;
3562 __nochk_bcopy(&j, &ndif_64->ifindex, sizeof(j));
3563 } else {
3564 bcopy(&nd6_defifindex, &ndif_32->ifindex,
3565 sizeof(u_int32_t));
3566 }
3567 break;
3568 }
3569
3570 case SIOCSDEFIFACE_IN6_32: /* struct in6_ndifreq_32 */
3571 case SIOCSDEFIFACE_IN6_64: { /* struct in6_ndifreq_64 */
3572 struct in6_ndifreq_64 *ndif_64 =
3573 (struct in6_ndifreq_64 *)(void *)data;
3574 struct in6_ndifreq_32 *ndif_32 =
3575 (struct in6_ndifreq_32 *)(void *)data;
3576 u_int32_t idx;
3577
3578 if (cmd == SIOCSDEFIFACE_IN6_64) {
3579 u_int64_t j;
3580 __nochk_bcopy(&ndif_64->ifindex, &j, sizeof(j));
3581 idx = (u_int32_t)j;
3582 } else {
3583 bcopy(&ndif_32->ifindex, &idx, sizeof(idx));
3584 }
3585
3586 error = nd6_setdefaultiface(idx);
3587 return error;
3588 /* NOTREACHED */
3589 }
3590 case SIOCGIFCGAPREP_IN6_32:
3591 case SIOCGIFCGAPREP_IN6_64: {
3592 /* get CGA parameters */
3593 union {
3594 struct in6_cgareq_32 *cga32;
3595 struct in6_cgareq_64 *cga64;
3596 void *data;
3597 } cgareq_u;
3598 struct nd_ifinfo *ndi;
3599 struct in6_cga_modifier *ndi_cga_mod;
3600 struct in6_cga_modifier *req_cga_mod;
3601
3602 ndi = ND_IFINFO(ifp);
3603 if ((NULL == ndi) || !ndi->initialized) {
3604 error = EINVAL;
3605 break;
3606 }
3607 cgareq_u.data = data;
3608 req_cga_mod = (cmd == SIOCGIFCGAPREP_IN6_64)
3609 ? &(cgareq_u.cga64->cgar_cgaprep.cga_modifier)
3610 : &(cgareq_u.cga32->cgar_cgaprep.cga_modifier);
3611 lck_mtx_lock(&ndi->lock);
3612 ndi_cga_mod = &(ndi->local_cga_modifier);
3613 bcopy(ndi_cga_mod, req_cga_mod, sizeof(*req_cga_mod));
3614 lck_mtx_unlock(&ndi->lock);
3615 break;
3616 }
3617 case SIOCSIFCGAPREP_IN6_32:
3618 case SIOCSIFCGAPREP_IN6_64:
3619 {
3620 /* set CGA parameters */
3621 struct in6_cgareq cgareq;
3622 int is64;
3623 struct nd_ifinfo *ndi;
3624 struct in6_cga_modifier *ndi_cga_mod;
3625 struct in6_cga_modifier *req_cga_mod;
3626
3627 ndi = ND_IFINFO(ifp);
3628 if ((NULL == ndi) || !ndi->initialized) {
3629 error = EINVAL;
3630 break;
3631 }
3632 is64 = (cmd == SIOCSIFCGAPREP_IN6_64);
3633 in6_cgareq_copy_from_user(data, is64, &cgareq);
3634 req_cga_mod = &cgareq.cgar_cgaprep.cga_modifier;
3635 lck_mtx_lock(&ndi->lock);
3636 ndi_cga_mod = &(ndi->local_cga_modifier);
3637 bcopy(req_cga_mod, ndi_cga_mod, sizeof(*ndi_cga_mod));
3638 ndi->cga_initialized = TRUE;
3639 ndi->cga_collision_count = 0;
3640 lck_mtx_unlock(&ndi->lock);
3641 break;
3642 }
3643 default:
3644 break;
3645 }
3646 return error;
3647}
3648
3649/*
3650 * Create neighbor cache entry and cache link-layer address,
3651 * on reception of inbound ND6 packets. (RS/RA/NS/redirect)
3652 */
3653void
3654nd6_cache_lladdr(struct ifnet *ifp, struct in6_addr *from, char *lladdr,
3655 int lladdrlen, int type, int code)
3656{
3657#pragma unused(lladdrlen)
3658 struct rtentry *rt = NULL;
3659 struct llinfo_nd6 *ln = NULL;
3660 int is_newentry;
3661 struct sockaddr_dl *sdl = NULL;
3662 int do_update;
3663 int olladdr;
3664 int llchange;
3665 short newstate = 0;
3666 uint64_t timenow;
3667 boolean_t sched_timeout = FALSE;
3668 struct nd_ifinfo *ndi = NULL;
3669
3670 if (ifp == NULL) {
3671 panic("ifp == NULL in nd6_cache_lladdr");
3672 }
3673 if (from == NULL) {
3674 panic("from == NULL in nd6_cache_lladdr");
3675 }
3676
3677 /* nothing must be updated for unspecified address */
3678 if (IN6_IS_ADDR_UNSPECIFIED(from)) {
3679 return;
3680 }
3681
3682 /*
3683	 * Validation of ifp->if_addrlen and lladdrlen must be done by
3684	 * the caller.
3685 */
3686 timenow = net_uptime();
3687
3688 rt = nd6_lookup(from, 0, ifp, 0);
3689 if (rt == NULL) {
3690 if ((rt = nd6_lookup(from, 1, ifp, 0)) == NULL) {
3691 return;
3692 }
3693 RT_LOCK_ASSERT_HELD(rt);
3694 is_newentry = 1;
3695 } else {
3696 RT_LOCK_ASSERT_HELD(rt);
3697 /* do nothing if static ndp is set */
3698 if (rt->rt_flags & RTF_STATIC) {
3699 RT_REMREF_LOCKED(rt);
3700 RT_UNLOCK(rt);
3701 return;
3702 }
3703 is_newentry = 0;
3704 }
3705
3706 if ((rt->rt_flags & (RTF_GATEWAY | RTF_LLINFO)) != RTF_LLINFO) {
3707fail:
3708 RT_UNLOCK(rt);
3709 nd6_free(rt);
3710 rtfree(rt);
3711 return;
3712 }
3713 ln = (struct llinfo_nd6 *)rt->rt_llinfo;
3714 if (ln == NULL) {
3715 goto fail;
3716 }
3717 if (rt->rt_gateway == NULL) {
3718 goto fail;
3719 }
3720 if (rt->rt_gateway->sa_family != AF_LINK) {
3721 goto fail;
3722 }
3723 sdl = SDL(rt->rt_gateway);
3724
3725 olladdr = (sdl->sdl_alen) ? 1 : 0;
3726 if (olladdr && lladdr) {
3727 if (bcmp(lladdr, LLADDR(sdl), ifp->if_addrlen)) {
3728 llchange = 1;
3729 } else {
3730 llchange = 0;
3731 }
3732 } else {
3733 llchange = 0;
3734 }
3735
3736 /*
3737 * newentry olladdr lladdr llchange (*=record)
3738 * 0 n n -- (1)
3739 * 0 y n -- (2)
3740 * 0 n y -- (3) * STALE
3741 * 0 y y n (4) *
3742 * 0 y y y (5) * STALE
3743 * 1 -- n -- (6) NOSTATE(= PASSIVE)
3744 * 1 -- y -- (7) * STALE
3745 */
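	/*
	 * Example readings of the table (informative only): an NA from a
	 * known neighbor carrying the same lladdr we already cached is
	 * case (4): the address is recorded again but no state change is
	 * forced; the same NA with a different lladdr is case (5) and
	 * demotes the entry to STALE.
	 */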
3746
3747 if (lladdr != NULL) { /* (3-5) and (7) */
3748 /*
3749 * Record source link-layer address
3750		 * XXX is it dependent on ifp->if_type?
3751 */
3752 sdl->sdl_alen = ifp->if_addrlen;
3753 bcopy(lladdr, LLADDR(sdl), ifp->if_addrlen);
3754
3755 /* cache the gateway (sender HW) address */
3756 nd6_llreach_alloc(rt, ifp, LLADDR(sdl), sdl->sdl_alen, FALSE);
3757 }
3758
3759 if (is_newentry == 0) {
3760 if ((!olladdr && lladdr != NULL) || /* (3) */
3761 (olladdr && lladdr != NULL && llchange)) { /* (5) */
3762 do_update = 1;
3763 newstate = ND6_LLINFO_STALE;
3764 } else { /* (1-2,4) */
3765 do_update = 0;
3766 }
3767 } else {
3768 do_update = 1;
3769 if (lladdr == NULL) { /* (6) */
3770 newstate = ND6_LLINFO_NOSTATE;
3771 } else { /* (7) */
3772 newstate = ND6_LLINFO_STALE;
3773 }
3774 }
3775
3776 /*
3777	 * For interfaces that do not perform NUD,
3778	 * neighbor cache entries must always be marked
3779	 * reachable, with no expiry.
3780 */
3781 ndi = ND_IFINFO(ifp);
3782 VERIFY((NULL != ndi) && (TRUE == ndi->initialized));
3783
3784 if (ndi && !(ndi->flags & ND6_IFF_PERFORMNUD)) {
3785 newstate = ND6_LLINFO_REACHABLE;
3786 ln_setexpire(ln, 0);
3787 }
3788
3789 if (do_update) {
3790 /*
3791 * Update the state of the neighbor cache.
3792 */
3793 ND6_CACHE_STATE_TRANSITION(ln, newstate);
3794
3795 if ((ln->ln_state == ND6_LLINFO_STALE) ||
3796 (ln->ln_state == ND6_LLINFO_REACHABLE)) {
3797 struct mbuf *m = ln->ln_hold;
3798 /*
3799 * XXX: since nd6_output() below will cause
3800			 * state transition to DELAY and reset the timer,
3801 * we must set the timer now, although it is actually
3802 * meaningless.
3803 */
3804 if (ln->ln_state == ND6_LLINFO_STALE) {
3805 ln_setexpire(ln, timenow + nd6_gctimer);
3806 }
3807
3808 ln->ln_hold = NULL;
3809 if (m != NULL) {
3810 struct sockaddr_in6 sin6;
3811
3812 rtkey_to_sa6(rt, &sin6);
3813 /*
3814				 * we assume ifp is not a p2p interface here,
3815				 * so just pass ifp as the 2nd argument too.
3816 */
3817 RT_UNLOCK(rt);
3818 nd6_output_list(ifp, ifp, m, &sin6, rt, NULL);
3819 RT_LOCK(rt);
3820 }
3821 } else if (ln->ln_state == ND6_LLINFO_INCOMPLETE) {
3822 /* probe right away */
3823 ln_setexpire(ln, timenow);
3824 sched_timeout = TRUE;
3825 }
3826 }
3827
3828 /*
3829 * ICMP6 type dependent behavior.
3830 *
3831 * NS: clear IsRouter if new entry
3832 * RS: clear IsRouter
3833 * RA: set IsRouter if there's lladdr
3834 * redir: clear IsRouter if new entry
3835 *
3836 * RA case, (1):
3837 * The spec says that we must set IsRouter in the following cases:
3838	 * - If lladdr exists, set IsRouter. This means (1-5).
3839	 * - If it is an old entry (!newentry), set IsRouter. This means (7).
3840	 * So, based on the spec, we must set IsRouter in cases (1-5) and (7).
3841	 * A question arises for case (1): it has no lladdr in the
3842	 * neighbor cache, which makes it similar to (6).
3843 * This case is rare but we figured that we MUST NOT set IsRouter.
3844 *
3845 * newentry olladdr lladdr llchange NS RS RA redir
3846 * D R
3847 * 0 n n -- (1) c ? s
3848 * 0 y n -- (2) c s s
3849 * 0 n y -- (3) c s s
3850 * 0 y y n (4) c s s
3851 * 0 y y y (5) c s s
3852 * 1 -- n -- (6) c c c s
3853 * 1 -- y -- (7) c c s c s
3854 *
3855 * (c=clear s=set)
3856 */
3857 switch (type & 0xff) {
3858 case ND_NEIGHBOR_SOLICIT:
3859 /*
3860 * New entry must have is_router flag cleared.
3861 */
3862 if (is_newentry) { /* (6-7) */
3863 ln->ln_router = 0;
3864 }
3865 break;
3866 case ND_REDIRECT:
3867 /*
3868 * If the ICMP message is a Redirect to a better router, always
3869 * set the is_router flag. Otherwise, if the entry is newly
3870 * created, then clear the flag. [RFC 4861, sec 8.3]
3871 */
3872 if (code == ND_REDIRECT_ROUTER) {
3873 ln->ln_router = 1;
3874 } else if (is_newentry) { /* (6-7) */
3875 ln->ln_router = 0;
3876 }
3877 break;
3878 case ND_ROUTER_SOLICIT:
3879 /*
3880 * is_router flag must always be cleared.
3881 */
3882 ln->ln_router = 0;
3883 break;
3884 case ND_ROUTER_ADVERT:
3885 /*
3886 * Mark an entry with lladdr as a router.
3887 */
3888 if ((!is_newentry && (olladdr || lladdr)) || /* (2-5) */
3889 (is_newentry && lladdr)) { /* (7) */
3890 ln->ln_router = 1;
3891 }
3892 break;
3893 }
3894
3895 if (do_update) {
3896 int route_ev_code = 0;
3897
3898 if (llchange) {
3899 route_ev_code = ROUTE_LLENTRY_CHANGED;
3900 } else {
3901 route_ev_code = ROUTE_LLENTRY_RESOLVED;
3902 }
3903
3904 /* Enqueue work item to invoke callback for this route entry */
3905 route_event_enqueue_nwk_wq_entry(rt, NULL, route_ev_code, NULL, TRUE);
3906
3907 if (ln->ln_router || (rt->rt_flags & RTF_ROUTER)) {
3908 struct radix_node_head *rnh = NULL;
3909 struct route_event rt_ev;
3910 route_event_init(&rt_ev, rt, NULL, llchange ? ROUTE_LLENTRY_CHANGED :
3911 ROUTE_LLENTRY_RESOLVED);
3912 /*
3913 * We already have a valid reference on rt.
3914 * The function frees that before returning.
3915 * We therefore don't need an extra reference here
3916 */
3917 RT_UNLOCK(rt);
3918 lck_mtx_lock(rnh_lock);
3919
3920 rnh = rt_tables[AF_INET6];
3921 if (rnh != NULL) {
3922 (void) rnh->rnh_walktree(rnh, route_event_walktree,
3923 (void *)&rt_ev);
3924 }
3925 lck_mtx_unlock(rnh_lock);
3926 RT_LOCK(rt);
3927 }
3928 }
3929
3930 /*
3931 * When the link-layer address of a router changes, select the
3932 * best router again. In particular, when the neighbor entry is newly
3933 * created, it might affect the selection policy.
3934 * Question: can we restrict the first condition to the "is_newentry"
3935 * case?
3936 *
3937 * Note: Perform default router selection even when we are a router,
3938 * if Scoped Routing is enabled.
3939 */
3940 if (do_update && ln->ln_router) {
3941 /*
3942 * XXX TODO: This should also be iterated over router list
3943 * for route information option's router lists as well.
3944 */
3945 RT_REMREF_LOCKED(rt);
3946 RT_UNLOCK(rt);
3947 lck_mtx_lock(nd6_mutex);
3948 defrouter_select(ifp, NULL);
3949 nd6_router_select_rti_entries(ifp);
3950 lck_mtx_unlock(nd6_mutex);
3951 } else {
3952 RT_REMREF_LOCKED(rt);
3953 RT_UNLOCK(rt);
3954 }
3955 if (sched_timeout) {
3956 lck_mtx_lock(rnh_lock);
3957 nd6_sched_timeout(NULL, NULL);
3958 lck_mtx_unlock(rnh_lock);
3959 }
3960}
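/*
 * Illustrative sketch (an assumption based on the ND input paths in
 * nd6_nbr.c, which are outside this section): after validating
 * lladdrlen against ifp->if_addrlen, an NS input routine records the
 * sender's link-layer address roughly like this.
 */
#if 0
	nd6_cache_lladdr(ifp, &saddr6, lladdr, lladdrlen,
	    ND_NEIGHBOR_SOLICIT, 0);
#endif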
3961
3962static void
3963nd6_slowtimo(void *arg)
3964{
3965#pragma unused(arg)
3966 struct nd_ifinfo *nd6if = NULL;
3967 struct ifnet *ifp = NULL;
3968
3969 ifnet_head_lock_shared();
3970 for (ifp = ifnet_head.tqh_first; ifp;
3971 ifp = ifp->if_link.tqe_next) {
3972 nd6if = ND_IFINFO(ifp);
3973 if ((NULL == nd6if) || (FALSE == nd6if->initialized)) {
3974 continue;
3975 }
3976
3977 lck_mtx_lock(&nd6if->lock);
3978 if (nd6if->basereachable && /* already initialized */
3979 (nd6if->recalctm -= ND6_SLOWTIMER_INTERVAL) <= 0) {
3980 /*
3981 * Since reachable time rarely changes by router
3982 * advertisements, we SHOULD insure that a new random
3983 * value gets recomputed at least once every few hours.
3984 * (RFC 4861, 6.3.4)
3985 */
3986 nd6if->recalctm = nd6_recalc_reachtm_interval;
3987 nd6if->reachable =
3988 ND_COMPUTE_RTIME(nd6if->basereachable);
3989 }
3990 lck_mtx_unlock(&nd6if->lock);
3991 }
3992 ifnet_head_done();
3993 timeout(nd6_slowtimo, NULL, ND6_SLOWTIMER_INTERVAL * hz);
3994}
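/*
 * Illustrative sketch (an assumption mirroring the RFC 4861 definition,
 * not the ND_COMPUTE_RTIME macro verbatim): the recomputed reachable
 * time is basereachable scaled by a uniform random factor in
 * [MIN_RANDOM_FACTOR, MAX_RANDOM_FACTOR] = [0.5, 1.5].
 */
#if 0
static u_int32_t
nd_compute_rtime_sketch(u_int32_t base)
{
	/* random factor in [1/2, 3/2), in 1/1024 steps */
	u_int32_t r = RandomULong() % 1024;

	return (base / 2) + (u_int32_t)(((uint64_t)base * r) / 1024);
}
#endif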
3995
3996int
3997nd6_output(struct ifnet *ifp, struct ifnet *origifp, struct mbuf *m0,
3998 struct sockaddr_in6 *dst, struct rtentry *hint0, struct flowadv *adv)
3999{
4000 return nd6_output_list(ifp, origifp, m0, dst, hint0, adv);
4001}
4002
4003/*
4004 * nd6_output_list()
4005 *
4006 * Assumption: route determination for first packet can be correctly applied to
4007 * all packets in the chain.
4008 */
4009#define senderr(e) { error = (e); goto bad; }
4010int
4011nd6_output_list(struct ifnet *ifp, struct ifnet *origifp, struct mbuf *m0,
4012 struct sockaddr_in6 *dst, struct rtentry *hint0, struct flowadv *adv)
4013{
4014 struct rtentry *rt = hint0, *hint = hint0;
4015 struct llinfo_nd6 *ln = NULL;
4016 int error = 0;
4017 uint64_t timenow;
4018 struct rtentry *rtrele = NULL;
4019 struct nd_ifinfo *ndi = NULL;
4020
4021 if (rt != NULL) {
4022 RT_LOCK_SPIN(rt);
4023 RT_ADDREF_LOCKED(rt);
4024 }
4025
4026 if (IN6_IS_ADDR_MULTICAST(&dst->sin6_addr) || !nd6_need_cache(ifp)) {
4027 if (rt != NULL) {
4028 RT_UNLOCK(rt);
4029 }
4030 goto sendpkt;
4031 }
4032
4033 /*
4034 * Next hop determination. Because we may involve the gateway route
4035 * in addition to the original route, locking is rather complicated.
4036 * The general concept is that regardless of whether the route points
4037 * to the original route or to the gateway route, this routine takes
4038 * an extra reference on such a route. This extra reference will be
4039 * released at the end.
4040 *
4041 * Care must be taken to ensure that the "hint0" route never gets freed
4042 * via rtfree(), since the caller may have stored it inside a struct
4043 * route with a reference held for that placeholder.
4044 *
4045 * This logic is similar to, though not exactly the same as the one
4046 * used by route_to_gwroute().
4047 */
4048 if (rt != NULL) {
4049 /*
4050 * We have a reference to "rt" by now (or below via rtalloc1),
4051 * which will either be released or freed at the end of this
4052 * routine.
4053 */
4054 RT_LOCK_ASSERT_HELD(rt);
4055 if (!(rt->rt_flags & RTF_UP)) {
4056 RT_REMREF_LOCKED(rt);
4057 RT_UNLOCK(rt);
4058 if ((hint = rt = rtalloc1_scoped(SA(dst), 1, 0,
4059 ifp->if_index)) != NULL) {
4060 RT_LOCK_SPIN(rt);
4061 if (rt->rt_ifp != ifp) {
4062 /* XXX: loop care? */
4063 RT_UNLOCK(rt);
4064 error = nd6_output_list(ifp, origifp, m0,
4065 dst, rt, adv);
4066 rtfree(rt);
4067 return error;
4068 }
4069 } else {
4070 senderr(EHOSTUNREACH);
4071 }
4072 }
4073
4074 if (rt->rt_flags & RTF_GATEWAY) {
4075 struct rtentry *gwrt;
4076 struct in6_ifaddr *ia6 = NULL;
4077 struct sockaddr_in6 gw6;
4078
4079 rtgw_to_sa6(rt, &gw6);
4080 /*
4081 * Must drop rt_lock since nd6_is_addr_neighbor()
4082 * calls nd6_lookup() and acquires rnh_lock.
4083 */
4084 RT_UNLOCK(rt);
4085
4086 /*
4087 * We skip link-layer address resolution and NUD
4088 * if the gateway is not a neighbor from ND point
4089 * of view, regardless of the value of nd_ifinfo.flags.
4090 * The second condition is a bit tricky; we skip
4091 * if the gateway is our own address, which is
4092 * sometimes used to install a route to a p2p link.
4093 */
4094 if (!nd6_is_addr_neighbor(&gw6, ifp, 0) ||
4095 (ia6 = in6ifa_ifpwithaddr(ifp, &gw6.sin6_addr))) {
4096 /*
4097 * We allow this kind of tricky route only
4098 * when the outgoing interface is p2p.
4099 * XXX: we may need a more generic rule here.
4100 */
4101 if (ia6 != NULL) {
4102 IFA_REMREF(&ia6->ia_ifa);
4103 }
4104 if ((ifp->if_flags & IFF_POINTOPOINT) == 0) {
4105 senderr(EHOSTUNREACH);
4106 }
4107 goto sendpkt;
4108 }
4109
4110 RT_LOCK_SPIN(rt);
4111 gw6 = *(SIN6(rt->rt_gateway));
4112
4113 /* If hint is now down, give up */
4114 if (!(rt->rt_flags & RTF_UP)) {
4115 RT_UNLOCK(rt);
4116 senderr(EHOSTUNREACH);
4117 }
4118
4119 /* If there's no gateway route, look it up */
4120 if ((gwrt = rt->rt_gwroute) == NULL) {
4121 RT_UNLOCK(rt);
4122 goto lookup;
4123 }
4124 /* Become a regular mutex */
4125 RT_CONVERT_LOCK(rt);
4126
4127 /*
4128 * Take gwrt's lock while holding route's lock;
4129 * this is okay since gwrt never points back
4130 * to rt, so no lock ordering issues.
4131 */
4132 RT_LOCK_SPIN(gwrt);
4133 if (!(gwrt->rt_flags & RTF_UP)) {
4134 rt->rt_gwroute = NULL;
4135 RT_UNLOCK(gwrt);
4136 RT_UNLOCK(rt);
4137 rtfree(gwrt);
4138lookup:
4139 lck_mtx_lock(rnh_lock);
4140 gwrt = rtalloc1_scoped_locked(SA(&gw6), 1, 0,
4141 ifp->if_index);
4142
4143 RT_LOCK(rt);
4144 /*
4145 * Bail out if the route is down, no route
4146 * to gateway, circular route, or if the
4147 * gateway portion of "rt" has changed.
4148 */
4149 if (!(rt->rt_flags & RTF_UP) ||
4150 gwrt == NULL || gwrt == rt ||
4151 !equal(SA(&gw6), rt->rt_gateway)) {
4152 if (gwrt == rt) {
4153 RT_REMREF_LOCKED(gwrt);
4154 gwrt = NULL;
4155 }
4156 RT_UNLOCK(rt);
4157 if (gwrt != NULL) {
4158 rtfree_locked(gwrt);
4159 }
4160 lck_mtx_unlock(rnh_lock);
4161 senderr(EHOSTUNREACH);
4162 }
4163 VERIFY(gwrt != NULL);
4164 /*
4165 * Set gateway route; callee adds ref to gwrt;
4166 * gwrt has an extra ref from rtalloc1() for
4167 * this routine.
4168 */
4169 rt_set_gwroute(rt, rt_key(rt), gwrt);
4170 RT_UNLOCK(rt);
4171 lck_mtx_unlock(rnh_lock);
4172 /* Remember to release/free "rt" at the end */
4173 rtrele = rt;
4174 rt = gwrt;
4175 } else {
4176 RT_ADDREF_LOCKED(gwrt);
4177 RT_UNLOCK(gwrt);
4178 RT_UNLOCK(rt);
4179 /* Remember to release/free "rt" at the end */
4180 rtrele = rt;
4181 rt = gwrt;
4182 }
4183 VERIFY(rt == gwrt);
4184
4185 /*
4186 * This is an opportunity to revalidate the parent
4187 * route's gwroute, in case it now points to a dead
4188 * route entry. Parent route won't go away since the
4189 * clone (hint) holds a reference to it. rt == gwrt.
4190 */
4191 RT_LOCK_SPIN(hint);
4192 if ((hint->rt_flags & (RTF_WASCLONED | RTF_UP)) ==
4193 (RTF_WASCLONED | RTF_UP)) {
4194 struct rtentry *prt = hint->rt_parent;
4195 VERIFY(prt != NULL);
4196
4197 RT_CONVERT_LOCK(hint);
4198 RT_ADDREF(prt);
4199 RT_UNLOCK(hint);
4200 rt_revalidate_gwroute(prt, rt);
4201 RT_REMREF(prt);
4202 } else {
4203 RT_UNLOCK(hint);
4204 }
4205
4206 RT_LOCK_SPIN(rt);
4207 /* rt == gwrt; if it is now down, give up */
4208 if (!(rt->rt_flags & RTF_UP)) {
4209 RT_UNLOCK(rt);
4210 rtfree(rt);
4211 rt = NULL;
4212 /* "rtrele" == original "rt" */
4213 senderr(EHOSTUNREACH);
4214 }
4215 }
4216
4217 /* Become a regular mutex */
4218 RT_CONVERT_LOCK(rt);
4219 }
4220
4221 /*
4222 * Address resolution or Neighbor Unreachability Detection
4223 * for the next hop.
4224 * At this point, the destination of the packet must be a unicast
4225	 * or an anycast address (i.e., not a multicast address).
4226 */
4227
4228 /* Look up the neighbor cache for the nexthop */
4229 if (rt && (rt->rt_flags & RTF_LLINFO) != 0) {
4230 ln = rt->rt_llinfo;
4231 } else {
4232 struct sockaddr_in6 sin6;
4233 /*
4234 * Clear out Scope ID field in case it is set.
4235 */
4236 sin6 = *dst;
4237 sin6.sin6_scope_id = 0;
4238 /*
4239 * Since nd6_is_addr_neighbor() internally calls nd6_lookup(),
4240 * the condition below is not very efficient. But we believe
4241 * it is tolerable, because this should be a rare case.
4242 * Must drop rt_lock since nd6_is_addr_neighbor() calls
4243 * nd6_lookup() and acquires rnh_lock.
4244 */
4245 if (rt != NULL) {
4246 RT_UNLOCK(rt);
4247 }
4248 if (nd6_is_addr_neighbor(&sin6, ifp, 0)) {
4249 /* "rtrele" may have been used, so clean up "rt" now */
4250 if (rt != NULL) {
4251 /* Don't free "hint0" */
4252 if (rt == hint0) {
4253 RT_REMREF(rt);
4254 } else {
4255 rtfree(rt);
4256 }
4257 }
4258 /* Callee returns a locked route upon success */
4259 rt = nd6_lookup(&dst->sin6_addr, 1, ifp, 0);
4260 if (rt != NULL) {
4261 RT_LOCK_ASSERT_HELD(rt);
4262 ln = rt->rt_llinfo;
4263 }
4264 } else if (rt != NULL) {
4265 RT_LOCK(rt);
4266 }
4267 }
4268
4269 if (!ln || !rt) {
4270 if (rt != NULL) {
4271 RT_UNLOCK(rt);
4272 }
4273 ndi = ND_IFINFO(ifp);
4274 VERIFY(ndi != NULL && ndi->initialized);
4275 lck_mtx_lock(&ndi->lock);
4276 if ((ifp->if_flags & IFF_POINTOPOINT) == 0 &&
4277 !(ndi->flags & ND6_IFF_PERFORMNUD)) {
4278 lck_mtx_unlock(&ndi->lock);
4279 log(LOG_DEBUG,
4280 "nd6_output: can't allocate llinfo for %s "
4281 "(ln=0x%llx, rt=0x%llx)\n",
4282 ip6_sprintf(&dst->sin6_addr),
4283 (uint64_t)VM_KERNEL_ADDRPERM(ln),
4284 (uint64_t)VM_KERNEL_ADDRPERM(rt));
4285 senderr(EIO); /* XXX: good error? */
4286 }
4287 lck_mtx_unlock(&ndi->lock);
4288
4289 goto sendpkt; /* send anyway */
4290 }
4291
4292 net_update_uptime();
4293 timenow = net_uptime();
4294
4295 /* We don't have to do link-layer address resolution on a p2p link. */
4296 if ((ifp->if_flags & IFF_POINTOPOINT) != 0 &&
4297 ln->ln_state < ND6_LLINFO_REACHABLE) {
4298 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_STALE);
4299 ln_setexpire(ln, timenow + nd6_gctimer);
4300 }
4301
4302 /*
4303 * The first time we send a packet to a neighbor whose entry is
4304	 * STALE, we have to change the state to DELAY and set a timer to
4305	 * expire in DELAY_FIRST_PROBE_TIME seconds, so that neighbor
4306	 * unreachability detection is performed on expiration.
4307 * (RFC 4861 7.3.3)
4308 */
4309 if (ln->ln_state == ND6_LLINFO_STALE) {
4310 ln->ln_asked = 0;
4311 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_DELAY);
4312 ln_setexpire(ln, timenow + nd6_delay);
4313 /* N.B.: we will re-arm the timer below. */
4314 _CASSERT(ND6_LLINFO_DELAY > ND6_LLINFO_INCOMPLETE);
4315 }
4316
4317 /*
4318 * If the neighbor cache entry has a state other than INCOMPLETE
4319 * (i.e. its link-layer address is already resolved), just
4320 * send the packet.
4321 */
4322 if (ln->ln_state > ND6_LLINFO_INCOMPLETE) {
4323 RT_UNLOCK(rt);
4324 /*
4325 * Move this entry to the head of the queue so that it is
4326 * less likely for this entry to be a target of forced
4327 * garbage collection (see nd6_rtrequest()). Do this only
4328 * if the entry is non-permanent (as permanent ones will
4329 * never be purged), and if the number of active entries
4330 * is at least half of the threshold.
4331 */
4332 if (ln->ln_state == ND6_LLINFO_DELAY ||
4333 (ln->ln_expire != 0 && ip6_neighborgcthresh > 0 &&
4334 nd6_inuse >= (ip6_neighborgcthresh >> 1))) {
4335 lck_mtx_lock(rnh_lock);
4336 if (ln->ln_state == ND6_LLINFO_DELAY) {
4337 nd6_sched_timeout(NULL, NULL);
4338 }
4339 if (ln->ln_expire != 0 && ip6_neighborgcthresh > 0 &&
4340 nd6_inuse >= (ip6_neighborgcthresh >> 1)) {
4341 RT_LOCK_SPIN(rt);
4342 if (ln->ln_flags & ND6_LNF_IN_USE) {
4343 LN_DEQUEUE(ln);
4344 LN_INSERTHEAD(ln);
4345 }
4346 RT_UNLOCK(rt);
4347 }
4348 lck_mtx_unlock(rnh_lock);
4349 }
4350 goto sendpkt;
4351 }
4352
4353 /*
4354 * If this is a prefix proxy route, record the inbound interface
4355 * so that it can be excluded from the list of interfaces eligible
4356 * for forwarding the proxied NS in nd6_prproxy_ns_output().
4357 */
4358 if (rt->rt_flags & RTF_PROXY) {
4359 ln->ln_exclifp = ((origifp == ifp) ? NULL : origifp);
4360 }
4361
4362 /*
4363 * There is a neighbor cache entry, but no ethernet address
4364 * response yet. Replace the held mbuf (if any) with this
4365 * latest one.
4366 *
4367 * This code conforms to the rate-limiting rule described in Section
4368 * 7.2.2 of RFC 4861, because the timer is set correctly after sending
4369 * an NS below.
4370 */
4371 if (ln->ln_state == ND6_LLINFO_NOSTATE) {
4372 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_INCOMPLETE);
4373 }
4374 if (ln->ln_hold) {
4375 m_freem_list(ln->ln_hold);
4376 }
4377 ln->ln_hold = m0;
4378 if (!ND6_LLINFO_PERMANENT(ln) && ln->ln_asked == 0) {
4379 ln->ln_asked++;
4380 ndi = ND_IFINFO(ifp);
4381 VERIFY(ndi != NULL && ndi->initialized);
4382 lck_mtx_lock(&ndi->lock);
4383 ln_setexpire(ln, timenow + ndi->retrans / 1000);
4384 lck_mtx_unlock(&ndi->lock);
4385 RT_UNLOCK(rt);
4386 /* We still have a reference on rt (for ln) */
4387 if (ip6_forwarding) {
4388 nd6_prproxy_ns_output(ifp, origifp, NULL,
4389 &dst->sin6_addr, ln);
4390 } else {
4391 nd6_ns_output(ifp, NULL, &dst->sin6_addr, ln, NULL);
4392 }
4393 lck_mtx_lock(rnh_lock);
4394 nd6_sched_timeout(NULL, NULL);
4395 lck_mtx_unlock(rnh_lock);
4396 } else {
4397 RT_UNLOCK(rt);
4398 }
4399 /*
4400 * Move this entry to the head of the queue so that it is
4401 * less likely for this entry to be a target of forced
4402 * garbage collection (see nd6_rtrequest()). Do this only
4403 * if the entry is non-permanent (as permanent ones will
4404 * never be purged), and if the number of active entries
4405 * is at least half of the threshold.
4406 */
4407 if (ln->ln_expire != 0 && ip6_neighborgcthresh > 0 &&
4408 nd6_inuse >= (ip6_neighborgcthresh >> 1)) {
4409 lck_mtx_lock(rnh_lock);
4410 RT_LOCK_SPIN(rt);
4411 if (ln->ln_flags & ND6_LNF_IN_USE) {
4412 LN_DEQUEUE(ln);
4413 LN_INSERTHEAD(ln);
4414 }
4415 /* Clean up "rt" now while we can */
4416 if (rt == hint0) {
4417 RT_REMREF_LOCKED(rt);
4418 RT_UNLOCK(rt);
4419 } else {
4420 RT_UNLOCK(rt);
4421 rtfree_locked(rt);
4422 }
4423 rt = NULL; /* "rt" has been taken care of */
4424 lck_mtx_unlock(rnh_lock);
4425 }
4426 error = 0;
4427 goto release;
4428
4429sendpkt:
4430 if (rt != NULL) {
4431 RT_LOCK_ASSERT_NOTHELD(rt);
4432 }
4433
4434 /* discard the packet if IPv6 operation is disabled on the interface */
4435 if (ifp->if_eflags & IFEF_IPV6_DISABLED) {
4436 error = ENETDOWN; /* better error? */
4437 goto bad;
4438 }
4439
4440 if (ifp->if_flags & IFF_LOOPBACK) {
4441 /* forwarding rules require the original scope_id */
4442 m0->m_pkthdr.rcvif = origifp;
4443 error = dlil_output(origifp, PF_INET6, m0, (caddr_t)rt,
4444 SA(dst), 0, adv);
4445 goto release;
4446 } else {
4447 /* Do not allow loopback address to wind up on a wire */
4448 struct ip6_hdr *ip6 = mtod(m0, struct ip6_hdr *);
4449
4450 if ((IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src) ||
4451 IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst))) {
4452 ip6stat.ip6s_badscope++;
4453 error = EADDRNOTAVAIL;
4454 goto bad;
4455 }
4456 }
4457
4458 if (rt != NULL) {
4459 RT_LOCK_SPIN(rt);
4460 /* Mark use timestamp */
4461 if (rt->rt_llinfo != NULL) {
4462 nd6_llreach_use(rt->rt_llinfo);
4463 }
4464 RT_UNLOCK(rt);
4465 }
4466
4467 struct mbuf *mcur = m0;
4468 uint32_t pktcnt = 0;
4469
4470 while (mcur) {
4471 if (hint != NULL && nstat_collect) {
4472 int scnt;
4473
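			/*
			 * For TSO chains, credit the route with the
			 * number of segments the interface will emit,
			 * not the number of mbuf packets handed down.
			 */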
4474 if ((mcur->m_pkthdr.csum_flags & CSUM_TSO_IPV6) &&
4475 (mcur->m_pkthdr.tso_segsz > 0)) {
4476 scnt = mcur->m_pkthdr.len / mcur->m_pkthdr.tso_segsz;
4477 } else {
4478 scnt = 1;
4479 }
4480
4481 nstat_route_tx(hint, scnt, mcur->m_pkthdr.len, 0);
4482 }
4483 pktcnt++;
4484
4485 mcur->m_pkthdr.rcvif = NULL;
4486 mcur = mcur->m_nextpkt;
4487 }
4488 if (pktcnt > ip6_maxchainsent) {
4489 ip6_maxchainsent = pktcnt;
4490 }
4491 error = dlil_output(ifp, PF_INET6, m0, (caddr_t)rt, SA(dst), 0, adv);
4492 goto release;
4493
4494bad:
4495 if (m0 != NULL) {
4496 m_freem_list(m0);
4497 }
4498
4499release:
4500 /* Clean up "rt" unless it's already been done */
4501 if (rt != NULL) {
4502 RT_LOCK_SPIN(rt);
4503 if (rt == hint0) {
4504 RT_REMREF_LOCKED(rt);
4505 RT_UNLOCK(rt);
4506 } else {
4507 RT_UNLOCK(rt);
4508 rtfree(rt);
4509 }
4510 }
4511 /* And now clean up "rtrele" if there is any */
4512 if (rtrele != NULL) {
4513 RT_LOCK_SPIN(rtrele);
4514 if (rtrele == hint0) {
4515 RT_REMREF_LOCKED(rtrele);
4516 RT_UNLOCK(rtrele);
4517 } else {
4518 RT_UNLOCK(rtrele);
4519 rtfree(rtrele);
4520 }
4521 }
4522 return error;
4523}
4524#undef senderr
4525
4526int
4527nd6_need_cache(struct ifnet *ifp)
4528{
4529 /*
4530	 * XXX: we currently maintain a neighbor cache only on the
4531	 * interface types listed in the switch below.
4532	 *
4533	 * RFC 2893 says:
4534	 * - unidirectional tunnels need no ND
4535 */
4536 switch (ifp->if_type) {
4537 case IFT_ARCNET:
4538 case IFT_ETHER:
4539 case IFT_FDDI:
4540 case IFT_IEEE1394:
4541 case IFT_L2VLAN:
4542 case IFT_IEEE8023ADLAG:
4543#if IFT_IEEE80211
4544 case IFT_IEEE80211:
4545#endif
4546 case IFT_GIF: /* XXX need more cases? */
4547 case IFT_PPP:
4548#if IFT_TUNNEL
4549 case IFT_TUNNEL:
4550#endif
4551 case IFT_BRIDGE:
4552 case IFT_CELLULAR:
4553 case IFT_6LOWPAN:
4554 return 1;
4555 default:
4556 return 0;
4557 }
4558}
4559
4560int
4561nd6_storelladdr(struct ifnet *ifp, struct rtentry *rt, struct mbuf *m,
4562 struct sockaddr *dst, u_char *desten)
4563{
4564 int i;
4565 struct sockaddr_dl *sdl;
4566
4567 if (m->m_flags & M_MCAST) {
4568 switch (ifp->if_type) {
4569 case IFT_ETHER:
4570 case IFT_FDDI:
4571 case IFT_L2VLAN:
4572 case IFT_IEEE8023ADLAG:
4573#if IFT_IEEE80211
4574 case IFT_IEEE80211:
4575#endif
4576 case IFT_BRIDGE:
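			/*
			 * RFC 2464, section 7: the group maps to an
			 * Ethernet address of 33:33 followed by the
			 * low-order 32 bits of the IPv6 destination.
			 */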
4577 ETHER_MAP_IPV6_MULTICAST(&SIN6(dst)->sin6_addr, desten);
4578 return 1;
4579 case IFT_IEEE1394:
4580 for (i = 0; i < ifp->if_addrlen; i++) {
4581 desten[i] = ~0;
4582 }
4583 return 1;
4584 case IFT_ARCNET:
4585 *desten = 0;
4586 return 1;
4587 default:
4588 return 0; /* caller will free mbuf */
4589 }
4590 }
4591
4592 if (rt == NULL) {
4593 /* this could happen, if we could not allocate memory */
4594 return 0; /* caller will free mbuf */
4595 }
4596 RT_LOCK(rt);
4597 if (rt->rt_gateway->sa_family != AF_LINK) {
4598 printf("nd6_storelladdr: something odd happens\n");
4599 RT_UNLOCK(rt);
4600 return 0; /* caller will free mbuf */
4601 }
4602 sdl = SDL(rt->rt_gateway);
4603 if (sdl->sdl_alen == 0) {
4604 /* this should be impossible, but we bark here for debugging */
4605 printf("nd6_storelladdr: sdl_alen == 0\n");
4606 RT_UNLOCK(rt);
4607 return 0; /* caller will free mbuf */
4608 }
4609
4610 bcopy(LLADDR(sdl), desten, sdl->sdl_alen);
4611 RT_UNLOCK(rt);
4612 return 1;
4613}
4614
4615/*
4616 * This is the ND pre-output routine; care must be taken to ensure that
4617 * the "hint" route never gets freed via rtfree(), since the caller may
4618 * have stored it inside a struct route with a reference held for that
4619 * placeholder.
4620 */
4621errno_t
4622nd6_lookup_ipv6(ifnet_t ifp, const struct sockaddr_in6 *ip6_dest,
4623 struct sockaddr_dl *ll_dest, size_t ll_dest_len, route_t hint,
4624 mbuf_t packet)
4625{
4626 route_t route = hint;
4627 errno_t result = 0;
4628 struct sockaddr_dl *sdl = NULL;
4629 size_t copy_len;
4630
4631 if (ifp == NULL || ip6_dest == NULL) {
4632 return EINVAL;
4633 }
4634
4635 if (ip6_dest->sin6_family != AF_INET6) {
4636 return EAFNOSUPPORT;
4637 }
4638
4639 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING)) {
4640 return ENETDOWN;
4641 }
4642
4643 if (hint != NULL) {
4644 /*
4645 * Callee holds a reference on the route and returns
4646 * with the route entry locked, upon success.
4647 */
4648 result = route_to_gwroute((const struct sockaddr *)ip6_dest,
4649 hint, &route);
4650 if (result != 0) {
4651 return result;
4652 }
4653 if (route != NULL) {
4654 RT_LOCK_ASSERT_HELD(route);
4655 }
4656 }
4657
4658 if ((packet != NULL && (packet->m_flags & M_MCAST) != 0) ||
4659 ((ifp->if_flags & IFF_MULTICAST) &&
4660 IN6_IS_ADDR_MULTICAST(&ip6_dest->sin6_addr))) {
4661 if (route != NULL) {
4662 RT_UNLOCK(route);
4663 }
4664 result = dlil_resolve_multi(ifp,
4665 (const struct sockaddr *)ip6_dest,
4666 SA(ll_dest), ll_dest_len);
4667 if (route != NULL) {
4668 RT_LOCK(route);
4669 }
4670 goto release;
4671 } else if (route == NULL) {
4672 /*
4673 * rdar://24596652
4674 * For unicast, lookup existing ND6 entries but
4675 * do not trigger a resolution
4676 */
4677 lck_mtx_lock(rnh_lock);
4678 route = rt_lookup(TRUE,
4679 __DECONST(struct sockaddr *, ip6_dest), NULL,
4680 rt_tables[AF_INET6], ifp->if_index);
4681 lck_mtx_unlock(rnh_lock);
4682
4683 if (route != NULL) {
4684 RT_LOCK(route);
4685 }
4686 }
4687
4688 if (route == NULL) {
4689 /*
4690		 * This could happen if we could not allocate memory, or
4691 * if route_to_gwroute() didn't return a route.
4692 */
4693 result = ENOBUFS;
4694 goto release;
4695 }
4696
4697 if (route->rt_gateway->sa_family != AF_LINK) {
4698 nd6log0(error, "%s: route %s on %s%d gateway address not AF_LINK\n",
4699 __func__, ip6_sprintf(&ip6_dest->sin6_addr),
4700 route->rt_ifp->if_name, route->rt_ifp->if_unit);
4701 result = EADDRNOTAVAIL;
4702 goto release;
4703 }
4704
4705 sdl = SDL(route->rt_gateway);
4706 if (sdl->sdl_alen == 0) {
4707 /* this should be impossible, but we bark here for debugging */
4708 nd6log(error, "%s: route %s on %s%d sdl_alen == 0\n", __func__,
4709 ip6_sprintf(&ip6_dest->sin6_addr), route->rt_ifp->if_name,
4710 route->rt_ifp->if_unit);
4711 result = EHOSTUNREACH;
4712 goto release;
4713 }
4714
4715 copy_len = sdl->sdl_len <= ll_dest_len ? sdl->sdl_len : ll_dest_len;
4716 bcopy(sdl, ll_dest, copy_len);
4717
4718release:
4719 if (route != NULL) {
4720 if (route == hint) {
4721 RT_REMREF_LOCKED(route);
4722 RT_UNLOCK(route);
4723 } else {
4724 RT_UNLOCK(route);
4725 rtfree(route);
4726 }
4727 }
4728 return result;
4729}
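/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * pre-output path resolves the link-layer destination into a
 * sockaddr_dl, passing its cached route as the hint.
 */
#if 0
static errno_t
example_preoutput(ifnet_t ifp, mbuf_t m, const struct sockaddr_in6 *dst,
    route_t hint)
{
	struct sockaddr_dl ll_dest;

	return nd6_lookup_ipv6(ifp, dst, &ll_dest, sizeof(ll_dest),
	    hint, m);
}
#endif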
4730
4731#if (DEVELOPMENT || DEBUG)
4732
4733static int sysctl_nd6_lookup_ipv6 SYSCTL_HANDLER_ARGS;
4734SYSCTL_PROC(_net_inet6_icmp6, OID_AUTO, nd6_lookup_ipv6,
4735 CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
4736 sysctl_nd6_lookup_ipv6, "S", "");
4737
4738int
4739sysctl_nd6_lookup_ipv6 SYSCTL_HANDLER_ARGS
4740{
4741#pragma unused(oidp, arg1, arg2)
4742 int error = 0;
4743 struct nd6_lookup_ipv6_args nd6_lookup_ipv6_args;
4744 ifnet_t ifp = NULL;
4745
4746 /*
4747	 * Only root can look up MAC addresses
4748 */
4749 error = proc_suser(current_proc());
4750 if (error != 0) {
4751 nd6log0(error, "%s: proc_suser() error %d\n",
4752 __func__, error);
4753 goto done;
4754 }
4755 if (req->oldptr == USER_ADDR_NULL) {
4756 req->oldidx = sizeof(struct nd6_lookup_ipv6_args);
4757 }
4758 if (req->newptr == USER_ADDR_NULL) {
4759 goto done;
4760 }
4761 if (req->oldlen != sizeof(struct nd6_lookup_ipv6_args) ||
4762 req->newlen != sizeof(struct nd6_lookup_ipv6_args)) {
4763 error = EINVAL;
4764 nd6log0(error, "%s: bad req, error %d\n",
4765 __func__, error);
4766 goto done;
4767 }
4768 error = SYSCTL_IN(req, &nd6_lookup_ipv6_args,
4769 sizeof(struct nd6_lookup_ipv6_args));
4770 if (error != 0) {
4771 nd6log0(error, "%s: SYSCTL_IN() error %d\n",
4772 __func__, error);
4773 goto done;
4774 }
4775
4776 if (nd6_lookup_ipv6_args.ll_dest_len > sizeof(nd6_lookup_ipv6_args.ll_dest_)) {
4777 error = EINVAL;
4778 nd6log0(error, "%s: bad ll_dest_len, error %d\n",
4779 __func__, error);
4780 goto done;
4781 }
4782
4783 /* Make sure to terminate the string */
4784 nd6_lookup_ipv6_args.ifname[IFNAMSIZ - 1] = 0;
4785
4786 error = ifnet_find_by_name(nd6_lookup_ipv6_args.ifname, &ifp);
4787 if (error != 0) {
4788 nd6log0(error, "%s: ifnet_find_by_name() error %d\n",
4789 __func__, error);
4790 goto done;
4791 }
4792
4793 error = nd6_lookup_ipv6(ifp, &nd6_lookup_ipv6_args.ip6_dest,
4794 &nd6_lookup_ipv6_args.ll_dest_._sdl,
4795 nd6_lookup_ipv6_args.ll_dest_len, NULL, NULL);
4796 if (error != 0) {
4797 nd6log0(error, "%s: nd6_lookup_ipv6() error %d\n",
4798 __func__, error);
4799 goto done;
4800 }
4801
4802 error = SYSCTL_OUT(req, &nd6_lookup_ipv6_args,
4803 sizeof(struct nd6_lookup_ipv6_args));
4804 if (error != 0) {
4805 nd6log0(error, "%s: SYSCTL_OUT() error %d\n",
4806 __func__, error);
4807 goto done;
4808 }
4809done:
4810 return error;
4811}
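/*
 * Illustrative sketch (hypothetical userland test; DEVELOPMENT/DEBUG
 * kernels only): the handler above requires the same struct on both
 * the old and new sides of the request.
 */
#if 0
static void
example_nd6_lookup_ipv6_sysctl(void)
{
	struct nd6_lookup_ipv6_args args;
	size_t len = sizeof(args);

	memset(&args, 0, sizeof(args));
	/* fill in args.ifname, args.ip6_dest and args.ll_dest_len */
	if (sysctlbyname("net.inet6.icmp6.nd6_lookup_ipv6",
	    &args, &len, &args, sizeof(args)) == -1)
		err(1, "sysctlbyname");
}
#endif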
4812
4813#endif /* (DEVELOPMENT || DEBUG) */
4814
4815int
4816nd6_setifinfo(struct ifnet *ifp, u_int32_t before, u_int32_t after)
4817{
4818 uint32_t b, a;
4819 int err = 0;
4820
4821 /*
4822 * Handle ND6_IFF_IFDISABLED
4823 */
4824 if ((before & ND6_IFF_IFDISABLED) ||
4825 (after & ND6_IFF_IFDISABLED)) {
4826 b = (before & ND6_IFF_IFDISABLED);
4827 a = (after & ND6_IFF_IFDISABLED);
4828
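		/*
		 * b != a means the flag flipped; the sign of (a - b)
		 * tells whether it is being turned on (positive) or
		 * off (negative).
		 */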
4829 if (b != a && (err = nd6_if_disable(ifp,
4830 ((int32_t)(a - b) > 0))) != 0) {
4831 goto done;
4832 }
4833 }
4834
4835 /*
4836 * Handle ND6_IFF_PROXY_PREFIXES
4837 */
4838 if ((before & ND6_IFF_PROXY_PREFIXES) ||
4839 (after & ND6_IFF_PROXY_PREFIXES)) {
4840 b = (before & ND6_IFF_PROXY_PREFIXES);
4841 a = (after & ND6_IFF_PROXY_PREFIXES);
4842
4843 if (b != a && (err = nd6_if_prproxy(ifp,
4844 ((int32_t)(a - b) > 0))) != 0) {
4845 goto done;
4846 }
4847 }
4848done:
4849 return err;
4850}
4851
4852/*
4853 * Enable/disable IPv6 on an interface, called as part of
4854 * setting/clearing ND6_IFF_IFDISABLED, or during DAD failure.
4855 */
4856int
4857nd6_if_disable(struct ifnet *ifp, boolean_t enable)
4858{
4859 if (enable) {
4860 if_set_eflags(ifp, IFEF_IPV6_DISABLED);
4861 } else {
4862 if_clear_eflags(ifp, IFEF_IPV6_DISABLED);
4863 }
4864
4865 return 0;
4866}
4867
4868static int
4869nd6_sysctl_drlist SYSCTL_HANDLER_ARGS
4870{
4871#pragma unused(oidp, arg1, arg2)
4872 char pbuf[MAX_IPv6_STR_LEN];
4873 struct nd_defrouter *dr;
4874 int error = 0;
4875
4876 if (req->newptr != USER_ADDR_NULL) {
4877 return EPERM;
4878 }
4879
4880 /* XXX Handle mapped defrouter entries */
4881 lck_mtx_lock(nd6_mutex);
4882 if (proc_is64bit(req->p)) {
4883 struct in6_defrouter_64 d;
4884
4885 bzero(&d, sizeof(d));
4886 d.rtaddr.sin6_family = AF_INET6;
4887 d.rtaddr.sin6_len = sizeof(d.rtaddr);
4888
4889 TAILQ_FOREACH(dr, &nd_defrouter_list, dr_entry) {
4890 d.rtaddr.sin6_addr = dr->rtaddr;
4891 if (in6_recoverscope(&d.rtaddr,
4892 &dr->rtaddr, dr->ifp) != 0) {
4893 log(LOG_ERR, "scope error in default router "
4894 "list (%s)\n", inet_ntop(AF_INET6,
4895 &dr->rtaddr, pbuf, sizeof(pbuf)));
4896 }
4897 d.flags = dr->flags;
4898 d.stateflags = dr->stateflags;
4899 d.rtlifetime = (u_short)dr->rtlifetime;
4900 d.expire = (int)nddr_getexpire(dr);
4901 d.if_index = dr->ifp->if_index;
4902 error = SYSCTL_OUT(req, &d, sizeof(d));
4903 if (error != 0) {
4904 break;
4905 }
4906 }
4907 } else {
4908 struct in6_defrouter_32 d;
4909
4910 bzero(&d, sizeof(d));
4911 d.rtaddr.sin6_family = AF_INET6;
4912 d.rtaddr.sin6_len = sizeof(d.rtaddr);
4913
4914 TAILQ_FOREACH(dr, &nd_defrouter_list, dr_entry) {
4915 d.rtaddr.sin6_addr = dr->rtaddr;
4916 if (in6_recoverscope(&d.rtaddr,
4917 &dr->rtaddr, dr->ifp) != 0) {
4918 log(LOG_ERR, "scope error in default router "
4919 "list (%s)\n", inet_ntop(AF_INET6,
4920 &dr->rtaddr, pbuf, sizeof(pbuf)));
4921 }
4922 d.flags = dr->flags;
4923 d.stateflags = dr->stateflags;
4924 d.rtlifetime = (u_short)dr->rtlifetime;
4925 d.expire = (int)nddr_getexpire(dr);
4926 d.if_index = dr->ifp->if_index;
4927 error = SYSCTL_OUT(req, &d, sizeof(d));
4928 if (error != 0) {
4929 break;
4930 }
4931 }
4932 }
4933 lck_mtx_unlock(nd6_mutex);
4934 return error;
4935}
4936
4937static int
4938nd6_sysctl_prlist SYSCTL_HANDLER_ARGS
4939{
4940#pragma unused(oidp, arg1, arg2)
4941 char pbuf[MAX_IPv6_STR_LEN];
4942 struct nd_pfxrouter *pfr;
4943 struct sockaddr_in6 s6;
4944 struct nd_prefix *pr;
4945 int error = 0;
4946
4947 if (req->newptr != USER_ADDR_NULL) {
4948 return EPERM;
4949 }
4950
4951 bzero(&s6, sizeof(s6));
4952 s6.sin6_family = AF_INET6;
4953 s6.sin6_len = sizeof(s6);
4954
4955 /* XXX Handle mapped defrouter entries */
4956 lck_mtx_lock(nd6_mutex);
4957 if (proc_is64bit(req->p)) {
4958 struct in6_prefix_64 p;
4959
4960 bzero(&p, sizeof(p));
4961 p.origin = PR_ORIG_RA;
4962
4963 LIST_FOREACH(pr, &nd_prefix, ndpr_entry) {
4964 NDPR_LOCK(pr);
4965 p.prefix = pr->ndpr_prefix;
4966 if (in6_recoverscope(&p.prefix,
4967 &pr->ndpr_prefix.sin6_addr, pr->ndpr_ifp) != 0) {
4968 log(LOG_ERR, "scope error in "
4969 "prefix list (%s)\n", inet_ntop(AF_INET6,
4970 &p.prefix.sin6_addr, pbuf, sizeof(pbuf)));
4971 }
4972 p.raflags = pr->ndpr_raf;
4973 p.prefixlen = pr->ndpr_plen;
4974 p.vltime = pr->ndpr_vltime;
4975 p.pltime = pr->ndpr_pltime;
4976 p.if_index = pr->ndpr_ifp->if_index;
4977 p.expire = (u_long)ndpr_getexpire(pr);
4978 p.refcnt = pr->ndpr_addrcnt;
4979 p.flags = pr->ndpr_stateflags;
4980 p.advrtrs = 0;
4981 LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry)
4982 p.advrtrs++;
4983 error = SYSCTL_OUT(req, &p, sizeof(p));
4984 if (error != 0) {
4985 NDPR_UNLOCK(pr);
4986 break;
4987 }
4988 LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry) {
4989 s6.sin6_addr = pfr->router->rtaddr;
4990 if (in6_recoverscope(&s6, &pfr->router->rtaddr,
4991 pfr->router->ifp) != 0) {
4992 log(LOG_ERR,
4993 "scope error in prefix list (%s)\n",
4994 inet_ntop(AF_INET6, &s6.sin6_addr,
4995 pbuf, sizeof(pbuf)));
4996 }
4997 error = SYSCTL_OUT(req, &s6, sizeof(s6));
4998 if (error != 0) {
4999 break;
5000 }
5001 }
5002 NDPR_UNLOCK(pr);
5003 if (error != 0) {
5004 break;
5005 }
5006 }
5007 } else {
5008 struct in6_prefix_32 p;
5009
5010 bzero(&p, sizeof(p));
5011 p.origin = PR_ORIG_RA;
5012
5013 LIST_FOREACH(pr, &nd_prefix, ndpr_entry) {
5014 NDPR_LOCK(pr);
5015 p.prefix = pr->ndpr_prefix;
5016 if (in6_recoverscope(&p.prefix,
5017 &pr->ndpr_prefix.sin6_addr, pr->ndpr_ifp) != 0) {
5018 log(LOG_ERR,
5019 "scope error in prefix list (%s)\n",
5020 inet_ntop(AF_INET6, &p.prefix.sin6_addr,
5021 pbuf, sizeof(pbuf)));
5022 }
5023 p.raflags = pr->ndpr_raf;
5024 p.prefixlen = pr->ndpr_plen;
5025 p.vltime = pr->ndpr_vltime;
5026 p.pltime = pr->ndpr_pltime;
5027 p.if_index = pr->ndpr_ifp->if_index;
5028 p.expire = (u_int32_t)ndpr_getexpire(pr);
5029 p.refcnt = pr->ndpr_addrcnt;
5030 p.flags = pr->ndpr_stateflags;
5031 p.advrtrs = 0;
5032 LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry)
5033 p.advrtrs++;
5034 error = SYSCTL_OUT(req, &p, sizeof(p));
5035 if (error != 0) {
5036 NDPR_UNLOCK(pr);
5037 break;
5038 }
5039 LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry) {
5040 s6.sin6_addr = pfr->router->rtaddr;
5041 if (in6_recoverscope(&s6, &pfr->router->rtaddr,
5042 pfr->router->ifp) != 0) {
5043 log(LOG_ERR,
5044 "scope error in prefix list (%s)\n",
5045 inet_ntop(AF_INET6, &s6.sin6_addr,
5046 pbuf, sizeof(pbuf)));
5047 }
5048 error = SYSCTL_OUT(req, &s6, sizeof(s6));
5049 if (error != 0) {
5050 break;
5051 }
5052 }
5053 NDPR_UNLOCK(pr);
5054 if (error != 0) {
5055 break;
5056 }
5057 }
5058 }
5059 lck_mtx_unlock(nd6_mutex);
5060
5061 return error;
5062}
5063
5064void
5065in6_ifaddr_set_dadprogress(struct in6_ifaddr *ia)
5066{
5067 struct ifnet* ifp = ia->ia_ifp;
5068 uint32_t flags = IN6_IFF_TENTATIVE;
5069 uint32_t optdad = nd6_optimistic_dad;
5070 struct nd_ifinfo *ndi = NULL;
5071
5072 ndi = ND_IFINFO(ifp);
5073 VERIFY((NULL != ndi) && (TRUE == ndi->initialized));
5074 if (!(ndi->flags & ND6_IFF_DAD)) {
5075 return;
5076 }
5077
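	/*
	 * Optimistic DAD (RFC 4429) is suppressed outright when the node
	 * is an exclusive IPv6 router or the interface state is
	 * replicated; otherwise the nd6_optimistic_dad knob decides, per
	 * address class below, whether the address starts out OPTIMISTIC
	 * instead of TENTATIVE.
	 */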
5078 if (optdad) {
5079 if (ifp->if_ipv6_router_mode == IPV6_ROUTER_MODE_EXCLUSIVE) {
5080 optdad = 0;
5081 } else {
5082 lck_mtx_lock(&ndi->lock);
5083 if ((ndi->flags & ND6_IFF_REPLICATED) != 0) {
5084 optdad = 0;
5085 }
5086 lck_mtx_unlock(&ndi->lock);
5087 }
5088 }
5089
5090 if (optdad) {
5091 if ((optdad & ND6_OPTIMISTIC_DAD_LINKLOCAL) &&
5092 IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr)) {
5093 flags = IN6_IFF_OPTIMISTIC;
5094 } else if ((optdad & ND6_OPTIMISTIC_DAD_AUTOCONF) &&
5095 (ia->ia6_flags & IN6_IFF_AUTOCONF)) {
5096 if (ia->ia6_flags & IN6_IFF_TEMPORARY) {
5097 if (optdad & ND6_OPTIMISTIC_DAD_TEMPORARY) {
5098 flags = IN6_IFF_OPTIMISTIC;
5099 }
5100 } else if (ia->ia6_flags & IN6_IFF_SECURED) {
5101 if (optdad & ND6_OPTIMISTIC_DAD_SECURED) {
5102 flags = IN6_IFF_OPTIMISTIC;
5103 }
5104 } else {
5105 /*
5106				 * Temporary and CGA SLAAC addresses keep
5107				 * their own knobs for optimistic DAD
5108				 * (handled above).
5109				 * Beyond those, if ND6_OPTIMISTIC_DAD_AUTOCONF
5110				 * is set, we default to optimistic
5111				 * DAD.
5112				 * For now this means SLAAC addresses whose
5113				 * interface identifier is derived from a
5114				 * modified EUI-64 identifier.
5115 */
5116 flags = IN6_IFF_OPTIMISTIC;
5117 }
5118 } else if ((optdad & ND6_OPTIMISTIC_DAD_DYNAMIC) &&
5119 (ia->ia6_flags & IN6_IFF_DYNAMIC)) {
5120 if (ia->ia6_flags & IN6_IFF_TEMPORARY) {
5121 if (optdad & ND6_OPTIMISTIC_DAD_TEMPORARY) {
5122 flags = IN6_IFF_OPTIMISTIC;
5123 }
5124 } else {
5125 flags = IN6_IFF_OPTIMISTIC;
5126 }
5127 } else if ((optdad & ND6_OPTIMISTIC_DAD_MANUAL) &&
5128 (ia->ia6_flags & IN6_IFF_OPTIMISTIC)) {
5129 /*
5130 * rdar://17483438
5131 * Bypass tentative for address assignments
5132 * not covered above (e.g. manual) upon request
5133 */
5134 if (!IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr) &&
5135 !(ia->ia6_flags & IN6_IFF_AUTOCONF) &&
5136 !(ia->ia6_flags & IN6_IFF_DYNAMIC)) {
5137 flags = IN6_IFF_OPTIMISTIC;
5138 }
5139 }
5140 }
5141
5142 ia->ia6_flags &= ~(IN6_IFF_DUPLICATED | IN6_IFF_DADPROGRESS);
5143 ia->ia6_flags |= flags;
5144
5145 nd6log2(debug, "%s - %s ifp %s ia6_flags 0x%x\n",
5146 __func__,
5147 ip6_sprintf(&ia->ia_addr.sin6_addr),
5148 if_name(ia->ia_ifp),
5149 ia->ia6_flags);
5150}