]> git.saurik.com Git - apple/xnu.git/blame - bsd/netinet6/nd6.c
xnu-3789.51.2.tar.gz
[apple/xnu.git] / bsd / netinet6 / nd6.c
CommitLineData
b0d623f7 1/*
39037602 2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
b0d623f7
A
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
39236c6e 5 *
b0d623f7
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
39236c6e 14 *
b0d623f7
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
39236c6e 17 *
b0d623f7
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
39236c6e 25 *
b0d623f7
A
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
1c79356b
A
29/*
30 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. Neither the name of the project nor the names of its contributors
42 * may be used to endorse or promote products derived from this software
43 * without specific prior written permission.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * SUCH DAMAGE.
56 */
57
58/*
59 * XXX
60 * KAME 970409 note:
61 * BSD/OS version heavily modifies this code, related to llinfo.
62 * Since we don't have BSD/OS version of net/route.c in our hand,
63 * I left the code mostly as it was in 970310. -- itojun
64 */
65
66#include <sys/param.h>
67#include <sys/systm.h>
68#include <sys/malloc.h>
69#include <sys/mbuf.h>
70#include <sys/socket.h>
71#include <sys/sockio.h>
72#include <sys/time.h>
73#include <sys/kernel.h>
2d21ac55 74#include <sys/sysctl.h>
1c79356b 75#include <sys/errno.h>
1c79356b
A
76#include <sys/syslog.h>
77#include <sys/protosw.h>
b0d623f7 78#include <sys/proc.h>
6d2010ae
A
79#include <sys/mcache.h>
80
39236c6e
A
81#include <dev/random/randomdev.h>
82
1c79356b 83#include <kern/queue.h>
b0d623f7 84#include <kern/zalloc.h>
1c79356b
A
85
86#include <net/if.h>
87#include <net/if_dl.h>
88#include <net/if_types.h>
6d2010ae 89#include <net/if_llreach.h>
1c79356b
A
90#include <net/route.h>
91#include <net/dlil.h>
6d2010ae 92#include <net/ntstat.h>
39236c6e 93#include <net/net_osdep.h>
1c79356b
A
94
95#include <netinet/in.h>
b0d623f7 96#include <netinet/in_arp.h>
1c79356b 97#include <netinet/if_ether.h>
1c79356b
A
98#include <netinet6/in6_var.h>
99#include <netinet/ip6.h>
100#include <netinet6/ip6_var.h>
101#include <netinet6/nd6.h>
6d2010ae 102#include <netinet6/scope6_var.h>
1c79356b
A
103#include <netinet/icmp6.h>
104
1c79356b 105#include "loop.h"
1c79356b 106
39236c6e
A
107#define ND6_SLOWTIMER_INTERVAL (60 * 60) /* 1 hour */
108#define ND6_RECALC_REACHTM_INTERVAL (60 * 120) /* 2 hours */
1c79356b 109
b0d623f7 110#define equal(a1, a2) (bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
1c79356b
A
111
112/* timer values */
113int nd6_prune = 1; /* walk list every 1 seconds */
39236c6e 114int nd6_prune_lazy = 5; /* lazily walk list every 5 seconds */
1c79356b
A
115int nd6_delay = 5; /* delay first probe time 5 second */
116int nd6_umaxtries = 3; /* maximum unicast query */
117int nd6_mmaxtries = 3; /* maximum multicast query */
118int nd6_useloopback = 1; /* use loopback interface for local traffic */
9bccf70c 119int nd6_gctimer = (60 * 60 * 24); /* 1 day: garbage collection timer */
1c79356b
A
120
121/* preventing too many loops in ND option parsing */
122int nd6_maxndopt = 10; /* max # of ND options allowed */
123
6d2010ae 124int nd6_maxqueuelen = 1; /* max # of packets cached in unresolved ND entries */
9bccf70c
A
125
126#if ND6_DEBUG
127int nd6_debug = 1;
128#else
129int nd6_debug = 0;
130#endif
131
316670eb 132int nd6_optimistic_dad =
39236c6e
A
133 (ND6_OPTIMISTIC_DAD_LINKLOCAL|ND6_OPTIMISTIC_DAD_AUTOCONF|
134 ND6_OPTIMISTIC_DAD_TEMPORARY|ND6_OPTIMISTIC_DAD_DYNAMIC|
fe8ab488 135 ND6_OPTIMISTIC_DAD_SECURED|ND6_OPTIMISTIC_DAD_MANUAL);
6d2010ae 136
1c79356b
A
137/* for debugging? */
138static int nd6_inuse, nd6_allocated;
139
b0d623f7
A
140/*
141 * Synchronization notes:
142 *
143 * The global list of ND entries are stored in llinfo_nd6; an entry
144 * gets inserted into the list when the route is created and gets
145 * removed from the list when it is deleted; this is done as part
146 * of RTM_ADD/RTM_RESOLVE/RTM_DELETE in nd6_rtrequest().
147 *
148 * Because rnh_lock and rt_lock for the entry are held during those
149 * operations, the same locks (and thus lock ordering) must be used
150 * elsewhere to access the relevant data structure fields:
151 *
152 * ln_next, ln_prev, ln_rt
153 *
154 * - Routing lock (rnh_lock)
155 *
39236c6e 156 * ln_hold, ln_asked, ln_expire, ln_state, ln_router, ln_flags,
6d2010ae 157 * ln_llreach, ln_lastused
b0d623f7
A
158 *
159 * - Routing entry lock (rt_lock)
160 *
161 * Due to the dependency on rt_lock, llinfo_nd6 has the same lifetime
162 * as the route entry itself. When a route is deleted (RTM_DELETE),
163 * it is simply removed from the global list but the memory is not
164 * freed until the route itself is freed.
165 */
166struct llinfo_nd6 llinfo_nd6 = {
39236c6e
A
167 .ln_next = &llinfo_nd6,
168 .ln_prev = &llinfo_nd6,
b0d623f7
A
169};
170
3e170ce0
A
171static lck_grp_attr_t *nd_if_lock_grp_attr = NULL;
172static lck_grp_t *nd_if_lock_grp = NULL;
173static lck_attr_t *nd_if_lock_attr = NULL;
b0d623f7
A
174
175/* Protected by nd6_mutex */
1c79356b
A
176struct nd_drhead nd_defrouter;
177struct nd_prhead nd_prefix = { 0 };
178
39236c6e
A
179/*
180 * nd6_timeout() is scheduled on a demand basis. nd6_timeout_run is used
181 * to indicate whether or not a timeout has been scheduled. The rnh_lock
182 * mutex is used to protect this scheduling; it is a natural choice given
183 * the work done in the timer callback. Unfortunately, there are cases
184 * when nd6_timeout() needs to be scheduled while rnh_lock cannot be easily
185 * held, due to lock ordering. In those cases, we utilize a "demand" counter
186 * nd6_sched_timeout_want which can be atomically incremented without
187 * having to hold rnh_lock. On places where we acquire rnh_lock, such as
188 * nd6_rtrequest(), we check this counter and schedule the timer if it is
189 * non-zero. The increment happens on various places when we allocate
190 * new ND entries, default routers, prefixes and addresses.
191 */
192static int nd6_timeout_run; /* nd6_timeout is scheduled to run */
193static void nd6_timeout(void *);
194int nd6_sched_timeout_want; /* demand count for timer to be sched */
195static boolean_t nd6_fast_timer_on = FALSE;
196
197/* Serialization variables for nd6_service(), protected by rnh_lock */
198static boolean_t nd6_service_busy;
199static void *nd6_service_wc = &nd6_service_busy;
200static int nd6_service_waiters = 0;
6d2010ae 201
1c79356b
A
202int nd6_recalc_reachtm_interval = ND6_RECALC_REACHTM_INTERVAL;
203static struct sockaddr_in6 all1_sa;
204
91447636 205static int regen_tmpaddr(struct in6_ifaddr *);
91447636 206extern lck_mtx_t *nd6_mutex;
1c79356b 207
39236c6e 208static struct llinfo_nd6 *nd6_llinfo_alloc(int);
b0d623f7 209static void nd6_llinfo_free(void *);
6d2010ae
A
210static void nd6_llinfo_purge(struct rtentry *);
211static void nd6_llinfo_get_ri(struct rtentry *, struct rt_reach_info *);
316670eb 212static void nd6_llinfo_get_iflri(struct rtentry *, struct ifnet_llreach_info *);
3e170ce0 213static void nd6_llinfo_refresh(struct rtentry *);
39236c6e 214static uint64_t ln_getexpire(struct llinfo_nd6 *);
1c79356b 215
39236c6e
A
216static void nd6_service(void *);
217static void nd6_slowtimo(void *);
218static int nd6_is_new_addr_neighbor(struct sockaddr_in6 *, struct ifnet *);
316670eb
A
219static int nd6_siocgdrlst(void *, int);
220static int nd6_siocgprlst(void *, int);
b0d623f7 221
39236c6e
A
222static int nd6_sysctl_drlist SYSCTL_HANDLER_ARGS;
223static int nd6_sysctl_prlist SYSCTL_HANDLER_ARGS;
224
b0d623f7
A
225/*
226 * Insertion and removal from llinfo_nd6 must be done with rnh_lock held.
227 */
39236c6e 228#define LN_DEQUEUE(_ln) do { \
b0d623f7
A
229 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED); \
230 RT_LOCK_ASSERT_HELD((_ln)->ln_rt); \
231 (_ln)->ln_next->ln_prev = (_ln)->ln_prev; \
232 (_ln)->ln_prev->ln_next = (_ln)->ln_next; \
233 (_ln)->ln_prev = (_ln)->ln_next = NULL; \
234 (_ln)->ln_flags &= ~ND6_LNF_IN_USE; \
235} while (0)
236
39236c6e 237#define LN_INSERTHEAD(_ln) do { \
b0d623f7
A
238 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED); \
239 RT_LOCK_ASSERT_HELD((_ln)->ln_rt); \
240 (_ln)->ln_next = llinfo_nd6.ln_next; \
241 llinfo_nd6.ln_next = (_ln); \
242 (_ln)->ln_prev = &llinfo_nd6; \
243 (_ln)->ln_next->ln_prev = (_ln); \
244 (_ln)->ln_flags |= ND6_LNF_IN_USE; \
245} while (0)
246
247static struct zone *llinfo_nd6_zone;
248#define LLINFO_ND6_ZONE_MAX 256 /* maximum elements in zone */
249#define LLINFO_ND6_ZONE_NAME "llinfo_nd6" /* name for zone */
e2fac8b1 250
39236c6e
A
251extern int tvtohz(struct timeval *);
252
253static int nd6_init_done;
254
255SYSCTL_DECL(_net_inet6_icmp6);
256
257SYSCTL_PROC(_net_inet6_icmp6, ICMPV6CTL_ND6_DRLIST, nd6_drlist,
fe8ab488 258 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
39236c6e
A
259 nd6_sysctl_drlist, "S,in6_defrouter", "");
260
261SYSCTL_PROC(_net_inet6_icmp6, ICMPV6CTL_ND6_PRLIST, nd6_prlist,
fe8ab488 262 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
39236c6e
A
263 nd6_sysctl_prlist, "S,in6_defrouter", "");
264
3e170ce0
A
265SYSCTL_DECL(_net_inet6_ip6);
266
267static int ip6_maxchainsent = 0;
268SYSCTL_INT(_net_inet6_ip6, OID_AUTO, maxchainsent,
269 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_maxchainsent, 0,
270 "use dlil_output_list");
271
1c79356b 272void
39236c6e 273nd6_init(void)
1c79356b 274{
1c79356b
A
275 int i;
276
39236c6e 277 VERIFY(!nd6_init_done);
1c79356b
A
278
279 all1_sa.sin6_family = AF_INET6;
39236c6e
A
280 all1_sa.sin6_len = sizeof (struct sockaddr_in6);
281 for (i = 0; i < sizeof (all1_sa.sin6_addr); i++)
1c79356b
A
282 all1_sa.sin6_addr.s6_addr[i] = 0xff;
283
284 /* initialization of the default router list */
285 TAILQ_INIT(&nd_defrouter);
286
316670eb
A
287 nd_if_lock_grp_attr = lck_grp_attr_alloc_init();
288 nd_if_lock_grp = lck_grp_alloc_init("nd_if_lock", nd_if_lock_grp_attr);
289 nd_if_lock_attr = lck_attr_alloc_init();
b0d623f7
A
290
291 llinfo_nd6_zone = zinit(sizeof (struct llinfo_nd6),
292 LLINFO_ND6_ZONE_MAX * sizeof (struct llinfo_nd6), 0,
293 LLINFO_ND6_ZONE_NAME);
294 if (llinfo_nd6_zone == NULL)
295 panic("%s: failed allocating llinfo_nd6_zone", __func__);
296
297 zone_change(llinfo_nd6_zone, Z_EXPAND, TRUE);
6d2010ae
A
298 zone_change(llinfo_nd6_zone, Z_CALLERACCT, FALSE);
299
300 nd6_nbr_init();
301 nd6_rtr_init();
316670eb 302 nd6_prproxy_init();
b0d623f7 303
1c79356b
A
304 nd6_init_done = 1;
305
306 /* start timer */
39236c6e 307 timeout(nd6_slowtimo, NULL, ND6_SLOWTIMER_INTERVAL * hz);
1c79356b
A
308}
309
b0d623f7 310static struct llinfo_nd6 *
39236c6e 311nd6_llinfo_alloc(int how)
b0d623f7 312{
39236c6e
A
313 struct llinfo_nd6 *ln;
314
315 ln = (how == M_WAITOK) ? zalloc(llinfo_nd6_zone) :
316 zalloc_noblock(llinfo_nd6_zone);
317 if (ln != NULL)
318 bzero(ln, sizeof (*ln));
319
320 return (ln);
b0d623f7
A
321}
322
323static void
324nd6_llinfo_free(void *arg)
325{
326 struct llinfo_nd6 *ln = arg;
327
328 if (ln->ln_next != NULL || ln->ln_prev != NULL) {
329 panic("%s: trying to free %p when it is in use", __func__, ln);
330 /* NOTREACHED */
331 }
332
333 /* Just in case there's anything there, free it */
334 if (ln->ln_hold != NULL) {
3e170ce0 335 m_freem_list(ln->ln_hold);
b0d623f7
A
336 ln->ln_hold = NULL;
337 }
338
6d2010ae
A
339 /* Purge any link-layer info caching */
340 VERIFY(ln->ln_rt->rt_llinfo == ln);
341 if (ln->ln_rt->rt_llinfo_purge != NULL)
342 ln->ln_rt->rt_llinfo_purge(ln->ln_rt);
343
b0d623f7
A
344 zfree(llinfo_nd6_zone, ln);
345}
346
6d2010ae
A
347static void
348nd6_llinfo_purge(struct rtentry *rt)
349{
350 struct llinfo_nd6 *ln = rt->rt_llinfo;
351
352 RT_LOCK_ASSERT_HELD(rt);
353 VERIFY(rt->rt_llinfo_purge == nd6_llinfo_purge && ln != NULL);
354
355 if (ln->ln_llreach != NULL) {
356 RT_CONVERT_LOCK(rt);
357 ifnet_llreach_free(ln->ln_llreach);
358 ln->ln_llreach = NULL;
359 }
360 ln->ln_lastused = 0;
361}
362
363static void
364nd6_llinfo_get_ri(struct rtentry *rt, struct rt_reach_info *ri)
365{
366 struct llinfo_nd6 *ln = rt->rt_llinfo;
367 struct if_llreach *lr = ln->ln_llreach;
368
369 if (lr == NULL) {
370 bzero(ri, sizeof (*ri));
316670eb
A
371 ri->ri_rssi = IFNET_RSSI_UNKNOWN;
372 ri->ri_lqm = IFNET_LQM_THRESH_OFF;
373 ri->ri_npm = IFNET_NPM_THRESH_UNKNOWN;
6d2010ae
A
374 } else {
375 IFLR_LOCK(lr);
376 /* Export to rt_reach_info structure */
377 ifnet_lr2ri(lr, ri);
316670eb
A
378 /* Export ND6 send expiration (calendar) time */
379 ri->ri_snd_expire =
380 ifnet_llreach_up2calexp(lr, ln->ln_lastused);
381 IFLR_UNLOCK(lr);
382 }
383}
384
385static void
386nd6_llinfo_get_iflri(struct rtentry *rt, struct ifnet_llreach_info *iflri)
387{
388 struct llinfo_nd6 *ln = rt->rt_llinfo;
389 struct if_llreach *lr = ln->ln_llreach;
390
391 if (lr == NULL) {
392 bzero(iflri, sizeof (*iflri));
393 iflri->iflri_rssi = IFNET_RSSI_UNKNOWN;
394 iflri->iflri_lqm = IFNET_LQM_THRESH_OFF;
395 iflri->iflri_npm = IFNET_NPM_THRESH_UNKNOWN;
396 } else {
397 IFLR_LOCK(lr);
398 /* Export to ifnet_llreach_info structure */
399 ifnet_lr2iflri(lr, iflri);
400 /* Export ND6 send expiration (uptime) time */
401 iflri->iflri_snd_expire =
402 ifnet_llreach_up2upexp(lr, ln->ln_lastused);
6d2010ae
A
403 IFLR_UNLOCK(lr);
404 }
405}
406
3e170ce0
A
407static void
408nd6_llinfo_refresh(struct rtentry *rt)
409{
410 struct llinfo_nd6 *ln = rt->rt_llinfo;
411 uint64_t timenow = net_uptime();
412 /*
413 * Can't refresh permanent, static or entries that are
414 * not direct host entries
415 */
416 if (!ln || ln->ln_expire == 0 ||
417 (rt->rt_flags & RTF_STATIC) ||
418 !(rt->rt_flags & RTF_LLINFO)) {
419 return;
420 }
421
422 if ((ln->ln_state > ND6_LLINFO_INCOMPLETE) &&
423 (ln->ln_state < ND6_LLINFO_PROBE)) {
424 if (ln->ln_expire > timenow) {
39037602
A
425 ln_setexpire(ln, timenow);
426 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_PROBE);
3e170ce0
A
427 }
428 }
429 return;
430}
431
39037602
A
432const char *
433ndcache_state2str(short ndp_state)
434{
435 const char *ndp_state_str = "UNKNOWN";
436 switch (ndp_state) {
437 case ND6_LLINFO_PURGE:
438 ndp_state_str = "ND6_LLINFO_PURGE";
439 break;
440 case ND6_LLINFO_NOSTATE:
441 ndp_state_str = "ND6_LLINFO_NOSTATE";
442 break;
443 case ND6_LLINFO_INCOMPLETE:
444 ndp_state_str = "ND6_LLINFO_INCOMPLETE";
445 break;
446 case ND6_LLINFO_REACHABLE:
447 ndp_state_str = "ND6_LLINFO_REACHABLE";
448 break;
449 case ND6_LLINFO_STALE:
450 ndp_state_str = "ND6_LLINFO_STALE";
451 break;
452 case ND6_LLINFO_DELAY:
453 ndp_state_str = "ND6_LLINFO_DELAY";
454 break;
455 case ND6_LLINFO_PROBE:
456 ndp_state_str = "ND6_LLINFO_PROBE";
457 break;
458 default:
459 /* Init'd to UNKNOWN */
460 break;
461 }
462 return ndp_state_str;
463}
464
39236c6e
A
465void
466ln_setexpire(struct llinfo_nd6 *ln, uint64_t expiry)
467{
468 ln->ln_expire = expiry;
469}
470
471static uint64_t
472ln_getexpire(struct llinfo_nd6 *ln)
473{
474 struct timeval caltime;
475 uint64_t expiry;
476
477 if (ln->ln_expire != 0) {
478 struct rtentry *rt = ln->ln_rt;
479
480 VERIFY(rt != NULL);
481 /* account for system time change */
482 getmicrotime(&caltime);
483
484 rt->base_calendartime +=
485 NET_CALCULATE_CLOCKSKEW(caltime,
486 rt->base_calendartime, net_uptime(), rt->base_uptime);
487
488 expiry = rt->base_calendartime +
489 ln->ln_expire - rt->base_uptime;
490 } else {
491 expiry = 0;
492 }
493 return (expiry);
494}
495
496void
497nd6_ifreset(struct ifnet *ifp)
498{
3e170ce0
A
499 struct nd_ifinfo *ndi = ND_IFINFO(ifp);
500 VERIFY(NULL != ndi);
39236c6e 501 VERIFY(ndi->initialized);
3e170ce0 502
39236c6e
A
503 lck_mtx_assert(&ndi->lock, LCK_MTX_ASSERT_OWNED);
504 ndi->linkmtu = ifp->if_mtu;
505 ndi->chlim = IPV6_DEFHLIM;
506 ndi->basereachable = REACHABLE_TIME;
507 ndi->reachable = ND_COMPUTE_RTIME(ndi->basereachable);
508 ndi->retrans = RETRANS_TIMER;
509}
510
3e170ce0 511void
b0d623f7 512nd6_ifattach(struct ifnet *ifp)
1c79356b 513{
3e170ce0 514 struct nd_ifinfo *ndi = ND_IFINFO(ifp);
1c79356b 515
3e170ce0 516 VERIFY(NULL != ndi);
39236c6e
A
517 if (!ndi->initialized) {
518 lck_mtx_init(&ndi->lock, nd_if_lock_grp, nd_if_lock_attr);
fe8ab488 519 ndi->flags = ND6_IFF_PERFORMNUD;
39037602 520 ndi->flags |= ND6_IFF_DAD;
39236c6e 521 ndi->initialized = TRUE;
b0d623f7 522 }
39236c6e
A
523
524 lck_mtx_lock(&ndi->lock);
525
3e170ce0 526 if (!(ifp->if_flags & IFF_MULTICAST)) {
39236c6e 527 ndi->flags |= ND6_IFF_IFDISABLED;
3e170ce0 528 }
39236c6e
A
529
530 nd6_ifreset(ifp);
531 lck_mtx_unlock(&ndi->lock);
316670eb 532 nd6_setmtu(ifp);
3e170ce0 533 return;
1c79356b
A
534}
535
3e170ce0 536#if 0
1c79356b 537/*
3e170ce0
A
538 * XXX Look more into this. Especially since we recycle ifnets and do delayed
539 * cleanup
1c79356b 540 */
3e170ce0
A
541void
542nd6_ifdetach(struct nd_ifinfo *nd)
543{
544 /* XXX destroy nd's lock? */
545 FREE(nd, M_IP6NDP);
546}
547#endif
548
1c79356b 549void
2d21ac55 550nd6_setmtu(struct ifnet *ifp)
1c79356b 551{
3e170ce0 552 struct nd_ifinfo *ndi = ND_IFINFO(ifp);
b0d623f7 553 u_int32_t oldmaxmtu, maxmtu;
55e303ae 554
3e170ce0
A
555 if ((NULL == ndi) || (FALSE == ndi->initialized)) {
556 return;
55e303ae
A
557 }
558
316670eb 559 lck_mtx_lock(&ndi->lock);
55e303ae 560 oldmaxmtu = ndi->maxmtu;
1c79356b 561
2d21ac55
A
562 /*
563 * The ND level maxmtu is somewhat redundant to the interface MTU
564 * and is an implementation artifact of KAME. Instead of hard-
565 * limiting the maxmtu based on the interface type here, we simply
566 * take the if_mtu value since SIOCSIFMTU would have taken care of
567 * the sanity checks related to the maximum MTU allowed for the
568 * interface (a value that is known only by the interface layer),
569 * by sending the request down via ifnet_ioctl(). The use of the
6d2010ae
A
570 * ND level maxmtu and linkmtu are done via IN6_LINKMTU() which
571 * does further checking against if_mtu.
2d21ac55 572 */
b0d623f7 573 maxmtu = ndi->maxmtu = ifp->if_mtu;
1c79356b 574
2d21ac55 575 /*
39236c6e
A
576 * Decreasing the interface MTU under IPV6 minimum MTU may cause
577 * undesirable situation. We thus notify the operator of the change
578 * explicitly. The check for oldmaxmtu is necessary to restrict the
579 * log to the case of changing the MTU, not initializing it.
580 */
2d21ac55
A
581 if (oldmaxmtu >= IPV6_MMTU && ndi->maxmtu < IPV6_MMTU) {
582 log(LOG_NOTICE, "nd6_setmtu: "
39236c6e
A
583 "new link MTU on %s (%u) is too small for IPv6\n",
584 if_name(ifp), (uint32_t)ndi->maxmtu);
1c79356b 585 }
6d2010ae 586 ndi->linkmtu = ifp->if_mtu;
316670eb 587 lck_mtx_unlock(&ndi->lock);
2d21ac55
A
588
589 /* also adjust in6_maxmtu if necessary. */
3e170ce0 590 if (maxmtu > in6_maxmtu) {
2d21ac55 591 in6_setmaxmtu();
3e170ce0 592 }
1c79356b
A
593}
594
595void
39236c6e 596nd6_option_init(void *opt, int icmp6len, union nd_opts *ndopts)
1c79356b 597{
39236c6e 598 bzero(ndopts, sizeof (*ndopts));
1c79356b 599 ndopts->nd_opts_search = (struct nd_opt_hdr *)opt;
39236c6e
A
600 ndopts->nd_opts_last =
601 (struct nd_opt_hdr *)(((u_char *)opt) + icmp6len);
1c79356b
A
602
603 if (icmp6len == 0) {
604 ndopts->nd_opts_done = 1;
605 ndopts->nd_opts_search = NULL;
606 }
607}
608
609/*
610 * Take one ND option.
611 */
612struct nd_opt_hdr *
39236c6e 613nd6_option(union nd_opts *ndopts)
1c79356b
A
614{
615 struct nd_opt_hdr *nd_opt;
616 int olen;
617
618 if (!ndopts)
619 panic("ndopts == NULL in nd6_option\n");
620 if (!ndopts->nd_opts_last)
621 panic("uninitialized ndopts in nd6_option\n");
622 if (!ndopts->nd_opts_search)
39236c6e 623 return (NULL);
1c79356b 624 if (ndopts->nd_opts_done)
39236c6e 625 return (NULL);
1c79356b
A
626
627 nd_opt = ndopts->nd_opts_search;
628
9bccf70c
A
629 /* make sure nd_opt_len is inside the buffer */
630 if ((caddr_t)&nd_opt->nd_opt_len >= (caddr_t)ndopts->nd_opts_last) {
39236c6e
A
631 bzero(ndopts, sizeof (*ndopts));
632 return (NULL);
9bccf70c
A
633 }
634
1c79356b
A
635 olen = nd_opt->nd_opt_len << 3;
636 if (olen == 0) {
637 /*
638 * Message validation requires that all included
639 * options have a length that is greater than zero.
640 */
39236c6e
A
641 bzero(ndopts, sizeof (*ndopts));
642 return (NULL);
1c79356b
A
643 }
644
645 ndopts->nd_opts_search = (struct nd_opt_hdr *)((caddr_t)nd_opt + olen);
9bccf70c
A
646 if (ndopts->nd_opts_search > ndopts->nd_opts_last) {
647 /* option overruns the end of buffer, invalid */
39236c6e
A
648 bzero(ndopts, sizeof (*ndopts));
649 return (NULL);
9bccf70c
A
650 } else if (ndopts->nd_opts_search == ndopts->nd_opts_last) {
651 /* reached the end of options chain */
1c79356b
A
652 ndopts->nd_opts_done = 1;
653 ndopts->nd_opts_search = NULL;
654 }
39236c6e 655 return (nd_opt);
1c79356b
A
656}
657
658/*
659 * Parse multiple ND options.
660 * This function is much easier to use, for ND routines that do not need
661 * multiple options of the same type.
662 */
663int
39236c6e 664nd6_options(union nd_opts *ndopts)
1c79356b
A
665{
666 struct nd_opt_hdr *nd_opt;
667 int i = 0;
668
6d2010ae
A
669 if (ndopts == NULL)
670 panic("ndopts == NULL in nd6_options");
671 if (ndopts->nd_opts_last == NULL)
672 panic("uninitialized ndopts in nd6_options");
673 if (ndopts->nd_opts_search == NULL)
39236c6e 674 return (0);
1c79356b
A
675
676 while (1) {
677 nd_opt = nd6_option(ndopts);
6d2010ae 678 if (nd_opt == NULL && ndopts->nd_opts_last == NULL) {
1c79356b
A
679 /*
680 * Message validation requires that all included
681 * options have a length that is greater than zero.
682 */
9bccf70c 683 icmp6stat.icp6s_nd_badopt++;
39236c6e
A
684 bzero(ndopts, sizeof (*ndopts));
685 return (-1);
1c79356b
A
686 }
687
6d2010ae 688 if (nd_opt == NULL)
1c79356b
A
689 goto skip1;
690
691 switch (nd_opt->nd_opt_type) {
692 case ND_OPT_SOURCE_LINKADDR:
693 case ND_OPT_TARGET_LINKADDR:
694 case ND_OPT_MTU:
695 case ND_OPT_REDIRECTED_HEADER:
39037602 696 case ND_OPT_NONCE:
1c79356b 697 if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) {
9bccf70c
A
698 nd6log((LOG_INFO,
699 "duplicated ND6 option found (type=%d)\n",
700 nd_opt->nd_opt_type));
1c79356b
A
701 /* XXX bark? */
702 } else {
39236c6e
A
703 ndopts->nd_opt_array[nd_opt->nd_opt_type] =
704 nd_opt;
1c79356b
A
705 }
706 break;
707 case ND_OPT_PREFIX_INFORMATION:
708 if (ndopts->nd_opt_array[nd_opt->nd_opt_type] == 0) {
39236c6e
A
709 ndopts->nd_opt_array[nd_opt->nd_opt_type] =
710 nd_opt;
1c79356b
A
711 }
712 ndopts->nd_opts_pi_end =
39236c6e 713 (struct nd_opt_prefix_info *)nd_opt;
1c79356b 714 break;
6d2010ae 715 case ND_OPT_RDNSS:
39236c6e
A
716 /* ignore */
717 break;
1c79356b
A
718 default:
719 /*
720 * Unknown options must be silently ignored,
721 * to accomodate future extension to the protocol.
722 */
9bccf70c 723 nd6log((LOG_DEBUG,
1c79356b 724 "nd6_options: unsupported option %d - "
9bccf70c 725 "option ignored\n", nd_opt->nd_opt_type));
1c79356b
A
726 }
727
728skip1:
729 i++;
730 if (i > nd6_maxndopt) {
731 icmp6stat.icp6s_nd_toomanyopt++;
9bccf70c 732 nd6log((LOG_INFO, "too many loop in nd opt\n"));
1c79356b
A
733 break;
734 }
735
736 if (ndopts->nd_opts_done)
737 break;
738 }
739
39236c6e 740 return (0);
1c79356b
A
741}
742
39236c6e
A
743struct nd6svc_arg {
744 int draining;
745 uint32_t killed;
746 uint32_t aging_lazy;
747 uint32_t aging;
748 uint32_t sticky;
749 uint32_t found;
750};
751
752/*
753 * ND6 service routine to expire default route list and prefix list
754 */
755static void
756nd6_service(void *arg)
0b4e3aa0 757{
39236c6e 758 struct nd6svc_arg *ap = arg;
9bccf70c 759 struct llinfo_nd6 *ln;
39037602
A
760 struct nd_defrouter *dr = NULL;
761 struct nd_prefix *pr = NULL;
91447636 762 struct ifnet *ifp = NULL;
9bccf70c 763 struct in6_ifaddr *ia6, *nia6;
39236c6e 764 uint64_t timenow;
3e170ce0 765 bool send_nc_failure_kev = false;
39037602
A
766 struct nd_drhead nd_defrouter_tmp;
767 struct nd_defrouter *ndr = NULL;
91447636 768
39236c6e
A
769 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
770 /*
771 * Since we may drop rnh_lock and nd6_mutex below, we want
772 * to run this entire operation single threaded.
773 */
774 while (nd6_service_busy) {
775 nd6log2((LOG_DEBUG, "%s: %s is blocked by %d waiters\n",
776 __func__, ap->draining ? "drainer" : "timer",
777 nd6_service_waiters));
778 nd6_service_waiters++;
779 (void) msleep(nd6_service_wc, rnh_lock, (PZERO-1),
780 __func__, NULL);
781 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
782 }
783
784 /* We are busy now; tell everyone else to go away */
785 nd6_service_busy = TRUE;
786
787 net_update_uptime();
788 timenow = net_uptime();
b0d623f7 789again:
3e170ce0
A
790 /*
791 * send_nc_failure_kev gets set when default router's IPv6 address
792 * can't be resolved.
793 * That can happen either:
794 * 1. When the entry has resolved once but can't be
795 * resolved later and the neighbor cache entry for gateway is deleted
796 * after max probe attempts.
797 *
798 * 2. When the entry is in ND6_LLINFO_INCOMPLETE but can not be resolved
799 * after max neighbor address resolution attempts.
800 *
801 * Both set send_nc_failure_kev to true. ifp is also set to the previous
802 * neighbor cache entry's route's ifp.
803 * Once we are done sending the notification, set send_nc_failure_kev
804 * to false to stop sending false notifications for non default router
805 * neighbors.
806 *
807 * We may to send more information like Gateway's IP that could not be
808 * resolved, however right now we do not install more than one default
809 * route per interface in the routing table.
810 */
811 if (send_nc_failure_kev && ifp->if_addrlen == IF_LLREACH_MAXLEN) {
812 struct kev_msg ev_msg;
813 struct kev_nd6_ndfailure nd6_ndfailure;
814 bzero(&ev_msg, sizeof(ev_msg));
815 bzero(&nd6_ndfailure, sizeof(nd6_ndfailure));
816 ev_msg.vendor_code = KEV_VENDOR_APPLE;
817 ev_msg.kev_class = KEV_NETWORK_CLASS;
818 ev_msg.kev_subclass = KEV_ND6_SUBCLASS;
819 ev_msg.event_code = KEV_ND6_NDFAILURE;
820
821 nd6_ndfailure.link_data.if_family = ifp->if_family;
822 nd6_ndfailure.link_data.if_unit = ifp->if_unit;
823 strlcpy(nd6_ndfailure.link_data.if_name,
824 ifp->if_name,
825 sizeof(nd6_ndfailure.link_data.if_name));
826 ev_msg.dv[0].data_ptr = &nd6_ndfailure;
827 ev_msg.dv[0].data_length =
828 sizeof(nd6_ndfailure);
39037602 829 dlil_post_complete_msg(NULL, &ev_msg);
3e170ce0
A
830 }
831
832 send_nc_failure_kev = false;
833 ifp = NULL;
b0d623f7
A
834 /*
835 * The global list llinfo_nd6 is modified by nd6_request() and is
836 * therefore protected by rnh_lock. For obvious reasons, we cannot
837 * hold rnh_lock across calls that might lead to code paths which
838 * attempt to acquire rnh_lock, else we deadlock. Hence for such
839 * cases we drop rt_lock and rnh_lock, make the calls, and repeat the
840 * loop. To ensure that we don't process the same entry more than
841 * once in a single timeout, we mark the "already-seen" entries with
842 * ND6_LNF_TIMER_SKIP flag. At the end of the loop, we do a second
843 * pass thru the entries and clear the flag so they can be processed
844 * during the next timeout.
845 */
39236c6e
A
846 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
847
1c79356b 848 ln = llinfo_nd6.ln_next;
b0d623f7 849 while (ln != NULL && ln != &llinfo_nd6) {
1c79356b 850 struct rtentry *rt;
1c79356b 851 struct sockaddr_in6 *dst;
b0d623f7 852 struct llinfo_nd6 *next;
316670eb 853 u_int32_t retrans, flags;
3e170ce0 854 struct nd_ifinfo *ndi = NULL;
b0d623f7
A
855
856 /* ln_next/prev/rt is protected by rnh_lock */
857 next = ln->ln_next;
858 rt = ln->ln_rt;
859 RT_LOCK(rt);
1c79356b 860
b0d623f7
A
861 /* We've seen this already; skip it */
862 if (ln->ln_flags & ND6_LNF_TIMER_SKIP) {
863 RT_UNLOCK(rt);
1c79356b
A
864 ln = next;
865 continue;
866 }
39236c6e 867 ap->found++;
b0d623f7
A
868
869 /* rt->rt_ifp should never be NULL */
1c79356b 870 if ((ifp = rt->rt_ifp) == NULL) {
b0d623f7
A
871 panic("%s: ln(%p) rt(%p) rt_ifp == NULL", __func__,
872 ln, rt);
873 /* NOTREACHED */
1c79356b 874 }
e2fac8b1 875
b0d623f7
A
876 /* rt_llinfo must always be equal to ln */
877 if ((struct llinfo_nd6 *)rt->rt_llinfo != ln) {
878 panic("%s: rt_llinfo(%p) is not equal to ln(%p)",
39236c6e 879 __func__, rt->rt_llinfo, ln);
b0d623f7
A
880 /* NOTREACHED */
881 }
e2fac8b1 882
b0d623f7 883 /* rt_key should never be NULL */
39236c6e 884 dst = SIN6(rt_key(rt));
b0d623f7
A
885 if (dst == NULL) {
886 panic("%s: rt(%p) key is NULL ln(%p)", __func__,
887 rt, ln);
888 /* NOTREACHED */
1c79356b 889 }
55e303ae 890
b0d623f7
A
891 /* Set the flag in case we jump to "again" */
892 ln->ln_flags |= ND6_LNF_TIMER_SKIP;
893
39236c6e
A
894 if (ln->ln_expire == 0 || (rt->rt_flags & RTF_STATIC)) {
895 ap->sticky++;
896 } else if (ap->draining && (rt->rt_refcnt == 0)) {
897 /*
898 * If we are draining, immediately purge non-static
899 * entries without oustanding route refcnt.
900 */
901 if (ln->ln_state > ND6_LLINFO_INCOMPLETE)
39037602 902 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_STALE);
39236c6e 903 else
39037602 904 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_PURGE);
39236c6e
A
905 ln_setexpire(ln, timenow);
906 }
907
908 /*
909 * If the entry has not expired, skip it. Take note on the
910 * state, as entries that are in the STALE state are simply
911 * waiting to be garbage collected, in which case we can
912 * relax the callout scheduling (use nd6_prune_lazy).
913 */
914 if (ln->ln_expire > timenow) {
915 switch (ln->ln_state) {
916 case ND6_LLINFO_STALE:
917 ap->aging_lazy++;
918 break;
919 default:
920 ap->aging++;
921 break;
922 }
b0d623f7 923 RT_UNLOCK(rt);
55e303ae
A
924 ln = next;
925 continue;
926 }
b0d623f7 927
3e170ce0
A
928 ndi = ND_IFINFO(ifp);
929 VERIFY(ndi->initialized);
930 retrans = ndi->retrans;
931 flags = ndi->flags;
b0d623f7
A
932
933 RT_LOCK_ASSERT_HELD(rt);
1c79356b
A
934
935 switch (ln->ln_state) {
936 case ND6_LLINFO_INCOMPLETE:
937 if (ln->ln_asked < nd6_mmaxtries) {
39236c6e 938 struct ifnet *exclifp = ln->ln_exclifp;
1c79356b 939 ln->ln_asked++;
39236c6e 940 ln_setexpire(ln, timenow + retrans / 1000);
b0d623f7
A
941 RT_ADDREF_LOCKED(rt);
942 RT_UNLOCK(rt);
943 lck_mtx_unlock(rnh_lock);
316670eb 944 if (ip6_forwarding) {
39236c6e
A
945 nd6_prproxy_ns_output(ifp, exclifp,
946 NULL, &dst->sin6_addr, ln);
316670eb
A
947 } else {
948 nd6_ns_output(ifp, NULL,
39037602 949 &dst->sin6_addr, ln, NULL);
316670eb 950 }
b0d623f7 951 RT_REMREF(rt);
39236c6e
A
952 ap->aging++;
953 lck_mtx_lock(rnh_lock);
1c79356b
A
954 } else {
955 struct mbuf *m = ln->ln_hold;
55e303ae 956 ln->ln_hold = NULL;
3e170ce0 957 send_nc_failure_kev = (rt->rt_flags & RTF_ROUTER) ? true : false;
b0d623f7 958 if (m != NULL) {
39236c6e 959 RT_ADDREF_LOCKED(rt);
b0d623f7
A
960 RT_UNLOCK(rt);
961 lck_mtx_unlock(rnh_lock);
3e170ce0
A
962
963 struct mbuf *mnext;
964 while (m) {
965 mnext = m->m_nextpkt;
966 m->m_nextpkt = NULL;
967 m->m_pkthdr.rcvif = ifp;
968 icmp6_error_flag(m, ICMP6_DST_UNREACH,
969 ICMP6_DST_UNREACH_ADDR, 0, 0);
970 m = mnext;
971 }
b0d623f7 972 } else {
39236c6e 973 RT_ADDREF_LOCKED(rt);
b0d623f7
A
974 RT_UNLOCK(rt);
975 lck_mtx_unlock(rnh_lock);
1c79356b 976 }
b0d623f7 977 nd6_free(rt);
39236c6e
A
978 ap->killed++;
979 lck_mtx_lock(rnh_lock);
980 rtfree_locked(rt);
1c79356b 981 }
39236c6e 982 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
b0d623f7
A
983 goto again;
984
1c79356b 985 case ND6_LLINFO_REACHABLE:
39236c6e 986 if (ln->ln_expire != 0) {
39037602 987 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_STALE);
39236c6e
A
988 ln_setexpire(ln, timenow + nd6_gctimer);
989 ap->aging_lazy++;
9bccf70c 990 }
b0d623f7 991 RT_UNLOCK(rt);
1c79356b 992 break;
9bccf70c
A
993
994 case ND6_LLINFO_STALE:
e2fac8b1 995 case ND6_LLINFO_PURGE:
39236c6e
A
996 /* Garbage Collection(RFC 4861 5.3) */
997 if (ln->ln_expire != 0) {
998 RT_ADDREF_LOCKED(rt);
b0d623f7
A
999 RT_UNLOCK(rt);
1000 lck_mtx_unlock(rnh_lock);
1001 nd6_free(rt);
39236c6e
A
1002 ap->killed++;
1003 lck_mtx_lock(rnh_lock);
1004 rtfree_locked(rt);
b0d623f7
A
1005 goto again;
1006 } else {
1007 RT_UNLOCK(rt);
1008 }
9bccf70c
A
1009 break;
1010
1c79356b 1011 case ND6_LLINFO_DELAY:
316670eb 1012 if ((flags & ND6_IFF_PERFORMNUD) != 0) {
1c79356b
A
1013 /* We need NUD */
1014 ln->ln_asked = 1;
39037602 1015 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_PROBE);
39236c6e 1016 ln_setexpire(ln, timenow + retrans / 1000);
b0d623f7
A
1017 RT_ADDREF_LOCKED(rt);
1018 RT_UNLOCK(rt);
1019 lck_mtx_unlock(rnh_lock);
1c79356b 1020 nd6_ns_output(ifp, &dst->sin6_addr,
39037602 1021 &dst->sin6_addr, ln, NULL);
b0d623f7 1022 RT_REMREF(rt);
39236c6e
A
1023 ap->aging++;
1024 lck_mtx_lock(rnh_lock);
b0d623f7 1025 goto again;
9bccf70c 1026 }
39037602 1027 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_STALE); /* XXX */
39236c6e 1028 ln_setexpire(ln, timenow + nd6_gctimer);
b0d623f7 1029 RT_UNLOCK(rt);
39236c6e 1030 ap->aging_lazy++;
1c79356b 1031 break;
b0d623f7 1032
1c79356b
A
1033 case ND6_LLINFO_PROBE:
1034 if (ln->ln_asked < nd6_umaxtries) {
1035 ln->ln_asked++;
39236c6e 1036 ln_setexpire(ln, timenow + retrans / 1000);
b0d623f7
A
1037 RT_ADDREF_LOCKED(rt);
1038 RT_UNLOCK(rt);
1039 lck_mtx_unlock(rnh_lock);
1c79356b 1040 nd6_ns_output(ifp, &dst->sin6_addr,
39037602 1041 &dst->sin6_addr, ln, NULL);
b0d623f7 1042 RT_REMREF(rt);
39236c6e
A
1043 ap->aging++;
1044 lck_mtx_lock(rnh_lock);
1c79356b 1045 } else {
3e170ce0 1046 send_nc_failure_kev = (rt->rt_flags & RTF_ROUTER) ? true : false;
39236c6e 1047 RT_ADDREF_LOCKED(rt);
b0d623f7
A
1048 RT_UNLOCK(rt);
1049 lck_mtx_unlock(rnh_lock);
1050 nd6_free(rt);
39236c6e
A
1051 ap->killed++;
1052 lck_mtx_lock(rnh_lock);
1053 rtfree_locked(rt);
1c79356b 1054 }
39236c6e 1055 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
b0d623f7
A
1056 goto again;
1057
1058 default:
1059 RT_UNLOCK(rt);
1c79356b 1060 break;
1c79356b
A
1061 }
1062 ln = next;
1063 }
b0d623f7
A
1064 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
1065
1066 /* Now clear the flag from all entries */
1067 ln = llinfo_nd6.ln_next;
1068 while (ln != NULL && ln != &llinfo_nd6) {
1069 struct rtentry *rt = ln->ln_rt;
1070 struct llinfo_nd6 *next = ln->ln_next;
1071
1072 RT_LOCK_SPIN(rt);
1073 if (ln->ln_flags & ND6_LNF_TIMER_SKIP)
1074 ln->ln_flags &= ~ND6_LNF_TIMER_SKIP;
1075 RT_UNLOCK(rt);
1076 ln = next;
1077 }
1078 lck_mtx_unlock(rnh_lock);
1079
9bccf70c 1080 /* expire default router list */
39037602
A
1081 TAILQ_INIT(&nd_defrouter_tmp);
1082
91447636 1083 lck_mtx_lock(nd6_mutex);
39037602 1084 TAILQ_FOREACH_SAFE(dr, &nd_defrouter, dr_entry, ndr) {
39236c6e
A
1085 ap->found++;
1086 if (dr->expire != 0 && dr->expire < timenow) {
39037602
A
1087 if (dr->ifp != NULL &&
1088 dr->ifp->if_type == IFT_CELLULAR) {
1089 /*
1090 * Some buggy cellular gateways may not send
1091 * periodic router advertisements.
1092 * Or they may send it with router lifetime
1093 * value that is less than the configured Max and Min
1094 * Router Advertisement interval.
1095 * To top that an idle device may not wake up
1096 * when periodic RA is received on cellular
1097 * interface.
1098 * We could send RS on every wake but RFC
1099 * 4861 precludes that.
1100 * The addresses are of infinite lifetimes
1101 * and are tied to the lifetime of the bearer,
1102 * so keeping the addresses and just getting rid of
1103 * the router does not help us anyways.
1104 * If there's network renumbering, a lifetime with
1105 * value 0 would remove the default router.
1106 * Also it will get deleted as part of purge when
1107 * the PDP context is torn down and configured again.
1108 * For that reason, do not expire the default router
1109 * learned on cellular interface. Ever.
1110 */
1111 dr->expire += dr->rtlifetime;
1112 nd6log2((LOG_DEBUG,
1113 "%s: Refreshing expired default router entry "
1114 "%s for interface %s\n", __func__,
1115 ip6_sprintf(&dr->rtaddr), if_name(dr->ifp)));
1116 } else {
1117 ap->killed++;
1118 /*
1119 * Remove the entry from default router list
1120 * and add it to the temp list.
1121 * nd_defrouter_tmp will be a local temporary
1122 * list as no one else can get the same
1123 * removed entry once it is removed from default
1124 * router list.
1125 * Remove the reference after calling defrtrlist_del
1126 */
1127 TAILQ_REMOVE(&nd_defrouter, dr, dr_entry);
1128 TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry);
1129 }
1c79356b 1130 } else {
39236c6e
A
1131 if (dr->expire == 0 || (dr->stateflags & NDDRF_STATIC))
1132 ap->sticky++;
1133 else
1134 ap->aging_lazy++;
1c79356b
A
1135 }
1136 }
39037602
A
1137
1138 /*
1139 * Keep the following separate from the above
1140 * iteration of nd_defrouter because it's not safe
1141 * to call defrtrlist_del while iterating global default
1142 * router list. Global list has to be traversed
1143 * while holding nd6_mutex throughout.
1144 *
1145 * The following call to defrtrlist_del should be
1146 * safe as we are iterating a local list of
1147 * default routers.
1148 */
1149 TAILQ_FOREACH_SAFE(dr, &nd_defrouter_tmp, dr_entry, ndr) {
1150 TAILQ_REMOVE(&nd_defrouter_tmp, dr, dr_entry);
1151 defrtrlist_del(dr);
1152 NDDR_REMREF(dr); /* remove list reference */
1153 }
6d2010ae 1154 lck_mtx_unlock(nd6_mutex);
1c79356b 1155
9bccf70c
A
1156 /*
1157 * expire interface addresses.
1158 * in the past the loop was inside prefix expiry processing.
1159 * However, from a stricter speci-confrmance standpoint, we should
1160 * rather separate address lifetimes and prefix lifetimes.
1161 */
6d2010ae
A
1162addrloop:
1163 lck_rw_lock_exclusive(&in6_ifaddr_rwlock);
91447636 1164 for (ia6 = in6_ifaddrs; ia6; ia6 = nia6) {
39236c6e 1165 ap->found++;
9bccf70c 1166 nia6 = ia6->ia_next;
6d2010ae
A
1167 IFA_LOCK(&ia6->ia_ifa);
1168 /*
1169 * Extra reference for ourselves; it's no-op if
1170 * we don't have to regenerate temporary address,
1171 * otherwise it protects the address from going
1172 * away since we drop in6_ifaddr_rwlock below.
1173 */
1174 IFA_ADDREF_LOCKED(&ia6->ia_ifa);
9bccf70c 1175 /* check address lifetime */
39236c6e 1176 if (IFA6_IS_INVALID(ia6, timenow)) {
9bccf70c
A
1177 /*
1178 * If the expiring address is temporary, try
1179 * regenerating a new one. This would be useful when
55e303ae 1180 * we suspended a laptop PC, then turned it on after a
9bccf70c
A
1181 * period that could invalidate all temporary
1182 * addresses. Although we may have to restart the
1183 * loop (see below), it must be after purging the
1184 * address. Otherwise, we'd see an infinite loop of
316670eb 1185 * regeneration.
9bccf70c
A
1186 */
1187 if (ip6_use_tempaddr &&
1188 (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0) {
316670eb
A
1189 /*
1190 * NOTE: We have to drop the lock here
1191 * because regen_tmpaddr() eventually calls
1192 * in6_update_ifa(), which must take the lock
1193 * and would otherwise cause a hang. This is
1194 * safe because the goto addrloop leads to a
1195 * re-evaluation of the in6_ifaddrs list
2d21ac55 1196 */
6d2010ae
A
1197 IFA_UNLOCK(&ia6->ia_ifa);
1198 lck_rw_done(&in6_ifaddr_rwlock);
1199 (void) regen_tmpaddr(ia6);
1200 } else {
1201 IFA_UNLOCK(&ia6->ia_ifa);
1202 lck_rw_done(&in6_ifaddr_rwlock);
9bccf70c
A
1203 }
1204
6d2010ae
A
1205 /*
1206 * Purging the address would have caused
1207 * in6_ifaddr_rwlock to be dropped and reacquired;
1208 * therefore search again from the beginning
1209 * of in6_ifaddrs list.
1210 */
1211 in6_purgeaddr(&ia6->ia_ifa);
39236c6e 1212 ap->killed++;
9bccf70c 1213
b0d623f7 1214 /* Release extra reference taken above */
6d2010ae
A
1215 IFA_REMREF(&ia6->ia_ifa);
1216 goto addrloop;
55e303ae 1217 }
39236c6e
A
1218 /*
1219 * The lazy timer runs every nd6_prune_lazy seconds with at
1220 * most "2 * nd6_prune_lazy - 1" leeway. We consider the worst
1221 * case here and make sure we schedule the regular timer if an
1222 * interface address is about to expire.
1223 */
1224 if (IFA6_IS_INVALID(ia6, timenow + 3 * nd6_prune_lazy))
1225 ap->aging++;
1226 else
1227 ap->aging_lazy++;
6d2010ae 1228 IFA_LOCK_ASSERT_HELD(&ia6->ia_ifa);
39236c6e 1229 if (IFA6_IS_DEPRECATED(ia6, timenow)) {
9bccf70c
A
1230 int oldflags = ia6->ia6_flags;
1231
1232 ia6->ia6_flags |= IN6_IFF_DEPRECATED;
1233
1234 /*
1235 * If a temporary address has just become deprecated,
1236 * regenerate a new one if possible.
1237 */
1238 if (ip6_use_tempaddr &&
1239 (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0 &&
1240 (oldflags & IN6_IFF_DEPRECATED) == 0) {
1241
2d21ac55 1242 /* see NOTE above */
6d2010ae
A
1243 IFA_UNLOCK(&ia6->ia_ifa);
1244 lck_rw_done(&in6_ifaddr_rwlock);
9bccf70c
A
1245 if (regen_tmpaddr(ia6) == 0) {
1246 /*
1247 * A new temporary address is
1248 * generated.
1249 * XXX: this means the address chain
1250 * has changed while we are still in
1251 * the loop. Although the change
1252 * would not cause disaster (because
55e303ae
A
1253 * it's not a deletion, but an
1254 * addition,) we'd rather restart the
316670eb 1255 * loop just for safety. Or does this
9bccf70c
A
1256 * significantly reduce performance??
1257 */
6d2010ae
A
1258 /* Release extra reference */
1259 IFA_REMREF(&ia6->ia_ifa);
9bccf70c
A
1260 goto addrloop;
1261 }
6d2010ae
A
1262 lck_rw_lock_exclusive(&in6_ifaddr_rwlock);
1263 } else {
1264 IFA_UNLOCK(&ia6->ia_ifa);
1c79356b 1265 }
55e303ae 1266 } else {
9bccf70c
A
1267 /*
1268 * A new RA might have made a deprecated address
1269 * preferred.
1270 */
1271 ia6->ia6_flags &= ~IN6_IFF_DEPRECATED;
6d2010ae 1272 IFA_UNLOCK(&ia6->ia_ifa);
1c79356b 1273 }
6d2010ae
A
1274 lck_rw_assert(&in6_ifaddr_rwlock, LCK_RW_ASSERT_EXCLUSIVE);
1275 /* Release extra reference taken above */
1276 IFA_REMREF(&ia6->ia_ifa);
9bccf70c 1277 }
6d2010ae
A
1278 lck_rw_done(&in6_ifaddr_rwlock);
1279
1280 lck_mtx_lock(nd6_mutex);
9bccf70c
A
1281 /* expire prefix list */
1282 pr = nd_prefix.lh_first;
39236c6e
A
1283 while (pr != NULL) {
1284 ap->found++;
1c79356b
A
1285 /*
1286 * check prefix lifetime.
1287 * since pltime is just for autoconf, pltime processing for
1288 * prefix is not necessary.
1c79356b 1289 */
6d2010ae 1290 NDPR_LOCK(pr);
fe8ab488
A
1291 if (pr->ndpr_stateflags & NDPRF_PROCESSED_SERVICE ||
1292 pr->ndpr_stateflags & NDPRF_DEFUNCT) {
1293 pr->ndpr_stateflags |= NDPRF_PROCESSED_SERVICE;
6d2010ae
A
1294 NDPR_UNLOCK(pr);
1295 pr = pr->ndpr_next;
1296 continue;
1297 }
39236c6e 1298 if (pr->ndpr_expire != 0 && pr->ndpr_expire < timenow) {
1c79356b
A
1299 /*
1300 * address expiration and prefix expiration are
9bccf70c 1301 * separate. NEVER perform in6_purgeaddr here.
1c79356b 1302 */
39236c6e 1303 pr->ndpr_stateflags |= NDPRF_PROCESSED_SERVICE;
6d2010ae
A
1304 NDPR_ADDREF_LOCKED(pr);
1305 prelist_remove(pr);
1306 NDPR_UNLOCK(pr);
1307 NDPR_REMREF(pr);
fe8ab488 1308 pfxlist_onlink_check();
6d2010ae 1309 pr = nd_prefix.lh_first;
39236c6e 1310 ap->killed++;
6d2010ae 1311 } else {
39236c6e
A
1312 if (pr->ndpr_expire == 0 ||
1313 (pr->ndpr_stateflags & NDPRF_STATIC))
1314 ap->sticky++;
1315 else
1316 ap->aging_lazy++;
1317 pr->ndpr_stateflags |= NDPRF_PROCESSED_SERVICE;
6d2010ae 1318 NDPR_UNLOCK(pr);
1c79356b 1319 pr = pr->ndpr_next;
6d2010ae
A
1320 }
1321 }
1322 LIST_FOREACH(pr, &nd_prefix, ndpr_entry) {
1323 NDPR_LOCK(pr);
39236c6e 1324 pr->ndpr_stateflags &= ~NDPRF_PROCESSED_SERVICE;
6d2010ae
A
1325 NDPR_UNLOCK(pr);
1326 }
91447636 1327 lck_mtx_unlock(nd6_mutex);
39236c6e
A
1328
1329 lck_mtx_lock(rnh_lock);
1330 /* We're done; let others enter */
1331 nd6_service_busy = FALSE;
1332 if (nd6_service_waiters > 0) {
1333 nd6_service_waiters = 0;
1334 wakeup(nd6_service_wc);
1335 }
1336}
1337
39037602
A
1338
1339static int nd6_need_draining = 0;
1340
39236c6e
A
1341void
1342nd6_drain(void *arg)
1343{
1344#pragma unused(arg)
39236c6e
A
1345 nd6log2((LOG_DEBUG, "%s: draining ND6 entries\n", __func__));
1346
1347 lck_mtx_lock(rnh_lock);
39037602
A
1348 nd6_need_draining = 1;
1349 nd6_sched_timeout(NULL, NULL);
39236c6e
A
1350 lck_mtx_unlock(rnh_lock);
1351}
1352
1353/*
1354 * We use the ``arg'' variable to decide whether or not the timer we're
1355 * running is the fast timer. We do this to reset the nd6_fast_timer_on
1356 * variable so that later we don't end up ignoring a ``fast timer''
1357 * request if the 5 second timer is running (see nd6_sched_timeout).
1358 */
1359static void
1360nd6_timeout(void *arg)
1361{
1362 struct nd6svc_arg sarg;
fe8ab488 1363 uint32_t buf;
39236c6e
A
1364
1365 lck_mtx_lock(rnh_lock);
1366 bzero(&sarg, sizeof (sarg));
39037602
A
1367 if (nd6_need_draining != 0) {
1368 nd6_need_draining = 0;
1369 sarg.draining = 1;
1370 }
39236c6e
A
1371 nd6_service(&sarg);
1372 nd6log2((LOG_DEBUG, "%s: found %u, aging_lazy %u, aging %u, "
1373 "sticky %u, killed %u\n", __func__, sarg.found, sarg.aging_lazy,
1374 sarg.aging, sarg.sticky, sarg.killed));
1375 /* re-arm the timer if there's work to do */
1376 nd6_timeout_run--;
1377 VERIFY(nd6_timeout_run >= 0 && nd6_timeout_run < 2);
1378 if (arg == &nd6_fast_timer_on)
1379 nd6_fast_timer_on = FALSE;
1380 if (sarg.aging_lazy > 0 || sarg.aging > 0 || nd6_sched_timeout_want) {
1381 struct timeval atv, ltv, *leeway;
1382 int lazy = nd6_prune_lazy;
1383
1384 if (sarg.aging > 0 || lazy < 1) {
1385 atv.tv_usec = 0;
1386 atv.tv_sec = nd6_prune;
1387 leeway = NULL;
1388 } else {
1389 VERIFY(lazy >= 1);
1390 atv.tv_usec = 0;
1391 atv.tv_sec = MAX(nd6_prune, lazy);
1392 ltv.tv_usec = 0;
fe8ab488
A
1393 read_frandom(&buf, sizeof(buf));
1394 ltv.tv_sec = MAX(buf % lazy, 1) * 2;
39236c6e
A
1395 leeway = &ltv;
1396 }
1397 nd6_sched_timeout(&atv, leeway);
1398 } else if (nd6_debug) {
1399 nd6log2((LOG_DEBUG, "%s: not rescheduling timer\n", __func__));
1400 }
1401 lck_mtx_unlock(rnh_lock);
1402}
1403
1404void
1405nd6_sched_timeout(struct timeval *atv, struct timeval *ltv)
1406{
1407 struct timeval tv;
1408
1409 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
1410 if (atv == NULL) {
1411 tv.tv_usec = 0;
1412 tv.tv_sec = MAX(nd6_prune, 1);
1413 atv = &tv;
1414 ltv = NULL; /* ignore leeway */
1415 }
1416 /* see comments on top of this file */
1417 if (nd6_timeout_run == 0) {
1418 if (ltv == NULL) {
1419 nd6log2((LOG_DEBUG, "%s: timer scheduled in "
1420 "T+%llus.%lluu (demand %d)\n", __func__,
1421 (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec,
1422 nd6_sched_timeout_want));
1423 nd6_fast_timer_on = TRUE;
1424 timeout(nd6_timeout, &nd6_fast_timer_on, tvtohz(atv));
1425 } else {
1426 nd6log2((LOG_DEBUG, "%s: timer scheduled in "
1427 "T+%llus.%lluu with %llus.%lluu leeway "
1428 "(demand %d)\n", __func__, (uint64_t)atv->tv_sec,
1429 (uint64_t)atv->tv_usec, (uint64_t)ltv->tv_sec,
1430 (uint64_t)ltv->tv_usec, nd6_sched_timeout_want));
1431 nd6_fast_timer_on = FALSE;
1432 timeout_with_leeway(nd6_timeout, NULL,
1433 tvtohz(atv), tvtohz(ltv));
1434 }
1435 nd6_timeout_run++;
1436 nd6_sched_timeout_want = 0;
1437 } else if (nd6_timeout_run == 1 && ltv == NULL &&
1438 nd6_fast_timer_on == FALSE) {
1439 nd6log2((LOG_DEBUG, "%s: fast timer scheduled in "
1440 "T+%llus.%lluu (demand %d)\n", __func__,
1441 (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec,
1442 nd6_sched_timeout_want));
1443 nd6_fast_timer_on = TRUE;
1444 nd6_sched_timeout_want = 0;
1445 nd6_timeout_run++;
1446 timeout(nd6_timeout, &nd6_fast_timer_on, tvtohz(atv));
1447 } else {
1448 if (ltv == NULL) {
1449 nd6log2((LOG_DEBUG, "%s: not scheduling timer: "
1450 "timers %d, fast_timer %d, T+%llus.%lluu\n",
1451 __func__, nd6_timeout_run, nd6_fast_timer_on,
1452 (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec));
1453 } else {
1454 nd6log2((LOG_DEBUG, "%s: not scheduling timer: "
1455 "timers %d, fast_timer %d, T+%llus.%lluu "
1456 "with %llus.%lluu leeway\n", __func__,
1457 nd6_timeout_run, nd6_fast_timer_on,
1458 (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec,
1459 (uint64_t)ltv->tv_sec, (uint64_t)ltv->tv_usec));
1460 }
1461 }
d1ecb069
A
1462}
1463
316670eb
A
1464/*
1465 * ND6 router advertisement kernel notification
1466 */
1467void
1468nd6_post_msg(u_int32_t code, struct nd_prefix_list *prefix_list,
1469 u_int32_t list_length, u_int32_t mtu, char *dl_addr, u_int32_t dl_addr_len)
1470{
1471 struct kev_msg ev_msg;
1472 struct kev_nd6_ra_data nd6_ra_msg_data;
1473 struct nd_prefix_list *itr = prefix_list;
1474
39236c6e
A
1475 bzero(&ev_msg, sizeof (struct kev_msg));
1476 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1477 ev_msg.kev_class = KEV_NETWORK_CLASS;
1478 ev_msg.kev_subclass = KEV_ND6_SUBCLASS;
1479 ev_msg.event_code = code;
316670eb 1480
39236c6e 1481 bzero(&nd6_ra_msg_data, sizeof (nd6_ra_msg_data));
316670eb
A
1482 nd6_ra_msg_data.lladdrlen = (dl_addr_len <= ND6_ROUTER_LL_SIZE) ?
1483 dl_addr_len : ND6_ROUTER_LL_SIZE;
1484 bcopy(dl_addr, &nd6_ra_msg_data.lladdr, nd6_ra_msg_data.lladdrlen);
1485
1486 if (mtu > 0 && mtu >= IPV6_MMTU) {
1487 nd6_ra_msg_data.mtu = mtu;
1488 nd6_ra_msg_data.flags |= KEV_ND6_DATA_VALID_MTU;
1489 }
1490
1491 if (list_length > 0 && prefix_list != NULL) {
1492 nd6_ra_msg_data.list_length = list_length;
1493 nd6_ra_msg_data.flags |= KEV_ND6_DATA_VALID_PREFIX;
1494 }
1495
1496 while (itr != NULL && nd6_ra_msg_data.list_index < list_length) {
1497 bcopy(&itr->pr.ndpr_prefix, &nd6_ra_msg_data.prefix.prefix,
1498 sizeof (nd6_ra_msg_data.prefix.prefix));
1499 nd6_ra_msg_data.prefix.raflags = itr->pr.ndpr_raf;
1500 nd6_ra_msg_data.prefix.prefixlen = itr->pr.ndpr_plen;
1501 nd6_ra_msg_data.prefix.origin = PR_ORIG_RA;
1502 nd6_ra_msg_data.prefix.vltime = itr->pr.ndpr_vltime;
1503 nd6_ra_msg_data.prefix.pltime = itr->pr.ndpr_pltime;
39236c6e 1504 nd6_ra_msg_data.prefix.expire = ndpr_getexpire(&itr->pr);
316670eb
A
1505 nd6_ra_msg_data.prefix.flags = itr->pr.ndpr_stateflags;
1506 nd6_ra_msg_data.prefix.refcnt = itr->pr.ndpr_addrcnt;
1507 nd6_ra_msg_data.prefix.if_index = itr->pr.ndpr_ifp->if_index;
1508
1509 /* send the message up */
39236c6e
A
1510 ev_msg.dv[0].data_ptr = &nd6_ra_msg_data;
1511 ev_msg.dv[0].data_length = sizeof (nd6_ra_msg_data);
1512 ev_msg.dv[1].data_length = 0;
39037602 1513 dlil_post_complete_msg(NULL, &ev_msg);
316670eb
A
1514
1515 /* clean up for the next prefix */
39236c6e 1516 bzero(&nd6_ra_msg_data.prefix, sizeof (nd6_ra_msg_data.prefix));
316670eb
A
1517 itr = itr->next;
1518 nd6_ra_msg_data.list_index++;
1519 }
1520}
1521
d1ecb069 1522/*
39236c6e 1523 * Regenerate deprecated/invalidated temporary address
d1ecb069 1524 */
9bccf70c 1525static int
39236c6e 1526regen_tmpaddr(struct in6_ifaddr *ia6)
9bccf70c
A
1527{
1528 struct ifaddr *ifa;
1529 struct ifnet *ifp;
1530 struct in6_ifaddr *public_ifa6 = NULL;
39236c6e 1531 uint64_t timenow = net_uptime();
9bccf70c
A
1532
1533 ifp = ia6->ia_ifa.ifa_ifp;
6d2010ae 1534 ifnet_lock_shared(ifp);
39236c6e 1535 TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
9bccf70c
A
1536 struct in6_ifaddr *it6;
1537
6d2010ae
A
1538 IFA_LOCK(ifa);
1539 if (ifa->ifa_addr->sa_family != AF_INET6) {
1540 IFA_UNLOCK(ifa);
9bccf70c 1541 continue;
6d2010ae 1542 }
9bccf70c
A
1543 it6 = (struct in6_ifaddr *)ifa;
1544
1545 /* ignore no autoconf addresses. */
6d2010ae
A
1546 if ((it6->ia6_flags & IN6_IFF_AUTOCONF) == 0) {
1547 IFA_UNLOCK(ifa);
9bccf70c 1548 continue;
6d2010ae 1549 }
9bccf70c 1550 /* ignore autoconf addresses with different prefixes. */
6d2010ae
A
1551 if (it6->ia6_ndpr == NULL || it6->ia6_ndpr != ia6->ia6_ndpr) {
1552 IFA_UNLOCK(ifa);
9bccf70c 1553 continue;
6d2010ae 1554 }
9bccf70c
A
1555 /*
1556 * Now we are looking at an autoconf address with the same
1557 * prefix as ours. If the address is temporary and is still
1558 * preferred, do not create another one. It would be rare, but
1559 * could happen, for example, when we resume a laptop PC after
1560 * a long period.
1561 */
1562 if ((it6->ia6_flags & IN6_IFF_TEMPORARY) != 0 &&
39236c6e 1563 !IFA6_IS_DEPRECATED(it6, timenow)) {
6d2010ae
A
1564 IFA_UNLOCK(ifa);
1565 if (public_ifa6 != NULL)
1566 IFA_REMREF(&public_ifa6->ia_ifa);
9bccf70c
A
1567 public_ifa6 = NULL;
1568 break;
1569 }
1570
1571 /*
1572 * This is a public autoconf address that has the same prefix
1573 * as ours. If it is preferred, keep it. We can't break the
1574 * loop here, because there may be a still-preferred temporary
1575 * address with the prefix.
1576 */
39236c6e 1577 if (!IFA6_IS_DEPRECATED(it6, timenow)) {
6d2010ae
A
1578 IFA_ADDREF_LOCKED(ifa); /* for public_ifa6 */
1579 IFA_UNLOCK(ifa);
1580 if (public_ifa6 != NULL)
1581 IFA_REMREF(&public_ifa6->ia_ifa);
1582 public_ifa6 = it6;
1583 } else {
1584 IFA_UNLOCK(ifa);
1585 }
9bccf70c 1586 }
91447636 1587 ifnet_lock_done(ifp);
9bccf70c
A
1588
1589 if (public_ifa6 != NULL) {
1590 int e;
1591
39236c6e 1592 if ((e = in6_tmpifadd(public_ifa6, 0)) != 0) {
9bccf70c
A
1593 log(LOG_NOTICE, "regen_tmpaddr: failed to create a new"
1594 " tmp addr,errno=%d\n", e);
6d2010ae 1595 IFA_REMREF(&public_ifa6->ia_ifa);
39236c6e 1596 return (-1);
9bccf70c 1597 }
6d2010ae 1598 IFA_REMREF(&public_ifa6->ia_ifa);
39236c6e 1599 return (0);
9bccf70c
A
1600 }
1601
39236c6e 1602 return (-1);
9bccf70c
A
1603}
1604
1c79356b
A
1605/*
1606 * Nuke neighbor cache/prefix/default router management table, right before
1607 * ifp goes away.
1608 */
1609void
39236c6e 1610nd6_purge(struct ifnet *ifp)
1c79356b 1611{
b0d623f7 1612 struct llinfo_nd6 *ln;
6d2010ae 1613 struct nd_defrouter *dr, *ndr;
1c79356b 1614 struct nd_prefix *pr, *npr;
fe8ab488 1615 boolean_t removed;
39037602
A
1616 struct nd_drhead nd_defrouter_tmp;
1617
1618 TAILQ_INIT(&nd_defrouter_tmp);
1c79356b
A
1619
1620 /* Nuke default router list entries toward ifp */
91447636 1621 lck_mtx_lock(nd6_mutex);
39037602
A
1622 TAILQ_FOREACH_SAFE(dr, &nd_defrouter, dr_entry, ndr) {
1623 if (dr->ifp != ifp)
1624 continue;
1c79356b 1625 /*
39037602
A
1626 * Remove the entry from default router list
1627 * and add it to the temp list.
1628 * nd_defrouter_tmp will be a local temporary
1629 * list as no one else can get the same
1630 * removed entry once it is removed from default
1631 * router list.
1632 * Remove the reference after calling defrtrlist_del.
1633 *
1634 * The uninstalled entries have to be iterated first
1635 * when we call defrtrlist_del.
1636 * This is to ensure that we don't end up calling
1637 * default router selection when there are other
1638 * uninstalled candidate default routers on
1639 * the interface.
1640 * If we don't respect that order, we may end
1641 * up missing out on some entries.
1642 *
1643 * For that reason, installed ones must be inserted
1644 * at the tail and uninstalled ones at the head
1c79356b 1645 */
39037602 1646 TAILQ_REMOVE(&nd_defrouter, dr, dr_entry);
6d2010ae 1647
39037602
A
1648 if (dr->stateflags & NDDRF_INSTALLED)
1649 TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry);
1650 else
1651 TAILQ_INSERT_HEAD(&nd_defrouter_tmp, dr, dr_entry);
1652 }
6d2010ae 1653
39037602
A
1654 /*
1655 * The following call to defrtrlist_del should be
1656 * safe as we are iterating a local list of
1657 * default routers.
1658 *
1659 * We don't really need nd6_mutex here but keeping
1660 * it as it is to avoid changing assertios held in
1661 * the functions in the call-path.
1662 */
1663 TAILQ_FOREACH_SAFE(dr, &nd_defrouter_tmp, dr_entry, ndr) {
1664 TAILQ_REMOVE(&nd_defrouter_tmp, dr, dr_entry);
1665 defrtrlist_del(dr);
1666 NDDR_REMREF(dr); /* remove list reference */
1c79356b
A
1667 }
1668
1669 /* Nuke prefix list entries toward ifp */
fe8ab488 1670 removed = FALSE;
1c79356b 1671 for (pr = nd_prefix.lh_first; pr; pr = npr) {
6d2010ae 1672 NDPR_LOCK(pr);
fe8ab488
A
1673 npr = pr->ndpr_next;
1674 if (pr->ndpr_ifp == ifp &&
1675 !(pr->ndpr_stateflags & NDPRF_DEFUNCT)) {
6d2010ae
A
1676 /*
1677 * Because if_detach() does *not* release prefixes
1678 * while purging addresses the reference count will
1679 * still be above zero. We therefore reset it to
1680 * make sure that the prefix really gets purged.
1681 */
1682 pr->ndpr_addrcnt = 0;
1683
9bccf70c
A
1684 /*
1685 * Previously, pr->ndpr_addr was removed as well,
1686 * but I strongly believe we don't have to do it.
1687 * nd6_purge() is only called from in6_ifdetach(),
1688 * which removes all the associated interface addresses
1689 * by itself.
1690 * (jinmei@kame.net 20010129)
1691 */
6d2010ae
A
1692 NDPR_ADDREF_LOCKED(pr);
1693 prelist_remove(pr);
1694 NDPR_UNLOCK(pr);
1695 NDPR_REMREF(pr);
fe8ab488
A
1696 removed = TRUE;
1697 npr = nd_prefix.lh_first;
6d2010ae
A
1698 } else {
1699 NDPR_UNLOCK(pr);
1c79356b
A
1700 }
1701 }
fe8ab488
A
1702 if (removed)
1703 pfxlist_onlink_check();
6d2010ae 1704 lck_mtx_unlock(nd6_mutex);
1c79356b
A
1705
1706 /* cancel default outgoing interface setting */
b0d623f7 1707 if (nd6_defifindex == ifp->if_index) {
1c79356b 1708 nd6_setdefaultiface(0);
b0d623f7 1709 }
1c79356b 1710
316670eb
A
1711 /*
1712 * Perform default router selection even when we are a router,
1713 * if Scoped Routing is enabled.
1714 */
39037602
A
1715 lck_mtx_lock(nd6_mutex);
1716 /* refresh default router list */
1717 defrouter_select(ifp);
1718 lck_mtx_unlock(nd6_mutex);
1c79356b
A
1719
1720 /*
1721 * Nuke neighbor cache entries for the ifp.
1722 * Note that rt->rt_ifp may not be the same as ifp,
1723 * due to KAME goto ours hack. See RTM_RESOLVE case in
1724 * nd6_rtrequest(), and ip6_input().
1725 */
b0d623f7
A
1726again:
1727 lck_mtx_lock(rnh_lock);
1c79356b 1728 ln = llinfo_nd6.ln_next;
b0d623f7 1729 while (ln != NULL && ln != &llinfo_nd6) {
1c79356b 1730 struct rtentry *rt;
b0d623f7 1731 struct llinfo_nd6 *nln;
1c79356b
A
1732
1733 nln = ln->ln_next;
1734 rt = ln->ln_rt;
b0d623f7
A
1735 RT_LOCK(rt);
1736 if (rt->rt_gateway != NULL &&
1737 rt->rt_gateway->sa_family == AF_LINK &&
1738 SDL(rt->rt_gateway)->sdl_index == ifp->if_index) {
39236c6e 1739 RT_ADDREF_LOCKED(rt);
b0d623f7
A
1740 RT_UNLOCK(rt);
1741 lck_mtx_unlock(rnh_lock);
1742 /*
39236c6e 1743 * See comments on nd6_service() for reasons why
b0d623f7
A
1744 * this loop is repeated; we bite the costs of
1745 * going thru the same llinfo_nd6 more than once
1746 * here, since this purge happens during detach,
1747 * and that, unlike the timer case, it's possible
1748 * there's more than one purge happening at the
1749 * same time (thus a flag wouldn't buy anything).
1750 */
1751 nd6_free(rt);
39236c6e 1752 RT_REMREF(rt);
b0d623f7
A
1753 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
1754 goto again;
1755 } else {
1756 RT_UNLOCK(rt);
1c79356b
A
1757 }
1758 ln = nln;
1759 }
b0d623f7 1760 lck_mtx_unlock(rnh_lock);
1c79356b
A
1761}
1762
b0d623f7
A
1763/*
1764 * Upon success, the returned route will be locked and the caller is
1765 * responsible for releasing the reference and doing RT_UNLOCK(rt).
1766 * This routine does not require rnh_lock to be held by the caller,
1767 * although it needs to be indicated of such a case in order to call
1768 * the correct variant of the relevant routing routines.
1769 */
1c79356b 1770struct rtentry *
39236c6e 1771nd6_lookup(struct in6_addr *addr6, int create, struct ifnet *ifp, int rt_locked)
1c79356b
A
1772{
1773 struct rtentry *rt;
1774 struct sockaddr_in6 sin6;
6d2010ae 1775 unsigned int ifscope;
1c79356b 1776
39236c6e
A
1777 bzero(&sin6, sizeof (sin6));
1778 sin6.sin6_len = sizeof (struct sockaddr_in6);
1c79356b
A
1779 sin6.sin6_family = AF_INET6;
1780 sin6.sin6_addr = *addr6;
b0d623f7 1781
6d2010ae
A
1782 ifscope = (ifp != NULL) ? ifp->if_index : IFSCOPE_NONE;
1783 if (rt_locked) {
1784 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
39236c6e 1785 rt = rtalloc1_scoped_locked(SA(&sin6), create, 0, ifscope);
6d2010ae 1786 } else {
39236c6e 1787 rt = rtalloc1_scoped(SA(&sin6), create, 0, ifscope);
6d2010ae 1788 }
b0d623f7
A
1789
1790 if (rt != NULL) {
1791 RT_LOCK(rt);
1792 if ((rt->rt_flags & RTF_LLINFO) == 0) {
1793 /*
6d2010ae
A
1794 * This is the case for the default route.
1795 * If we want to create a neighbor cache for the
1796 * address, we should free the route for the
1797 * destination and allocate an interface route.
b0d623f7
A
1798 */
1799 if (create) {
1800 RT_UNLOCK(rt);
1801 if (rt_locked)
1802 rtfree_locked(rt);
1803 else
1804 rtfree(rt);
1805 rt = NULL;
1806 }
1c79356b
A
1807 }
1808 }
b0d623f7 1809 if (rt == NULL) {
1c79356b 1810 if (create && ifp) {
b0d623f7 1811 struct ifaddr *ifa;
6d2010ae 1812 u_int32_t ifa_flags;
1c79356b
A
1813 int e;
1814
1815 /*
1816 * If no route is available and create is set,
1817 * we allocate a host route for the destination
1818 * and treat it like an interface route.
1819 * This hack is necessary for a neighbor which can't
1820 * be covered by our own prefix.
1821 */
39236c6e 1822 ifa = ifaof_ifpforaddr(SA(&sin6), ifp);
b0d623f7 1823 if (ifa == NULL)
39236c6e 1824 return (NULL);
1c79356b
A
1825
1826 /*
55e303ae 1827 * Create a new route. RTF_LLINFO is necessary
1c79356b
A
1828 * to create a Neighbor Cache entry for the
1829 * destination in nd6_rtrequest which will be
55e303ae 1830 * called in rtrequest via ifa->ifa_rtrequest.
1c79356b 1831 */
b0d623f7
A
1832 if (!rt_locked)
1833 lck_mtx_lock(rnh_lock);
6d2010ae
A
1834 IFA_LOCK_SPIN(ifa);
1835 ifa_flags = ifa->ifa_flags;
1836 IFA_UNLOCK(ifa);
1837 if ((e = rtrequest_scoped_locked(RTM_ADD,
39236c6e 1838 SA(&sin6), ifa->ifa_addr, SA(&all1_sa),
6d2010ae
A
1839 (ifa_flags | RTF_HOST | RTF_LLINFO) &
1840 ~RTF_CLONING, &rt, ifscope)) != 0) {
91447636 1841 if (e != EEXIST)
b0d623f7
A
1842 log(LOG_ERR, "%s: failed to add route "
1843 "for a neighbor(%s), errno=%d\n",
1844 __func__, ip6_sprintf(addr6), e);
91447636 1845 }
b0d623f7
A
1846 if (!rt_locked)
1847 lck_mtx_unlock(rnh_lock);
6d2010ae 1848 IFA_REMREF(ifa);
b0d623f7 1849 if (rt == NULL)
39236c6e 1850 return (NULL);
b0d623f7
A
1851
1852 RT_LOCK(rt);
1c79356b 1853 if (rt->rt_llinfo) {
b0d623f7 1854 struct llinfo_nd6 *ln = rt->rt_llinfo;
39037602
A
1855 struct nd_ifinfo *ndi = ND_IFINFO(rt->rt_ifp);
1856
1857 VERIFY((NULL != ndi) && (TRUE == ndi->initialized));
1858 /*
1859 * For interfaces that do not perform NUD,
1860 * neighbor cache entries must always be marked
1861 * reachable with no expiry.
1862 */
1863 if (ndi->flags & ND6_IFF_PERFORMNUD) {
1864 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_NOSTATE);
1865 } else {
1866 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_REACHABLE);
1867 ln_setexpire(ln, 0);
1868 }
1c79356b 1869 }
91447636 1870 } else {
39236c6e 1871 return (NULL);
91447636 1872 }
1c79356b 1873 }
b0d623f7 1874 RT_LOCK_ASSERT_HELD(rt);
1c79356b
A
1875 /*
1876 * Validation for the entry.
55e303ae
A
1877 * Note that the check for rt_llinfo is necessary because a cloned
1878 * route from a parent route that has the L flag (e.g. the default
1879 * route to a p2p interface) may have the flag, too, while the
1880 * destination is not actually a neighbor.
1c79356b 1881 * XXX: we can't use rt->rt_ifp to check for the interface, since
39236c6e
A
1882 * it might be the loopback interface if the entry is for our
1883 * own address on a non-loopback interface. Instead, we should
1884 * use rt->rt_ifa->ifa_ifp, which would specify the REAL
6d2010ae
A
1885 * interface.
1886 * Note also that ifa_ifp and ifp may differ when we connect two
1887 * interfaces to a same link, install a link prefix to an interface,
1888 * and try to install a neighbor cache on an interface that does not
1889 * have a route to the prefix.
316670eb
A
1890 *
1891 * If the address is from a proxied prefix, the ifa_ifp and ifp might
1892 * not match, because nd6_na_input() could have modified the ifp
1893 * of the route to point to the interface where the NA arrived on,
1894 * hence the test for RTF_PROXY.
1c79356b 1895 */
316670eb 1896 if ((rt->rt_flags & RTF_GATEWAY) || (rt->rt_flags & RTF_LLINFO) == 0 ||
39236c6e 1897 rt->rt_gateway->sa_family != AF_LINK || rt->rt_llinfo == NULL ||
316670eb
A
1898 (ifp && rt->rt_ifa->ifa_ifp != ifp &&
1899 !(rt->rt_flags & RTF_PROXY))) {
b0d623f7
A
1900 RT_REMREF_LOCKED(rt);
1901 RT_UNLOCK(rt);
1c79356b 1902 if (create) {
b0d623f7
A
1903 log(LOG_DEBUG, "%s: failed to lookup %s "
1904 "(if = %s)\n", __func__, ip6_sprintf(addr6),
1905 ifp ? if_name(ifp) : "unspec");
1c79356b
A
1906 /* xxx more logs... kazu */
1907 }
39236c6e 1908 return (NULL);
b0d623f7
A
1909 }
1910 /*
1911 * Caller needs to release reference and call RT_UNLOCK(rt).
1912 */
39236c6e 1913 return (rt);
1c79356b
A
1914}
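/*
 * A minimal sketch of the caller contract described above, using a
 * hypothetical helper: on success nd6_lookup() hands back a locked route
 * holding an extra reference, and the caller must drop both, which is
 * what nd6_is_addr_neighbor() and the SIOCGNBRINFO_IN6 handlers below do.
 */
static boolean_t
nd6_has_cache_entry(struct in6_addr *addr6, struct ifnet *ifp)
{
	struct rtentry *rt;

	/* non-creating lookup; this caller does not hold rnh_lock */
	if ((rt = nd6_lookup(addr6, 0, ifp, 0)) == NULL)
		return (FALSE);
	RT_LOCK_ASSERT_HELD(rt);
	RT_REMREF_LOCKED(rt);		/* drop the reference taken for us */
	RT_UNLOCK(rt);			/* and the lock */
	return (TRUE);
}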
1915
1916/*
6d2010ae
A
1917 * Test whether a given IPv6 address is a neighbor or not, ignoring
1918 * the actual neighbor cache. The neighbor cache is ignored in order
1919 * to not reenter the routing code from within itself.
1c79356b 1920 */
6d2010ae 1921static int
39236c6e 1922nd6_is_new_addr_neighbor(struct sockaddr_in6 *addr, struct ifnet *ifp)
1c79356b 1923{
6d2010ae
A
1924 struct nd_prefix *pr;
1925 struct ifaddr *dstaddr;
1c79356b 1926
6d2010ae 1927 lck_mtx_assert(nd6_mutex, LCK_MTX_ASSERT_OWNED);
1c79356b 1928
9bccf70c
A
1929 /*
1930 * A link-local address is always a neighbor.
6d2010ae 1931 * XXX: a link does not necessarily specify a single interface.
9bccf70c 1932 */
6d2010ae
A
1933 if (IN6_IS_ADDR_LINKLOCAL(&addr->sin6_addr)) {
1934 struct sockaddr_in6 sin6_copy;
1935 u_int32_t zone;
1936
1937 /*
1938 * We need sin6_copy since sa6_recoverscope() may modify the
1939 * content (XXX).
1940 */
1941 sin6_copy = *addr;
316670eb 1942 if (sa6_recoverscope(&sin6_copy, FALSE))
6d2010ae
A
1943 return (0); /* XXX: should be impossible */
1944 if (in6_setscope(&sin6_copy.sin6_addr, ifp, &zone))
1945 return (0);
1946 if (sin6_copy.sin6_scope_id == zone)
1947 return (1);
1948 else
1949 return (0);
1950 }
1c79356b
A
1951
1952 /*
1953 * If the address matches one of our addresses,
1954 * it should be a neighbor.
6d2010ae
A
1955 * If the address matches one of our on-link prefixes, it should be a
1956 * neighbor.
1c79356b 1957 */
6d2010ae
A
1958 for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) {
1959 NDPR_LOCK(pr);
1960 if (pr->ndpr_ifp != ifp) {
1961 NDPR_UNLOCK(pr);
1962 continue;
1963 }
1964 if (!(pr->ndpr_stateflags & NDPRF_ONLINK)) {
1965 NDPR_UNLOCK(pr);
91447636 1966 continue;
6d2010ae
A
1967 }
1968 if (IN6_ARE_MASKED_ADDR_EQUAL(&pr->ndpr_prefix.sin6_addr,
1969 &addr->sin6_addr, &pr->ndpr_mask)) {
1970 NDPR_UNLOCK(pr);
1971 return (1);
1972 }
1973 NDPR_UNLOCK(pr);
1974 }
1c79356b 1975
6d2010ae
A
1976 /*
1977 * If the address is assigned on the node of the other side of
1978 * a p2p interface, the address should be a neighbor.
1979 */
39236c6e 1980 dstaddr = ifa_ifwithdstaddr(SA(addr));
6d2010ae
A
1981 if (dstaddr != NULL) {
1982 if (dstaddr->ifa_ifp == ifp) {
1983 IFA_REMREF(dstaddr);
1984 return (1);
1c79356b 1985 }
6d2010ae
A
1986 IFA_REMREF(dstaddr);
1987 dstaddr = NULL;
1c79356b 1988 }
6d2010ae 1989
6d2010ae
A
1990 return (0);
1991}
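/*
 * Worked example for the prefix walk above, with illustrative addresses:
 * given an NDPRF_ONLINK prefix 2001:db8:1::/64 on ifp, a destination
 * 2001:db8:1::5 masks to the same prefix under IN6_ARE_MASKED_ADDR_EQUAL()
 * and is reported as a neighbor, while 2001:db8:2::5 misses every prefix
 * and falls through to the ifa_ifwithdstaddr() p2p check at the end of
 * the function.
 */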
1992
1993
1994/*
1995 * Detect if a given IPv6 address identifies a neighbor on a given link.
1996 * XXX: should take care of the destination of a p2p link?
1997 */
1998int
39236c6e
A
1999nd6_is_addr_neighbor(struct sockaddr_in6 *addr, struct ifnet *ifp,
2000 int rt_locked)
6d2010ae
A
2001{
2002 struct rtentry *rt;
2003
2004 lck_mtx_assert(nd6_mutex, LCK_MTX_ASSERT_NOTOWNED);
2005 lck_mtx_lock(nd6_mutex);
2006 if (nd6_is_new_addr_neighbor(addr, ifp)) {
2007 lck_mtx_unlock(nd6_mutex);
2008 return (1);
2009 }
2010 lck_mtx_unlock(nd6_mutex);
1c79356b
A
2011
2012 /*
2013 * Even if the address matches none of our addresses, it might be
6d2010ae 2014 * in the neighbor cache.
1c79356b 2015 */
b0d623f7
A
2016 if ((rt = nd6_lookup(&addr->sin6_addr, 0, ifp, rt_locked)) != NULL) {
2017 RT_LOCK_ASSERT_HELD(rt);
2018 RT_REMREF_LOCKED(rt);
2019 RT_UNLOCK(rt);
6d2010ae 2020 return (1);
b0d623f7 2021 }
1c79356b 2022
6d2010ae 2023 return (0);
1c79356b
A
2024}
2025
2026/*
2027 * Free an nd6 llinfo entry.
6d2010ae
A
2028 * Since the function would cause significant changes in the kernel, DO NOT
2029 * make it global, unless you have a strong reason for the change, and are sure
2030 * that the change is safe.
1c79356b 2031 */
b0d623f7 2032void
39236c6e 2033nd6_free(struct rtentry *rt)
1c79356b 2034{
b0d623f7
A
2035 struct llinfo_nd6 *ln;
2036 struct in6_addr in6;
1c79356b
A
2037 struct nd_defrouter *dr;
2038
b0d623f7
A
2039 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
2040 RT_LOCK_ASSERT_NOTHELD(rt);
2041 lck_mtx_lock(nd6_mutex);
2042
2043 RT_LOCK(rt);
2044 RT_ADDREF_LOCKED(rt); /* Extra ref */
2045 ln = rt->rt_llinfo;
39236c6e 2046 in6 = SIN6(rt_key(rt))->sin6_addr;
b0d623f7
A
2047
2048 /*
2049 * Prevent another thread from modifying rt_key, rt_gateway
2050 * via rt_setgate() after the rt_lock is dropped by marking
2051 * the route as defunct.
2052 */
2053 rt->rt_flags |= RTF_CONDEMNED;
2054
1c79356b 2055 /*
316670eb
A
2056 * We used to have pfctlinput(PRC_HOSTDEAD) here. Even though it is
2057 * not harmful, it was not really necessary. Perform default router
2058 * selection even when we are a router, if Scoped Routing is enabled.
1c79356b 2059 */
39037602 2060 dr = defrouter_lookup(&SIN6(rt_key(rt))->sin6_addr, rt->rt_ifp);
1c79356b 2061
39037602
A
2062 if ((ln && ln->ln_router) || dr) {
2063 /*
2064 * rt6_flush must be called whether or not the neighbor
2065 * is in the Default Router List.
2066 * See a corresponding comment in nd6_na_input().
2067 */
2068 RT_UNLOCK(rt);
2069 lck_mtx_unlock(nd6_mutex);
2070 rt6_flush(&in6, rt->rt_ifp);
2071 lck_mtx_lock(nd6_mutex);
2072 } else {
2073 RT_UNLOCK(rt);
2074 }
1c79356b 2075
39037602
A
2076 if (dr) {
2077 NDDR_REMREF(dr);
2078 /*
2079 * Unreachability of a router might affect the default
2080 * router selection and on-link detection of advertised
2081 * prefixes.
2082 */
1c79356b 2083
39037602
A
2084 /*
2085 * Temporarily fake the state to choose a new default
2086 * router and to perform on-link determination of
2087 * prefixes correctly.
2088 * Below the state will be set correctly,
2089 * or the entry itself will be deleted.
2090 */
2091 RT_LOCK_SPIN(rt);
2092 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_INCOMPLETE);
9bccf70c 2093
39037602
A
2094 /*
2095 * Since defrouter_select() does not affect the
2096 * on-link determination and MIP6 needs the check
2097 * before the default router selection, we perform
2098 * the check now.
2099 */
b0d623f7 2100 RT_UNLOCK(rt);
39037602 2101 pfxlist_onlink_check();
1c79356b 2102
39037602
A
2103 /*
2104 * refresh default router list
2105 */
2106 defrouter_select(rt->rt_ifp);
2107 }
2108 RT_LOCK_ASSERT_NOTHELD(rt);
b0d623f7 2109 lck_mtx_unlock(nd6_mutex);
9bccf70c
A
2110 /*
2111 * Detach the route from the routing tree and the list of neighbor
2112 * caches, and disable the route entry not to be used in already
2113 * cached routes.
2114 */
39236c6e 2115 (void) rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt), 0, NULL);
9bccf70c 2116
b0d623f7
A
2117 /* Extra ref held above; now free it */
2118 rtfree(rt);
1c79356b
A
2119}
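/*
 * A minimal sketch (hypothetical helper name) of how the callers in this
 * file drive nd6_free(): a route obtained from nd6_lookup() comes back
 * locked and referenced, so the caller unlocks it first (nd6_free()
 * asserts that neither the rt lock nor rnh_lock is held), lets nd6_free()
 * unlink and RTM_DELETE the entry, and only then drops its own reference.
 */
static void
nd6_free_looked_up(struct rtentry *rt)
{
	RT_LOCK_ASSERT_HELD(rt);	/* as returned by nd6_lookup() */
	RT_UNLOCK(rt);
	nd6_free(rt);			/* takes and drops its own extra ref */
	rtfree(rt);			/* release the lookup reference */
}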
2120
1c79356b 2121void
39236c6e 2122nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa)
1c79356b 2123{
39236c6e 2124#pragma unused(sa)
1c79356b 2125 struct sockaddr *gate = rt->rt_gateway;
b0d623f7 2126 struct llinfo_nd6 *ln = rt->rt_llinfo;
39236c6e
A
2127 static struct sockaddr_dl null_sdl =
2128 { .sdl_len = sizeof (null_sdl), .sdl_family = AF_LINK };
1c79356b
A
2129 struct ifnet *ifp = rt->rt_ifp;
2130 struct ifaddr *ifa;
39236c6e
A
2131 uint64_t timenow;
2132 char buf[MAX_IPv6_STR_LEN];
39037602 2133 struct nd_ifinfo *ndi = ND_IFINFO(rt->rt_ifp);
91447636 2134
39037602 2135 VERIFY((NULL != ndi) && (TRUE == ndi->initialized));
39236c6e 2136 VERIFY(nd6_init_done);
b0d623f7
A
2137 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
2138 RT_LOCK_ASSERT_HELD(rt);
1c79356b 2139
39236c6e
A
2140 /*
2141 * We have rnh_lock held, see if we need to schedule the timer;
2142 * we might do this again below during RTM_RESOLVE, but doing it
2143 * now handles all other cases.
2144 */
2145 if (nd6_sched_timeout_want)
2146 nd6_sched_timeout(NULL, NULL);
2147
2148 if (rt->rt_flags & RTF_GATEWAY)
1c79356b
A
2149 return;
2150
39236c6e 2151 if (!nd6_need_cache(ifp) && !(rt->rt_flags & RTF_HOST)) {
9bccf70c
A
2152 /*
2153 * This is probably an interface direct route for a link
2154 * which does not need neighbor caches (e.g. fe80::%lo0/64).
2155 * We do not need special treatment below for such a route.
2156 * Moreover, the RTF_LLINFO flag which would be set below
2157 * would annoy the ndp(8) command.
2158 */
2159 return;
2160 }
2161
b0d623f7
A
2162 if (req == RTM_RESOLVE) {
2163 int no_nd_cache;
2164
2165 if (!nd6_need_cache(ifp)) { /* stf case */
2166 no_nd_cache = 1;
2167 } else {
6d2010ae
A
2168 struct sockaddr_in6 sin6;
2169
2170 rtkey_to_sa6(rt, &sin6);
b0d623f7
A
2171 /*
2172 * nd6_is_addr_neighbor() may call nd6_lookup(),
2173 * therefore we drop rt_lock to avoid deadlock
6d2010ae 2174 * during the lookup.
b0d623f7
A
2175 */
2176 RT_ADDREF_LOCKED(rt);
2177 RT_UNLOCK(rt);
6d2010ae 2178 no_nd_cache = !nd6_is_addr_neighbor(&sin6, ifp, 1);
b0d623f7
A
2179 RT_LOCK(rt);
2180 RT_REMREF_LOCKED(rt);
2181 }
2182
55e303ae
A
2183 /*
2184 * FreeBSD and BSD/OS often make a cloned host route based
2185 * on a less-specific route (e.g. the default route).
2186 * If the less specific route does not have a "gateway"
2187 * (this is the case when the route just goes to a p2p or an
2188 * stf interface), we'll mistakenly make a neighbor cache for
2189 * the host route, and will see strange neighbor solicitation
2190 * for the corresponding destination. In order to avoid the
2191 * confusion, we check if the destination of the route is
2192 * a neighbor in terms of neighbor discovery, and stop the
2193 * process if not. Additionally, we remove the LLINFO flag
2194 * so that ndp(8) will not try to get the neighbor information
2195 * of the destination.
2196 */
b0d623f7
A
2197 if (no_nd_cache) {
2198 rt->rt_flags &= ~RTF_LLINFO;
2199 return;
2200 }
55e303ae
A
2201 }
2202
39236c6e
A
2203 timenow = net_uptime();
2204
1c79356b
A
2205 switch (req) {
2206 case RTM_ADD:
2207 /*
2208 * There is no backward compatibility :)
2209 *
2210 * if ((rt->rt_flags & RTF_HOST) == 0 &&
39236c6e
A
2211 * SIN(rt_mask(rt))->sin_addr.s_addr != 0xffffffff)
2212 * rt->rt_flags |= RTF_CLONING;
1c79356b 2213 */
6d2010ae
A
2214 if ((rt->rt_flags & RTF_CLONING) ||
2215 ((rt->rt_flags & RTF_LLINFO) && ln == NULL)) {
1c79356b 2216 /*
6d2010ae
A
2217 * Case 1: This route should come from a route to
2218 * interface (RTF_CLONING case) or the route should be
2219 * treated as on-link but is currently not
2220 * (RTF_LLINFO && ln == NULL case).
1c79356b 2221 */
39236c6e 2222 if (rt_setgate(rt, rt_key(rt), SA(&null_sdl)) == 0) {
b0d623f7
A
2223 gate = rt->rt_gateway;
2224 SDL(gate)->sdl_type = ifp->if_type;
2225 SDL(gate)->sdl_index = ifp->if_index;
2226 /*
2227 * In case we're called before 1.0 sec.
2228 * has elapsed.
2229 */
39236c6e
A
2230 if (ln != NULL) {
2231 ln_setexpire(ln,
db609669 2232 (ifp->if_eflags & IFEF_IPV6_ND6ALT)
39236c6e
A
2233 ? 0 : MAX(timenow, 1));
2234 }
1c79356b 2235 }
39236c6e 2236 if (rt->rt_flags & RTF_CLONING)
1c79356b
A
2237 break;
2238 }
2239 /*
2240 * In IPv4 code, we try to announce a new RTF_ANNOUNCE entry here.
2241 * We don't do that here since llinfo is not ready yet.
2242 *
2243 * There are also a couple of other things to be discussed:
2244 * - unsolicited NA code needs improvement beforehand
39236c6e 2245 * - RFC4861 says we MAY send multicast unsolicited NA
1c79356b
A
2246 * (7.2.6 paragraph 4), however, it also says that we
2247 * SHOULD provide a mechanism to prevent multicast NA storm.
2248 * we don't have anything like it right now.
9bccf70c 2249 * note that the mechanism needs a mutual agreement
1c79356b 2250 * between proxies, which means that we need to implement
9bccf70c 2251 * a new protocol, or a new kludge.
39236c6e 2252 * - from RFC4861 6.2.4, host MUST NOT send an unsolicited RA.
1c79356b
A
2253 * we need to check ip6forwarding before sending it.
2254 * (or should we allow proxy ND configuration only for
2255 * routers? there's no mention about proxy ND from hosts)
2256 */
1c79356b
A
2257 /* FALLTHROUGH */
2258 case RTM_RESOLVE:
39236c6e 2259 if (!(ifp->if_flags & (IFF_POINTOPOINT | IFF_LOOPBACK))) {
1c79356b
A
2260 /*
2261 * Address resolution isn't necessary for a point to
2262 * point link, so we can skip this test for a p2p link.
2263 */
2264 if (gate->sa_family != AF_LINK ||
39236c6e 2265 gate->sa_len < sizeof (null_sdl)) {
6d2010ae
A
2266 /* Don't complain in case of RTM_ADD */
2267 if (req == RTM_RESOLVE) {
39236c6e
A
2268 log(LOG_ERR, "%s: route to %s has bad "
2269 "gateway address (sa_family %u "
2270 "sa_len %u) on %s\n", __func__,
2271 inet_ntop(AF_INET6,
2272 &SIN6(rt_key(rt))->sin6_addr, buf,
2273 sizeof (buf)), gate->sa_family,
2274 gate->sa_len, if_name(ifp));
6d2010ae 2275 }
1c79356b
A
2276 break;
2277 }
2278 SDL(gate)->sdl_type = ifp->if_type;
2279 SDL(gate)->sdl_index = ifp->if_index;
2280 }
2281 if (ln != NULL)
2282 break; /* This happens on a route change */
2283 /*
2284 * Case 2: This route may come from cloning, or a manual route
2285 * add with a LL address.
2286 */
39236c6e
A
2287 rt->rt_llinfo = ln = nd6_llinfo_alloc(M_WAITOK);
2288 if (ln == NULL)
1c79356b 2289 break;
b0d623f7 2290
1c79356b 2291 nd6_allocated++;
39236c6e
A
2292 rt->rt_llinfo_get_ri = nd6_llinfo_get_ri;
2293 rt->rt_llinfo_get_iflri = nd6_llinfo_get_iflri;
2294 rt->rt_llinfo_purge = nd6_llinfo_purge;
2295 rt->rt_llinfo_free = nd6_llinfo_free;
3e170ce0 2296 rt->rt_llinfo_refresh = nd6_llinfo_refresh;
39236c6e 2297 rt->rt_flags |= RTF_LLINFO;
1c79356b
A
2298 ln->ln_rt = rt;
2299 /* this is required for "ndp" command. - shin */
39037602
A
2300 /*
2301 * For interfaces that do not perform NUD,
2302 * neighbor cache entries must always be marked
2303 * reachable with no expiry.
2304 */
2305 if ((req == RTM_ADD) ||
2306 !(ndi->flags & ND6_IFF_PERFORMNUD)) {
39236c6e 2307 /*
1c79356b
A
2308 * gate should have some valid AF_LINK entry,
2309 * and ln->ln_expire should have some lifetime
2310 * which is specified by ndp command.
2311 */
39037602
A
2312 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_REACHABLE);
2313 ln_setexpire(ln, 0);
1c79356b 2314 } else {
39236c6e 2315 /*
1c79356b
A
2316 * When req == RTM_RESOLVE, rt is created and
2317 * initialized in rtrequest(), so rt_expire is 0.
2318 */
39037602 2319 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_NOSTATE);
b0d623f7 2320 /* In case we're called before 1.0 sec. has elapsed */
39236c6e
A
2321 ln_setexpire(ln, (ifp->if_eflags & IFEF_IPV6_ND6ALT) ?
2322 0 : MAX(timenow, 1));
1c79356b 2323 }
b0d623f7 2324 LN_INSERTHEAD(ln);
39236c6e
A
2325 nd6_inuse++;
2326
2327 /* We have at least one entry; arm the timer if not already */
2328 nd6_sched_timeout(NULL, NULL);
b0d623f7
A
2329
2330 /*
2331 * If we have too many cache entries, initiate immediate
2332 * purging for some "less recently used" entries. Note that
2333 * we cannot directly call nd6_free() here because it would
2334 * cause re-entering rtable related routines triggering an LOR
2335 * problem.
2336 */
39236c6e 2337 if (ip6_neighborgcthresh > 0 &&
b0d623f7
A
2338 nd6_inuse >= ip6_neighborgcthresh) {
2339 int i;
2340
2341 for (i = 0; i < 10 && llinfo_nd6.ln_prev != ln; i++) {
2342 struct llinfo_nd6 *ln_end = llinfo_nd6.ln_prev;
2343 struct rtentry *rt_end = ln_end->ln_rt;
2344
2345 /* Move this entry to the head */
2346 RT_LOCK(rt_end);
2347 LN_DEQUEUE(ln_end);
2348 LN_INSERTHEAD(ln_end);
2349
2350 if (ln_end->ln_expire == 0) {
2351 RT_UNLOCK(rt_end);
2352 continue;
2353 }
2354 if (ln_end->ln_state > ND6_LLINFO_INCOMPLETE)
39037602 2355 ND6_CACHE_STATE_TRANSITION(ln_end, ND6_LLINFO_STALE);
b0d623f7 2356 else
39037602 2357 ND6_CACHE_STATE_TRANSITION(ln_end, ND6_LLINFO_PURGE);
39236c6e 2358 ln_setexpire(ln_end, timenow);
b0d623f7
A
2359 RT_UNLOCK(rt_end);
2360 }
2361 }
1c79356b
A
2362
2363 /*
2364 * check if rt_key(rt) is one of my address assigned
2365 * to the interface.
2366 */
2367 ifa = (struct ifaddr *)in6ifa_ifpwithaddr(rt->rt_ifp,
39236c6e
A
2368 &SIN6(rt_key(rt))->sin6_addr);
2369 if (ifa != NULL) {
1c79356b 2370 caddr_t macp = nd6_ifptomac(ifp);
39236c6e 2371 ln_setexpire(ln, 0);
39037602 2372 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_REACHABLE);
39236c6e 2373 if (macp != NULL) {
1c79356b
A
2374 Bcopy(macp, LLADDR(SDL(gate)), ifp->if_addrlen);
2375 SDL(gate)->sdl_alen = ifp->if_addrlen;
2376 }
2377 if (nd6_useloopback) {
6d2010ae
A
2378 if (rt->rt_ifp != lo_ifp) {
2379 /*
2380 * Purge any link-layer info caching.
2381 */
2382 if (rt->rt_llinfo_purge != NULL)
2383 rt->rt_llinfo_purge(rt);
2384
2385 /*
2386 * Adjust route ref count for the
2387 * interfaces.
2388 */
2389 if (rt->rt_if_ref_fn != NULL) {
2390 rt->rt_if_ref_fn(lo_ifp, 1);
39236c6e
A
2391 rt->rt_if_ref_fn(rt->rt_ifp,
2392 -1);
6d2010ae 2393 }
d1ecb069 2394 }
39236c6e
A
2395 rt->rt_ifp = lo_ifp;
2396 /*
2397 * If rmx_mtu is not locked, update it
2398 * to the MTU used by the new interface.
2399 */
2400 if (!(rt->rt_rmx.rmx_locks & RTV_MTU))
2401 rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
1c79356b
A
2402 /*
2403 * Make sure rt_ifa be equal to the ifaddr
2404 * corresponding to the address.
2405 * We need this because when we refer
2406 * rt_ifa->ia6_flags in ip6_input, we assume
2407 * that the rt_ifa points to the address instead
2408 * of the loopback address.
2409 */
2410 if (ifa != rt->rt_ifa) {
9bccf70c 2411 rtsetifa(rt, ifa);
1c79356b
A
2412 }
2413 }
6d2010ae 2414 IFA_REMREF(ifa);
1c79356b 2415 } else if (rt->rt_flags & RTF_ANNOUNCE) {
39236c6e 2416 ln_setexpire(ln, 0);
39037602 2417 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_REACHABLE);
1c79356b
A
2418
2419 /* join solicited node multicast for proxy ND */
2420 if (ifp->if_flags & IFF_MULTICAST) {
2421 struct in6_addr llsol;
6d2010ae 2422 struct in6_multi *in6m;
1c79356b
A
2423 int error;
2424
2425 llsol = SIN6(rt_key(rt))->sin6_addr;
6d2010ae 2426 llsol.s6_addr32[0] = IPV6_ADDR_INT32_MLL;
1c79356b
A
2427 llsol.s6_addr32[1] = 0;
2428 llsol.s6_addr32[2] = htonl(1);
2429 llsol.s6_addr8[12] = 0xff;
6d2010ae
A
2430 if (in6_setscope(&llsol, ifp, NULL))
2431 break;
39236c6e
A
2432 error = in6_mc_join(ifp, &llsol,
2433 NULL, &in6m, 0);
6d2010ae 2434 if (error) {
9bccf70c
A
2435 nd6log((LOG_ERR, "%s: failed to join "
2436 "%s (errno=%d)\n", if_name(ifp),
2437 ip6_sprintf(&llsol), error));
6d2010ae
A
2438 } else {
2439 IN6M_REMREF(in6m);
9bccf70c 2440 }
1c79356b
A
2441 }
2442 }
2443 break;
2444
2445 case RTM_DELETE:
6d2010ae 2446 if (ln == NULL)
1c79356b
A
2447 break;
2448 /* leave from solicited node multicast for proxy ND */
39236c6e
A
2449 if ((rt->rt_flags & RTF_ANNOUNCE) &&
2450 (ifp->if_flags & IFF_MULTICAST)) {
1c79356b
A
2451 struct in6_addr llsol;
2452 struct in6_multi *in6m;
2453
2454 llsol = SIN6(rt_key(rt))->sin6_addr;
6d2010ae 2455 llsol.s6_addr32[0] = IPV6_ADDR_INT32_MLL;
1c79356b
A
2456 llsol.s6_addr32[1] = 0;
2457 llsol.s6_addr32[2] = htonl(1);
2458 llsol.s6_addr8[12] = 0xff;
6d2010ae
A
2459 if (in6_setscope(&llsol, ifp, NULL) == 0) {
2460 in6_multihead_lock_shared();
2461 IN6_LOOKUP_MULTI(&llsol, ifp, in6m);
2462 in6_multihead_lock_done();
2463 if (in6m != NULL) {
2464 in6_mc_leave(in6m, NULL);
2465 IN6M_REMREF(in6m);
2466 }
2467 }
1c79356b
A
2468 }
2469 nd6_inuse--;
b0d623f7
A
2470 /*
2471 * Unchain it but defer the actual freeing until the route
2472 * itself is to be freed. rt->rt_llinfo still points to
2473 * llinfo_nd6, and likewise, ln->ln_rt still points to this
2474 * route entry, except that RTF_LLINFO is now cleared.
2475 */
2476 if (ln->ln_flags & ND6_LNF_IN_USE)
2477 LN_DEQUEUE(ln);
6d2010ae
A
2478
2479 /*
2480 * Purge any link-layer info caching.
2481 */
2482 if (rt->rt_llinfo_purge != NULL)
2483 rt->rt_llinfo_purge(rt);
2484
1c79356b 2485 rt->rt_flags &= ~RTF_LLINFO;
6d2010ae 2486 if (ln->ln_hold != NULL) {
3e170ce0 2487 m_freem_list(ln->ln_hold);
6d2010ae
A
2488 ln->ln_hold = NULL;
2489 }
1c79356b
A
2490 }
2491}
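/*
 * Worked example for the proxy-ND multicast join/leave above, using an
 * illustrative address: for a proxied 2001:db8::1234:5678 only the
 * low-order 24 bits (bytes 13-15) survive; overlaying the solicited-node
 * prefix ff02::1:ff00:0/104 yields the group ff02::1:ff34:5678, and
 * in6_setscope() then embeds the interface scope.  The hypothetical
 * helper below restates the inline byte surgery.
 */
static void
nd6_solicited_node_of(const struct in6_addr *addr, struct in6_addr *llsol)
{
	*llsol = *addr;
	llsol->s6_addr32[0] = IPV6_ADDR_INT32_MLL;	/* ff02:0000 */
	llsol->s6_addr32[1] = 0;
	llsol->s6_addr32[2] = htonl(1);			/* ...:0001 */
	llsol->s6_addr8[12] = 0xff;			/* keep bytes 13-15 */
}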
2492
316670eb 2493static int
b0d623f7 2494nd6_siocgdrlst(void *data, int data_is_64)
1c79356b 2495{
316670eb 2496 struct in6_drlist_32 *drl_32;
b0d623f7
A
2497 struct nd_defrouter *dr;
2498 int i = 0;
1c79356b 2499
b0d623f7
A
2500 lck_mtx_assert(nd6_mutex, LCK_MTX_ASSERT_OWNED);
2501
b0d623f7 2502 dr = TAILQ_FIRST(&nd_defrouter);
316670eb 2503
39037602 2504 /* XXX Handle mapped defrouter entries */
316670eb 2505 /* For 64-bit process */
b0d623f7 2506 if (data_is_64) {
316670eb
A
2507 struct in6_drlist_64 *drl_64;
2508
2509 drl_64 = _MALLOC(sizeof (*drl_64), M_TEMP, M_WAITOK|M_ZERO);
2510 if (drl_64 == NULL)
2511 return (ENOMEM);
2512
2513 /* preserve the interface name */
2514 bcopy(data, drl_64, sizeof (drl_64->ifname));
2515
1c79356b 2516 while (dr && i < DRLSTSIZ) {
b0d623f7 2517 drl_64->defrouter[i].rtaddr = dr->rtaddr;
39236c6e
A
2518 if (IN6_IS_ADDR_LINKLOCAL(
2519 &drl_64->defrouter[i].rtaddr)) {
1c79356b 2520 /* XXX: need this hack for the KAME stack */
b0d623f7
A
2521 drl_64->defrouter[i].rtaddr.s6_addr16[1] = 0;
2522 } else {
1c79356b
A
2523 log(LOG_ERR,
2524 "default router list contains a "
2525 "non-linklocal address(%s)\n",
b0d623f7
A
2526 ip6_sprintf(&drl_64->defrouter[i].rtaddr));
2527 }
2528 drl_64->defrouter[i].flags = dr->flags;
2529 drl_64->defrouter[i].rtlifetime = dr->rtlifetime;
39236c6e 2530 drl_64->defrouter[i].expire = nddr_getexpire(dr);
b0d623f7 2531 drl_64->defrouter[i].if_index = dr->ifp->if_index;
1c79356b
A
2532 i++;
2533 dr = TAILQ_NEXT(dr, dr_entry);
2534 }
316670eb
A
2535 bcopy(drl_64, data, sizeof (*drl_64));
2536 _FREE(drl_64, M_TEMP);
2537 return (0);
b0d623f7 2538 }
316670eb 2539
b0d623f7 2540 /* For 32-bit process */
316670eb
A
2541 drl_32 = _MALLOC(sizeof (*drl_32), M_TEMP, M_WAITOK|M_ZERO);
2542 if (drl_32 == NULL)
2543 return (ENOMEM);
2544
2545 /* preserve the interface name */
2546 bcopy(data, drl_32, sizeof (drl_32->ifname));
2547
39236c6e 2548 while (dr != NULL && i < DRLSTSIZ) {
b0d623f7
A
2549 drl_32->defrouter[i].rtaddr = dr->rtaddr;
2550 if (IN6_IS_ADDR_LINKLOCAL(&drl_32->defrouter[i].rtaddr)) {
2551 /* XXX: need this hack for the KAME stack */
2552 drl_32->defrouter[i].rtaddr.s6_addr16[1] = 0;
2553 } else {
2554 log(LOG_ERR,
2555 "default router list contains a "
2556 "non-linklocal address(%s)\n",
2557 ip6_sprintf(&drl_32->defrouter[i].rtaddr));
2558 }
2559 drl_32->defrouter[i].flags = dr->flags;
2560 drl_32->defrouter[i].rtlifetime = dr->rtlifetime;
39236c6e 2561 drl_32->defrouter[i].expire = nddr_getexpire(dr);
b0d623f7
A
2562 drl_32->defrouter[i].if_index = dr->ifp->if_index;
2563 i++;
2564 dr = TAILQ_NEXT(dr, dr_entry);
2565 }
316670eb
A
2566 bcopy(drl_32, data, sizeof (*drl_32));
2567 _FREE(drl_32, M_TEMP);
2568 return (0);
b0d623f7
A
2569}
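/*
 * The corresponding nd6_ioctl() cases below call this API obsolete and
 * point at "sysctl under net.inet6.icmp6".  A minimal userland sketch of
 * that replacement follows; the MIB name is the one ndp(8) conventionally
 * uses and is assumed here, and the returned record layout is not parsed.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdlib.h>

static void *
fetch_nd6_drlist(size_t *lenp)
{
	void *buf;

	/* size probe first, then the actual fetch */
	if (sysctlbyname("net.inet6.icmp6.nd6_drlist", NULL, lenp,
	    NULL, 0) != 0 || *lenp == 0)
		return (NULL);
	if ((buf = malloc(*lenp)) == NULL)
		return (NULL);
	if (sysctlbyname("net.inet6.icmp6.nd6_drlist", buf, lenp,
	    NULL, 0) != 0) {
		free(buf);
		return (NULL);
	}
	return (buf);
}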
2570
316670eb
A
2571/*
2572 * XXX meaning of fields, especially "raflags", is very
2573 * different between RA prefix list and RR/static prefix list.
2574 * how about separating ioctls into two?
2575 */
2576static int
b0d623f7
A
2577nd6_siocgprlst(void *data, int data_is_64)
2578{
316670eb 2579 struct in6_prlist_32 *prl_32;
b0d623f7 2580 struct nd_prefix *pr;
b0d623f7
A
2581 int i = 0;
2582
2583 lck_mtx_assert(nd6_mutex, LCK_MTX_ASSERT_OWNED);
316670eb 2584
b0d623f7 2585 pr = nd_prefix.lh_first;
316670eb 2586
39037602 2587 /* XXX Handle mapped defrouter entries */
316670eb 2588 /* For 64-bit process */
b0d623f7 2589 if (data_is_64) {
316670eb
A
2590 struct in6_prlist_64 *prl_64;
2591
2592 prl_64 = _MALLOC(sizeof (*prl_64), M_TEMP, M_WAITOK|M_ZERO);
2593 if (prl_64 == NULL)
2594 return (ENOMEM);
2595
2596 /* preserve the interface name */
2597 bcopy(data, prl_64, sizeof (prl_64->ifname));
2598
1c79356b
A
2599 while (pr && i < PRLSTSIZ) {
2600 struct nd_pfxrouter *pfr;
2601 int j;
2602
6d2010ae 2603 NDPR_LOCK(pr);
b0d623f7 2604 (void) in6_embedscope(&prl_64->prefix[i].prefix,
6d2010ae 2605 &pr->ndpr_prefix, NULL, NULL, NULL);
b0d623f7
A
2606 prl_64->prefix[i].raflags = pr->ndpr_raf;
2607 prl_64->prefix[i].prefixlen = pr->ndpr_plen;
2608 prl_64->prefix[i].vltime = pr->ndpr_vltime;
2609 prl_64->prefix[i].pltime = pr->ndpr_pltime;
2610 prl_64->prefix[i].if_index = pr->ndpr_ifp->if_index;
39236c6e 2611 prl_64->prefix[i].expire = ndpr_getexpire(pr);
1c79356b
A
2612
2613 pfr = pr->ndpr_advrtrs.lh_first;
2614 j = 0;
9bccf70c 2615 while (pfr) {
1c79356b 2616 if (j < DRLSTSIZ) {
39236c6e 2617#define RTRADDR prl_64->prefix[i].advrtr[j]
1c79356b
A
2618 RTRADDR = pfr->router->rtaddr;
2619 if (IN6_IS_ADDR_LINKLOCAL(&RTRADDR)) {
2620 /* XXX: hack for KAME */
2621 RTRADDR.s6_addr16[1] = 0;
b0d623f7 2622 } else {
1c79356b
A
2623 log(LOG_ERR,
2624 "a router(%s) advertises "
2625 "a prefix with "
2626 "non-link local address\n",
2627 ip6_sprintf(&RTRADDR));
b0d623f7 2628 }
1c79356b
A
2629#undef RTRADDR
2630 }
2631 j++;
2632 pfr = pfr->pfr_next;
2633 }
b0d623f7
A
2634 prl_64->prefix[i].advrtrs = j;
2635 prl_64->prefix[i].origin = PR_ORIG_RA;
6d2010ae 2636 NDPR_UNLOCK(pr);
1c79356b
A
2637
2638 i++;
2639 pr = pr->ndpr_next;
2640 }
316670eb
A
2641 bcopy(prl_64, data, sizeof (*prl_64));
2642 _FREE(prl_64, M_TEMP);
2643 return (0);
b0d623f7 2644 }
316670eb 2645
b0d623f7 2646 /* For 32-bit process */
316670eb
A
2647 prl_32 = _MALLOC(sizeof (*prl_32), M_TEMP, M_WAITOK|M_ZERO);
2648 if (prl_32 == NULL)
2649 return (ENOMEM);
2650
2651 /* preserve the interface name */
2652 bcopy(data, prl_32, sizeof (prl_32->ifname));
2653
b0d623f7
A
2654 while (pr && i < PRLSTSIZ) {
2655 struct nd_pfxrouter *pfr;
2656 int j;
2657
6d2010ae 2658 NDPR_LOCK(pr);
b0d623f7 2659 (void) in6_embedscope(&prl_32->prefix[i].prefix,
6d2010ae 2660 &pr->ndpr_prefix, NULL, NULL, NULL);
b0d623f7
A
2661 prl_32->prefix[i].raflags = pr->ndpr_raf;
2662 prl_32->prefix[i].prefixlen = pr->ndpr_plen;
2663 prl_32->prefix[i].vltime = pr->ndpr_vltime;
2664 prl_32->prefix[i].pltime = pr->ndpr_pltime;
2665 prl_32->prefix[i].if_index = pr->ndpr_ifp->if_index;
39236c6e 2666 prl_32->prefix[i].expire = ndpr_getexpire(pr);
b0d623f7
A
2667
2668 pfr = pr->ndpr_advrtrs.lh_first;
2669 j = 0;
2670 while (pfr) {
2671 if (j < DRLSTSIZ) {
39236c6e 2672#define RTRADDR prl_32->prefix[i].advrtr[j]
b0d623f7
A
2673 RTRADDR = pfr->router->rtaddr;
2674 if (IN6_IS_ADDR_LINKLOCAL(&RTRADDR)) {
2675 /* XXX: hack for KAME */
2676 RTRADDR.s6_addr16[1] = 0;
2677 } else {
2678 log(LOG_ERR,
2679 "a router(%s) advertises "
2680 "a prefix with "
2681 "non-link local address\n",
2682 ip6_sprintf(&RTRADDR));
2683 }
2684#undef RTRADDR
2685 }
2686 j++;
2687 pfr = pfr->pfr_next;
2688 }
2689 prl_32->prefix[i].advrtrs = j;
2690 prl_32->prefix[i].origin = PR_ORIG_RA;
6d2010ae 2691 NDPR_UNLOCK(pr);
b0d623f7
A
2692
2693 i++;
2694 pr = pr->ndpr_next;
2695 }
316670eb
A
2696 bcopy(prl_32, data, sizeof (*prl_32));
2697 _FREE(prl_32, M_TEMP);
2698 return (0);
b0d623f7
A
2699}
2700
2701int
2702nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp)
2703{
6d2010ae 2704 struct nd_defrouter *dr;
b0d623f7
A
2705 struct nd_prefix *pr;
2706 struct rtentry *rt;
3e170ce0 2707 int error = 0;
39236c6e
A
2708
2709 VERIFY(ifp != NULL);
b0d623f7
A
2710
2711 switch (cmd) {
316670eb
A
2712 case SIOCGDRLST_IN6_32: /* struct in6_drlist_32 */
2713 case SIOCGDRLST_IN6_64: /* struct in6_drlist_64 */
b0d623f7
A
2714 /*
2715 * obsolete API, use sysctl under net.inet6.icmp6
2716 */
2717 lck_mtx_lock(nd6_mutex);
316670eb 2718 error = nd6_siocgdrlst(data, cmd == SIOCGDRLST_IN6_64);
91447636 2719 lck_mtx_unlock(nd6_mutex);
9bccf70c 2720 break;
b0d623f7 2721
316670eb
A
2722 case SIOCGPRLST_IN6_32: /* struct in6_prlist_32 */
2723 case SIOCGPRLST_IN6_64: /* struct in6_prlist_64 */
b0d623f7
A
2724 /*
2725 * obsolete API, use sysctl under net.inet6.icmp6
2726 */
2727 lck_mtx_lock(nd6_mutex);
316670eb 2728 error = nd6_siocgprlst(data, cmd == SIOCGPRLST_IN6_64);
b0d623f7 2729 lck_mtx_unlock(nd6_mutex);
1c79356b 2730 break;
b0d623f7 2731
316670eb
A
2732 case OSIOCGIFINFO_IN6: /* struct in6_ondireq */
2733 case SIOCGIFINFO_IN6: { /* struct in6_ondireq */
2734 u_int32_t linkmtu;
2735 struct in6_ondireq *ondi = (struct in6_ondireq *)(void *)data;
2736 struct nd_ifinfo *ndi;
b0d623f7
A
2737 /*
2738 * SIOCGIFINFO_IN6 ioctl is encoded with in6_ondireq
2739 * instead of in6_ndireq, so we treat it as such.
2740 */
316670eb 2741 ndi = ND_IFINFO(ifp);
3e170ce0 2742 if ((NULL == ndi) || (FALSE == ndi->initialized)) {
9bccf70c
A
2743 error = EINVAL;
2744 break;
2745 }
316670eb
A
2746 lck_mtx_lock(&ndi->lock);
2747 linkmtu = IN6_LINKMTU(ifp);
2748 bcopy(&linkmtu, &ondi->ndi.linkmtu, sizeof (linkmtu));
3e170ce0 2749 bcopy(&ndi->maxmtu, &ondi->ndi.maxmtu,
316670eb 2750 sizeof (u_int32_t));
3e170ce0 2751 bcopy(&ndi->basereachable, &ondi->ndi.basereachable,
316670eb 2752 sizeof (u_int32_t));
3e170ce0 2753 bcopy(&ndi->reachable, &ondi->ndi.reachable,
316670eb 2754 sizeof (u_int32_t));
3e170ce0 2755 bcopy(&ndi->retrans, &ondi->ndi.retrans,
316670eb 2756 sizeof (u_int32_t));
3e170ce0 2757 bcopy(&ndi->flags, &ondi->ndi.flags,
316670eb 2758 sizeof (u_int32_t));
3e170ce0 2759 bcopy(&ndi->recalctm, &ondi->ndi.recalctm,
316670eb 2760 sizeof (int));
3e170ce0 2761 ondi->ndi.chlim = ndi->chlim;
316670eb
A
2762 ondi->ndi.receivedra = 0;
2763 lck_mtx_unlock(&ndi->lock);
1c79356b 2764 break;
316670eb 2765 }
b0d623f7 2766
316670eb 2767 case SIOCSIFINFO_FLAGS: { /* struct in6_ndireq */
3e170ce0
A
2768 /*
2769 * XXX BSD has a bunch of checks here to ensure
2770 * that the interface-disabled flag is not reset if
2771 * the link-local address has failed DAD.
2772 * Investigate that part.
2773 */
316670eb
A
2774 struct in6_ndireq *cndi = (struct in6_ndireq *)(void *)data;
2775 u_int32_t oflags, flags;
3e170ce0 2776 struct nd_ifinfo *ndi = ND_IFINFO(ifp);
316670eb
A
2777
2778 /* XXX: almost all other fields of cndi->ndi are unused */
3e170ce0 2779 if ((NULL == ndi) || !ndi->initialized) {
9bccf70c
A
2780 error = EINVAL;
2781 break;
2782 }
3e170ce0 2783
316670eb 2784 lck_mtx_lock(&ndi->lock);
3e170ce0
A
2785 oflags = ndi->flags;
2786 bcopy(&cndi->ndi.flags, &(ndi->flags), sizeof (flags));
2787 flags = ndi->flags;
316670eb 2788 lck_mtx_unlock(&ndi->lock);
316670eb 2789
3e170ce0 2790 if (oflags == flags) {
316670eb 2791 break;
3e170ce0 2792 }
316670eb
A
2793
2794 error = nd6_setifinfo(ifp, oflags, flags);
1c79356b 2795 break;
316670eb 2796 }
b0d623f7 2797
316670eb 2798 case SIOCSNDFLUSH_IN6: /* struct in6_ifreq */
1c79356b
A
2799 /* flush default router list */
2800 /*
2801 * xxx sumikawa: should not delete route if default
2802 * route equals the top of the default router list
2803 */
91447636 2804 lck_mtx_lock(nd6_mutex);
6d2010ae
A
2805 defrouter_reset();
2806 defrouter_select(ifp);
91447636 2807 lck_mtx_unlock(nd6_mutex);
1c79356b
A
2808 /* xxx sumikawa: flush prefix list */
2809 break;
b0d623f7 2810
316670eb 2811 case SIOCSPFXFLUSH_IN6: { /* struct in6_ifreq */
1c79356b 2812 /* flush all the prefix advertised by routers */
4bd07ac2 2813 struct nd_prefix *next = NULL;
1c79356b 2814
6d2010ae 2815 lck_mtx_lock(nd6_mutex);
1c79356b 2816 for (pr = nd_prefix.lh_first; pr; pr = next) {
4bd07ac2
A
2817 struct in6_ifaddr *ia = NULL;
2818 bool iterate_pfxlist_again = false;
9bccf70c 2819
1c79356b 2820 next = pr->ndpr_next;
9bccf70c 2821
6d2010ae
A
2822 NDPR_LOCK(pr);
2823 if (IN6_IS_ADDR_LINKLOCAL(&pr->ndpr_prefix.sin6_addr)) {
2824 NDPR_UNLOCK(pr);
9bccf70c 2825 continue; /* XXX */
6d2010ae
A
2826 }
2827 if (ifp != lo_ifp && pr->ndpr_ifp != ifp) {
2828 NDPR_UNLOCK(pr);
2829 continue;
2830 }
9bccf70c 2831 /* do we really have to remove addresses as well? */
6d2010ae
A
2832 NDPR_ADDREF_LOCKED(pr);
2833 NDPR_UNLOCK(pr);
2834 lck_rw_lock_exclusive(&in6_ifaddr_rwlock);
2835 ia = in6_ifaddrs;
2836 while (ia != NULL) {
2837 IFA_LOCK(&ia->ia_ifa);
2838 if ((ia->ia6_flags & IN6_IFF_AUTOCONF) == 0) {
2839 IFA_UNLOCK(&ia->ia_ifa);
2840 ia = ia->ia_next;
2841 continue;
2842 }
9bccf70c 2843
6d2010ae
A
2844 if (ia->ia6_ndpr == pr) {
2845 IFA_ADDREF_LOCKED(&ia->ia_ifa);
2846 IFA_UNLOCK(&ia->ia_ifa);
2847 lck_rw_done(&in6_ifaddr_rwlock);
2848 lck_mtx_unlock(nd6_mutex);
2849 in6_purgeaddr(&ia->ia_ifa);
316670eb 2850 IFA_REMREF(&ia->ia_ifa);
6d2010ae 2851 lck_mtx_lock(nd6_mutex);
39236c6e
A
2852 lck_rw_lock_exclusive(
2853 &in6_ifaddr_rwlock);
6d2010ae
A
2854 /*
2855 * Purging the address caused
2856 * in6_ifaddr_rwlock to be
2857 * dropped and
2858 * reacquired; therefore search again
2859 * from the beginning of in6_ifaddrs.
2860 * The same applies for the prefix list.
2861 */
2862 ia = in6_ifaddrs;
4bd07ac2 2863 iterate_pfxlist_again = true;
9bccf70c 2864 continue;
6d2010ae
A
2865 }
2866 IFA_UNLOCK(&ia->ia_ifa);
2867 ia = ia->ia_next;
9bccf70c 2868 }
6d2010ae
A
2869 lck_rw_done(&in6_ifaddr_rwlock);
2870 NDPR_LOCK(pr);
2871 prelist_remove(pr);
2872 NDPR_UNLOCK(pr);
39236c6e 2873 pfxlist_onlink_check();
6d2010ae 2874 NDPR_REMREF(pr);
4bd07ac2
A
2875 if (iterate_pfxlist_again) {
2876 next = nd_prefix.lh_first;
2877 }
1c79356b 2878 }
91447636 2879 lck_mtx_unlock(nd6_mutex);
1c79356b 2880 break;
b0d623f7
A
2881 }
2882
316670eb 2883 case SIOCSRTRFLUSH_IN6: { /* struct in6_ifreq */
1c79356b 2884 /* flush all the default routers */
2d21ac55 2885 struct nd_defrouter *next;
39037602 2886 struct nd_drhead nd_defrouter_tmp;
1c79356b 2887
39037602 2888 TAILQ_INIT(&nd_defrouter_tmp);
91447636 2889 lck_mtx_lock(nd6_mutex);
1c79356b
A
2890 if ((dr = TAILQ_FIRST(&nd_defrouter)) != NULL) {
2891 /*
2892 * The first entry of the list may be stored in
2893 * the routing table, so we'll delete it later.
2894 */
2895 for (dr = TAILQ_NEXT(dr, dr_entry); dr; dr = next) {
2896 next = TAILQ_NEXT(dr, dr_entry);
39037602
A
2897 if (ifp == lo_ifp || dr->ifp == ifp) {
2898 /*
2899 * Remove the entry from default router list
2900 * and add it to the temp list.
2901 * nd_defrouter_tmp will be a local temporary
2902 * list as no one else can get the same
2903 * removed entry once it is removed from default
2904 * router list.
2905 * Remove the reference after calling defrtrlist_del.
2906 */
2907 TAILQ_REMOVE(&nd_defrouter, dr, dr_entry);
2908 TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry);
2909 }
1c79356b 2910 }
39037602
A
2911
2912 dr = TAILQ_FIRST(&nd_defrouter);
6d2010ae 2913 if (ifp == lo_ifp ||
39037602
A
2914 dr->ifp == ifp) {
2915 TAILQ_REMOVE(&nd_defrouter, dr, dr_entry);
2916 TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry);
2917 }
2918 }
2919
2920 /*
2921 * Keep the following separate from the above iteration of
2922 * nd_defrouter because it's not safe to call
2923 * defrtrlist_del while iterating the global default
2924 * router list. The global list has to be traversed
2925 * while holding nd6_mutex throughout.
2926 *
2927 * The following call to defrtrlist_del should be
2928 * safe as we are iterating a local list of
2929 * default routers.
2930 */
2931 TAILQ_FOREACH_SAFE(dr, &nd_defrouter_tmp, dr_entry, next) {
2932 TAILQ_REMOVE(&nd_defrouter_tmp, dr, dr_entry);
2933 defrtrlist_del(dr);
2934 NDDR_REMREF(dr); /* remove list reference */
1c79356b 2935 }
91447636 2936 lck_mtx_unlock(nd6_mutex);
1c79356b 2937 break;
b0d623f7
A
2938 }
2939
316670eb 2940 case SIOCGNBRINFO_IN6_32: { /* struct in6_nbrinfo_32 */
1c79356b 2941 struct llinfo_nd6 *ln;
316670eb
A
2942 struct in6_nbrinfo_32 nbi_32;
2943 struct in6_addr nb_addr; /* make local for safety */
1c79356b 2944
316670eb
A
2945 bcopy(data, &nbi_32, sizeof (nbi_32));
2946 nb_addr = nbi_32.addr;
1c79356b
A
2947 /*
2948 * XXX: KAME specific hack for scoped addresses
39236c6e 2949 * XXXX: for other scopes than link-local?
1c79356b 2950 */
316670eb
A
2951 if (IN6_IS_ADDR_LINKLOCAL(&nbi_32.addr) ||
2952 IN6_IS_ADDR_MC_LINKLOCAL(&nbi_32.addr)) {
2953 u_int16_t *idp =
2954 (u_int16_t *)(void *)&nb_addr.s6_addr[2];
1c79356b
A
2955
2956 if (*idp == 0)
2957 *idp = htons(ifp->if_index);
2958 }
2959
b0d623f7 2960 /* Callee returns a locked route upon success */
91447636 2961 if ((rt = nd6_lookup(&nb_addr, 0, ifp, 0)) == NULL) {
1c79356b 2962 error = EINVAL;
1c79356b
A
2963 break;
2964 }
b0d623f7
A
2965 RT_LOCK_ASSERT_HELD(rt);
2966 ln = rt->rt_llinfo;
316670eb
A
2967 nbi_32.state = ln->ln_state;
2968 nbi_32.asked = ln->ln_asked;
2969 nbi_32.isrouter = ln->ln_router;
39236c6e 2970 nbi_32.expire = ln_getexpire(ln);
b0d623f7
A
2971 RT_REMREF_LOCKED(rt);
2972 RT_UNLOCK(rt);
316670eb 2973 bcopy(&nbi_32, data, sizeof (nbi_32));
1c79356b 2974 break;
b0d623f7
A
2975 }
2976
316670eb 2977 case SIOCGNBRINFO_IN6_64: { /* struct in6_nbrinfo_64 */
b0d623f7 2978 struct llinfo_nd6 *ln;
316670eb
A
2979 struct in6_nbrinfo_64 nbi_64;
2980 struct in6_addr nb_addr; /* make local for safety */
b0d623f7 2981
316670eb
A
2982 bcopy(data, &nbi_64, sizeof (nbi_64));
2983 nb_addr = nbi_64.addr;
b0d623f7
A
2984 /*
2985 * XXX: KAME specific hack for scoped addresses
39236c6e 2986 * XXXX: for other scopes than link-local?
b0d623f7 2987 */
316670eb
A
2988 if (IN6_IS_ADDR_LINKLOCAL(&nbi_64.addr) ||
2989 IN6_IS_ADDR_MC_LINKLOCAL(&nbi_64.addr)) {
2990 u_int16_t *idp =
2991 (u_int16_t *)(void *)&nb_addr.s6_addr[2];
b0d623f7
A
2992
2993 if (*idp == 0)
2994 *idp = htons(ifp->if_index);
2995 }
2996
2997 /* Callee returns a locked route upon success */
2998 if ((rt = nd6_lookup(&nb_addr, 0, ifp, 0)) == NULL) {
2999 error = EINVAL;
3000 break;
3001 }
3002 RT_LOCK_ASSERT_HELD(rt);
3003 ln = rt->rt_llinfo;
316670eb
A
3004 nbi_64.state = ln->ln_state;
3005 nbi_64.asked = ln->ln_asked;
3006 nbi_64.isrouter = ln->ln_router;
39236c6e 3007 nbi_64.expire = ln_getexpire(ln);
b0d623f7
A
3008 RT_REMREF_LOCKED(rt);
3009 RT_UNLOCK(rt);
316670eb 3010 bcopy(&nbi_64, data, sizeof (nbi_64));
1c79356b 3011 break;
b0d623f7
A
3012 }
3013
316670eb
A
3014 case SIOCGDEFIFACE_IN6_32: /* struct in6_ndifreq_32 */
3015 case SIOCGDEFIFACE_IN6_64: { /* struct in6_ndifreq_64 */
3016 struct in6_ndifreq_64 *ndif_64 =
3017 (struct in6_ndifreq_64 *)(void *)data;
3018 struct in6_ndifreq_32 *ndif_32 =
3019 (struct in6_ndifreq_32 *)(void *)data;
b0d623f7 3020
316670eb
A
3021 if (cmd == SIOCGDEFIFACE_IN6_64) {
3022 u_int64_t j = nd6_defifindex;
3023 bcopy(&j, &ndif_64->ifindex, sizeof (j));
3024 } else {
3025 bcopy(&nd6_defifindex, &ndif_32->ifindex,
3026 sizeof (u_int32_t));
3027 }
1c79356b
A
3028 break;
3029 }
b0d623f7 3030
316670eb
A
3031 case SIOCSDEFIFACE_IN6_32: /* struct in6_ndifreq_32 */
3032 case SIOCSDEFIFACE_IN6_64: { /* struct in6_ndifreq_64 */
3033 struct in6_ndifreq_64 *ndif_64 =
3034 (struct in6_ndifreq_64 *)(void *)data;
3035 struct in6_ndifreq_32 *ndif_32 =
3036 (struct in6_ndifreq_32 *)(void *)data;
3037 u_int32_t idx;
b0d623f7 3038
316670eb
A
3039 if (cmd == SIOCSDEFIFACE_IN6_64) {
3040 u_int64_t j;
3041 bcopy(&ndif_64->ifindex, &j, sizeof (j));
3042 idx = (u_int32_t)j;
3043 } else {
3044 bcopy(&ndif_32->ifindex, &idx, sizeof (idx));
3045 }
3046
3047 error = nd6_setdefaultiface(idx);
6d2010ae 3048 return (error);
b0d623f7
A
3049 /* NOTREACHED */
3050 }
39037602
A
3051 case SIOCGIFCGAPREP_IN6:
3052 case SIOCSIFCGAPREP_IN6:
3053 {
3054 struct in6_cgareq *p_cgareq =
3055 (struct in6_cgareq *)(void *)data;
3056 struct nd_ifinfo *ndi = ND_IFINFO(ifp);
3057
3058 struct in6_cga_modifier *req_cga_mod =
3059 &(p_cgareq->cgar_cgaprep.cga_modifier);
3060 struct in6_cga_modifier *ndi_cga_mod = NULL;
3061
3062 if ((NULL == ndi) || !ndi->initialized) {
3063 error = EINVAL;
3064 break;
3065 }
3066
3067 lck_mtx_lock(&ndi->lock);
3068 ndi_cga_mod = &(ndi->local_cga_modifier);
3069
3070 if (cmd == SIOCSIFCGAPREP_IN6) {
3071 bcopy(req_cga_mod, ndi_cga_mod, sizeof(*ndi_cga_mod));
3072 ndi->cga_initialized = TRUE;
3073 } else
3074 bcopy(ndi_cga_mod, req_cga_mod, sizeof(*req_cga_mod));
3075
3076 lck_mtx_unlock(&ndi->lock);
3077 return (error);
3078 /* NOTREACHED */
3079 }
b0d623f7
A
3080 }
3081 return (error);
1c79356b
A
3082}
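/*
 * A minimal userland sketch of the SIOCGNBRINFO_IN6 path handled above,
 * roughly what ndp(8) does.  It assumes the exported struct in6_nbrinfo
 * (ifname/addr/asked/isrouter/state/expire) from <netinet6/nd6.h> and does
 * only minimal error handling; for link-local destinations the kernel
 * itself embeds the interface index when the scope word is zero (see the
 * KAME hack above).
 */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netinet6/nd6.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int
print_nbrinfo(const char *ifname, const struct in6_addr *dst)
{
	struct in6_nbrinfo nbi;
	int s, err;

	if ((s = socket(AF_INET6, SOCK_DGRAM, 0)) == -1)
		return (-1);
	memset(&nbi, 0, sizeof (nbi));
	strlcpy(nbi.ifname, ifname, sizeof (nbi.ifname));
	nbi.addr = *dst;
	err = ioctl(s, SIOCGNBRINFO_IN6, &nbi);
	close(s);
	if (err == -1)
		return (-1);
	printf("state=%d isrouter=%d asked=%ld expire=%d\n",
	    nbi.state, nbi.isrouter, (long)nbi.asked, nbi.expire);
	return (0);
}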
3083
3084/*
3085 * Create neighbor cache entry and cache link-layer address,
3086 * on reception of inbound ND6 packets. (RS/RA/NS/redirect)
3087 */
b0d623f7 3088void
39236c6e
A
3089nd6_cache_lladdr(struct ifnet *ifp, struct in6_addr *from, char *lladdr,
3090 int lladdrlen, int type, int code)
1c79356b 3091{
39236c6e 3092#pragma unused(lladdrlen)
1c79356b
A
3093 struct rtentry *rt = NULL;
3094 struct llinfo_nd6 *ln = NULL;
3095 int is_newentry;
3096 struct sockaddr_dl *sdl = NULL;
3097 int do_update;
3098 int olladdr;
3099 int llchange;
3100 int newstate = 0;
39236c6e
A
3101 uint64_t timenow;
3102 boolean_t sched_timeout = FALSE;
39037602 3103 struct nd_ifinfo *ndi = NULL;
1c79356b 3104
6d2010ae 3105 if (ifp == NULL)
1c79356b 3106 panic("ifp == NULL in nd6_cache_lladdr");
6d2010ae 3107 if (from == NULL)
1c79356b
A
3108 panic("from == NULL in nd6_cache_lladdr");
3109
3110 /* nothing must be updated for unspecified address */
3111 if (IN6_IS_ADDR_UNSPECIFIED(from))
b0d623f7 3112 return;
1c79356b
A
3113
3114 /*
3115 * Validation about ifp->if_addrlen and lladdrlen must be done in
3116 * the caller.
1c79356b 3117 */
39236c6e 3118 timenow = net_uptime();
1c79356b 3119
b0d623f7
A
3120 rt = nd6_lookup(from, 0, ifp, 0);
3121 if (rt == NULL) {
b0d623f7
A
3122 if ((rt = nd6_lookup(from, 1, ifp, 0)) == NULL)
3123 return;
3124 RT_LOCK_ASSERT_HELD(rt);
1c79356b 3125 is_newentry = 1;
9bccf70c 3126 } else {
b0d623f7 3127 RT_LOCK_ASSERT_HELD(rt);
9bccf70c 3128 /* do nothing if static ndp is set */
91447636 3129 if (rt->rt_flags & RTF_STATIC) {
b0d623f7
A
3130 RT_REMREF_LOCKED(rt);
3131 RT_UNLOCK(rt);
3132 return;
91447636 3133 }
1c79356b 3134 is_newentry = 0;
9bccf70c 3135 }
1c79356b 3136
6d2010ae
A
3137 if (rt == NULL)
3138 return;
1c79356b
A
3139 if ((rt->rt_flags & (RTF_GATEWAY | RTF_LLINFO)) != RTF_LLINFO) {
3140fail:
b0d623f7
A
3141 RT_UNLOCK(rt);
3142 nd6_free(rt);
3143 rtfree(rt);
3144 return;
1c79356b 3145 }
6d2010ae
A
3146 ln = (struct llinfo_nd6 *)rt->rt_llinfo;
3147 if (ln == NULL)
1c79356b 3148 goto fail;
6d2010ae 3149 if (rt->rt_gateway == NULL)
1c79356b
A
3150 goto fail;
3151 if (rt->rt_gateway->sa_family != AF_LINK)
3152 goto fail;
3153 sdl = SDL(rt->rt_gateway);
3154
3155 olladdr = (sdl->sdl_alen) ? 1 : 0;
3156 if (olladdr && lladdr) {
3157 if (bcmp(lladdr, LLADDR(sdl), ifp->if_addrlen))
3158 llchange = 1;
3159 else
3160 llchange = 0;
3161 } else
3162 llchange = 0;
3163
3164 /*
3165 * newentry olladdr lladdr llchange (*=record)
3166 * 0 n n -- (1)
3167 * 0 y n -- (2)
3168 * 0 n y -- (3) * STALE
3169 * 0 y y n (4) *
3170 * 0 y y y (5) * STALE
3171 * 1 -- n -- (6) NOSTATE(= PASSIVE)
3172 * 1 -- y -- (7) * STALE
3173 */
3174
55e303ae 3175 if (lladdr) { /* (3-5) and (7) */
1c79356b
A
3176 /*
3177 * Record source link-layer address
3178 * XXX is it dependent on ifp->if_type?
3179 */
3180 sdl->sdl_alen = ifp->if_addrlen;
3181 bcopy(lladdr, LLADDR(sdl), ifp->if_addrlen);
6d2010ae
A
3182
3183 /* cache the gateway (sender HW) address */
3184 nd6_llreach_alloc(rt, ifp, LLADDR(sdl), sdl->sdl_alen, FALSE);
1c79356b
A
3185 }
3186
3187 if (!is_newentry) {
6d2010ae
A
3188 if ((!olladdr && lladdr != NULL) || /* (3) */
3189 (olladdr && lladdr != NULL && llchange)) { /* (5) */
1c79356b
A
3190 do_update = 1;
3191 newstate = ND6_LLINFO_STALE;
55e303ae 3192 } else /* (1-2,4) */
1c79356b
A
3193 do_update = 0;
3194 } else {
3195 do_update = 1;
6d2010ae 3196 if (lladdr == NULL) /* (6) */
1c79356b 3197 newstate = ND6_LLINFO_NOSTATE;
55e303ae 3198 else /* (7) */
1c79356b
A
3199 newstate = ND6_LLINFO_STALE;
3200 }
3201
39037602
A
3202 /*
3203 * For interfaces that do not perform NUD,
3204 * neighbor cache entries must always be marked
3205 * reachable with no expiry.
3206 */
3207 ndi = ND_IFINFO(ifp);
3208 VERIFY((NULL != ndi) && (TRUE == ndi->initialized));
3209
3210 if (ndi && !(ndi->flags & ND6_IFF_PERFORMNUD)) {
3211 newstate = ND6_LLINFO_REACHABLE;
3212 ln_setexpire(ln, 0);
3213 }
3214
1c79356b
A
3215 if (do_update) {
3216 /*
3217 * Update the state of the neighbor cache.
3218 */
39037602 3219 ND6_CACHE_STATE_TRANSITION(ln, newstate);
1c79356b 3220
39037602
A
3221 if ((ln->ln_state == ND6_LLINFO_STALE) ||
3222 (ln->ln_state == ND6_LLINFO_REACHABLE)) {
b0d623f7 3223 struct mbuf *m = ln->ln_hold;
9bccf70c
A
3224 /*
3225 * XXX: since nd6_output() below will cause
3226 * state transition to DELAY and reset the timer,
3227 * we must set the timer now, although it is actually
3228 * meaningless.
3229 */
39037602
A
3230 if (ln->ln_state == ND6_LLINFO_STALE)
3231 ln_setexpire(ln, timenow + nd6_gctimer);
9bccf70c 3232
39037602 3233 ln->ln_hold = NULL;
b0d623f7 3234 if (m != NULL) {
6d2010ae
A
3235 struct sockaddr_in6 sin6;
3236
3237 rtkey_to_sa6(rt, &sin6);
9bccf70c
A
3238 /*
3239 * we assume ifp is not a p2p here, so just
3240 * set the 2nd argument as the 1st one.
3241 */
b0d623f7 3242 RT_UNLOCK(rt);
3e170ce0 3243 nd6_output_list(ifp, ifp, m, &sin6, rt, NULL);
b0d623f7 3244 RT_LOCK(rt);
1c79356b
A
3245 }
3246 } else if (ln->ln_state == ND6_LLINFO_INCOMPLETE) {
3247 /* probe right away */
39236c6e
A
3248 ln_setexpire(ln, timenow);
3249 sched_timeout = TRUE;
1c79356b
A
3250 }
3251 }
3252
3253 /*
3254 * ICMP6 type dependent behavior.
3255 *
3256 * NS: clear IsRouter if new entry
3257 * RS: clear IsRouter
3258 * RA: set IsRouter if there's lladdr
3259 * redir: clear IsRouter if new entry
3260 *
3261 * RA case, (1):
3262 * The spec says that we must set IsRouter in the following cases:
3263 * - If lladdr exist, set IsRouter. This means (1-5).
3264 * - If it is old entry (!newentry), set IsRouter. This means (7).
3265 * So, based on the spec, in (1-5) and (7) cases we must set IsRouter.
3266 * A question arises for the (1) case: it has no lladdr in the
3267 * neighbor cache, which is similar to (6).
3268 * This case is rare but we figured that we MUST NOT set IsRouter.
3269 *
39236c6e
A
3270 * newentry olladdr lladdr llchange NS RS RA redir
3271 * D R
3272 * 0 n n -- (1) c ? s
3273 * 0 y n -- (2) c s s
3274 * 0 n y -- (3) c s s
3275 * 0 y y n (4) c s s
3276 * 0 y y y (5) c s s
3277 * 1 -- n -- (6) c c c s
3278 * 1 -- y -- (7) c c s c s
1c79356b
A
3279 *
3280 * (c=clear s=set)
3281 */
3282 switch (type & 0xff) {
3283 case ND_NEIGHBOR_SOLICIT:
3284 /*
3285 * New entry must have is_router flag cleared.
3286 */
55e303ae 3287 if (is_newentry) /* (6-7) */
1c79356b
A
3288 ln->ln_router = 0;
3289 break;
3290 case ND_REDIRECT:
3291 /*
39236c6e
A
3292 * If the ICMP message is a Redirect to a better router, always
3293 * set the is_router flag. Otherwise, if the entry is newly
3294 * created, then clear the flag. [RFC 4861, sec 8.3]
1c79356b
A
3295 */
3296 if (code == ND_REDIRECT_ROUTER)
3297 ln->ln_router = 1;
55e303ae 3298 else if (is_newentry) /* (6-7) */
1c79356b
A
3299 ln->ln_router = 0;
3300 break;
3301 case ND_ROUTER_SOLICIT:
3302 /*
3303 * is_router flag must always be cleared.
3304 */
3305 ln->ln_router = 0;
3306 break;
3307 case ND_ROUTER_ADVERT:
3308 /*
3309 * Mark an entry with lladdr as a router.
3310 */
6d2010ae
A
3311 if ((!is_newentry && (olladdr || lladdr)) || /* (2-5) */
3312 (is_newentry && lladdr)) { /* (7) */
1c79356b
A
3313 ln->ln_router = 1;
3314 }
3315 break;
3316 }
3317
9bccf70c
A
3318 /*
3319 * When the link-layer address of a router changes, select the
3320 * best router again. In particular, when the neighbor entry is newly
3321 * created, it might affect the selection policy.
3322 * Question: can we restrict the first condition to the "is_newentry"
3323 * case?
316670eb
A
3324 *
3325 * Note: Perform default router selection even when we are a router,
3326 * if Scoped Routing is enabled.
9bccf70c 3327 */
39037602 3328 if (do_update && ln->ln_router) {
b0d623f7
A
3329 RT_REMREF_LOCKED(rt);
3330 RT_UNLOCK(rt);
91447636 3331 lck_mtx_lock(nd6_mutex);
6d2010ae 3332 defrouter_select(ifp);
91447636 3333 lck_mtx_unlock(nd6_mutex);
b0d623f7
A
3334 } else {
3335 RT_REMREF_LOCKED(rt);
3336 RT_UNLOCK(rt);
91447636 3337 }
39236c6e
A
3338 if (sched_timeout) {
3339 lck_mtx_lock(rnh_lock);
3340 nd6_sched_timeout(NULL, NULL);
3341 lck_mtx_unlock(rnh_lock);
3342 }
1c79356b
A
3343}
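/*
 * A minimal sketch of the kind of call the ND message input paths make
 * into nd6_cache_lladdr(); variable names are illustrative and the real
 * callers validate lladdrlen against the interface type first, as the
 * comment above requires.  Worked row of the table: an RA whose source
 * link-layer option differs from the cached one (row 5, llchange=1)
 * re-records the lladdr, moves the entry to STALE and marks the sender
 * as a router.
 */
static void
example_ra_source_cache(struct ifnet *ifp, struct in6_addr *saddr6,
    char *lladdr, int lladdrlen)
{
	/* the code argument only matters for redirects, so pass 0 */
	nd6_cache_lladdr(ifp, saddr6, lladdr, lladdrlen, ND_ROUTER_ADVERT, 0);
}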
3344
3345static void
39236c6e 3346nd6_slowtimo(void *arg)
1c79356b 3347{
39236c6e 3348#pragma unused(arg)
3e170ce0
A
3349 struct nd_ifinfo *nd6if = NULL;
3350 struct ifnet *ifp = NULL;
3351
3352 ifnet_head_lock_shared();
3353 for (ifp = ifnet_head.tqh_first; ifp;
3354 ifp = ifp->if_link.tqe_next) {
3355 nd6if = ND_IFINFO(ifp);
3356 if ((NULL == nd6if) || (FALSE == nd6if->initialized)) {
3357 continue;
3358 }
1c79356b 3359
316670eb 3360 lck_mtx_lock(&nd6if->lock);
1c79356b
A
3361 if (nd6if->basereachable && /* already initialized */
3362 (nd6if->recalctm -= ND6_SLOWTIMER_INTERVAL) <= 0) {
3363 /*
3364 * Since reachable time rarely changes by router
3365 * advertisements, we SHOULD insure that a new random
3366 * value gets recomputed at least once every few hours.
39236c6e 3367 * (RFC 4861, 6.3.4)
1c79356b
A
3368 */
3369 nd6if->recalctm = nd6_recalc_reachtm_interval;
39236c6e
A
3370 nd6if->reachable =
3371 ND_COMPUTE_RTIME(nd6if->basereachable);
1c79356b 3372 }
316670eb 3373 lck_mtx_unlock(&nd6if->lock);
1c79356b 3374 }
3e170ce0 3375 ifnet_head_done();
39236c6e 3376 timeout(nd6_slowtimo, NULL, ND6_SLOWTIMER_INTERVAL * hz);
9bccf70c 3377}
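/*
 * Illustrative sketch (not part of nd6.c): the recomputation performed by
 * ND_COMPUTE_RTIME above.  Per RFC 4861, section 6.3.4, the effective
 * ReachableTime is a uniformly distributed random value between
 * MIN_RANDOM_FACTOR (0.5) and MAX_RANDOM_FACTOR (1.5) times
 * BaseReachableTime.  This user-space helper is a hypothetical stand-in
 * for the kernel macro and uses arc4random_uniform() purely for
 * demonstration.
 */
#include <stdint.h>
#include <stdlib.h>

static uint32_t
compute_reachable_time(uint32_t basereachable_ms)
{
	/* Random factor in [0.5, 1.5), scaled by 1024 to stay integral. */
	uint64_t factor = 512 + arc4random_uniform(1024);

	return ((uint32_t)(((uint64_t)basereachable_ms * factor) / 1024));
}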
1c79356b 3378
1c79356b 3379int
b0d623f7 3380nd6_output(struct ifnet *ifp, struct ifnet *origifp, struct mbuf *m0,
316670eb 3381 struct sockaddr_in6 *dst, struct rtentry *hint0, struct flowadv *adv)
1c79356b 3382{
3e170ce0
A
3383 return nd6_output_list(ifp, origifp, m0, dst, hint0, adv);
3384}
3385
3386/*
3387 * nd6_output_list()
3388 *
3389 * Assumption: route determination for first packet can be correctly applied to
3390 * all packets in the chain.
3391 */
3392#define senderr(e) { error = (e); goto bad; }
3393int
3394nd6_output_list(struct ifnet *ifp, struct ifnet *origifp, struct mbuf *m0,
3395 struct sockaddr_in6 *dst, struct rtentry *hint0, struct flowadv *adv)
3396{
b0d623f7 3397 struct rtentry *rt = hint0, *hint = hint0;
1c79356b
A
3398 struct llinfo_nd6 *ln = NULL;
3399 int error = 0;
39236c6e 3400 uint64_t timenow;
b0d623f7 3401 struct rtentry *rtrele = NULL;
3e170ce0 3402 struct nd_ifinfo *ndi = NULL;
1c79356b 3403
b0d623f7
A
3404 if (rt != NULL) {
3405 RT_LOCK_SPIN(rt);
3406 RT_ADDREF_LOCKED(rt);
3407 }
1c79356b 3408
b0d623f7
A
3409 if (IN6_IS_ADDR_MULTICAST(&dst->sin6_addr) || !nd6_need_cache(ifp)) {
3410 if (rt != NULL)
3411 RT_UNLOCK(rt);
1c79356b 3412 goto sendpkt;
b0d623f7 3413 }
1c79356b
A
3414
3415 /*
b0d623f7
A
3416 * Next hop determination. Because we may involve the gateway route
3417 * in addition to the original route, locking is rather complicated.
3418 * The general concept is that regardless of whether the route points
3419 * to the original route or to the gateway route, this routine takes
3420 * an extra reference on such a route. This extra reference will be
3421 * released at the end.
3422 *
3423 * Care must be taken to ensure that the "hint0" route never gets freed
3424 * via rtfree(), since the caller may have stored it inside a struct
3425 * route with a reference held for that placeholder.
3426 *
3427 * This logic is similar to, though not exactly the same as the one
316670eb 3428 * used by route_to_gwroute().
1c79356b 3429 */
b0d623f7
A
3430 if (rt != NULL) {
3431 /*
3432 * We have a reference to "rt" by now (or below via rtalloc1),
3433 * which will either be released or freed at the end of this
3434 * routine.
3435 */
3436 RT_LOCK_ASSERT_HELD(rt);
3437 if (!(rt->rt_flags & RTF_UP)) {
3438 RT_REMREF_LOCKED(rt);
3439 RT_UNLOCK(rt);
39236c6e
A
3440 if ((hint = rt = rtalloc1_scoped(SA(dst), 1, 0,
3441 ifp->if_index)) != NULL) {
b0d623f7 3442 RT_LOCK_SPIN(rt);
9bccf70c
A
3443 if (rt->rt_ifp != ifp) {
3444 /* XXX: loop care? */
b0d623f7 3445 RT_UNLOCK(rt);
3e170ce0 3446 error = nd6_output_list(ifp, origifp, m0,
316670eb 3447 dst, rt, adv);
b0d623f7
A
3448 rtfree(rt);
3449 return (error);
9bccf70c 3450 }
91447636 3451 } else {
1c79356b 3452 senderr(EHOSTUNREACH);
91447636 3453 }
1c79356b 3454 }
9bccf70c 3455
1c79356b 3456 if (rt->rt_flags & RTF_GATEWAY) {
b0d623f7
A
3457 struct rtentry *gwrt;
3458 struct in6_ifaddr *ia6 = NULL;
3459 struct sockaddr_in6 gw6;
3460
6d2010ae 3461 rtgw_to_sa6(rt, &gw6);
b0d623f7
A
3462 /*
3463 * Must drop rt_lock since nd6_is_addr_neighbor()
3464 * calls nd6_lookup() and acquires rnh_lock.
3465 */
3466 RT_UNLOCK(rt);
9bccf70c
A
3467
3468 /*
3469 * We skip link-layer address resolution and NUD
3470 * if the gateway is not a neighbor from the ND point
55e303ae
A
3471 * of view, regardless of the value of nd_ifinfo.flags.
3472 * The second condition is a bit tricky; we skip
9bccf70c
A
3473 * if the gateway is our own address, which is
3474 * sometimes used to install a route to a p2p link.
3475 */
b0d623f7
A
3476 if (!nd6_is_addr_neighbor(&gw6, ifp, 0) ||
3477 (ia6 = in6ifa_ifpwithaddr(ifp, &gw6.sin6_addr))) {
9bccf70c
A
3478 /*
3479 * We allow this kind of tricky route only
3480 * when the outgoing interface is p2p.
3481 * XXX: we may need a more generic rule here.
3482 */
b0d623f7 3483 if (ia6 != NULL)
6d2010ae 3484 IFA_REMREF(&ia6->ia_ifa);
9bccf70c
A
3485 if ((ifp->if_flags & IFF_POINTOPOINT) == 0)
3486 senderr(EHOSTUNREACH);
9bccf70c
A
3487 goto sendpkt;
3488 }
3489
b0d623f7 3490 RT_LOCK_SPIN(rt);
39236c6e 3491 gw6 = *(SIN6(rt->rt_gateway));
b0d623f7
A
3492
3493 /* If hint is now down, give up */
3494 if (!(rt->rt_flags & RTF_UP)) {
3495 RT_UNLOCK(rt);
3496 senderr(EHOSTUNREACH);
3497 }
3498
3499 /* If there's no gateway route, look it up */
3500 if ((gwrt = rt->rt_gwroute) == NULL) {
3501 RT_UNLOCK(rt);
1c79356b 3502 goto lookup;
b0d623f7
A
3503 }
3504 /* Become a regular mutex */
3505 RT_CONVERT_LOCK(rt);
3506
3507 /*
3508 * Take gwrt's lock while holding route's lock;
3509 * this is okay since gwrt never points back
3510 * to rt, so no lock ordering issues.
3511 */
3512 RT_LOCK_SPIN(gwrt);
3513 if (!(gwrt->rt_flags & RTF_UP)) {
b0d623f7
A
3514 rt->rt_gwroute = NULL;
3515 RT_UNLOCK(gwrt);
3516 RT_UNLOCK(rt);
3517 rtfree(gwrt);
3518lookup:
316670eb 3519 lck_mtx_lock(rnh_lock);
39236c6e 3520 gwrt = rtalloc1_scoped_locked(SA(&gw6), 1, 0,
316670eb 3521 ifp->if_index);
b0d623f7
A
3522
3523 RT_LOCK(rt);
3524 /*
3525 * Bail out if the route is down, no route
3526 * to gateway, circular route, or if the
3527 * gateway portion of "rt" has changed.
3528 */
3529 if (!(rt->rt_flags & RTF_UP) ||
3530 gwrt == NULL || gwrt == rt ||
3531 !equal(SA(&gw6), rt->rt_gateway)) {
3532 if (gwrt == rt) {
3533 RT_REMREF_LOCKED(gwrt);
3534 gwrt = NULL;
3535 }
3536 RT_UNLOCK(rt);
3537 if (gwrt != NULL)
316670eb
A
3538 rtfree_locked(gwrt);
3539 lck_mtx_unlock(rnh_lock);
b0d623f7
A
3540 senderr(EHOSTUNREACH);
3541 }
316670eb
A
3542 VERIFY(gwrt != NULL);
3543 /*
3544 * Set gateway route; callee adds ref to gwrt;
3545 * gwrt has an extra ref from rtalloc1() for
3546 * this routine.
3547 */
3548 rt_set_gwroute(rt, rt_key(rt), gwrt);
b0d623f7 3549 RT_UNLOCK(rt);
316670eb 3550 lck_mtx_unlock(rnh_lock);
b0d623f7
A
3551 /* Remember to release/free "rt" at the end */
3552 rtrele = rt;
3553 rt = gwrt;
b0d623f7
A
3554 } else {
3555 RT_ADDREF_LOCKED(gwrt);
3556 RT_UNLOCK(gwrt);
3557 RT_UNLOCK(rt);
b0d623f7
A
3558 /* Remember to release/free "rt" at the end */
3559 rtrele = rt;
3560 rt = gwrt;
1c79356b 3561 }
316670eb
A
3562 VERIFY(rt == gwrt);
3563
3564 /*
3565 * This is an opportunity to revalidate the parent
3566 * route's gwroute, in case it now points to a dead
3567 * route entry. Parent route won't go away since the
3568 * clone (hint) holds a reference to it. rt == gwrt.
3569 */
3570 RT_LOCK_SPIN(hint);
3571 if ((hint->rt_flags & (RTF_WASCLONED | RTF_UP)) ==
3572 (RTF_WASCLONED | RTF_UP)) {
3573 struct rtentry *prt = hint->rt_parent;
3574 VERIFY(prt != NULL);
3575
3576 RT_CONVERT_LOCK(hint);
3577 RT_ADDREF(prt);
3578 RT_UNLOCK(hint);
3579 rt_revalidate_gwroute(prt, rt);
3580 RT_REMREF(prt);
3581 } else {
3582 RT_UNLOCK(hint);
3583 }
3584
3585 RT_LOCK_SPIN(rt);
3586 /* rt == gwrt; if it is now down, give up */
3587 if (!(rt->rt_flags & RTF_UP)) {
3588 RT_UNLOCK(rt);
3589 rtfree(rt);
3590 rt = NULL;
3591 /* "rtrele" == original "rt" */
3592 senderr(EHOSTUNREACH);
3593 }
1c79356b 3594 }
316670eb 3595
b0d623f7
A
3596 /* Become a regular mutex */
3597 RT_CONVERT_LOCK(rt);
1c79356b
A
3598 }
3599
3600 /*
3601 * Address resolution or Neighbor Unreachability Detection
3602 * for the next hop.
3603 * At this point, the destination of the packet must be a unicast
3604 * or an anycast address (i.e., not a multicast).
3605 */
3606
3607 /* Look up the neighbor cache for the nexthop */
b0d623f7
A
3608 if (rt && (rt->rt_flags & RTF_LLINFO) != 0) {
3609 ln = rt->rt_llinfo;
3610 } else {
6d2010ae
A
3611 struct sockaddr_in6 sin6;
3612 /*
3613 * Clear out Scope ID field in case it is set.
3614 */
3615 sin6 = *dst;
3616 sin6.sin6_scope_id = 0;
9bccf70c
A
3617 /*
3618 * Since nd6_is_addr_neighbor() internally calls nd6_lookup(),
55e303ae 3619 * the condition below is not very efficient. But we believe
9bccf70c 3620 * it is tolerable, because this should be a rare case.
b0d623f7
A
3621 * Must drop rt_lock since nd6_is_addr_neighbor() calls
3622 * nd6_lookup() and acquires rnh_lock.
9bccf70c 3623 */
b0d623f7
A
3624 if (rt != NULL)
3625 RT_UNLOCK(rt);
6d2010ae 3626 if (nd6_is_addr_neighbor(&sin6, ifp, 0)) {
b0d623f7
A
3627 /* "rtrele" may have been used, so clean up "rt" now */
3628 if (rt != NULL) {
3629 /* Don't free "hint0" */
3630 if (rt == hint0)
3631 RT_REMREF(rt);
3632 else
3633 rtfree(rt);
3634 }
3635 /* Callee returns a locked route upon success */
3636 rt = nd6_lookup(&dst->sin6_addr, 1, ifp, 0);
3637 if (rt != NULL) {
3638 RT_LOCK_ASSERT_HELD(rt);
3639 ln = rt->rt_llinfo;
3640 }
3641 } else if (rt != NULL) {
3642 RT_LOCK(rt);
3643 }
1c79356b 3644 }
b0d623f7 3645
1c79356b 3646 if (!ln || !rt) {
3e170ce0 3647 if (rt != NULL) {
b0d623f7 3648 RT_UNLOCK(rt);
3e170ce0 3649 }
316670eb
A
3650 ndi = ND_IFINFO(ifp);
3651 VERIFY(ndi != NULL && ndi->initialized);
3652 lck_mtx_lock(&ndi->lock);
9bccf70c 3653 if ((ifp->if_flags & IFF_POINTOPOINT) == 0 &&
316670eb
A
3654 !(ndi->flags & ND6_IFF_PERFORMNUD)) {
3655 lck_mtx_unlock(&ndi->lock);
9bccf70c
A
3656 log(LOG_DEBUG,
3657 "nd6_output: can't allocate llinfo for %s "
39236c6e
A
3658 "(ln=0x%llx, rt=0x%llx)\n",
3659 ip6_sprintf(&dst->sin6_addr),
3660 (uint64_t)VM_KERNEL_ADDRPERM(ln),
3661 (uint64_t)VM_KERNEL_ADDRPERM(rt));
9bccf70c
A
3662 senderr(EIO); /* XXX: good error? */
3663 }
316670eb 3664 lck_mtx_unlock(&ndi->lock);
9bccf70c
A
3665
3666 goto sendpkt; /* send anyway */
1c79356b
A
3667 }
3668
39236c6e
A
3669 net_update_uptime();
3670 timenow = net_uptime();
91447636 3671
1c79356b
A
3672 /* We don't have to do link-layer address resolution on a p2p link. */
3673 if ((ifp->if_flags & IFF_POINTOPOINT) != 0 &&
9bccf70c 3674 ln->ln_state < ND6_LLINFO_REACHABLE) {
39037602 3675 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_STALE);
39236c6e 3676 ln_setexpire(ln, timenow + nd6_gctimer);
9bccf70c 3677 }
1c79356b
A
3678
3679 /*
3680 * The first time we send a packet to a neighbor whose entry is
3681 * STALE, we have to change the state to DELAY and set a timer to
3682 * expire in DELAY_FIRST_PROBE_TIME seconds, to ensure that we do
3683 * neighbor unreachability detection on expiration.
39236c6e 3684 * (RFC 4861 7.3.3)
1c79356b
A
3685 */
3686 if (ln->ln_state == ND6_LLINFO_STALE) {
3687 ln->ln_asked = 0;
39037602 3688 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_DELAY);
39236c6e
A
3689 ln_setexpire(ln, timenow + nd6_delay);
3690 /* N.B.: we will re-arm the timer below. */
3691 _CASSERT(ND6_LLINFO_DELAY > ND6_LLINFO_INCOMPLETE);
1c79356b
A
3692 }
3693
3694 /*
3695 * If the neighbor cache entry has a state other than INCOMPLETE
55e303ae 3696 * (i.e. its link-layer address is already resolved), just
1c79356b
A
3697 * send the packet.
3698 */
b0d623f7
A
3699 if (ln->ln_state > ND6_LLINFO_INCOMPLETE) {
3700 RT_UNLOCK(rt);
3701 /*
3702 * Move this entry to the head of the queue so that it is
3703 * less likely for this entry to be a target of forced
39236c6e
A
3704 * garbage collection (see nd6_rtrequest()). Do this only
3705 * if the entry is non-permanent (as permanent ones will
3706 * never be purged), and if the number of active entries
3707 * is at least half of the threshold.
b0d623f7 3708 */
39236c6e
A
3709 if (ln->ln_state == ND6_LLINFO_DELAY ||
3710 (ln->ln_expire != 0 && ip6_neighborgcthresh > 0 &&
3711 nd6_inuse >= (ip6_neighborgcthresh >> 1))) {
3712 lck_mtx_lock(rnh_lock);
3713 if (ln->ln_state == ND6_LLINFO_DELAY)
3714 nd6_sched_timeout(NULL, NULL);
3715 if (ln->ln_expire != 0 && ip6_neighborgcthresh > 0 &&
3716 nd6_inuse >= (ip6_neighborgcthresh >> 1)) {
3717 RT_LOCK_SPIN(rt);
3718 if (ln->ln_flags & ND6_LNF_IN_USE) {
3719 LN_DEQUEUE(ln);
3720 LN_INSERTHEAD(ln);
3721 }
3722 RT_UNLOCK(rt);
3723 }
3724 lck_mtx_unlock(rnh_lock);
b0d623f7 3725 }
1c79356b 3726 goto sendpkt;
b0d623f7 3727 }
1c79356b 3728
39236c6e
A
3729 /*
3730 * If this is a prefix proxy route, record the inbound interface
3731 * so that it can be excluded from the list of interfaces eligible
3732 * for forwarding the proxied NS in nd6_prproxy_ns_output().
3733 */
3734 if (rt->rt_flags & RTF_PROXY)
3735 ln->ln_exclifp = ((origifp == ifp) ? NULL : origifp);
3736
1c79356b
A
3737 /*
3738 * There is a neighbor cache entry, but no ethernet address
55e303ae 3739 * response yet. Replace the held mbuf (if any) with this
1c79356b
A
3740 * latest one.
3741 *
55e303ae 3742 * This code conforms to the rate-limiting rule described in Section
39236c6e 3743 * 7.2.2 of RFC 4861, because the timer is set correctly after sending
55e303ae 3744 * an NS below.
1c79356b 3745 */
9bccf70c 3746 if (ln->ln_state == ND6_LLINFO_NOSTATE)
39037602 3747 ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_INCOMPLETE);
1c79356b 3748 if (ln->ln_hold)
3e170ce0
A
3749 m_freem_list(ln->ln_hold);
3750 ln->ln_hold = m0;
39236c6e
A
3751 if (ln->ln_expire != 0 && ln->ln_asked < nd6_mmaxtries &&
3752 ln->ln_expire <= timenow) {
b0d623f7 3753 ln->ln_asked++;
316670eb
A
3754 ndi = ND_IFINFO(ifp);
3755 VERIFY(ndi != NULL && ndi->initialized);
3756 lck_mtx_lock(&ndi->lock);
39236c6e 3757 ln_setexpire(ln, timenow + ndi->retrans / 1000);
316670eb 3758 lck_mtx_unlock(&ndi->lock);
b0d623f7
A
3759 RT_UNLOCK(rt);
3760 /* We still have a reference on rt (for ln) */
316670eb 3761 if (ip6_forwarding)
39236c6e
A
3762 nd6_prproxy_ns_output(ifp, origifp, NULL,
3763 &dst->sin6_addr, ln);
316670eb 3764 else
39037602 3765 nd6_ns_output(ifp, NULL, &dst->sin6_addr, ln, NULL);
39236c6e
A
3766 lck_mtx_lock(rnh_lock);
3767 nd6_sched_timeout(NULL, NULL);
3768 lck_mtx_unlock(rnh_lock);
b0d623f7 3769 } else {
3e170ce0 3770 if (ln->ln_state == ND6_LLINFO_INCOMPLETE) {
39037602 3771 ln_setexpire(ln, timenow);
3e170ce0 3772 }
b0d623f7 3773 RT_UNLOCK(rt);
1c79356b 3774 }
b0d623f7
A
3775 /*
3776 * Move this entry to the head of the queue so that it is
3777 * less likely for this entry to be a target of forced
39236c6e
A
3778 * garbage collection (see nd6_rtrequest()). Do this only
3779 * if the entry is non-permanent (as permanent ones will
3780 * never be purged), and if the number of active entries
3781 * is at least half of the threshold.
b0d623f7 3782 */
39236c6e
A
3783 if (ln->ln_expire != 0 && ip6_neighborgcthresh > 0 &&
3784 nd6_inuse >= (ip6_neighborgcthresh >> 1)) {
3785 lck_mtx_lock(rnh_lock);
3786 RT_LOCK_SPIN(rt);
3787 if (ln->ln_flags & ND6_LNF_IN_USE) {
3788 LN_DEQUEUE(ln);
3789 LN_INSERTHEAD(ln);
3790 }
3791 /* Clean up "rt" now while we can */
3792 if (rt == hint0) {
3793 RT_REMREF_LOCKED(rt);
3794 RT_UNLOCK(rt);
3795 } else {
3796 RT_UNLOCK(rt);
3797 rtfree_locked(rt);
3798 }
3799 rt = NULL; /* "rt" has been taken care of */
3800 lck_mtx_unlock(rnh_lock);
b0d623f7 3801 }
b0d623f7
A
3802 error = 0;
3803 goto release;
3804
3805sendpkt:
3806 if (rt != NULL)
3807 RT_LOCK_ASSERT_NOTHELD(rt);
9bccf70c 3808
6d2010ae 3809 /* discard the packet if IPv6 operation is disabled on the interface */
39236c6e 3810 if (ifp->if_eflags & IFEF_IPV6_DISABLED) {
6d2010ae
A
3811 error = ENETDOWN; /* better error? */
3812 goto bad;
3813 }
9bccf70c 3814
39236c6e 3815 if (ifp->if_flags & IFF_LOOPBACK) {
b0d623f7 3816 /* forwarding rules require the original scope_id */
3e170ce0
A
3817 m0->m_pkthdr.rcvif = origifp;
3818 error = dlil_output(origifp, PF_INET6, m0, (caddr_t)rt,
39236c6e 3819 SA(dst), 0, adv);
b0d623f7 3820 goto release;
e5568f75
A
3821 } else {
3822 /* Do not allow a loopback address to wind up on the wire */
3e170ce0 3823 struct ip6_hdr *ip6 = mtod(m0, struct ip6_hdr *);
b0d623f7 3824
e5568f75 3825 if ((IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src) ||
39236c6e 3826 IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst))) {
e5568f75 3827 ip6stat.ip6s_badscope++;
39236c6e 3828 error = EADDRNOTAVAIL;
e5568f75
A
3829 goto bad;
3830 }
9bccf70c
A
3831 }
3832
6d2010ae
A
3833 if (rt != NULL) {
3834 RT_LOCK_SPIN(rt);
3835 /* Mark use timestamp */
3836 if (rt->rt_llinfo != NULL)
3837 nd6_llreach_use(rt->rt_llinfo);
3838 RT_UNLOCK(rt);
3839 }
3840
3e170ce0
A
3841 struct mbuf *mcur = m0;
3842 uint32_t pktcnt = 0;
39236c6e 3843
3e170ce0
A
3844 while (mcur) {
3845 if (hint != NULL && nstat_collect) {
3846 int scnt;
39236c6e 3847
3e170ce0
A
3848 if ((mcur->m_pkthdr.csum_flags & CSUM_TSO_IPV6) &&
3849 (mcur->m_pkthdr.tso_segsz > 0))
3850 scnt = mcur->m_pkthdr.len / mcur->m_pkthdr.tso_segsz;
3851 else
3852 scnt = 1;
6d2010ae 3853
3e170ce0
A
3854 nstat_route_tx(hint, scnt, mcur->m_pkthdr.len, 0);
3855 }
3856 pktcnt++;
3857
3858 mcur->m_pkthdr.rcvif = NULL;
3859 mcur = mcur->m_nextpkt;
3860 }
3861 if (pktcnt > ip6_maxchainsent)
3862 ip6_maxchainsent = pktcnt;
3863 error = dlil_output(ifp, PF_INET6, m0, (caddr_t)rt, SA(dst), 0, adv);
b0d623f7
A
3864 goto release;
3865
3866bad:
3e170ce0
A
3867 if (m0 != NULL)
3868 m_freem_list(m0);
b0d623f7
A
3869
3870release:
3871 /* Clean up "rt" unless it's already been done */
3872 if (rt != NULL) {
3873 RT_LOCK_SPIN(rt);
3874 if (rt == hint0) {
3875 RT_REMREF_LOCKED(rt);
3876 RT_UNLOCK(rt);
3877 } else {
3878 RT_UNLOCK(rt);
3879 rtfree(rt);
3880 }
3881 }
3882 /* And now clean up "rtrele" if there is any */
3883 if (rtrele != NULL) {
3884 RT_LOCK_SPIN(rtrele);
3885 if (rtrele == hint0) {
3886 RT_REMREF_LOCKED(rtrele);
3887 RT_UNLOCK(rtrele);
3888 } else {
3889 RT_UNLOCK(rtrele);
3890 rtfree(rtrele);
3891 }
3892 }
1c79356b 3893 return (error);
b0d623f7 3894}
1c79356b
A
3895#undef senderr
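/*
 * Illustrative sketch (not part of nd6.c): the neighbor-state handling
 * that nd6_output_list() applies per packet, reduced to the RFC 4861
 * rules it implements -- section 7.3.3 for the STALE->DELAY transition on
 * first use, and section 7.2.2 for holding only the most recent packet
 * while the entry is still unresolved.  The struct, enum and function
 * names here are hypothetical and the timer bookkeeping is simplified.
 */
#include <stdbool.h>

enum nud_state {
	NUD_NOSTATE, NUD_INCOMPLETE, NUD_REACHABLE, NUD_STALE, NUD_DELAY,
	NUD_PROBE
};

struct neighbor {
	enum nud_state	state;
	void		*held_pkt;	/* at most one queued packet */
	unsigned	expire;		/* absolute timer, in seconds */
};

/* Returns true if the packet may be transmitted right away. */
static bool
neighbor_output(struct neighbor *n, void *pkt, unsigned now,
    unsigned delay_first_probe, unsigned retrans)
{
	if (n->state != NUD_NOSTATE && n->state != NUD_INCOMPLETE) {
		if (n->state == NUD_STALE) {
			/* First use of a STALE entry: arm the DELAY timer. */
			n->state = NUD_DELAY;
			n->expire = now + delay_first_probe;
		}
		return (true);		/* link-layer address already known */
	}

	/* Unresolved: keep only the latest packet and solicit (7.2.2). */
	n->held_pkt = pkt;
	if (n->state == NUD_NOSTATE)
		n->state = NUD_INCOMPLETE;
	n->expire = now + retrans;	/* retransmit an NS when this fires */
	return (false);
}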
3896
9bccf70c 3897int
39236c6e 3898nd6_need_cache(struct ifnet *ifp)
9bccf70c
A
3899{
3900 /*
3901 * XXX: we currently do not maintain a neighbor cache on any interface
3902 * other than ARCnet, Ethernet, FDDI and GIF.
3903 *
3904 * RFC 2893 says:
3905 * - unidirectional tunnels need no ND
3906 */
3907 switch (ifp->if_type) {
3908 case IFT_ARCNET:
3909 case IFT_ETHER:
3910 case IFT_FDDI:
3911 case IFT_IEEE1394:
9bccf70c 3912 case IFT_L2VLAN:
91447636 3913 case IFT_IEEE8023ADLAG:
9bccf70c
A
3914#if IFT_IEEE80211
3915 case IFT_IEEE80211:
3916#endif
3917 case IFT_GIF: /* XXX need more cases? */
6d2010ae
A
3918 case IFT_PPP:
3919#if IFT_TUNNEL
3920 case IFT_TUNNEL:
3921#endif
3922 case IFT_BRIDGE:
3923 case IFT_CELLULAR:
39236c6e 3924 return (1);
9bccf70c 3925 default:
39236c6e 3926 return (0);
9bccf70c
A
3927 }
3928}
3929
1c79356b 3930int
39236c6e
A
3931nd6_storelladdr(struct ifnet *ifp, struct rtentry *rt, struct mbuf *m,
3932 struct sockaddr *dst, u_char *desten)
1c79356b 3933{
9bccf70c 3934 int i;
1c79356b
A
3935 struct sockaddr_dl *sdl;
3936
3937 if (m->m_flags & M_MCAST) {
3938 switch (ifp->if_type) {
3939 case IFT_ETHER:
9bccf70c 3940 case IFT_FDDI:
91447636
A
3941 case IFT_L2VLAN:
3942 case IFT_IEEE8023ADLAG:
9bccf70c
A
3943#if IFT_IEEE80211
3944 case IFT_IEEE80211:
3945#endif
b7266188 3946 case IFT_BRIDGE:
39236c6e
A
3947 ETHER_MAP_IPV6_MULTICAST(&SIN6(dst)->sin6_addr, desten);
3948 return (1);
9bccf70c
A
3949 case IFT_IEEE1394:
3950 for (i = 0; i < ifp->if_addrlen; i++)
3951 desten[i] = ~0;
39236c6e 3952 return (1);
1c79356b
A
3953 case IFT_ARCNET:
3954 *desten = 0;
39236c6e 3955 return (1);
1c79356b 3956 default:
39236c6e 3957 return (0); /* caller will free mbuf */
1c79356b
A
3958 }
3959 }
3960
9bccf70c
A
3961 if (rt == NULL) {
3962 /* this could happen, if we could not allocate memory */
39236c6e 3963 return (0); /* caller will free mbuf */
9bccf70c 3964 }
b0d623f7 3965 RT_LOCK(rt);
9bccf70c 3966 if (rt->rt_gateway->sa_family != AF_LINK) {
1c79356b 3967 printf("nd6_storelladdr: something odd happens\n");
b0d623f7 3968 RT_UNLOCK(rt);
39236c6e 3969 return (0); /* caller will free mbuf */
1c79356b
A
3970 }
3971 sdl = SDL(rt->rt_gateway);
3972 if (sdl->sdl_alen == 0) {
3973 /* this should be impossible, but we bark here for debugging */
3974 printf("nd6_storelladdr: sdl_alen == 0\n");
b0d623f7 3975 RT_UNLOCK(rt);
39236c6e 3976 return (0); /* caller will free mbuf */
1c79356b
A
3977 }
3978
3979 bcopy(LLADDR(sdl), desten, sdl->sdl_alen);
b0d623f7 3980 RT_UNLOCK(rt);
39236c6e 3981 return (1);
1c79356b 3982}
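/*
 * Illustrative sketch (not part of nd6.c): what ETHER_MAP_IPV6_MULTICAST()
 * used above does conceptually.  Per RFC 2464, section 7, an IPv6
 * multicast address maps to the Ethernet multicast address
 * 33:33:xx:xx:xx:xx, where the low-order 32 bits are copied from the IPv6
 * group address.  The function name below is hypothetical.
 */
#include <netinet/in.h>
#include <string.h>

static void
map_ipv6_multicast(const struct in6_addr *group, unsigned char lladdr[6])
{
	lladdr[0] = 0x33;
	lladdr[1] = 0x33;
	memcpy(&lladdr[2], &group->s6_addr[12], 4);	/* low 32 bits */
}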
91447636 3983
b0d623f7
A
3984/*
3985 * This is the ND pre-output routine; care must be taken to ensure that
3986 * the "hint" route never gets freed via rtfree(), since the caller may
3987 * have stored it inside a struct route with a reference held for that
3988 * placeholder.
3989 */
91447636 3990errno_t
b0d623f7
A
3991nd6_lookup_ipv6(ifnet_t ifp, const struct sockaddr_in6 *ip6_dest,
3992 struct sockaddr_dl *ll_dest, size_t ll_dest_len, route_t hint,
3993 mbuf_t packet)
91447636
A
3994{
3995 route_t route = hint;
3996 errno_t result = 0;
3997 struct sockaddr_dl *sdl = NULL;
3998 size_t copy_len;
b0d623f7 3999
39037602
A
4000 if (ifp == NULL || ip6_dest == NULL)
4001 return (EINVAL);
4002
91447636 4003 if (ip6_dest->sin6_family != AF_INET6)
b0d623f7
A
4004 return (EAFNOSUPPORT);
4005
91447636 4006 if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
b0d623f7
A
4007 return (ENETDOWN);
4008
4009 if (hint != NULL) {
4010 /*
4011 * Callee holds a reference on the route and returns
4012 * with the route entry locked, upon success.
4013 */
316670eb
A
4014 result = route_to_gwroute((const struct sockaddr *)ip6_dest,
4015 hint, &route);
91447636 4016 if (result != 0)
b0d623f7
A
4017 return (result);
4018 if (route != NULL)
4019 RT_LOCK_ASSERT_HELD(route);
91447636 4020 }
b0d623f7 4021
39037602
A
4022 if ((packet != NULL && (packet->m_flags & M_MCAST) != 0) ||
4023 ((ifp->if_flags & IFF_MULTICAST) &&
4024 IN6_IS_ADDR_MULTICAST(&ip6_dest->sin6_addr))) {
b0d623f7
A
4025 if (route != NULL)
4026 RT_UNLOCK(route);
4027 result = dlil_resolve_multi(ifp,
39236c6e
A
4028 (const struct sockaddr *)ip6_dest,
4029 SA(ll_dest), ll_dest_len);
b0d623f7
A
4030 if (route != NULL)
4031 RT_LOCK(route);
4032 goto release;
39037602
A
4033 } else if (route == NULL) {
4034 /*
4035 * rdar://24596652
4036 * For unicast, lookup existing ND6 entries but
4037 * do not trigger a resolution
4038 */
4039 lck_mtx_lock(rnh_lock);
4040 route = rt_lookup(TRUE,
4041 __DECONST(struct sockaddr *, ip6_dest), NULL,
4042 rt_tables[AF_INET6], ifp->if_index);
4043 lck_mtx_unlock(rnh_lock);
4044
4045 if (route != NULL) {
4046 RT_LOCK(route);
4047 }
91447636 4048 }
b0d623f7 4049
91447636 4050 if (route == NULL) {
b0d623f7
A
4051 /*
4052 * This could happen, if we could not allocate memory or
316670eb 4053 * if route_to_gwroute() didn't return a route.
b0d623f7
A
4054 */
4055 result = ENOBUFS;
4056 goto release;
91447636 4057 }
b0d623f7 4058
91447636 4059 if (route->rt_gateway->sa_family != AF_LINK) {
39236c6e
A
4060 printf("%s: route %s on %s%d gateway address not AF_LINK\n",
4061 __func__, ip6_sprintf(&ip6_dest->sin6_addr),
4062 route->rt_ifp->if_name, route->rt_ifp->if_unit);
91447636 4063 result = EADDRNOTAVAIL;
b0d623f7 4064 goto release;
91447636 4065 }
b0d623f7 4066
91447636
A
4067 sdl = SDL(route->rt_gateway);
4068 if (sdl->sdl_alen == 0) {
4069 /* this should be impossible, but we bark here for debugging */
39236c6e
A
4070 printf("%s: route %s on %s%d sdl_alen == 0\n", __func__,
4071 ip6_sprintf(&ip6_dest->sin6_addr), route->rt_ifp->if_name,
4072 route->rt_ifp->if_unit);
91447636 4073 result = EHOSTUNREACH;
b0d623f7 4074 goto release;
91447636 4075 }
b0d623f7 4076
91447636
A
4077 copy_len = sdl->sdl_len <= ll_dest_len ? sdl->sdl_len : ll_dest_len;
4078 bcopy(sdl, ll_dest, copy_len);
4079
b0d623f7
A
4080release:
4081 if (route != NULL) {
4082 if (route == hint) {
4083 RT_REMREF_LOCKED(route);
4084 RT_UNLOCK(route);
4085 } else {
4086 RT_UNLOCK(route);
4087 rtfree(route);
4088 }
4089 }
4090 return (result);
91447636
A
4091}
4092
39037602
A
4093#if (DEVELOPMENT || DEBUG)
4094
4095static int sysctl_nd6_lookup_ipv6 SYSCTL_HANDLER_ARGS;
4096SYSCTL_PROC(_net_inet6_icmp6, OID_AUTO, nd6_lookup_ipv6,
4097 CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
4098 sysctl_nd6_lookup_ipv6, "S", "");
4099
4100int
4101sysctl_nd6_lookup_ipv6 SYSCTL_HANDLER_ARGS
4102{
4103#pragma unused(oidp, arg1, arg2)
4104 int error = 0;
4105 struct nd6_lookup_ipv6_args nd6_lookup_ipv6_args;
4106 ifnet_t ifp = NULL;
4107
4108 /*
4109 * Only root can look up MAC addresses
4110 */
4111 error = proc_suser(current_proc());
4112 if (error != 0) {
4113 printf("%s: proc_suser() error %d\n",
4114 __func__, error);
4115 goto done;
4116 }
4117 if (req->oldptr == USER_ADDR_NULL) {
4118 req->oldidx = sizeof(struct nd6_lookup_ipv6_args);
4119 }
4120 if (req->newptr == USER_ADDR_NULL) {
4121 goto done;
4122 }
4123 if (req->oldlen != sizeof(struct nd6_lookup_ipv6_args) ||
4124 req->newlen != sizeof(struct nd6_lookup_ipv6_args)) {
4125 error = EINVAL;
4126 printf("%s: bad req, error %d\n",
4127 __func__, error);
4128 goto done;
4129 }
4130 error = SYSCTL_IN(req, &nd6_lookup_ipv6_args,
4131 sizeof(struct nd6_lookup_ipv6_args));
4132 if (error != 0) {
4133 printf("%s: SYSCTL_IN() error %d\n",
4134 __func__, error);
4135 goto done;
4136 }
4137 /* Make sure to terminate the string */
4138 nd6_lookup_ipv6_args.ifname[IFNAMSIZ - 1] = 0;
4139
4140 error = ifnet_find_by_name(nd6_lookup_ipv6_args.ifname, &ifp);
4141 if (error != 0) {
4142 printf("%s: ifnet_find_by_name() error %d\n",
4143 __func__, error);
4144 goto done;
4145 }
4146
4147 error = nd6_lookup_ipv6(ifp, &nd6_lookup_ipv6_args.ip6_dest,
4148 &nd6_lookup_ipv6_args.ll_dest_._sdl,
4149 nd6_lookup_ipv6_args.ll_dest_len, NULL, NULL);
4150 if (error != 0) {
4151 printf("%s: nd6_lookup_ipv6() error %d\n",
4152 __func__, error);
4153 goto done;
4154 }
4155
4156 error = SYSCTL_OUT(req, &nd6_lookup_ipv6_args,
4157 sizeof(struct nd6_lookup_ipv6_args));
4158 if (error != 0) {
4159 printf("%s: SYSCTL_OUT() error %d\n",
4160 __func__, error);
4161 goto done;
4162 }
4163done:
4164 return (error);
4165}
4166
4167#endif /* (DEVELOPMENT || DEBUG) */
4168
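/*
 * Illustrative sketch (not part of nd6.c): how a privileged user-space
 * tool could exercise the DEVELOPMENT/DEBUG sysctl handler above.  The
 * handler requires both the old and new buffers to be exactly
 * sizeof (struct nd6_lookup_ipv6_args); the MIB name is inferred from the
 * SYSCTL_PROC() registration above, and the args structure is assumed to
 * be available from the private <netinet6/nd6.h> header.  Error handling
 * is minimal.
 */
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <netinet6/nd6.h>	/* assumed: private header with the args struct */
#include <string.h>
#include <stdio.h>

static int
lookup_nd6_lladdr(const char *ifname, const struct in6_addr *dst)
{
	struct nd6_lookup_ipv6_args args;
	size_t len = sizeof (args);

	memset(&args, 0, sizeof (args));
	strlcpy(args.ifname, ifname, sizeof (args.ifname));
	args.ip6_dest.sin6_family = AF_INET6;
	args.ip6_dest.sin6_len = sizeof (args.ip6_dest);
	args.ip6_dest.sin6_addr = *dst;
	args.ll_dest_len = sizeof (args.ll_dest_);	/* space we provide */

	if (sysctlbyname("net.inet6.icmp6.nd6_lookup_ipv6",
	    &args, &len, &args, sizeof (args)) == -1) {
		perror("sysctlbyname");
		return (-1);
	}
	/* On success, args.ll_dest_._sdl holds the neighbor's sockaddr_dl. */
	return (0);
}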
316670eb
A
4169int
4170nd6_setifinfo(struct ifnet *ifp, u_int32_t before, u_int32_t after)
4171{
39236c6e
A
4172 uint32_t b, a;
4173 int err = 0;
4174
316670eb 4175 /*
39236c6e 4176 * Handle ND6_IFF_IFDISABLED
316670eb 4177 */
39236c6e
A
4178 if ((before & ND6_IFF_IFDISABLED) ||
4179 (after & ND6_IFF_IFDISABLED)) {
4180 b = (before & ND6_IFF_IFDISABLED);
4181 a = (after & ND6_IFF_IFDISABLED);
316670eb 4182
39236c6e
A
4183 if (b != a && (err = nd6_if_disable(ifp,
4184 ((int32_t)(a - b) > 0))) != 0)
4185 goto done;
4186 }
4187
4188 /*
4189 * Handle ND6_IFF_PROXY_PREFIXES
4190 */
4191 if ((before & ND6_IFF_PROXY_PREFIXES) ||
4192 (after & ND6_IFF_PROXY_PREFIXES)) {
4193 b = (before & ND6_IFF_PROXY_PREFIXES);
4194 a = (after & ND6_IFF_PROXY_PREFIXES);
316670eb 4195
39236c6e
A
4196 if (b != a && (err = nd6_if_prproxy(ifp,
4197 ((int32_t)(a - b) > 0))) != 0)
4198 goto done;
4199 }
4200done:
4201 return (err);
316670eb
A
4202}
4203
39236c6e
A
4204/*
4205 * Enable/disable IPv6 on an interface, called as part of
4206 * setting/clearing ND6_IFF_IFDISABLED, or during DAD failure.
4207 */
4208int
4209nd6_if_disable(struct ifnet *ifp, boolean_t enable)
4210{
4211 ifnet_lock_shared(ifp);
4212 if (enable)
4213 ifp->if_eflags |= IFEF_IPV6_DISABLED;
4214 else
4215 ifp->if_eflags &= ~IFEF_IPV6_DISABLED;
4216 ifnet_lock_done(ifp);
4217
4218 return (0);
4219}
9bccf70c
A
4220
4221static int
b0d623f7 4222nd6_sysctl_drlist SYSCTL_HANDLER_ARGS
9bccf70c 4223{
2d21ac55 4224#pragma unused(oidp, arg1, arg2)
39236c6e 4225 char pbuf[MAX_IPv6_STR_LEN];
9bccf70c 4226 struct nd_defrouter *dr;
39236c6e 4227 int error = 0;
9bccf70c 4228
39236c6e 4229 if (req->newptr != USER_ADDR_NULL)
b0d623f7 4230 return (EPERM);
9bccf70c 4231
39037602 4232 /* XXX Handle mapped defrouter entries */
91447636 4233 lck_mtx_lock(nd6_mutex);
39236c6e
A
4234 if (proc_is64bit(req->p)) {
4235 struct in6_defrouter_64 d;
4236
4237 bzero(&d, sizeof (d));
4238 d.rtaddr.sin6_family = AF_INET6;
4239 d.rtaddr.sin6_len = sizeof (d.rtaddr);
4240
4241 TAILQ_FOREACH(dr, &nd_defrouter, dr_entry) {
4242 d.rtaddr.sin6_addr = dr->rtaddr;
4243 if (in6_recoverscope(&d.rtaddr,
4244 &dr->rtaddr, dr->ifp) != 0)
4245 log(LOG_ERR, "scope error in default router "
4246 "list (%s)\n", inet_ntop(AF_INET6,
4247 &dr->rtaddr, pbuf, sizeof (pbuf)));
4248 d.flags = dr->flags;
4249 d.stateflags = dr->stateflags;
39236c6e
A
4250 d.rtlifetime = dr->rtlifetime;
4251 d.expire = nddr_getexpire(dr);
4252 d.if_index = dr->ifp->if_index;
4253 error = SYSCTL_OUT(req, &d, sizeof (d));
4254 if (error != 0)
b0d623f7
A
4255 break;
4256 }
4257 } else {
39236c6e
A
4258 struct in6_defrouter_32 d;
4259
4260 bzero(&d, sizeof (d));
4261 d.rtaddr.sin6_family = AF_INET6;
4262 d.rtaddr.sin6_len = sizeof (d.rtaddr);
4263
4264 TAILQ_FOREACH(dr, &nd_defrouter, dr_entry) {
4265 d.rtaddr.sin6_addr = dr->rtaddr;
4266 if (in6_recoverscope(&d.rtaddr,
4267 &dr->rtaddr, dr->ifp) != 0)
4268 log(LOG_ERR, "scope error in default router "
4269 "list (%s)\n", inet_ntop(AF_INET6,
4270 &dr->rtaddr, pbuf, sizeof (pbuf)));
4271 d.flags = dr->flags;
4272 d.stateflags = dr->stateflags;
39236c6e
A
4273 d.rtlifetime = dr->rtlifetime;
4274 d.expire = nddr_getexpire(dr);
4275 d.if_index = dr->ifp->if_index;
4276 error = SYSCTL_OUT(req, &d, sizeof (d));
4277 if (error != 0)
b0d623f7
A
4278 break;
4279 }
9bccf70c 4280 }
91447636 4281 lck_mtx_unlock(nd6_mutex);
b0d623f7 4282 return (error);
9bccf70c
A
4283}
4284
4285static int
b0d623f7 4286nd6_sysctl_prlist SYSCTL_HANDLER_ARGS
9bccf70c 4287{
2d21ac55 4288#pragma unused(oidp, arg1, arg2)
39236c6e
A
4289 char pbuf[MAX_IPv6_STR_LEN];
4290 struct nd_pfxrouter *pfr;
4291 struct sockaddr_in6 s6;
9bccf70c 4292 struct nd_prefix *pr;
39236c6e 4293 int error = 0;
9bccf70c 4294
39236c6e 4295 if (req->newptr != USER_ADDR_NULL)
b0d623f7 4296 return (EPERM);
9bccf70c 4297
39236c6e
A
4298 bzero(&s6, sizeof (s6));
4299 s6.sin6_family = AF_INET6;
4300 s6.sin6_len = sizeof (s6);
9bccf70c 4301
39037602 4302 /* XXX Handle mapped defrouter entries */
39236c6e
A
4303 lck_mtx_lock(nd6_mutex);
4304 if (proc_is64bit(req->p)) {
4305 struct in6_prefix_64 p;
9bccf70c 4306
39236c6e
A
4307 bzero(&p, sizeof (p));
4308 p.origin = PR_ORIG_RA;
9bccf70c 4309
39236c6e
A
4310 LIST_FOREACH(pr, &nd_prefix, ndpr_entry) {
4311 NDPR_LOCK(pr);
4312 p.prefix = pr->ndpr_prefix;
4313 if (in6_recoverscope(&p.prefix,
4314 &pr->ndpr_prefix.sin6_addr, pr->ndpr_ifp) != 0)
4315 log(LOG_ERR, "scope error in "
4316 "prefix list (%s)\n", inet_ntop(AF_INET6,
4317 &p.prefix.sin6_addr, pbuf, sizeof (pbuf)));
4318 p.raflags = pr->ndpr_raf;
4319 p.prefixlen = pr->ndpr_plen;
4320 p.vltime = pr->ndpr_vltime;
4321 p.pltime = pr->ndpr_pltime;
4322 p.if_index = pr->ndpr_ifp->if_index;
4323 p.expire = ndpr_getexpire(pr);
4324 p.refcnt = pr->ndpr_addrcnt;
4325 p.flags = pr->ndpr_stateflags;
4326 p.advrtrs = 0;
4327 LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry)
4328 p.advrtrs++;
4329 error = SYSCTL_OUT(req, &p, sizeof (p));
4330 if (error != 0) {
4331 NDPR_UNLOCK(pr);
4332 break;
4333 }
4334 LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry) {
4335 s6.sin6_addr = pfr->router->rtaddr;
4336 if (in6_recoverscope(&s6, &pfr->router->rtaddr,
4337 pfr->router->ifp) != 0)
b0d623f7
A
4338 log(LOG_ERR,
4339 "scope error in prefix list (%s)\n",
39236c6e
A
4340 inet_ntop(AF_INET6, &s6.sin6_addr,
4341 pbuf, sizeof (pbuf)));
4342 error = SYSCTL_OUT(req, &s6, sizeof (s6));
4343 if (error != 0)
4344 break;
9bccf70c 4345 }
39236c6e
A
4346 NDPR_UNLOCK(pr);
4347 if (error != 0)
b0d623f7
A
4348 break;
4349 }
4350 } else {
39236c6e 4351 struct in6_prefix_32 p;
9bccf70c 4352
39236c6e
A
4353 bzero(&p, sizeof (p));
4354 p.origin = PR_ORIG_RA;
b0d623f7 4355
39236c6e
A
4356 LIST_FOREACH(pr, &nd_prefix, ndpr_entry) {
4357 NDPR_LOCK(pr);
4358 p.prefix = pr->ndpr_prefix;
4359 if (in6_recoverscope(&p.prefix,
4360 &pr->ndpr_prefix.sin6_addr, pr->ndpr_ifp) != 0)
4361 log(LOG_ERR,
4362 "scope error in prefix list (%s)\n",
4363 inet_ntop(AF_INET6, &p.prefix.sin6_addr,
4364 pbuf, sizeof (pbuf)));
4365 p.raflags = pr->ndpr_raf;
4366 p.prefixlen = pr->ndpr_plen;
4367 p.vltime = pr->ndpr_vltime;
4368 p.pltime = pr->ndpr_pltime;
4369 p.if_index = pr->ndpr_ifp->if_index;
4370 p.expire = ndpr_getexpire(pr);
4371 p.refcnt = pr->ndpr_addrcnt;
4372 p.flags = pr->ndpr_stateflags;
4373 p.advrtrs = 0;
4374 LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry)
4375 p.advrtrs++;
4376 error = SYSCTL_OUT(req, &p, sizeof (p));
4377 if (error != 0) {
6d2010ae 4378 NDPR_UNLOCK(pr);
39236c6e
A
4379 break;
4380 }
4381 LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry) {
4382 s6.sin6_addr = pfr->router->rtaddr;
4383 if (in6_recoverscope(&s6, &pfr->router->rtaddr,
4384 pfr->router->ifp) != 0)
4385 log(LOG_ERR,
4386 "scope error in prefix list (%s)\n",
4387 inet_ntop(AF_INET6, &s6.sin6_addr,
4388 pbuf, sizeof (pbuf)));
4389 error = SYSCTL_OUT(req, &s6, sizeof (s6));
4390 if (error != 0)
4391 break;
b0d623f7 4392 }
39236c6e
A
4393 NDPR_UNLOCK(pr);
4394 if (error != 0)
b0d623f7
A
4395 break;
4396 }
9bccf70c 4397 }
91447636 4398 lck_mtx_unlock(nd6_mutex);
39236c6e 4399
b0d623f7 4400 return (error);
9bccf70c 4401}
39037602
A
4402
4403void
4404in6_ifaddr_set_dadprogress(struct in6_ifaddr *ia)
4405{
4406 struct ifnet* ifp = ia->ia_ifp;
4407 uint32_t flags = IN6_IFF_TENTATIVE;
4408 uint32_t optdad = nd6_optimistic_dad;
4409 struct nd_ifinfo *ndi = NULL;
4410
4411 ndi = ND_IFINFO(ifp);
4412 VERIFY((NULL != ndi) && (TRUE == ndi->initialized));
4413 if (!(ndi->flags & ND6_IFF_DAD))
4414 return;
4415
4416 if (optdad) {
4417 if ((ifp->if_eflags & IFEF_IPV6_ROUTER) != 0) {
4418 optdad = 0;
4419 } else {
4420 lck_mtx_lock(&ndi->lock);
4421 if ((ndi->flags & ND6_IFF_REPLICATED) != 0) {
4422 optdad = 0;
4423 }
4424 lck_mtx_unlock(&ndi->lock);
4425 }
4426 }
4427
4428 if (optdad) {
4429 if ((optdad & ND6_OPTIMISTIC_DAD_LINKLOCAL) &&
4430 IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr))
4431 flags = IN6_IFF_OPTIMISTIC;
4432 else if ((optdad & ND6_OPTIMISTIC_DAD_AUTOCONF) &&
4433 (ia->ia6_flags & IN6_IFF_AUTOCONF)) {
4434 if (ia->ia6_flags & IN6_IFF_TEMPORARY) {
4435 if (optdad & ND6_OPTIMISTIC_DAD_TEMPORARY)
4436 flags = IN6_IFF_OPTIMISTIC;
4437 } else if (ia->ia6_flags & IN6_IFF_SECURED) {
4438 if (optdad & ND6_OPTIMISTIC_DAD_SECURED)
4439 flags = IN6_IFF_OPTIMISTIC;
4440 } else {
4441 /*
4442 * Keep the existing behavior for temporary and CGA
4443 * SLAAC addresses, which have their own knobs for
4444 * optimistic DAD.
4445 * Other than that, if ND6_OPTIMISTIC_DAD_AUTOCONF
4446 * is set, we default to optimistic
4447 * DAD.
4448 * For now this means SLAAC addresses whose interface
4449 * identifier is derived from a modified EUI-64
4450 * identifier.
4451 */
4452 flags = IN6_IFF_OPTIMISTIC;
4453 }
4454 } else if ((optdad & ND6_OPTIMISTIC_DAD_DYNAMIC) &&
4455 (ia->ia6_flags & IN6_IFF_DYNAMIC)) {
4456 if (ia->ia6_flags & IN6_IFF_TEMPORARY) {
4457 if (optdad & ND6_OPTIMISTIC_DAD_TEMPORARY)
4458 flags = IN6_IFF_OPTIMISTIC;
4459 } else {
4460 flags = IN6_IFF_OPTIMISTIC;
4461 }
4462 } else if ((optdad & ND6_OPTIMISTIC_DAD_MANUAL) &&
4463 (ia->ia6_flags & IN6_IFF_OPTIMISTIC)) {
4464 /*
4465 * rdar://17483438
4466 * Bypass tentative for address assignments
4467 * not covered above (e.g. manual) upon request
4468 */
4469 if (!IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr) &&
4470 !(ia->ia6_flags & IN6_IFF_AUTOCONF) &&
4471 !(ia->ia6_flags & IN6_IFF_DYNAMIC))
4472 flags = IN6_IFF_OPTIMISTIC;
4473 }
4474 }
4475
4476 ia->ia6_flags &= ~(IN6_IFF_DUPLICATED | IN6_IFF_DADPROGRESS);
4477 ia->ia6_flags |= flags;
4478
4479 nd6log2((LOG_DEBUG, "%s - %s ifp %s ia6_flags 0x%x\n",
4480 __func__,
4481 ip6_sprintf(&ia->ia_addr.sin6_addr),
4482 if_name(ia->ia_ifp),
4483 ia->ia6_flags));
4484}
4485