1 | /* | |
2 | * Copyright (c) 2000-2012 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | ||
29 | /* $FreeBSD: src/sys/netinet6/nd6.c,v 1.20 2002/08/02 20:49:14 rwatson Exp $ */ | |
30 | /* $KAME: nd6.c,v 1.144 2001/05/24 07:44:00 itojun Exp $ */ | |
31 | ||
32 | /* | |
33 | * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. | |
34 | * All rights reserved. | |
35 | * | |
36 | * Redistribution and use in source and binary forms, with or without | |
37 | * modification, are permitted provided that the following conditions | |
38 | * are met: | |
39 | * 1. Redistributions of source code must retain the above copyright | |
40 | * notice, this list of conditions and the following disclaimer. | |
41 | * 2. Redistributions in binary form must reproduce the above copyright | |
42 | * notice, this list of conditions and the following disclaimer in the | |
43 | * documentation and/or other materials provided with the distribution. | |
44 | * 3. Neither the name of the project nor the names of its contributors | |
45 | * may be used to endorse or promote products derived from this software | |
46 | * without specific prior written permission. | |
47 | * | |
48 | * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND | |
49 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
50 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
51 | * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE | |
52 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
53 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
54 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
55 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
56 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
57 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
58 | * SUCH DAMAGE. | |
59 | */ | |
60 | ||
61 | /* | |
62 | * XXX | |
63 | * KAME 970409 note: | |
64 | * BSD/OS version heavily modifies this code, related to llinfo. | |
65 | * Since we don't have BSD/OS version of net/route.c in our hand, | |
66 | * I left the code mostly as it was in 970310. -- itojun | |
67 | */ | |
68 | ||
69 | #include <sys/param.h> | |
70 | #include <sys/systm.h> | |
71 | #include <sys/malloc.h> | |
72 | #include <sys/mbuf.h> | |
73 | #include <sys/socket.h> | |
74 | #include <sys/sockio.h> | |
75 | #include <sys/time.h> | |
76 | #include <sys/kernel.h> | |
77 | #include <sys/sysctl.h> | |
78 | #include <sys/errno.h> | |
79 | #include <sys/syslog.h> | |
80 | #include <sys/protosw.h> | |
81 | #include <sys/proc.h> | |
82 | #include <sys/mcache.h> | |
83 | ||
84 | #include <kern/queue.h> | |
85 | #include <kern/zalloc.h> | |
86 | ||
87 | #define DONT_WARN_OBSOLETE | |
88 | #include <net/if.h> | |
89 | #include <net/if_dl.h> | |
90 | #include <net/if_types.h> | |
91 | #include <net/if_llreach.h> | |
92 | #include <net/route.h> | |
93 | #include <net/dlil.h> | |
94 | #include <net/ntstat.h> | |
95 | ||
96 | #include <netinet/in.h> | |
97 | #include <netinet/in_arp.h> | |
98 | #include <netinet/if_ether.h> | |
99 | #include <netinet6/in6_var.h> | |
100 | #include <netinet/ip6.h> | |
101 | #include <netinet6/ip6_var.h> | |
102 | #include <netinet6/nd6.h> | |
103 | #include <netinet6/scope6_var.h> | |
104 | #include <netinet/icmp6.h> | |
105 | ||
106 | #include "loop.h" | |
107 | ||
108 | #include <net/net_osdep.h> | |
109 | ||
110 | #define ND6_SLOWTIMER_INTERVAL (60 * 60) /* 1 hour */ | |
111 | #define ND6_RECALC_REACHTM_INTERVAL (60 * 120) /* 2 hours */ | |
112 | ||
113 | #define equal(a1, a2) (bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0) | |
114 | ||
115 | /* timer values */ | |
116 | int nd6_prune = 1; /* walk list every 1 seconds */ | |
117 | int nd6_delay = 5; /* delay first probe time 5 second */ | |
118 | int nd6_umaxtries = 3; /* maximum unicast query */ | |
119 | int nd6_mmaxtries = 3; /* maximum multicast query */ | |
120 | int nd6_useloopback = 1; /* use loopback interface for local traffic */ | |
121 | int nd6_gctimer = (60 * 60 * 24); /* 1 day: garbage collection timer */ | |
122 | ||
123 | /* preventing too many loops in ND option parsing */ | |
124 | int nd6_maxndopt = 10; /* max # of ND options allowed */ | |
125 | ||
126 | int nd6_maxnudhint = 0; /* max # of subsequent upper layer hints */ | |
127 | int nd6_maxqueuelen = 1; /* max # of packets cached in unresolved ND entries */ | |
128 | ||
129 | #if ND6_DEBUG | |
130 | int nd6_debug = 1; | |
131 | #else | |
132 | int nd6_debug = 0; | |
133 | #endif | |
134 | ||
135 | int nd6_optimistic_dad = | |
136 | (ND6_OPTIMISTIC_DAD_LINKLOCAL|ND6_OPTIMISTIC_DAD_AUTOCONF| | |
137 | ND6_OPTIMISTIC_DAD_TEMPORARY|ND6_OPTIMISTIC_DAD_DYNAMIC); | |
138 | ||
139 | static int nd6_is_new_addr_neighbor (struct sockaddr_in6 *, struct ifnet *); | |
140 | ||
141 | /* for debugging? */ | |
142 | static int nd6_inuse, nd6_allocated; | |
143 | ||
144 | /* | |
145 | * Synchronization notes: | |
146 | * | |
147 | * The global list of ND entries are stored in llinfo_nd6; an entry | |
148 | * gets inserted into the list when the route is created and gets | |
149 | * removed from the list when it is deleted; this is done as part | |
150 | * of RTM_ADD/RTM_RESOLVE/RTM_DELETE in nd6_rtrequest(). | |
151 | * | |
152 | * Because rnh_lock and rt_lock for the entry are held during those | |
153 | * operations, the same locks (and thus lock ordering) must be used | |
154 | * elsewhere to access the relevant data structure fields: | |
155 | * | |
156 | * ln_next, ln_prev, ln_rt | |
157 | * | |
158 | * - Routing lock (rnh_lock) | |
159 | * | |
160 | * ln_hold, ln_asked, ln_expire, ln_state, ln_router, ln_byhint, ln_flags, | |
161 | * ln_llreach, ln_lastused | |
162 | * | |
163 | * - Routing entry lock (rt_lock) | |
164 | * | |
165 | * Due to the dependency on rt_lock, llinfo_nd6 has the same lifetime | |
166 | * as the route entry itself. When a route is deleted (RTM_DELETE), | |
167 | * it is simply removed from the global list but the memory is not | |
168 | * freed until the route itself is freed. | |
169 | */ | |
170 | struct llinfo_nd6 llinfo_nd6 = { | |
171 | &llinfo_nd6, &llinfo_nd6, NULL, NULL, 0, 0, 0, 0, 0, 0, NULL, 0 | |
172 | }; | |
173 | ||
174 | /* Protected by nd_if_rwlock */ | |
175 | size_t nd_ifinfo_indexlim = 32; /* increased for 5589193 */ | |
176 | struct nd_ifinfo *nd_ifinfo = NULL; | |
177 | ||
178 | static lck_grp_attr_t *nd_if_lock_grp_attr; | |
179 | static lck_grp_t *nd_if_lock_grp; | |
180 | static lck_attr_t *nd_if_lock_attr; | |
181 | decl_lck_rw_data(, nd_if_rwlock_data); | |
182 | lck_rw_t *nd_if_rwlock = &nd_if_rwlock_data; | |
183 | ||
184 | /* Protected by nd6_mutex */ | |
185 | struct nd_drhead nd_defrouter; | |
186 | struct nd_prhead nd_prefix = { 0 }; | |
187 | ||
188 | /* Serialization variables for nd6_drain() */ | |
189 | static boolean_t nd6_drain_busy; | |
190 | static void *nd6_drain_waitchan = &nd6_drain_busy; | |
191 | static int nd6_drain_waiters = 0; | |
192 | ||
193 | int nd6_recalc_reachtm_interval = ND6_RECALC_REACHTM_INTERVAL; | |
194 | static struct sockaddr_in6 all1_sa; | |
195 | ||
196 | static int regen_tmpaddr(struct in6_ifaddr *); | |
197 | extern lck_mtx_t *nd6_mutex; | |
198 | ||
199 | static void nd6_slowtimo(void *ignored_arg); | |
200 | static struct llinfo_nd6 *nd6_llinfo_alloc(void); | |
201 | static void nd6_llinfo_free(void *); | |
202 | static void nd6_llinfo_purge(struct rtentry *); | |
203 | static void nd6_llinfo_get_ri(struct rtentry *, struct rt_reach_info *); | |
204 | static void nd6_llinfo_get_iflri(struct rtentry *, struct ifnet_llreach_info *); | |
205 | ||
206 | static int nd6_siocgdrlst(void *, int); | |
207 | static int nd6_siocgprlst(void *, int); | |
208 | ||
209 | /* | |
210 | * Insertion and removal from llinfo_nd6 must be done with rnh_lock held. | |
211 | */ | |
212 | #define LN_DEQUEUE(_ln) do { \ | |
213 | lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED); \ | |
214 | RT_LOCK_ASSERT_HELD((_ln)->ln_rt); \ | |
215 | (_ln)->ln_next->ln_prev = (_ln)->ln_prev; \ | |
216 | (_ln)->ln_prev->ln_next = (_ln)->ln_next; \ | |
217 | (_ln)->ln_prev = (_ln)->ln_next = NULL; \ | |
218 | (_ln)->ln_flags &= ~ND6_LNF_IN_USE; \ | |
219 | } while (0) | |
220 | ||
221 | #define LN_INSERTHEAD(_ln) do { \ | |
222 | lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED); \ | |
223 | RT_LOCK_ASSERT_HELD((_ln)->ln_rt); \ | |
224 | (_ln)->ln_next = llinfo_nd6.ln_next; \ | |
225 | llinfo_nd6.ln_next = (_ln); \ | |
226 | (_ln)->ln_prev = &llinfo_nd6; \ | |
227 | (_ln)->ln_next->ln_prev = (_ln); \ | |
228 | (_ln)->ln_flags |= ND6_LNF_IN_USE; \ | |
229 | } while (0) | |
230 | ||
231 | static struct zone *llinfo_nd6_zone; | |
232 | #define LLINFO_ND6_ZONE_MAX 256 /* maximum elements in zone */ | |
233 | #define LLINFO_ND6_ZONE_NAME "llinfo_nd6" /* name for zone */ | |
234 | ||
235 | void | |
236 | nd6_init() | |
237 | { | |
238 | static int nd6_init_done = 0; | |
239 | int i; | |
240 | ||
241 | if (nd6_init_done) { | |
242 | log(LOG_NOTICE, "nd6_init called more than once (ignored)\n"); | |
243 | return; | |
244 | } | |
245 | ||
246 | all1_sa.sin6_family = AF_INET6; | |
247 | all1_sa.sin6_len = sizeof(struct sockaddr_in6); | |
248 | for (i = 0; i < sizeof(all1_sa.sin6_addr); i++) | |
249 | all1_sa.sin6_addr.s6_addr[i] = 0xff; | |
250 | ||
251 | /* initialization of the default router list */ | |
252 | TAILQ_INIT(&nd_defrouter); | |
253 | ||
254 | nd_if_lock_grp_attr = lck_grp_attr_alloc_init(); | |
255 | nd_if_lock_grp = lck_grp_alloc_init("nd_if_lock", nd_if_lock_grp_attr); | |
256 | nd_if_lock_attr = lck_attr_alloc_init(); | |
257 | lck_rw_init(nd_if_rwlock, nd_if_lock_grp, nd_if_lock_attr); | |
258 | ||
259 | llinfo_nd6_zone = zinit(sizeof (struct llinfo_nd6), | |
260 | LLINFO_ND6_ZONE_MAX * sizeof (struct llinfo_nd6), 0, | |
261 | LLINFO_ND6_ZONE_NAME); | |
262 | if (llinfo_nd6_zone == NULL) | |
263 | panic("%s: failed allocating llinfo_nd6_zone", __func__); | |
264 | ||
265 | zone_change(llinfo_nd6_zone, Z_EXPAND, TRUE); | |
266 | zone_change(llinfo_nd6_zone, Z_CALLERACCT, FALSE); | |
267 | ||
268 | nd6_nbr_init(); | |
269 | nd6_rtr_init(); | |
270 | nd6_prproxy_init(); | |
271 | ||
272 | nd6_init_done = 1; | |
273 | ||
274 | /* start timer */ | |
275 | timeout(nd6_slowtimo, (caddr_t)0, ND6_SLOWTIMER_INTERVAL * hz); | |
276 | } | |
277 | ||
278 | static struct llinfo_nd6 * | |
279 | nd6_llinfo_alloc(void) | |
280 | { | |
281 | return (zalloc(llinfo_nd6_zone)); | |
282 | } | |
283 | ||
284 | static void | |
285 | nd6_llinfo_free(void *arg) | |
286 | { | |
287 | struct llinfo_nd6 *ln = arg; | |
288 | ||
289 | if (ln->ln_next != NULL || ln->ln_prev != NULL) { | |
290 | panic("%s: trying to free %p when it is in use", __func__, ln); | |
291 | /* NOTREACHED */ | |
292 | } | |
293 | ||
294 | /* Just in case there's anything there, free it */ | |
295 | if (ln->ln_hold != NULL) { | |
296 | m_freem(ln->ln_hold); | |
297 | ln->ln_hold = NULL; | |
298 | } | |
299 | ||
300 | /* Purge any link-layer info caching */ | |
301 | VERIFY(ln->ln_rt->rt_llinfo == ln); | |
302 | if (ln->ln_rt->rt_llinfo_purge != NULL) | |
303 | ln->ln_rt->rt_llinfo_purge(ln->ln_rt); | |
304 | ||
305 | zfree(llinfo_nd6_zone, ln); | |
306 | } | |
307 | ||
308 | static void | |
309 | nd6_llinfo_purge(struct rtentry *rt) | |
310 | { | |
311 | struct llinfo_nd6 *ln = rt->rt_llinfo; | |
312 | ||
313 | RT_LOCK_ASSERT_HELD(rt); | |
314 | VERIFY(rt->rt_llinfo_purge == nd6_llinfo_purge && ln != NULL); | |
315 | ||
316 | if (ln->ln_llreach != NULL) { | |
317 | RT_CONVERT_LOCK(rt); | |
318 | ifnet_llreach_free(ln->ln_llreach); | |
319 | ln->ln_llreach = NULL; | |
320 | } | |
321 | ln->ln_lastused = 0; | |
322 | } | |
323 | ||
324 | static void | |
325 | nd6_llinfo_get_ri(struct rtentry *rt, struct rt_reach_info *ri) | |
326 | { | |
327 | struct llinfo_nd6 *ln = rt->rt_llinfo; | |
328 | struct if_llreach *lr = ln->ln_llreach; | |
329 | ||
330 | if (lr == NULL) { | |
331 | bzero(ri, sizeof (*ri)); | |
332 | ri->ri_rssi = IFNET_RSSI_UNKNOWN; | |
333 | ri->ri_lqm = IFNET_LQM_THRESH_OFF; | |
334 | ri->ri_npm = IFNET_NPM_THRESH_UNKNOWN; | |
335 | } else { | |
336 | IFLR_LOCK(lr); | |
337 | /* Export to rt_reach_info structure */ | |
338 | ifnet_lr2ri(lr, ri); | |
339 | /* Export ND6 send expiration (calendar) time */ | |
340 | ri->ri_snd_expire = | |
341 | ifnet_llreach_up2calexp(lr, ln->ln_lastused); | |
342 | IFLR_UNLOCK(lr); | |
343 | } | |
344 | } | |
345 | ||
346 | static void | |
347 | nd6_llinfo_get_iflri(struct rtentry *rt, struct ifnet_llreach_info *iflri) | |
348 | { | |
349 | struct llinfo_nd6 *ln = rt->rt_llinfo; | |
350 | struct if_llreach *lr = ln->ln_llreach; | |
351 | ||
352 | if (lr == NULL) { | |
353 | bzero(iflri, sizeof (*iflri)); | |
354 | iflri->iflri_rssi = IFNET_RSSI_UNKNOWN; | |
355 | iflri->iflri_lqm = IFNET_LQM_THRESH_OFF; | |
356 | iflri->iflri_npm = IFNET_NPM_THRESH_UNKNOWN; | |
357 | } else { | |
358 | IFLR_LOCK(lr); | |
359 | /* Export to ifnet_llreach_info structure */ | |
360 | ifnet_lr2iflri(lr, iflri); | |
361 | /* Export ND6 send expiration (uptime) time */ | |
362 | iflri->iflri_snd_expire = | |
363 | ifnet_llreach_up2upexp(lr, ln->ln_lastused); | |
364 | IFLR_UNLOCK(lr); | |
365 | } | |
366 | } | |
367 | ||
368 | int | |
369 | nd6_ifattach(struct ifnet *ifp) | |
370 | { | |
371 | /* | |
372 | * We have some arrays that should be indexed by if_index. | |
373 | * since if_index will grow dynamically, they should grow too. | |
374 | */ | |
375 | lck_rw_lock_exclusive(nd_if_rwlock); | |
376 | if (nd_ifinfo == NULL || if_index >= nd_ifinfo_indexlim) { | |
377 | size_t n; | |
378 | caddr_t q; | |
379 | size_t newlim = nd_ifinfo_indexlim; | |
380 | ||
381 | while (if_index >= newlim) | |
382 | newlim <<= 1; | |
383 | ||
384 | /* grow nd_ifinfo */ | |
385 | n = newlim * sizeof(struct nd_ifinfo); | |
386 | q = (caddr_t)_MALLOC(n, M_IP6NDP, M_WAITOK); | |
387 | if (q == NULL) { | |
388 | lck_rw_done(nd_if_rwlock); | |
389 | return (ENOBUFS); | |
390 | } | |
391 | bzero(q, n); | |
392 | nd_ifinfo_indexlim = newlim; | |
393 | if (nd_ifinfo) { | |
394 | bcopy((caddr_t)nd_ifinfo, q, n/2); | |
395 | /* | |
396 | * We might want to pattern fill the old | |
397 | * array to catch use-after-free cases. | |
398 | */ | |
399 | FREE((caddr_t)nd_ifinfo, M_IP6NDP); | |
400 | } | |
401 | nd_ifinfo = (struct nd_ifinfo *)(void *)q; | |
402 | } | |
403 | ||
404 | #define ND nd_ifinfo[ifp->if_index] | |
405 | /* | |
406 | * Don't initialize if called twice. | |
407 | */ | |
408 | if (ND.initialized) { | |
409 | lck_rw_done(nd_if_rwlock); | |
410 | return (0); | |
411 | } | |
412 | lck_mtx_init(&ND.lock, nd_if_lock_grp, nd_if_lock_attr); | |
413 | ND.initialized = TRUE; | |
414 | ND.linkmtu = ifp->if_mtu; | |
415 | ND.chlim = IPV6_DEFHLIM; | |
416 | ND.basereachable = REACHABLE_TIME; | |
417 | ND.reachable = ND_COMPUTE_RTIME(ND.basereachable); | |
418 | ND.retrans = RETRANS_TIMER; | |
419 | ND.flags = ND6_IFF_PERFORMNUD; | |
420 | lck_rw_done(nd_if_rwlock); | |
421 | #undef ND | |
422 | ||
423 | nd6_setmtu(ifp); | |
424 | ||
425 | return (0); | |
426 | } | |
427 | ||
/*
 * Reset ND level link MTU. This function is called when the physical MTU
 * changes, which means we might have to adjust the ND level MTU.
 */
void
nd6_setmtu(struct ifnet *ifp)
{
	struct nd_ifinfo *ndi;
	u_int32_t oldmaxmtu, maxmtu;

	/*
	 * Make sure IPv6 is enabled for the interface first,
	 * because this can be called directly from SIOCSIFMTU for IPv4
	 */
	lck_rw_lock_shared(nd_if_rwlock);
	if (ifp->if_index >= nd_ifinfo_indexlim ||
	    !nd_ifinfo[ifp->if_index].initialized) {
		lck_rw_done(nd_if_rwlock);
		return; /* nd_ifinfo out of bound, or not yet initialized */
	}

	/* Per-interface fields are protected by the entry's own mutex. */
	ndi = &nd_ifinfo[ifp->if_index];
	VERIFY(ndi->initialized);
	lck_mtx_lock(&ndi->lock);
	oldmaxmtu = ndi->maxmtu;

	/*
	 * The ND level maxmtu is somewhat redundant to the interface MTU
	 * and is an implementation artifact of KAME. Instead of hard-
	 * limiting the maxmtu based on the interface type here, we simply
	 * take the if_mtu value since SIOCSIFMTU would have taken care of
	 * the sanity checks related to the maximum MTU allowed for the
	 * interface (a value that is known only by the interface layer),
	 * by sending the request down via ifnet_ioctl(). The use of the
	 * ND level maxmtu and linkmtu are done via IN6_LINKMTU() which
	 * does further checking against if_mtu.
	 */
	maxmtu = ndi->maxmtu = ifp->if_mtu;

	/*
	 * Decreasing the interface MTU under IPV6 minimum MTU may cause
	 * undesirable situation. We thus notify the operator of the change
	 * explicitly. The check for oldmaxmtu is necessary to restrict the
	 * log to the case of changing the MTU, not initializing it.
	 */
	if (oldmaxmtu >= IPV6_MMTU && ndi->maxmtu < IPV6_MMTU) {
		log(LOG_NOTICE, "nd6_setmtu: "
		    "new link MTU on %s%d (%u) is too small for IPv6\n",
		    ifp->if_name, ifp->if_unit, (uint32_t)ndi->maxmtu);
	}
	ndi->linkmtu = ifp->if_mtu;
	lck_mtx_unlock(&ndi->lock);
	lck_rw_done(nd_if_rwlock);

	/* also adjust in6_maxmtu if necessary. */
	if (maxmtu > in6_maxmtu)
		in6_setmaxmtu();
}
486 | ||
487 | void | |
488 | nd6_option_init( | |
489 | void *opt, | |
490 | int icmp6len, | |
491 | union nd_opts *ndopts) | |
492 | { | |
493 | bzero(ndopts, sizeof(*ndopts)); | |
494 | ndopts->nd_opts_search = (struct nd_opt_hdr *)opt; | |
495 | ndopts->nd_opts_last | |
496 | = (struct nd_opt_hdr *)(((u_char *)opt) + icmp6len); | |
497 | ||
498 | if (icmp6len == 0) { | |
499 | ndopts->nd_opts_done = 1; | |
500 | ndopts->nd_opts_search = NULL; | |
501 | } | |
502 | } | |
503 | ||
504 | /* | |
505 | * Take one ND option. | |
506 | */ | |
507 | struct nd_opt_hdr * | |
508 | nd6_option( | |
509 | union nd_opts *ndopts) | |
510 | { | |
511 | struct nd_opt_hdr *nd_opt; | |
512 | int olen; | |
513 | ||
514 | if (!ndopts) | |
515 | panic("ndopts == NULL in nd6_option\n"); | |
516 | if (!ndopts->nd_opts_last) | |
517 | panic("uninitialized ndopts in nd6_option\n"); | |
518 | if (!ndopts->nd_opts_search) | |
519 | return NULL; | |
520 | if (ndopts->nd_opts_done) | |
521 | return NULL; | |
522 | ||
523 | nd_opt = ndopts->nd_opts_search; | |
524 | ||
525 | /* make sure nd_opt_len is inside the buffer */ | |
526 | if ((caddr_t)&nd_opt->nd_opt_len >= (caddr_t)ndopts->nd_opts_last) { | |
527 | bzero(ndopts, sizeof(*ndopts)); | |
528 | return NULL; | |
529 | } | |
530 | ||
531 | olen = nd_opt->nd_opt_len << 3; | |
532 | if (olen == 0) { | |
533 | /* | |
534 | * Message validation requires that all included | |
535 | * options have a length that is greater than zero. | |
536 | */ | |
537 | bzero(ndopts, sizeof(*ndopts)); | |
538 | return NULL; | |
539 | } | |
540 | ||
541 | ndopts->nd_opts_search = (struct nd_opt_hdr *)((caddr_t)nd_opt + olen); | |
542 | if (ndopts->nd_opts_search > ndopts->nd_opts_last) { | |
543 | /* option overruns the end of buffer, invalid */ | |
544 | bzero(ndopts, sizeof(*ndopts)); | |
545 | return NULL; | |
546 | } else if (ndopts->nd_opts_search == ndopts->nd_opts_last) { | |
547 | /* reached the end of options chain */ | |
548 | ndopts->nd_opts_done = 1; | |
549 | ndopts->nd_opts_search = NULL; | |
550 | } | |
551 | return nd_opt; | |
552 | } | |
553 | ||
/*
 * Parse multiple ND options.
 * This function is much easier to use, for ND routines that do not need
 * multiple options of the same type.
 *
 * Walks the chain set up by nd6_option_init(), recording the first
 * occurrence of each recognized option type in nd_opt_array[] (and the
 * last prefix-information option in nd_opts_pi_end).  Returns 0 on
 * success, -1 if the chain is malformed (ndopts is zeroed in that case).
 */
int
nd6_options(
	union nd_opts *ndopts)
{
	struct nd_opt_hdr *nd_opt;
	int i = 0;

	if (ndopts == NULL)
		panic("ndopts == NULL in nd6_options");
	if (ndopts->nd_opts_last == NULL)
		panic("uninitialized ndopts in nd6_options");
	if (ndopts->nd_opts_search == NULL)
		return 0;

	while (1) {
		nd_opt = nd6_option(ndopts);
		if (nd_opt == NULL && ndopts->nd_opts_last == NULL) {
			/*
			 * Message validation requires that all included
			 * options have a length that is greater than zero.
			 * (nd6_option() zeroes ndopts on malformed input,
			 * which is how this condition is detected.)
			 */
			icmp6stat.icp6s_nd_badopt++;
			bzero(ndopts, sizeof(*ndopts));
			return -1;
		}

		if (nd_opt == NULL)
			goto skip1;

		switch (nd_opt->nd_opt_type) {
		case ND_OPT_SOURCE_LINKADDR:
		case ND_OPT_TARGET_LINKADDR:
		case ND_OPT_MTU:
		case ND_OPT_REDIRECTED_HEADER:
			/* Only the first instance of these types is kept. */
			if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) {
				nd6log((LOG_INFO,
				    "duplicated ND6 option found (type=%d)\n",
				    nd_opt->nd_opt_type));
				/* XXX bark? */
			} else {
				ndopts->nd_opt_array[nd_opt->nd_opt_type]
					= nd_opt;
			}
			break;
		case ND_OPT_PREFIX_INFORMATION:
			/* Keep the first instance; also track the last. */
			if (ndopts->nd_opt_array[nd_opt->nd_opt_type] == 0) {
				ndopts->nd_opt_array[nd_opt->nd_opt_type]
					= nd_opt;
			}
			ndopts->nd_opts_pi_end =
				(struct nd_opt_prefix_info *)nd_opt;
			break;
		case ND_OPT_RDNSS:
			/* ignore */
			break;
		default:
			/*
			 * Unknown options must be silently ignored,
			 * to accommodate future extension to the protocol.
			 */
			nd6log((LOG_DEBUG,
			    "nd6_options: unsupported option %d - "
			    "option ignored\n", nd_opt->nd_opt_type));
		}

skip1:
		i++;
		/* Bound the walk so hostile packets cannot spin us. */
		if (i > nd6_maxndopt) {
			icmp6stat.icp6s_nd_toomanyopt++;
			nd6log((LOG_INFO, "too many loop in nd opt\n"));
			break;
		}

		if (ndopts->nd_opts_done)
			break;
	}

	return 0;
}
638 | ||
639 | void | |
640 | nd6_drain(__unused void *ignored_arg) | |
641 | { | |
642 | struct llinfo_nd6 *ln; | |
643 | struct nd_defrouter *dr; | |
644 | struct nd_prefix *pr; | |
645 | struct ifnet *ifp = NULL; | |
646 | struct in6_ifaddr *ia6, *nia6; | |
647 | struct in6_addrlifetime *lt6; | |
648 | struct timeval timenow; | |
649 | ||
650 | getmicrotime(&timenow); | |
651 | again: | |
652 | /* | |
653 | * The global list llinfo_nd6 is modified by nd6_request() and is | |
654 | * therefore protected by rnh_lock. For obvious reasons, we cannot | |
655 | * hold rnh_lock across calls that might lead to code paths which | |
656 | * attempt to acquire rnh_lock, else we deadlock. Hence for such | |
657 | * cases we drop rt_lock and rnh_lock, make the calls, and repeat the | |
658 | * loop. To ensure that we don't process the same entry more than | |
659 | * once in a single timeout, we mark the "already-seen" entries with | |
660 | * ND6_LNF_TIMER_SKIP flag. At the end of the loop, we do a second | |
661 | * pass thru the entries and clear the flag so they can be processed | |
662 | * during the next timeout. | |
663 | */ | |
664 | lck_mtx_lock(rnh_lock); | |
665 | ln = llinfo_nd6.ln_next; | |
666 | while (ln != NULL && ln != &llinfo_nd6) { | |
667 | struct rtentry *rt; | |
668 | struct sockaddr_in6 *dst; | |
669 | struct llinfo_nd6 *next; | |
670 | struct nd_ifinfo *ndi; | |
671 | u_int32_t retrans, flags; | |
672 | ||
673 | /* ln_next/prev/rt is protected by rnh_lock */ | |
674 | next = ln->ln_next; | |
675 | rt = ln->ln_rt; | |
676 | RT_LOCK(rt); | |
677 | ||
678 | /* We've seen this already; skip it */ | |
679 | if (ln->ln_flags & ND6_LNF_TIMER_SKIP) { | |
680 | RT_UNLOCK(rt); | |
681 | ln = next; | |
682 | continue; | |
683 | } | |
684 | ||
685 | /* rt->rt_ifp should never be NULL */ | |
686 | if ((ifp = rt->rt_ifp) == NULL) { | |
687 | panic("%s: ln(%p) rt(%p) rt_ifp == NULL", __func__, | |
688 | ln, rt); | |
689 | /* NOTREACHED */ | |
690 | } | |
691 | ||
692 | /* rt_llinfo must always be equal to ln */ | |
693 | if ((struct llinfo_nd6 *)rt->rt_llinfo != ln) { | |
694 | panic("%s: rt_llinfo(%p) is not equal to ln(%p)", | |
695 | __func__, rt->rt_llinfo, ln); | |
696 | /* NOTREACHED */ | |
697 | } | |
698 | ||
699 | /* rt_key should never be NULL */ | |
700 | dst = (struct sockaddr_in6 *)(void *)rt_key(rt); | |
701 | if (dst == NULL) { | |
702 | panic("%s: rt(%p) key is NULL ln(%p)", __func__, | |
703 | rt, ln); | |
704 | /* NOTREACHED */ | |
705 | } | |
706 | ||
707 | /* Set the flag in case we jump to "again" */ | |
708 | ln->ln_flags |= ND6_LNF_TIMER_SKIP; | |
709 | ||
710 | if (ln->ln_expire > timenow.tv_sec) { | |
711 | RT_UNLOCK(rt); | |
712 | ln = next; | |
713 | continue; | |
714 | } | |
715 | ||
716 | lck_rw_lock_shared(nd_if_rwlock); | |
717 | if (ifp->if_index >= nd_ifinfo_indexlim) { | |
718 | lck_rw_done(nd_if_rwlock); | |
719 | RT_UNLOCK(rt); | |
720 | ln = next; | |
721 | continue; | |
722 | } | |
723 | ndi = ND_IFINFO(ifp); | |
724 | VERIFY(ndi->initialized); | |
725 | lck_mtx_lock(&ndi->lock); | |
726 | retrans = ndi->retrans; | |
727 | flags = ndi->flags; | |
728 | lck_mtx_unlock(&ndi->lock); | |
729 | lck_rw_done(nd_if_rwlock); | |
730 | ||
731 | RT_LOCK_ASSERT_HELD(rt); | |
732 | ||
733 | switch (ln->ln_state) { | |
734 | case ND6_LLINFO_INCOMPLETE: | |
735 | if (ln->ln_asked < nd6_mmaxtries) { | |
736 | ln->ln_asked++; | |
737 | ln->ln_expire = timenow.tv_sec + retrans / 1000; | |
738 | RT_ADDREF_LOCKED(rt); | |
739 | RT_UNLOCK(rt); | |
740 | lck_mtx_unlock(rnh_lock); | |
741 | if (ip6_forwarding) { | |
742 | nd6_prproxy_ns_output(ifp, NULL, | |
743 | &dst->sin6_addr, ln); | |
744 | } else { | |
745 | nd6_ns_output(ifp, NULL, | |
746 | &dst->sin6_addr, ln, 0); | |
747 | } | |
748 | RT_REMREF(rt); | |
749 | } else { | |
750 | struct mbuf *m = ln->ln_hold; | |
751 | ln->ln_hold = NULL; | |
752 | if (m != NULL) { | |
753 | /* | |
754 | * Fake rcvif to make ICMP error | |
755 | * more helpful in diagnosing | |
756 | * for the receiver. | |
757 | * XXX: should we consider | |
758 | * older rcvif? | |
759 | */ | |
760 | m->m_pkthdr.rcvif = ifp; | |
761 | RT_UNLOCK(rt); | |
762 | lck_mtx_unlock(rnh_lock); | |
763 | icmp6_error(m, ICMP6_DST_UNREACH, | |
764 | ICMP6_DST_UNREACH_ADDR, 0); | |
765 | } else { | |
766 | RT_UNLOCK(rt); | |
767 | lck_mtx_unlock(rnh_lock); | |
768 | } | |
769 | nd6_free(rt); | |
770 | } | |
771 | lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED); | |
772 | goto again; | |
773 | ||
774 | case ND6_LLINFO_REACHABLE: | |
775 | if (ln->ln_expire) { | |
776 | ln->ln_state = ND6_LLINFO_STALE; | |
777 | ln->ln_expire = rt_expiry(rt, timenow.tv_sec, | |
778 | nd6_gctimer); | |
779 | } | |
780 | RT_UNLOCK(rt); | |
781 | break; | |
782 | ||
783 | case ND6_LLINFO_STALE: | |
784 | case ND6_LLINFO_PURGE: | |
785 | /* Garbage Collection(RFC 2461 5.3) */ | |
786 | if (ln->ln_expire) { | |
787 | RT_UNLOCK(rt); | |
788 | lck_mtx_unlock(rnh_lock); | |
789 | nd6_free(rt); | |
790 | lck_mtx_assert(rnh_lock, | |
791 | LCK_MTX_ASSERT_NOTOWNED); | |
792 | goto again; | |
793 | } else { | |
794 | RT_UNLOCK(rt); | |
795 | } | |
796 | break; | |
797 | ||
798 | case ND6_LLINFO_DELAY: | |
799 | if ((flags & ND6_IFF_PERFORMNUD) != 0) { | |
800 | /* We need NUD */ | |
801 | ln->ln_asked = 1; | |
802 | ln->ln_state = ND6_LLINFO_PROBE; | |
803 | ln->ln_expire = timenow.tv_sec + retrans / 1000; | |
804 | RT_ADDREF_LOCKED(rt); | |
805 | RT_UNLOCK(rt); | |
806 | lck_mtx_unlock(rnh_lock); | |
807 | nd6_ns_output(ifp, &dst->sin6_addr, | |
808 | &dst->sin6_addr, ln, 0); | |
809 | lck_mtx_assert(rnh_lock, | |
810 | LCK_MTX_ASSERT_NOTOWNED); | |
811 | RT_REMREF(rt); | |
812 | goto again; | |
813 | } | |
814 | ln->ln_state = ND6_LLINFO_STALE; /* XXX */ | |
815 | ln->ln_expire = rt_expiry(rt, timenow.tv_sec, | |
816 | nd6_gctimer); | |
817 | RT_UNLOCK(rt); | |
818 | break; | |
819 | ||
820 | case ND6_LLINFO_PROBE: | |
821 | if (ln->ln_asked < nd6_umaxtries) { | |
822 | ln->ln_asked++; | |
823 | ln->ln_expire = timenow.tv_sec + retrans / 1000; | |
824 | RT_ADDREF_LOCKED(rt); | |
825 | RT_UNLOCK(rt); | |
826 | lck_mtx_unlock(rnh_lock); | |
827 | nd6_ns_output(ifp, &dst->sin6_addr, | |
828 | &dst->sin6_addr, ln, 0); | |
829 | RT_REMREF(rt); | |
830 | } else { | |
831 | RT_UNLOCK(rt); | |
832 | lck_mtx_unlock(rnh_lock); | |
833 | nd6_free(rt); | |
834 | } | |
835 | lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED); | |
836 | goto again; | |
837 | ||
838 | default: | |
839 | RT_UNLOCK(rt); | |
840 | break; | |
841 | } | |
842 | ln = next; | |
843 | } | |
844 | lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED); | |
845 | ||
846 | /* Now clear the flag from all entries */ | |
847 | ln = llinfo_nd6.ln_next; | |
848 | while (ln != NULL && ln != &llinfo_nd6) { | |
849 | struct rtentry *rt = ln->ln_rt; | |
850 | struct llinfo_nd6 *next = ln->ln_next; | |
851 | ||
852 | RT_LOCK_SPIN(rt); | |
853 | if (ln->ln_flags & ND6_LNF_TIMER_SKIP) | |
854 | ln->ln_flags &= ~ND6_LNF_TIMER_SKIP; | |
855 | RT_UNLOCK(rt); | |
856 | ln = next; | |
857 | } | |
858 | lck_mtx_unlock(rnh_lock); | |
859 | ||
860 | /* expire default router list */ | |
861 | lck_mtx_lock(nd6_mutex); | |
862 | dr = TAILQ_FIRST(&nd_defrouter); | |
863 | while (dr) { | |
864 | if (dr->expire && dr->expire < timenow.tv_sec) { | |
865 | struct nd_defrouter *t; | |
866 | t = TAILQ_NEXT(dr, dr_entry); | |
867 | defrtrlist_del(dr); | |
868 | dr = t; | |
869 | } else { | |
870 | dr = TAILQ_NEXT(dr, dr_entry); | |
871 | } | |
872 | } | |
873 | lck_mtx_unlock(nd6_mutex); | |
874 | ||
875 | /* | |
876 | * expire interface addresses. | |
877 | * in the past the loop was inside prefix expiry processing. | |
878 | * However, from a stricter speci-confrmance standpoint, we should | |
879 | * rather separate address lifetimes and prefix lifetimes. | |
880 | */ | |
881 | addrloop: | |
882 | lck_rw_lock_exclusive(&in6_ifaddr_rwlock); | |
883 | for (ia6 = in6_ifaddrs; ia6; ia6 = nia6) { | |
884 | nia6 = ia6->ia_next; | |
885 | IFA_LOCK(&ia6->ia_ifa); | |
886 | /* | |
887 | * Extra reference for ourselves; it's no-op if | |
888 | * we don't have to regenerate temporary address, | |
889 | * otherwise it protects the address from going | |
890 | * away since we drop in6_ifaddr_rwlock below. | |
891 | */ | |
892 | IFA_ADDREF_LOCKED(&ia6->ia_ifa); | |
893 | /* check address lifetime */ | |
894 | lt6 = &ia6->ia6_lifetime; | |
895 | if (IFA6_IS_INVALID(ia6)) { | |
896 | /* | |
897 | * If the expiring address is temporary, try | |
898 | * regenerating a new one. This would be useful when | |
899 | * we suspended a laptop PC, then turned it on after a | |
900 | * period that could invalidate all temporary | |
901 | * addresses. Although we may have to restart the | |
902 | * loop (see below), it must be after purging the | |
903 | * address. Otherwise, we'd see an infinite loop of | |
904 | * regeneration. | |
905 | */ | |
906 | if (ip6_use_tempaddr && | |
907 | (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0) { | |
908 | /* | |
909 | * NOTE: We have to drop the lock here | |
910 | * because regen_tmpaddr() eventually calls | |
911 | * in6_update_ifa(), which must take the lock | |
912 | * and would otherwise cause a hang. This is | |
913 | * safe because the goto addrloop leads to a | |
914 | * re-evaluation of the in6_ifaddrs list | |
915 | */ | |
916 | IFA_UNLOCK(&ia6->ia_ifa); | |
917 | lck_rw_done(&in6_ifaddr_rwlock); | |
918 | (void) regen_tmpaddr(ia6); | |
919 | } else { | |
920 | IFA_UNLOCK(&ia6->ia_ifa); | |
921 | lck_rw_done(&in6_ifaddr_rwlock); | |
922 | } | |
923 | ||
924 | /* | |
925 | * Purging the address would have caused | |
926 | * in6_ifaddr_rwlock to be dropped and reacquired; | |
927 | * therefore search again from the beginning | |
928 | * of in6_ifaddrs list. | |
929 | */ | |
930 | in6_purgeaddr(&ia6->ia_ifa); | |
931 | ||
932 | /* Release extra reference taken above */ | |
933 | IFA_REMREF(&ia6->ia_ifa); | |
934 | goto addrloop; | |
935 | } | |
936 | IFA_LOCK_ASSERT_HELD(&ia6->ia_ifa); | |
937 | if (IFA6_IS_DEPRECATED(ia6)) { | |
938 | int oldflags = ia6->ia6_flags; | |
939 | ||
940 | ia6->ia6_flags |= IN6_IFF_DEPRECATED; | |
941 | ||
942 | /* | |
943 | * If a temporary address has just become deprecated, | |
944 | * regenerate a new one if possible. | |
945 | */ | |
946 | if (ip6_use_tempaddr && | |
947 | (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0 && | |
948 | (oldflags & IN6_IFF_DEPRECATED) == 0) { | |
949 | ||
950 | /* see NOTE above */ | |
951 | IFA_UNLOCK(&ia6->ia_ifa); | |
952 | lck_rw_done(&in6_ifaddr_rwlock); | |
953 | if (regen_tmpaddr(ia6) == 0) { | |
954 | /* | |
955 | * A new temporary address is | |
956 | * generated. | |
957 | * XXX: this means the address chain | |
958 | * has changed while we are still in | |
959 | * the loop. Although the change | |
960 | * would not cause disaster (because | |
961 | * it's not a deletion, but an | |
962 | * addition,) we'd rather restart the | |
963 | * loop just for safety. Or does this | |
964 | * significantly reduce performance?? | |
965 | */ | |
966 | /* Release extra reference */ | |
967 | IFA_REMREF(&ia6->ia_ifa); | |
968 | goto addrloop; | |
969 | } | |
970 | lck_rw_lock_exclusive(&in6_ifaddr_rwlock); | |
971 | } else { | |
972 | IFA_UNLOCK(&ia6->ia_ifa); | |
973 | } | |
974 | } else { | |
975 | /* | |
976 | * A new RA might have made a deprecated address | |
977 | * preferred. | |
978 | */ | |
979 | ia6->ia6_flags &= ~IN6_IFF_DEPRECATED; | |
980 | IFA_UNLOCK(&ia6->ia_ifa); | |
981 | } | |
982 | lck_rw_assert(&in6_ifaddr_rwlock, LCK_RW_ASSERT_EXCLUSIVE); | |
983 | /* Release extra reference taken above */ | |
984 | IFA_REMREF(&ia6->ia_ifa); | |
985 | } | |
986 | lck_rw_done(&in6_ifaddr_rwlock); | |
987 | ||
988 | lck_mtx_lock(nd6_mutex); | |
989 | /* | |
990 | * Since we drop the nd6_mutex in prelist_remove, we want to run this | |
991 | * section single threaded. | |
992 | */ | |
993 | while (nd6_drain_busy) { | |
994 | nd6_drain_waiters++; | |
995 | msleep(nd6_drain_waitchan, nd6_mutex, (PZERO-1), | |
996 | __func__, NULL); | |
997 | lck_mtx_assert(nd6_mutex, LCK_MTX_ASSERT_OWNED); | |
998 | } | |
999 | nd6_drain_busy = TRUE; | |
1000 | ||
1001 | /* expire prefix list */ | |
1002 | pr = nd_prefix.lh_first; | |
1003 | while (pr) { | |
1004 | /* | |
1005 | * check prefix lifetime. | |
1006 | * since pltime is just for autoconf, pltime processing for | |
1007 | * prefix is not necessary. | |
1008 | */ | |
1009 | NDPR_LOCK(pr); | |
1010 | if (pr->ndpr_stateflags & NDPRF_PROCESSED) { | |
1011 | NDPR_UNLOCK(pr); | |
1012 | pr = pr->ndpr_next; | |
1013 | continue; | |
1014 | } | |
1015 | if (pr->ndpr_expire && pr->ndpr_expire < timenow.tv_sec) { | |
1016 | /* | |
1017 | * address expiration and prefix expiration are | |
1018 | * separate. NEVER perform in6_purgeaddr here. | |
1019 | */ | |
1020 | pr->ndpr_stateflags |= NDPRF_PROCESSED; | |
1021 | NDPR_ADDREF_LOCKED(pr); | |
1022 | prelist_remove(pr); | |
1023 | NDPR_UNLOCK(pr); | |
1024 | NDPR_REMREF(pr); | |
1025 | pr = nd_prefix.lh_first; | |
1026 | } else { | |
1027 | pr->ndpr_stateflags |= NDPRF_PROCESSED; | |
1028 | NDPR_UNLOCK(pr); | |
1029 | pr = pr->ndpr_next; | |
1030 | } | |
1031 | } | |
1032 | LIST_FOREACH(pr, &nd_prefix, ndpr_entry) { | |
1033 | NDPR_LOCK(pr); | |
1034 | pr->ndpr_stateflags &= ~NDPRF_PROCESSED; | |
1035 | NDPR_UNLOCK(pr); | |
1036 | } | |
1037 | nd6_drain_busy = FALSE; | |
1038 | if (nd6_drain_waiters > 0) { | |
1039 | nd6_drain_waiters = 0; | |
1040 | wakeup(nd6_drain_waitchan); | |
1041 | } | |
1042 | lck_mtx_unlock(nd6_mutex); | |
1043 | } | |
1044 | ||
1045 | /* | |
1046 | * ND6 router advertisement kernel notification | |
1047 | */ | |
1048 | void | |
1049 | nd6_post_msg(u_int32_t code, struct nd_prefix_list *prefix_list, | |
1050 | u_int32_t list_length, u_int32_t mtu, char *dl_addr, u_int32_t dl_addr_len) | |
1051 | { | |
1052 | struct kev_msg ev_msg; | |
1053 | struct kev_nd6_ra_data nd6_ra_msg_data; | |
1054 | struct nd_prefix_list *itr = prefix_list; | |
1055 | ||
1056 | bzero(&ev_msg, sizeof(struct kev_msg)); | |
1057 | ev_msg.vendor_code = KEV_VENDOR_APPLE; | |
1058 | ev_msg.kev_class = KEV_NETWORK_CLASS; | |
1059 | ev_msg.kev_subclass = KEV_ND6_SUBCLASS; | |
1060 | ev_msg.event_code = code; | |
1061 | ||
1062 | bzero(&nd6_ra_msg_data, sizeof(nd6_ra_msg_data)); | |
1063 | nd6_ra_msg_data.lladdrlen = (dl_addr_len <= ND6_ROUTER_LL_SIZE) ? | |
1064 | dl_addr_len : ND6_ROUTER_LL_SIZE; | |
1065 | bcopy(dl_addr, &nd6_ra_msg_data.lladdr, nd6_ra_msg_data.lladdrlen); | |
1066 | ||
1067 | if (mtu > 0 && mtu >= IPV6_MMTU) { | |
1068 | nd6_ra_msg_data.mtu = mtu; | |
1069 | nd6_ra_msg_data.flags |= KEV_ND6_DATA_VALID_MTU; | |
1070 | } | |
1071 | ||
1072 | if (list_length > 0 && prefix_list != NULL) { | |
1073 | nd6_ra_msg_data.list_length = list_length; | |
1074 | nd6_ra_msg_data.flags |= KEV_ND6_DATA_VALID_PREFIX; | |
1075 | } | |
1076 | ||
1077 | while (itr != NULL && nd6_ra_msg_data.list_index < list_length) { | |
1078 | bcopy(&itr->pr.ndpr_prefix, &nd6_ra_msg_data.prefix.prefix, | |
1079 | sizeof (nd6_ra_msg_data.prefix.prefix)); | |
1080 | nd6_ra_msg_data.prefix.raflags = itr->pr.ndpr_raf; | |
1081 | nd6_ra_msg_data.prefix.prefixlen = itr->pr.ndpr_plen; | |
1082 | nd6_ra_msg_data.prefix.origin = PR_ORIG_RA; | |
1083 | nd6_ra_msg_data.prefix.vltime = itr->pr.ndpr_vltime; | |
1084 | nd6_ra_msg_data.prefix.pltime = itr->pr.ndpr_pltime; | |
1085 | nd6_ra_msg_data.prefix.expire = itr->pr.ndpr_expire; | |
1086 | nd6_ra_msg_data.prefix.flags = itr->pr.ndpr_stateflags; | |
1087 | nd6_ra_msg_data.prefix.refcnt = itr->pr.ndpr_addrcnt; | |
1088 | nd6_ra_msg_data.prefix.if_index = itr->pr.ndpr_ifp->if_index; | |
1089 | ||
1090 | /* send the message up */ | |
1091 | ev_msg.dv[0].data_ptr = &nd6_ra_msg_data; | |
1092 | ev_msg.dv[0].data_length = sizeof(nd6_ra_msg_data); | |
1093 | ev_msg.dv[1].data_length = 0; | |
1094 | kev_post_msg(&ev_msg); | |
1095 | ||
1096 | /* clean up for the next prefix */ | |
1097 | bzero(&nd6_ra_msg_data.prefix, sizeof(nd6_ra_msg_data.prefix)); | |
1098 | itr = itr->next; | |
1099 | nd6_ra_msg_data.list_index++; | |
1100 | } | |
1101 | } | |
1102 | ||
1103 | /* | |
1104 | * ND6 timer routine to expire default route list and prefix list | |
1105 | */ | |
1106 | void | |
1107 | nd6_timer(__unused void *ignored_arg) | |
1108 | { | |
1109 | nd6_drain(NULL); | |
1110 | timeout(nd6_timer, (caddr_t)0, nd6_prune * hz); | |
1111 | } | |
1112 | ||
/*
 * Try to regenerate a replacement for a deprecated/invalidated temporary
 * (privacy) address.  Walks the interface's address list looking at
 * autoconf addresses that share ia6's prefix:
 *  - if a still-preferred temporary address already exists, do nothing;
 *  - otherwise, use a preferred public autoconf address with the same
 *    prefix as the base for in6_tmpifadd().
 *
 * Returns 0 when a new temporary address was generated, -1 otherwise.
 */
static int
regen_tmpaddr(
	struct in6_ifaddr *ia6) /* deprecated/invalidated temporary address */
{
	struct ifaddr *ifa;
	struct ifnet *ifp;
	struct in6_ifaddr *public_ifa6 = NULL;	/* candidate base address */
	struct timeval timenow;

	getmicrotime(&timenow);

	ifp = ia6->ia_ifa.ifa_ifp;
	ifnet_lock_shared(ifp);
	for (ifa = ifp->if_addrlist.tqh_first; ifa;
	    ifa = ifa->ifa_list.tqe_next)
	{
		struct in6_ifaddr *it6;

		/* Per-address lock; dropped before moving to the next entry */
		IFA_LOCK(ifa);
		if (ifa->ifa_addr->sa_family != AF_INET6) {
			IFA_UNLOCK(ifa);
			continue;
		}
		it6 = (struct in6_ifaddr *)ifa;

		/* ignore no autoconf addresses. */
		if ((it6->ia6_flags & IN6_IFF_AUTOCONF) == 0) {
			IFA_UNLOCK(ifa);
			continue;
		}
		/* ignore autoconf addresses with different prefixes. */
		if (it6->ia6_ndpr == NULL || it6->ia6_ndpr != ia6->ia6_ndpr) {
			IFA_UNLOCK(ifa);
			continue;
		}
		/*
		 * Now we are looking at an autoconf address with the same
		 * prefix as ours.  If the address is temporary and is still
		 * preferred, do not create another one.  It would be rare, but
		 * could happen, for example, when we resume a laptop PC after
		 * a long period.
		 */
		if ((it6->ia6_flags & IN6_IFF_TEMPORARY) != 0 &&
		    !IFA6_IS_DEPRECATED(it6)) {
			IFA_UNLOCK(ifa);
			/* Drop any candidate reference taken earlier */
			if (public_ifa6 != NULL)
				IFA_REMREF(&public_ifa6->ia_ifa);
			public_ifa6 = NULL;
			break;
		}

		/*
		 * This is a public autoconf address that has the same prefix
		 * as ours.  If it is preferred, keep it.  We can't break the
		 * loop here, because there may be a still-preferred temporary
		 * address with the prefix.
		 */
		if (!IFA6_IS_DEPRECATED(it6)) {
			IFA_ADDREF_LOCKED(ifa);	/* for public_ifa6 */
			IFA_UNLOCK(ifa);
			/* Replace previous candidate; release its reference */
			if (public_ifa6 != NULL)
				IFA_REMREF(&public_ifa6->ia_ifa);
			public_ifa6 = it6;
		} else {
			IFA_UNLOCK(ifa);
		}
	}
	ifnet_lock_done(ifp);

	if (public_ifa6 != NULL) {
		int e;

		/* Derive a fresh temporary address from the public one */
		if ((e = in6_tmpifadd(public_ifa6, 0, M_WAITOK)) != 0) {
			log(LOG_NOTICE, "regen_tmpaddr: failed to create a new"
			    " tmp addr,errno=%d\n", e);
			IFA_REMREF(&public_ifa6->ia_ifa);
			return(-1);
		}
		IFA_REMREF(&public_ifa6->ia_ifa);
		return(0);
	}

	return(-1);
}
1197 | ||
/*
 * Nuke neighbor cache/prefix/default router management table, right before
 * ifp goes away.
 *
 * Called during interface detach.  Deletes, in order: default router
 * entries on ifp (non-installed first, then installed ones that may be
 * referenced from the routing table), prefix entries on ifp, the default
 * outgoing interface setting if it pointed at ifp, and finally all
 * neighbor cache entries whose link-layer gateway references ifp.
 */
void
nd6_purge(
	struct ifnet *ifp)
{
	struct llinfo_nd6 *ln;
	struct nd_defrouter *dr, *ndr;
	struct nd_prefix *pr, *npr;

	/* Nuke default router list entries toward ifp */
	lck_mtx_lock(nd6_mutex);
	if ((dr = TAILQ_FIRST(&nd_defrouter)) != NULL) {
		/*
		 * The first entry of the list may be stored in
		 * the routing table, so we'll delete it later.
		 */
		for (dr = TAILQ_NEXT(dr, dr_entry); dr; dr = ndr) {
			ndr = TAILQ_NEXT(dr, dr_entry);
			/* Installed entries are handled by the pass below */
			if (dr->stateflags & NDDRF_INSTALLED)
				continue;
			if (dr->ifp == ifp)
				defrtrlist_del(dr);
		}
		/* Now deal with the (possibly installed) head entry */
		dr = TAILQ_FIRST(&nd_defrouter);
		if (dr->ifp == ifp)
			defrtrlist_del(dr);
	}

	/* Second pass: remove the installed entries toward ifp */
	for (dr = TAILQ_FIRST(&nd_defrouter); dr; dr = ndr) {
		ndr = TAILQ_NEXT(dr, dr_entry);
		if (!(dr->stateflags & NDDRF_INSTALLED))
			continue;

		if (dr->ifp == ifp)
			defrtrlist_del(dr);
	}

	/* Nuke prefix list entries toward ifp */
	for (pr = nd_prefix.lh_first; pr; pr = npr) {
		npr = pr->ndpr_next;
		NDPR_LOCK(pr);
		if (pr->ndpr_ifp == ifp) {
			/*
			 * Because if_detach() does *not* release prefixes
			 * while purging addresses the reference count will
			 * still be above zero. We therefore reset it to
			 * make sure that the prefix really gets purged.
			 */
			pr->ndpr_addrcnt = 0;

			/*
			 * Previously, pr->ndpr_addr is removed as well,
			 * but I strongly believe we don't have to do it.
			 * nd6_purge() is only called from in6_ifdetach(),
			 * which removes all the associated interface addresses
			 * by itself.
			 * (jinmei@kame.net 20010129)
			 */
			/* Hold a ref across prelist_remove(), then drop it */
			NDPR_ADDREF_LOCKED(pr);
			prelist_remove(pr);
			NDPR_UNLOCK(pr);
			NDPR_REMREF(pr);
		} else {
			NDPR_UNLOCK(pr);
		}
	}
	lck_mtx_unlock(nd6_mutex);

	/* cancel default outgoing interface setting */
	if (nd6_defifindex == ifp->if_index) {
		nd6_setdefaultiface(0);
	}

	/*
	 * Perform default router selection even when we are a router,
	 * if Scoped Routing is enabled.
	 */
	if (ip6_doscopedroute || !ip6_forwarding) {
		lck_mtx_lock(nd6_mutex);
		/* refresh default router list */
		defrouter_select(ifp);
		lck_mtx_unlock(nd6_mutex);
	}

	/*
	 * Nuke neighbor cache entries for the ifp.
	 * Note that rt->rt_ifp may not be the same as ifp,
	 * due to KAME goto ours hack. See RTM_RESOLVE case in
	 * nd6_rtrequest(), and ip6_input().
	 */
again:
	lck_mtx_lock(rnh_lock);
	ln = llinfo_nd6.ln_next;
	while (ln != NULL && ln != &llinfo_nd6) {
		struct rtentry *rt;
		struct llinfo_nd6 *nln;

		nln = ln->ln_next;
		rt = ln->ln_rt;
		RT_LOCK(rt);
		if (rt->rt_gateway != NULL &&
		    rt->rt_gateway->sa_family == AF_LINK &&
		    SDL(rt->rt_gateway)->sdl_index == ifp->if_index) {
			/* nd6_free() must run without rnh_lock or rt lock */
			RT_UNLOCK(rt);
			lck_mtx_unlock(rnh_lock);
			/*
			 * See comments on nd6_timer() for reasons why
			 * this loop is repeated; we bite the costs of
			 * going thru the same llinfo_nd6 more than once
			 * here, since this purge happens during detach,
			 * and that unlike the timer case, it's possible
			 * there's more than one purges happening at the
			 * same time (thus a flag wouldn't buy anything).
			 */
			nd6_free(rt);
			lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
			goto again;
		} else {
			RT_UNLOCK(rt);
		}
		ln = nln;
	}
	lck_mtx_unlock(rnh_lock);
}
1325 | ||
/*
 * Look up (and optionally create) the neighbor-cache route entry for an
 * IPv6 address, scoped to an interface when one is supplied.
 *
 * Upon success, the returned route will be locked and the caller is
 * responsible for releasing the reference and doing RT_UNLOCK(rt).
 * This routine does not require rnh_lock to be held by the caller,
 * although it needs to be indicated of such a case in order to call
 * the correct variant of the relevant routing routines.
 *
 * Returns NULL on failure (no entry and !create, allocation failure,
 * or the found route is not a valid neighbor-cache entry).
 */
struct rtentry *
nd6_lookup(
	struct in6_addr *addr6,		/* address to look up */
	int create,			/* nonzero: create entry if absent */
	struct ifnet *ifp,		/* interface scope; may be NULL */
	int rt_locked)			/* nonzero: caller holds rnh_lock */
{
	struct rtentry *rt;
	struct sockaddr_in6 sin6;
	unsigned int ifscope;

	/* Build a sockaddr_in6 key for the routing lookup */
	bzero(&sin6, sizeof(sin6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_addr = *addr6;

	ifscope = (ifp != NULL) ? ifp->if_index : IFSCOPE_NONE;
	if (rt_locked) {
		lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
		rt = rtalloc1_scoped_locked((struct sockaddr *)&sin6,
		    create, 0, ifscope);
	} else {
		rt = rtalloc1_scoped((struct sockaddr *)&sin6,
		    create, 0, ifscope);
	}

	if (rt != NULL) {
		RT_LOCK(rt);
		if ((rt->rt_flags & RTF_LLINFO) == 0) {
			/*
			 * This is the case for the default route.
			 * If we want to create a neighbor cache for the
			 * address, we should free the route for the
			 * destination and allocate an interface route.
			 */
			if (create) {
				RT_UNLOCK(rt);
				if (rt_locked)
					rtfree_locked(rt);
				else
					rtfree(rt);
				rt = NULL;
			}
		}
	}
	if (rt == NULL) {
		if (create && ifp) {
			struct ifaddr *ifa;
			u_int32_t ifa_flags;
			int e;

			/*
			 * If no route is available and create is set,
			 * we allocate a host route for the destination
			 * and treat it like an interface route.
			 * This hack is necessary for a neighbor which can't
			 * be covered by our own prefix.
			 */
			ifa = ifaof_ifpforaddr((struct sockaddr *)&sin6, ifp);
			if (ifa == NULL)
				return(NULL);

			/*
			 * Create a new route. RTF_LLINFO is necessary
			 * to create a Neighbor Cache entry for the
			 * destination in nd6_rtrequest which will be
			 * called in rtrequest via ifa->ifa_rtrequest.
			 */
			if (!rt_locked)
				lck_mtx_lock(rnh_lock);
			IFA_LOCK_SPIN(ifa);
			ifa_flags = ifa->ifa_flags;
			IFA_UNLOCK(ifa);
			if ((e = rtrequest_scoped_locked(RTM_ADD,
			    (struct sockaddr *)&sin6, ifa->ifa_addr,
			    (struct sockaddr *)&all1_sa,
			    (ifa_flags | RTF_HOST | RTF_LLINFO) &
			    ~RTF_CLONING, &rt, ifscope)) != 0) {
				if (e != EEXIST)
					log(LOG_ERR, "%s: failed to add route "
					    "for a neighbor(%s), errno=%d\n",
					    __func__, ip6_sprintf(addr6), e);
			}
			if (!rt_locked)
				lck_mtx_unlock(rnh_lock);
			IFA_REMREF(ifa);
			if (rt == NULL)
				return(NULL);

			RT_LOCK(rt);
			if (rt->rt_llinfo) {
				struct llinfo_nd6 *ln = rt->rt_llinfo;
				/* Freshly created cache entry: no ND state yet */
				ln->ln_state = ND6_LLINFO_NOSTATE;
			}
		} else {
			return(NULL);
		}
	}
	RT_LOCK_ASSERT_HELD(rt);
	/*
	 * Validation for the entry.
	 * Note that the check for rt_llinfo is necessary because a cloned
	 * route from a parent route that has the L flag (e.g. the default
	 * route to a p2p interface) may have the flag, too, while the
	 * destination is not actually a neighbor.
	 * XXX: we can't use rt->rt_ifp to check for the interface, since
	 *	it might be the loopback interface if the entry is for our
	 *	own address on a non-loopback interface. Instead, we should
	 *	use rt->rt_ifa->ifa_ifp, which would specify the REAL
	 *	interface.
	 * Note also that ifa_ifp and ifp may differ when we connect two
	 * interfaces to a same link, install a link prefix to an interface,
	 * and try to install a neighbor cache on an interface that does not
	 * have a route to the prefix.
	 *
	 * If the address is from a proxied prefix, the ifa_ifp and ifp might
	 * not match, because nd6_na_input() could have modified the ifp
	 * of the route to point to the interface where the NA arrived on,
	 * hence the test for RTF_PROXY.
	 */
	if ((rt->rt_flags & RTF_GATEWAY) || (rt->rt_flags & RTF_LLINFO) == 0 ||
	    rt->rt_gateway->sa_family != AF_LINK || rt->rt_llinfo == NULL ||
	    (ifp && rt->rt_ifa->ifa_ifp != ifp &&
	    !(rt->rt_flags & RTF_PROXY))) {
		RT_REMREF_LOCKED(rt);
		RT_UNLOCK(rt);
		if (create) {
			log(LOG_DEBUG, "%s: failed to lookup %s "
			    "(if = %s)\n", __func__, ip6_sprintf(addr6),
			    ifp ? if_name(ifp) : "unspec");
			/* xxx more logs... kazu */
		}
		return(NULL);
	}
	/*
	 * Caller needs to release reference and call RT_UNLOCK(rt).
	 */
	return(rt);
}
1472 | ||
/*
 * Test whether a given IPv6 address is a neighbor or not, ignoring
 * the actual neighbor cache.  The neighbor cache is ignored in order
 * to not reenter the routing code from within itself.
 *
 * Considered a neighbor when: the address is link-local in ifp's zone,
 * it matches an on-link prefix on ifp, it is the remote end of a p2p
 * interface, or (legacy, non-scoped-routing hosts only) the default
 * router list is empty.  Returns 1 if a neighbor, 0 otherwise.
 * Caller must hold nd6_mutex.
 */
static int
nd6_is_new_addr_neighbor(
	struct sockaddr_in6 *addr,
	struct ifnet *ifp)
{
	struct nd_prefix *pr;
	struct ifaddr *dstaddr;

	lck_mtx_assert(nd6_mutex, LCK_MTX_ASSERT_OWNED);

	/*
	 * A link-local address is always a neighbor.
	 * XXX: a link does not necessarily specify a single interface.
	 */
	if (IN6_IS_ADDR_LINKLOCAL(&addr->sin6_addr)) {
		struct sockaddr_in6 sin6_copy;
		u_int32_t zone;

		/*
		 * We need sin6_copy since sa6_recoverscope() may modify the
		 * content (XXX).
		 */
		sin6_copy = *addr;
		if (sa6_recoverscope(&sin6_copy, FALSE))
			return (0); /* XXX: should be impossible */
		if (in6_setscope(&sin6_copy.sin6_addr, ifp, &zone))
			return (0);
		/* Neighbor only if the address belongs to ifp's zone */
		if (sin6_copy.sin6_scope_id == zone)
			return (1);
		else
			return (0);
	}

	/*
	 * If the address matches one of our addresses,
	 * it should be a neighbor.
	 * If the address matches one of our on-link prefixes, it should be a
	 * neighbor.
	 */
	for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) {
		NDPR_LOCK(pr);
		if (pr->ndpr_ifp != ifp) {
			NDPR_UNLOCK(pr);
			continue;
		}
		/* Only on-link prefixes imply neighborship */
		if (!(pr->ndpr_stateflags & NDPRF_ONLINK)) {
			NDPR_UNLOCK(pr);
			continue;
		}
		if (IN6_ARE_MASKED_ADDR_EQUAL(&pr->ndpr_prefix.sin6_addr,
		    &addr->sin6_addr, &pr->ndpr_mask)) {
			NDPR_UNLOCK(pr);
			return (1);
		}
		NDPR_UNLOCK(pr);
	}

	/*
	 * If the address is assigned on the node of the other side of
	 * a p2p interface, the address should be a neighbor.
	 */
	dstaddr = ifa_ifwithdstaddr((struct sockaddr *)addr);
	if (dstaddr != NULL) {
		if (dstaddr->ifa_ifp == ifp) {
			IFA_REMREF(dstaddr);
			return (1);
		}
		IFA_REMREF(dstaddr);
		dstaddr = NULL;
	}

	/*
	 * If the default router list is empty, all addresses are regarded
	 * as on-link, and thus, as a neighbor.
	 * XXX: we restrict the condition to hosts, because routers usually do
	 * not have the "default router list".
	 * XXX: this block should eventually be removed (it is disabled when
	 * Scoped Routing is in effect); treating all destinations as on-link
	 * in the absence of a router is rather harmful.
	 */
	if (!ip6_doscopedroute && !ip6_forwarding &&
	    TAILQ_FIRST(&nd_defrouter) == NULL &&
	    nd6_defifindex == ifp->if_index) {
		return (1);
	}

	return (0);
}
1566 | ||
1567 | ||
1568 | /* | |
1569 | * Detect if a given IPv6 address identifies a neighbor on a given link. | |
1570 | * XXX: should take care of the destination of a p2p link? | |
1571 | */ | |
1572 | int | |
1573 | nd6_is_addr_neighbor(struct sockaddr_in6 *addr, struct ifnet *ifp, int rt_locked) | |
1574 | { | |
1575 | struct rtentry *rt; | |
1576 | ||
1577 | lck_mtx_assert(nd6_mutex, LCK_MTX_ASSERT_NOTOWNED); | |
1578 | lck_mtx_lock(nd6_mutex); | |
1579 | if (nd6_is_new_addr_neighbor(addr, ifp)) { | |
1580 | lck_mtx_unlock(nd6_mutex); | |
1581 | return (1); | |
1582 | } | |
1583 | lck_mtx_unlock(nd6_mutex); | |
1584 | ||
1585 | /* | |
1586 | * Even if the address matches none of our addresses, it might be | |
1587 | * in the neighbor cache. | |
1588 | */ | |
1589 | if ((rt = nd6_lookup(&addr->sin6_addr, 0, ifp, rt_locked)) != NULL) { | |
1590 | RT_LOCK_ASSERT_HELD(rt); | |
1591 | RT_REMREF_LOCKED(rt); | |
1592 | RT_UNLOCK(rt); | |
1593 | return (1); | |
1594 | } | |
1595 | ||
1596 | return (0); | |
1597 | } | |
1598 | ||
/*
 * Free an nd6 llinfo entry.
 * Since the function would cause significant changes in the kernel, DO NOT
 * make it global, unless you have a strong reason for the change, and are sure
 * that the change is safe.
 *
 * Must be called without rnh_lock or the route lock held; the route is
 * marked RTF_CONDEMNED, removed from the routing tree, and its caches
 * flushed.  If the entry was (or may have been) a default router, the
 * default router list and on-link prefix state are refreshed.
 */
void
nd6_free(
	struct rtentry *rt)
{
	struct llinfo_nd6 *ln;
	struct in6_addr in6;
	struct nd_defrouter *dr;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
	RT_LOCK_ASSERT_NOTHELD(rt);
	lck_mtx_lock(nd6_mutex);

	RT_LOCK(rt);
	RT_ADDREF_LOCKED(rt);	/* Extra ref */
	ln = rt->rt_llinfo;
	/* Snapshot the destination before the lock is dropped below */
	in6 = ((struct sockaddr_in6 *)(void *)rt_key(rt))->sin6_addr;

	/*
	 * Prevent another thread from modifying rt_key, rt_gateway
	 * via rt_setgate() after the rt_lock is dropped by marking
	 * the route as defunct.
	 */
	rt->rt_flags |= RTF_CONDEMNED;

	/*
	 * We used to have pfctlinput(PRC_HOSTDEAD) here. Even though it is
	 * not harmful, it was not really necessary. Perform default router
	 * selection even when we are a router, if Scoped Routing is enabled.
	 */
	if (ip6_doscopedroute || !ip6_forwarding) {
		/* Was this neighbor on our default router list? */
		dr = defrouter_lookup(&((struct sockaddr_in6 *)(void *)
		    rt_key(rt))->sin6_addr, rt->rt_ifp);

		if ((ln && ln->ln_router) || dr) {
			/*
			 * rt6_flush must be called whether or not the neighbor
			 * is in the Default Router List.
			 * See a corresponding comment in nd6_na_input().
			 */
			RT_UNLOCK(rt);
			lck_mtx_unlock(nd6_mutex);
			rt6_flush(&in6, rt->rt_ifp);
			lck_mtx_lock(nd6_mutex);
		} else {
			RT_UNLOCK(rt);
		}

		if (dr) {
			NDDR_REMREF(dr);	/* ref from defrouter_lookup */
			/*
			 * Unreachablity of a router might affect the default
			 * router selection and on-link detection of advertised
			 * prefixes.
			 */

			/*
			 * Temporarily fake the state to choose a new default
			 * router and to perform on-link determination of
			 * prefixes correctly.
			 * Below the state will be set correctly,
			 * or the entry itself will be deleted.
			 */
			RT_LOCK_SPIN(rt);
			ln->ln_state = ND6_LLINFO_INCOMPLETE;

			/*
			 * Since defrouter_select() does not affect the
			 * on-link determination and MIP6 needs the check
			 * before the default router selection, we perform
			 * the check now.
			 */
			RT_UNLOCK(rt);
			pfxlist_onlink_check();

			/*
			 * refresh default router list
			 */
			defrouter_select(rt->rt_ifp);
		}
		RT_LOCK_ASSERT_NOTHELD(rt);
	} else {
		RT_UNLOCK(rt);
	}

	lck_mtx_unlock(nd6_mutex);
	/*
	 * Detach the route from the routing tree and the list of neighbor
	 * caches, and disable the route entry not to be used in already
	 * cached routes.
	 */
	(void) rtrequest(RTM_DELETE, rt_key(rt), (struct sockaddr *)0,
	    rt_mask(rt), 0, (struct rtentry **)0);

	/* Extra ref held above; now free it */
	rtfree(rt);
}
1701 | ||
/*
 * Upper-layer reachability hint for Neighbor Unreachability Detection.
 *
 * Promotes the neighbor entry to REACHABLE and refreshes its expiry,
 * based on positive confirmation from an upper layer (e.g. TCP).
 * If "rt" is NULL the entry is resolved via "dst6"; if "rt" is given,
 * a reference is taken for the duration of the call.  When "force" is
 * zero, hints beyond nd6_maxnudhint are ignored to limit the damage of
 * false confirmations.
 *
 * XXX cost-effective methods?
 */
void
nd6_nud_hint(
	struct rtentry *rt,	/* neighbor route, or NULL to look up dst6 */
	struct in6_addr *dst6,	/* destination; used only when rt is NULL */
	int force)		/* nonzero: bypass the hint-count limit */
{
	struct llinfo_nd6 *ln;
	struct timeval timenow;

	getmicrotime(&timenow);

	/*
	 * If the caller specified "rt", use that.  Otherwise, resolve the
	 * routing table by supplied "dst6".
	 */
	if (!rt) {
		if (!dst6)
			return;
		/* Callee returns a locked route upon success */
		if ((rt = nd6_lookup(dst6, 0, NULL, 0)) == NULL)
			return;
		RT_LOCK_ASSERT_HELD(rt);
	} else {
		RT_LOCK(rt);
		RT_ADDREF_LOCKED(rt);
	}

	/* Sanity-check that this is a neighbor-cache host entry */
	if ((rt->rt_flags & RTF_GATEWAY) != 0 ||
	    (rt->rt_flags & RTF_LLINFO) == 0 ||
	    !rt->rt_llinfo || !rt->rt_gateway ||
	    rt->rt_gateway->sa_family != AF_LINK) {
		/* This is not a host route. */
		goto done;
	}

	ln = rt->rt_llinfo;
	if (ln->ln_state < ND6_LLINFO_REACHABLE)
		goto done;

	/*
	 * if we get upper-layer reachability confirmation many times,
	 * it is possible we have false information.
	 */
	if (!force) {
		ln->ln_byhint++;
		if (ln->ln_byhint > nd6_maxnudhint)
			goto done;
	}

	ln->ln_state = ND6_LLINFO_REACHABLE;
	if (ln->ln_expire) {
		struct nd_ifinfo *ndi;

		/* Refresh expiry using the interface's reachable time */
		lck_rw_lock_shared(nd_if_rwlock);
		ndi = ND_IFINFO(rt->rt_ifp);
		VERIFY(ndi != NULL && ndi->initialized);
		lck_mtx_lock(&ndi->lock);
		ln->ln_expire = timenow.tv_sec + ndi->reachable;
		lck_mtx_unlock(&ndi->lock);
		lck_rw_done(nd_if_rwlock);
	}
done:
	/* Drop the reference and lock acquired on either entry path */
	RT_REMREF_LOCKED(rt);
	RT_UNLOCK(rt);
}
1772 | ||
/*
 * Routing-table request handler for IPv6 neighbor-discovery routes.
 *
 * Invoked by the routing code for RTM_ADD / RTM_RESOLVE / RTM_DELETE on
 * routes over ND-capable interfaces.  Responsibilities visible here:
 *   - attach a freshly allocated llinfo_nd6 to host routes (and set
 *     RTF_LLINFO) on add/resolve;
 *   - install an AF_LINK sockaddr_dl as rt_gateway;
 *   - redirect routes to our own addresses via the loopback interface;
 *   - join/leave the solicited-node multicast group for proxy-ND
 *     (RTF_ANNOUNCE) entries;
 *   - unchain llinfo state on delete (actual free is deferred to the
 *     route's rt_llinfo_free callback).
 *
 * Locking: caller must hold rnh_lock and the rt lock (asserted below).
 * 'sa' is unused.
 */
void
nd6_rtrequest(
	int req,
	struct rtentry *rt,
	__unused struct sockaddr *sa)
{
	struct sockaddr *gate = rt->rt_gateway;
	struct llinfo_nd6 *ln = rt->rt_llinfo;
	/* Template AF_LINK gateway: sdl_len/sdl_family set, rest zeroed. */
	static struct sockaddr_dl null_sdl = {sizeof(null_sdl), AF_LINK, 0, 0, 0, 0, 0,
	    {0,0,0,0,0,0,0,0,0,0,0,0,} };
	struct ifnet *ifp = rt->rt_ifp;
	struct ifaddr *ifa;
	struct timeval timenow;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
	RT_LOCK_ASSERT_HELD(rt);

	/* Gatewayed routes are not neighbor entries; nothing to do. */
	if ((rt->rt_flags & RTF_GATEWAY))
		return;

	if (nd6_need_cache(ifp) == 0 && (rt->rt_flags & RTF_HOST) == 0) {
		/*
		 * This is probably an interface direct route for a link
		 * which does not need neighbor caches (e.g. fe80::%lo0/64).
		 * We do not need special treatment below for such a route.
		 * Moreover, the RTF_LLINFO flag which would be set below
		 * would annoy the ndp(8) command.
		 */
		return;
	}

	if (req == RTM_RESOLVE) {
		int no_nd_cache;

		if (!nd6_need_cache(ifp)) {	/* stf case */
			no_nd_cache = 1;
		} else {
			struct sockaddr_in6 sin6;

			rtkey_to_sa6(rt, &sin6);
			/*
			 * nd6_is_addr_neighbor() may call nd6_lookup(),
			 * therefore we drop rt_lock to avoid deadlock
			 * during the lookup.  A reference is held across
			 * the unlocked window so rt cannot go away.
			 */
			RT_ADDREF_LOCKED(rt);
			RT_UNLOCK(rt);
			no_nd_cache = !nd6_is_addr_neighbor(&sin6, ifp, 1);
			RT_LOCK(rt);
			RT_REMREF_LOCKED(rt);
		}

		/*
		 * FreeBSD and BSD/OS often make a cloned host route based
		 * on a less-specific route (e.g. the default route).
		 * If the less specific route does not have a "gateway"
		 * (this is the case when the route just goes to a p2p or an
		 * stf interface), we'll mistakenly make a neighbor cache for
		 * the host route, and will see strange neighbor solicitation
		 * for the corresponding destination.  In order to avoid the
		 * confusion, we check if the destination of the route is
		 * a neighbor in terms of neighbor discovery, and stop the
		 * process if not.  Additionally, we remove the LLINFO flag
		 * so that ndp(8) will not try to get the neighbor information
		 * of the destination.
		 */
		if (no_nd_cache) {
			rt->rt_flags &= ~RTF_LLINFO;
			return;
		}
	}

	getmicrotime(&timenow);
	switch (req) {
	case RTM_ADD:
		/*
		 * There is no backward compatibility :)
		 *
		 * if ((rt->rt_flags & RTF_HOST) == 0 &&
		 *	SIN(rt_mask(rt))->sin_addr.s_addr != 0xffffffff)
		 *		rt->rt_flags |= RTF_CLONING;
		 */
		if ((rt->rt_flags & RTF_CLONING) ||
		    ((rt->rt_flags & RTF_LLINFO) && ln == NULL)) {
			/*
			 * Case 1: This route should come from a route to
			 * interface (RTF_CLONING case) or the route should be
			 * treated as on-link but is currently not
			 * (RTF_LLINFO && ln == NULL case).
			 */
			if (rt_setgate(rt, rt_key(rt),
			    (struct sockaddr *)&null_sdl) == 0) {
				/* rt_setgate() replaced rt_gateway; re-read */
				gate = rt->rt_gateway;
				SDL(gate)->sdl_type = ifp->if_type;
				SDL(gate)->sdl_index = ifp->if_index;
				/*
				 * In case we're called before 1.0 sec.
				 * has elapsed.
				 */
				if (ln != NULL)
					ln->ln_expire = MAX(timenow.tv_sec, 1);
			}
			if ((rt->rt_flags & RTF_CLONING))
				break;
		}
		/*
		 * In IPv4 code, we try to announce new RTF_ANNOUNCE entry here.
		 * We don't do that here since llinfo is not ready yet.
		 *
		 * There are also couple of other things to be discussed:
		 * - unsolicited NA code needs improvement beforehand
		 * - RFC2461 says we MAY send multicast unsolicited NA
		 *   (7.2.6 paragraph 4), however, it also says that we
		 *   SHOULD provide a mechanism to prevent multicast NA storm.
		 *   we don't have anything like it right now.
		 *   note that the mechanism needs a mutual agreement
		 *   between proxies, which means that we need to implement
		 *   a new protocol, or a new kludge.
		 * - from RFC2461 6.2.4, host MUST NOT send an unsolicited NA.
		 *   we need to check ip6forwarding before sending it.
		 *   (or should we allow proxy ND configuration only for
		 *   routers?  there's no mention about proxy ND from hosts)
		 */
		/* FALLTHROUGH */
	case RTM_RESOLVE:
		if ((ifp->if_flags & (IFF_POINTOPOINT | IFF_LOOPBACK)) == 0) {
			/*
			 * Address resolution isn't necessary for a point to
			 * point link, so we can skip this test for a p2p link.
			 */
			if (gate->sa_family != AF_LINK ||
			    gate->sa_len < sizeof(null_sdl)) {
				/* Don't complain in case of RTM_ADD */
				if (req == RTM_RESOLVE) {
					log(LOG_DEBUG,
					    "nd6_rtrequest: bad gateway "
					    "value: %s\n", if_name(ifp));
				}
				break;
			}
			SDL(gate)->sdl_type = ifp->if_type;
			SDL(gate)->sdl_index = ifp->if_index;
		}
		if (ln != NULL)
			break;	/* This happens on a route change */
		/*
		 * Case 2: This route may come from cloning, or a manual route
		 * add with a LL address.
		 */
		rt->rt_llinfo = ln = nd6_llinfo_alloc();
		if (ln == NULL) {
			log(LOG_DEBUG, "nd6_rtrequest: malloc failed\n");
			break;
		}
		/* Wire up the per-route llinfo callbacks. */
		rt->rt_llinfo_get_ri = nd6_llinfo_get_ri;
		rt->rt_llinfo_get_iflri = nd6_llinfo_get_iflri;
		rt->rt_llinfo_purge = nd6_llinfo_purge;
		rt->rt_llinfo_free = nd6_llinfo_free;

		nd6_inuse++;
		nd6_allocated++;
		Bzero(ln, sizeof(*ln));
		ln->ln_rt = rt;
		/* this is required for "ndp" command. - shin */
		if (req == RTM_ADD) {
			/*
			 * gate should have some valid AF_LINK entry,
			 * and ln->ln_expire should have some lifetime
			 * which is specified by ndp command.
			 */
			ln->ln_state = ND6_LLINFO_REACHABLE;
			ln->ln_byhint = 0;
		} else {
			/*
			 * When req == RTM_RESOLVE, rt is created and
			 * initialized in rtrequest(), so rt_expire is 0.
			 */
			ln->ln_state = ND6_LLINFO_NOSTATE;
			/* In case we're called before 1.0 sec. has elapsed */
			ln->ln_expire = MAX(timenow.tv_sec, 1);
		}
		rt->rt_flags |= RTF_LLINFO;
		LN_INSERTHEAD(ln);

		/*
		 * If we have too many cache entries, initiate immediate
		 * purging for some "less recently used" entries.  Note that
		 * we cannot directly call nd6_free() here because it would
		 * cause re-entering rtable related routines triggering an LOR
		 * problem.
		 */
		if (ip6_neighborgcthresh >= 0 &&
		    nd6_inuse >= ip6_neighborgcthresh) {
			int i;

			/* At most 10 LRU entries per invocation. */
			for (i = 0; i < 10 && llinfo_nd6.ln_prev != ln; i++) {
				struct llinfo_nd6 *ln_end = llinfo_nd6.ln_prev;
				struct rtentry *rt_end = ln_end->ln_rt;

				/* Move this entry to the head */
				RT_LOCK(rt_end);
				LN_DEQUEUE(ln_end);
				LN_INSERTHEAD(ln_end);

				/* ln_expire == 0 means a permanent entry. */
				if (ln_end->ln_expire == 0) {
					RT_UNLOCK(rt_end);
					continue;
				}
				if (ln_end->ln_state > ND6_LLINFO_INCOMPLETE)
					ln_end->ln_state = ND6_LLINFO_STALE;
				else
					ln_end->ln_state = ND6_LLINFO_PURGE;
				/* Expire immediately (current time). */
				ln_end->ln_expire = timenow.tv_sec;
				RT_UNLOCK(rt_end);
			}
		}

		/*
		 * check if rt_key(rt) is one of my address assigned
		 * to the interface.
		 */
		ifa = (struct ifaddr *)in6ifa_ifpwithaddr(rt->rt_ifp,
		    &SIN6(rt_key(rt))->sin6_addr);
		if (ifa) {
			/* Destination is a local address: mark permanent. */
			caddr_t macp = nd6_ifptomac(ifp);
			ln->ln_expire = 0;
			ln->ln_state = ND6_LLINFO_REACHABLE;
			ln->ln_byhint = 0;
			if (macp) {
				Bcopy(macp, LLADDR(SDL(gate)), ifp->if_addrlen);
				SDL(gate)->sdl_alen = ifp->if_addrlen;
			}
			if (nd6_useloopback) {
				if (rt->rt_ifp != lo_ifp) {
					/*
					 * Purge any link-layer info caching.
					 */
					if (rt->rt_llinfo_purge != NULL)
						rt->rt_llinfo_purge(rt);

					/*
					 * Adjust route ref count for the
					 * interfaces.
					 */
					if (rt->rt_if_ref_fn != NULL) {
						rt->rt_if_ref_fn(lo_ifp, 1);
						rt->rt_if_ref_fn(rt->rt_ifp, -1);
					}
				}
				rt->rt_ifp = lo_ifp;	/* XXX */
				/*
				 * Make sure rt_ifa be equal to the ifaddr
				 * corresponding to the address.
				 * We need this because when we refer
				 * rt_ifa->ia6_flags in ip6_input, we assume
				 * that the rt_ifa points to the address instead
				 * of the loopback address.
				 */
				if (ifa != rt->rt_ifa) {
					rtsetifa(rt, ifa);
				}
			}
			/* Drop the reference from in6ifa_ifpwithaddr(). */
			IFA_REMREF(ifa);
		} else if (rt->rt_flags & RTF_ANNOUNCE) {
			/* Proxy-ND entry: permanent and reachable. */
			ln->ln_expire = 0;
			ln->ln_state = ND6_LLINFO_REACHABLE;
			ln->ln_byhint = 0;

			/* join solicited node multicast for proxy ND */
			if (ifp->if_flags & IFF_MULTICAST) {
				struct in6_addr llsol;
				struct in6_multi *in6m;
				int error;

				/* Build ff02::1:ffXX:XXXX from the key. */
				llsol = SIN6(rt_key(rt))->sin6_addr;
				llsol.s6_addr32[0] = IPV6_ADDR_INT32_MLL;
				llsol.s6_addr32[1] = 0;
				llsol.s6_addr32[2] = htonl(1);
				llsol.s6_addr8[12] = 0xff;
				if (in6_setscope(&llsol, ifp, NULL))
					break;
				error = in6_mc_join(ifp, &llsol, NULL, &in6m, 0);
				if (error) {
					nd6log((LOG_ERR, "%s: failed to join "
					    "%s (errno=%d)\n", if_name(ifp),
					    ip6_sprintf(&llsol), error));
				} else {
					IN6M_REMREF(in6m);
				}
			}
		}
		break;

	case RTM_DELETE:
		if (ln == NULL)
			break;
		/* leave from solicited node multicast for proxy ND */
		if ((rt->rt_flags & RTF_ANNOUNCE) != 0 &&
		    (ifp->if_flags & IFF_MULTICAST) != 0) {
			struct in6_addr llsol;
			struct in6_multi *in6m;

			llsol = SIN6(rt_key(rt))->sin6_addr;
			llsol.s6_addr32[0] = IPV6_ADDR_INT32_MLL;
			llsol.s6_addr32[1] = 0;
			llsol.s6_addr32[2] = htonl(1);
			llsol.s6_addr8[12] = 0xff;
			if (in6_setscope(&llsol, ifp, NULL) == 0) {
				in6_multihead_lock_shared();
				IN6_LOOKUP_MULTI(&llsol, ifp, in6m);
				in6_multihead_lock_done();
				if (in6m != NULL) {
					in6_mc_leave(in6m, NULL);
					IN6M_REMREF(in6m);
				}
			}
		}
		nd6_inuse--;
		/*
		 * Unchain it but defer the actual freeing until the route
		 * itself is to be freed.  rt->rt_llinfo still points to
		 * llinfo_nd6, and likewise, ln->ln_rt still points to this
		 * route entry, except that RTF_LLINFO is now cleared.
		 */
		if (ln->ln_flags & ND6_LNF_IN_USE)
			LN_DEQUEUE(ln);

		/*
		 * Purge any link-layer info caching.
		 */
		if (rt->rt_llinfo_purge != NULL)
			rt->rt_llinfo_purge(rt);

		rt->rt_flags &= ~RTF_LLINFO;
		/* Drop any packet queued awaiting resolution. */
		if (ln->ln_hold != NULL) {
			m_freem(ln->ln_hold);
			ln->ln_hold = NULL;
		}
	}
}
2113 | ||
2114 | static int | |
2115 | nd6_siocgdrlst(void *data, int data_is_64) | |
2116 | { | |
2117 | struct in6_drlist_32 *drl_32; | |
2118 | struct nd_defrouter *dr; | |
2119 | int i = 0; | |
2120 | ||
2121 | lck_mtx_assert(nd6_mutex, LCK_MTX_ASSERT_OWNED); | |
2122 | ||
2123 | dr = TAILQ_FIRST(&nd_defrouter); | |
2124 | ||
2125 | /* For 64-bit process */ | |
2126 | if (data_is_64) { | |
2127 | struct in6_drlist_64 *drl_64; | |
2128 | ||
2129 | drl_64 = _MALLOC(sizeof (*drl_64), M_TEMP, M_WAITOK|M_ZERO); | |
2130 | if (drl_64 == NULL) | |
2131 | return (ENOMEM); | |
2132 | ||
2133 | /* preserve the interface name */ | |
2134 | bcopy(data, drl_64, sizeof (drl_64->ifname)); | |
2135 | ||
2136 | while (dr && i < DRLSTSIZ) { | |
2137 | drl_64->defrouter[i].rtaddr = dr->rtaddr; | |
2138 | if (IN6_IS_ADDR_LINKLOCAL(&drl_64->defrouter[i].rtaddr)) { | |
2139 | /* XXX: need to this hack for KAME stack */ | |
2140 | drl_64->defrouter[i].rtaddr.s6_addr16[1] = 0; | |
2141 | } else { | |
2142 | log(LOG_ERR, | |
2143 | "default router list contains a " | |
2144 | "non-linklocal address(%s)\n", | |
2145 | ip6_sprintf(&drl_64->defrouter[i].rtaddr)); | |
2146 | } | |
2147 | drl_64->defrouter[i].flags = dr->flags; | |
2148 | drl_64->defrouter[i].rtlifetime = dr->rtlifetime; | |
2149 | drl_64->defrouter[i].expire = dr->expire; | |
2150 | drl_64->defrouter[i].if_index = dr->ifp->if_index; | |
2151 | i++; | |
2152 | dr = TAILQ_NEXT(dr, dr_entry); | |
2153 | } | |
2154 | bcopy(drl_64, data, sizeof (*drl_64)); | |
2155 | _FREE(drl_64, M_TEMP); | |
2156 | return (0); | |
2157 | } | |
2158 | ||
2159 | /* For 32-bit process */ | |
2160 | drl_32 = _MALLOC(sizeof (*drl_32), M_TEMP, M_WAITOK|M_ZERO); | |
2161 | if (drl_32 == NULL) | |
2162 | return (ENOMEM); | |
2163 | ||
2164 | /* preserve the interface name */ | |
2165 | bcopy(data, drl_32, sizeof (drl_32->ifname)); | |
2166 | ||
2167 | while (dr && i < DRLSTSIZ) { | |
2168 | drl_32->defrouter[i].rtaddr = dr->rtaddr; | |
2169 | if (IN6_IS_ADDR_LINKLOCAL(&drl_32->defrouter[i].rtaddr)) { | |
2170 | /* XXX: need to this hack for KAME stack */ | |
2171 | drl_32->defrouter[i].rtaddr.s6_addr16[1] = 0; | |
2172 | } else { | |
2173 | log(LOG_ERR, | |
2174 | "default router list contains a " | |
2175 | "non-linklocal address(%s)\n", | |
2176 | ip6_sprintf(&drl_32->defrouter[i].rtaddr)); | |
2177 | } | |
2178 | drl_32->defrouter[i].flags = dr->flags; | |
2179 | drl_32->defrouter[i].rtlifetime = dr->rtlifetime; | |
2180 | drl_32->defrouter[i].expire = dr->expire; | |
2181 | drl_32->defrouter[i].if_index = dr->ifp->if_index; | |
2182 | i++; | |
2183 | dr = TAILQ_NEXT(dr, dr_entry); | |
2184 | } | |
2185 | bcopy(drl_32, data, sizeof (*drl_32)); | |
2186 | _FREE(drl_32, M_TEMP); | |
2187 | return (0); | |
2188 | } | |
2189 | ||
/*
 * XXX meaning of fields, especially "raflags", is very
 * different between RA prefix list and RR/static prefix list.
 * how about separating ioctls into two?
 */
2195 | static int | |
2196 | nd6_siocgprlst(void *data, int data_is_64) | |
2197 | { | |
2198 | struct in6_prlist_32 *prl_32; | |
2199 | struct nd_prefix *pr; | |
2200 | int i = 0; | |
2201 | ||
2202 | lck_mtx_assert(nd6_mutex, LCK_MTX_ASSERT_OWNED); | |
2203 | ||
2204 | pr = nd_prefix.lh_first; | |
2205 | ||
2206 | /* For 64-bit process */ | |
2207 | if (data_is_64) { | |
2208 | struct in6_prlist_64 *prl_64; | |
2209 | ||
2210 | prl_64 = _MALLOC(sizeof (*prl_64), M_TEMP, M_WAITOK|M_ZERO); | |
2211 | if (prl_64 == NULL) | |
2212 | return (ENOMEM); | |
2213 | ||
2214 | /* preserve the interface name */ | |
2215 | bcopy(data, prl_64, sizeof (prl_64->ifname)); | |
2216 | ||
2217 | while (pr && i < PRLSTSIZ) { | |
2218 | struct nd_pfxrouter *pfr; | |
2219 | int j; | |
2220 | ||
2221 | NDPR_LOCK(pr); | |
2222 | (void) in6_embedscope(&prl_64->prefix[i].prefix, | |
2223 | &pr->ndpr_prefix, NULL, NULL, NULL); | |
2224 | prl_64->prefix[i].raflags = pr->ndpr_raf; | |
2225 | prl_64->prefix[i].prefixlen = pr->ndpr_plen; | |
2226 | prl_64->prefix[i].vltime = pr->ndpr_vltime; | |
2227 | prl_64->prefix[i].pltime = pr->ndpr_pltime; | |
2228 | prl_64->prefix[i].if_index = pr->ndpr_ifp->if_index; | |
2229 | prl_64->prefix[i].expire = pr->ndpr_expire; | |
2230 | ||
2231 | pfr = pr->ndpr_advrtrs.lh_first; | |
2232 | j = 0; | |
2233 | while (pfr) { | |
2234 | if (j < DRLSTSIZ) { | |
2235 | #define RTRADDR prl_64->prefix[i].advrtr[j] | |
2236 | RTRADDR = pfr->router->rtaddr; | |
2237 | if (IN6_IS_ADDR_LINKLOCAL(&RTRADDR)) { | |
2238 | /* XXX: hack for KAME */ | |
2239 | RTRADDR.s6_addr16[1] = 0; | |
2240 | } else { | |
2241 | log(LOG_ERR, | |
2242 | "a router(%s) advertises " | |
2243 | "a prefix with " | |
2244 | "non-link local address\n", | |
2245 | ip6_sprintf(&RTRADDR)); | |
2246 | } | |
2247 | #undef RTRADDR | |
2248 | } | |
2249 | j++; | |
2250 | pfr = pfr->pfr_next; | |
2251 | } | |
2252 | prl_64->prefix[i].advrtrs = j; | |
2253 | prl_64->prefix[i].origin = PR_ORIG_RA; | |
2254 | NDPR_UNLOCK(pr); | |
2255 | ||
2256 | i++; | |
2257 | pr = pr->ndpr_next; | |
2258 | } | |
2259 | bcopy(prl_64, data, sizeof (*prl_64)); | |
2260 | _FREE(prl_64, M_TEMP); | |
2261 | return (0); | |
2262 | } | |
2263 | ||
2264 | /* For 32-bit process */ | |
2265 | prl_32 = _MALLOC(sizeof (*prl_32), M_TEMP, M_WAITOK|M_ZERO); | |
2266 | if (prl_32 == NULL) | |
2267 | return (ENOMEM); | |
2268 | ||
2269 | /* preserve the interface name */ | |
2270 | bcopy(data, prl_32, sizeof (prl_32->ifname)); | |
2271 | ||
2272 | while (pr && i < PRLSTSIZ) { | |
2273 | struct nd_pfxrouter *pfr; | |
2274 | int j; | |
2275 | ||
2276 | NDPR_LOCK(pr); | |
2277 | (void) in6_embedscope(&prl_32->prefix[i].prefix, | |
2278 | &pr->ndpr_prefix, NULL, NULL, NULL); | |
2279 | prl_32->prefix[i].raflags = pr->ndpr_raf; | |
2280 | prl_32->prefix[i].prefixlen = pr->ndpr_plen; | |
2281 | prl_32->prefix[i].vltime = pr->ndpr_vltime; | |
2282 | prl_32->prefix[i].pltime = pr->ndpr_pltime; | |
2283 | prl_32->prefix[i].if_index = pr->ndpr_ifp->if_index; | |
2284 | prl_32->prefix[i].expire = pr->ndpr_expire; | |
2285 | ||
2286 | pfr = pr->ndpr_advrtrs.lh_first; | |
2287 | j = 0; | |
2288 | while (pfr) { | |
2289 | if (j < DRLSTSIZ) { | |
2290 | #define RTRADDR prl_32->prefix[i].advrtr[j] | |
2291 | RTRADDR = pfr->router->rtaddr; | |
2292 | if (IN6_IS_ADDR_LINKLOCAL(&RTRADDR)) { | |
2293 | /* XXX: hack for KAME */ | |
2294 | RTRADDR.s6_addr16[1] = 0; | |
2295 | } else { | |
2296 | log(LOG_ERR, | |
2297 | "a router(%s) advertises " | |
2298 | "a prefix with " | |
2299 | "non-link local address\n", | |
2300 | ip6_sprintf(&RTRADDR)); | |
2301 | } | |
2302 | #undef RTRADDR | |
2303 | } | |
2304 | j++; | |
2305 | pfr = pfr->pfr_next; | |
2306 | } | |
2307 | prl_32->prefix[i].advrtrs = j; | |
2308 | prl_32->prefix[i].origin = PR_ORIG_RA; | |
2309 | NDPR_UNLOCK(pr); | |
2310 | ||
2311 | i++; | |
2312 | pr = pr->ndpr_next; | |
2313 | } | |
2314 | bcopy(prl_32, data, sizeof (*prl_32)); | |
2315 | _FREE(prl_32, M_TEMP); | |
2316 | return (0); | |
2317 | } | |
2318 | ||
/*
 * Neighbor-discovery ioctl dispatcher for the cases handled in this
 * file: default-router / prefix list retrieval and flushing, per-
 * interface ND information, per-neighbor info lookup, and the ND
 * default-interface get/set commands.
 *
 * 'data' points at a command-specific request structure (see the
 * per-case comments); 'ifp' is the interface the ioctl was issued on.
 * Returns 0 or an errno value.
 */
int
nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp)
{
	struct nd_defrouter *dr;
	struct nd_prefix *pr;
	struct rtentry *rt;
	/* 'i' indexes the global nd_ifinfo[] array by interface index. */
	int i = ifp->if_index, error = 0;

	switch (cmd) {
	case SIOCGDRLST_IN6_32:		/* struct in6_drlist_32 */
	case SIOCGDRLST_IN6_64:		/* struct in6_drlist_64 */
		/*
		 * obsolete API, use sysctl under net.inet6.icmp6
		 */
		lck_mtx_lock(nd6_mutex);
		error = nd6_siocgdrlst(data, cmd == SIOCGDRLST_IN6_64);
		lck_mtx_unlock(nd6_mutex);
		break;

	case SIOCGPRLST_IN6_32:		/* struct in6_prlist_32 */
	case SIOCGPRLST_IN6_64:		/* struct in6_prlist_64 */
		/*
		 * obsolete API, use sysctl under net.inet6.icmp6
		 */
		lck_mtx_lock(nd6_mutex);
		error = nd6_siocgprlst(data, cmd == SIOCGPRLST_IN6_64);
		lck_mtx_unlock(nd6_mutex);
		break;

	case OSIOCGIFINFO_IN6:		/* struct in6_ondireq */
	case SIOCGIFINFO_IN6: {		/* struct in6_ondireq */
		u_int32_t linkmtu;
		struct in6_ondireq *ondi = (struct in6_ondireq *)(void *)data;
		struct nd_ifinfo *ndi;
		/*
		 * SIOCGIFINFO_IN6 ioctl is encoded with in6_ondireq
		 * instead of in6_ndireq, so we treat it as such.
		 */
		lck_rw_lock_shared(nd_if_rwlock);
		ndi = ND_IFINFO(ifp);
		/*
		 * NOTE(review): ndi->initialized is dereferenced after only
		 * the nd_ifinfo/index-range checks — this assumes
		 * ND_IFINFO(ifp) is non-NULL whenever those hold; verify.
		 */
		if (!nd_ifinfo || i >= nd_ifinfo_indexlim ||
		    !ndi->initialized) {
			lck_rw_done(nd_if_rwlock);
			error = EINVAL;
			break;
		}
		lck_mtx_lock(&ndi->lock);
		linkmtu = IN6_LINKMTU(ifp);
		/* Copy each field individually; layouts may not match. */
		bcopy(&linkmtu, &ondi->ndi.linkmtu, sizeof (linkmtu));
		bcopy(&nd_ifinfo[i].maxmtu, &ondi->ndi.maxmtu,
		    sizeof (u_int32_t));
		bcopy(&nd_ifinfo[i].basereachable, &ondi->ndi.basereachable,
		    sizeof (u_int32_t));
		bcopy(&nd_ifinfo[i].reachable, &ondi->ndi.reachable,
		    sizeof (u_int32_t));
		bcopy(&nd_ifinfo[i].retrans, &ondi->ndi.retrans,
		    sizeof (u_int32_t));
		bcopy(&nd_ifinfo[i].flags, &ondi->ndi.flags,
		    sizeof (u_int32_t));
		bcopy(&nd_ifinfo[i].recalctm, &ondi->ndi.recalctm,
		    sizeof (int));
		ondi->ndi.chlim = nd_ifinfo[i].chlim;
		/* receivedra is not tracked; always reported as 0. */
		ondi->ndi.receivedra = 0;
		lck_mtx_unlock(&ndi->lock);
		lck_rw_done(nd_if_rwlock);
		break;
	}

	case SIOCSIFINFO_FLAGS: {	/* struct in6_ndireq */
		struct in6_ndireq *cndi = (struct in6_ndireq *)(void *)data;
		u_int32_t oflags, flags;
		struct nd_ifinfo *ndi;

		/* XXX: almost all other fields of cndi->ndi is unused */
		lck_rw_lock_shared(nd_if_rwlock);
		ndi = ND_IFINFO(ifp);
		if (!nd_ifinfo || i >= nd_ifinfo_indexlim ||
		    !ndi->initialized) {
			lck_rw_done(nd_if_rwlock);
			error = EINVAL;
			break;
		}
		lck_mtx_lock(&ndi->lock);
		/* Snapshot old flags, install new ones from userland. */
		oflags = nd_ifinfo[i].flags;
		bcopy(&cndi->ndi.flags, &nd_ifinfo[i].flags, sizeof (flags));
		flags = nd_ifinfo[i].flags;
		lck_mtx_unlock(&ndi->lock);
		lck_rw_done(nd_if_rwlock);

		/* Nothing changed; skip the (unlocked) notification call. */
		if (oflags == flags)
			break;

		error = nd6_setifinfo(ifp, oflags, flags);
		break;
	}

	case SIOCSNDFLUSH_IN6:		/* struct in6_ifreq */
		/* flush default router list */
		/*
		 * xxx sumikawa: should not delete route if default
		 * route equals to the top of default router list
		 */
		lck_mtx_lock(nd6_mutex);
		defrouter_reset();
		defrouter_select(ifp);
		lck_mtx_unlock(nd6_mutex);
		/* xxx sumikawa: flush prefix list */
		break;

	case SIOCSPFXFLUSH_IN6: {	/* struct in6_ifreq */
		/* flush all the prefix advertised by routers */
		struct nd_prefix *next;

		lck_mtx_lock(nd6_mutex);
		for (pr = nd_prefix.lh_first; pr; pr = next) {
			struct in6_ifaddr *ia;

			next = pr->ndpr_next;

			NDPR_LOCK(pr);
			/* Link-local prefixes are never flushed. */
			if (IN6_IS_ADDR_LINKLOCAL(&pr->ndpr_prefix.sin6_addr)) {
				NDPR_UNLOCK(pr);
				continue;	/* XXX */
			}
			/* lo_ifp means "flush for all interfaces". */
			if (ifp != lo_ifp && pr->ndpr_ifp != ifp) {
				NDPR_UNLOCK(pr);
				continue;
			}
			/* do we really have to remove addresses as well? */
			NDPR_ADDREF_LOCKED(pr);
			NDPR_UNLOCK(pr);
			lck_rw_lock_exclusive(&in6_ifaddr_rwlock);
			ia = in6_ifaddrs;
			/* Purge autoconf addresses derived from this prefix. */
			while (ia != NULL) {
				IFA_LOCK(&ia->ia_ifa);
				if ((ia->ia6_flags & IN6_IFF_AUTOCONF) == 0) {
					IFA_UNLOCK(&ia->ia_ifa);
					ia = ia->ia_next;
					continue;
				}

				if (ia->ia6_ndpr == pr) {
					/*
					 * in6_purgeaddr() must run without
					 * nd6_mutex/in6_ifaddr_rwlock held;
					 * hold a ref on ia across the drop.
					 */
					IFA_ADDREF_LOCKED(&ia->ia_ifa);
					IFA_UNLOCK(&ia->ia_ifa);
					lck_rw_done(&in6_ifaddr_rwlock);
					lck_mtx_unlock(nd6_mutex);
					in6_purgeaddr(&ia->ia_ifa);
					IFA_REMREF(&ia->ia_ifa);
					lck_mtx_lock(nd6_mutex);
					lck_rw_lock_exclusive(
					    &in6_ifaddr_rwlock);
					/*
					 * Purging the address caused
					 * in6_ifaddr_rwlock to be
					 * dropped and
					 * reacquired; therefore search again
					 * from the beginning of in6_ifaddrs.
					 * The same applies for the prefix list.
					 */
					ia = in6_ifaddrs;
					next = nd_prefix.lh_first;
					continue;

				}
				IFA_UNLOCK(&ia->ia_ifa);
				ia = ia->ia_next;
			}
			lck_rw_done(&in6_ifaddr_rwlock);
			NDPR_LOCK(pr);
			prelist_remove(pr);
			NDPR_UNLOCK(pr);
			/*
			 * If we were trying to restart this loop
			 * above by changing the value of 'next', we might
			 * end up freeing the only element on the list
			 * when we call NDPR_REMREF().
			 * When this happens, we also have get out of this
			 * loop because we have nothing else to do.
			 */
			if (pr == next)
				next = NULL;
			NDPR_REMREF(pr);
		}
		lck_mtx_unlock(nd6_mutex);
		break;
	}

	case SIOCSRTRFLUSH_IN6: {	/* struct in6_ifreq */
		/* flush all the default routers */
		struct nd_defrouter *next;

		lck_mtx_lock(nd6_mutex);
		if ((dr = TAILQ_FIRST(&nd_defrouter)) != NULL) {
			/*
			 * The first entry of the list may be stored in
			 * the routing table, so we'll delete it later.
			 */
			for (dr = TAILQ_NEXT(dr, dr_entry); dr; dr = next) {
				next = TAILQ_NEXT(dr, dr_entry);
				/* lo_ifp means "flush for all interfaces". */
				if (ifp == lo_ifp || dr->ifp == ifp)
					defrtrlist_del(dr);
			}
			/* Now handle the deferred head entry. */
			if (ifp == lo_ifp ||
			    TAILQ_FIRST(&nd_defrouter)->ifp == ifp)
				defrtrlist_del(TAILQ_FIRST(&nd_defrouter));
		}
		lck_mtx_unlock(nd6_mutex);
		break;
	}

	case SIOCGNBRINFO_IN6_32: {	/* struct in6_nbrinfo_32 */
		struct llinfo_nd6 *ln;
		struct in6_nbrinfo_32 nbi_32;
		struct in6_addr nb_addr;	/* make local for safety */

		bcopy(data, &nbi_32, sizeof (nbi_32));
		nb_addr = nbi_32.addr;
		/*
		 * XXX: KAME specific hack for scoped addresses
		 * XXXX: for other scopes than link-local?
		 */
		if (IN6_IS_ADDR_LINKLOCAL(&nbi_32.addr) ||
		    IN6_IS_ADDR_MC_LINKLOCAL(&nbi_32.addr)) {
			u_int16_t *idp =
			    (u_int16_t *)(void *)&nb_addr.s6_addr[2];

			/* Embed the interface index as the scope id. */
			if (*idp == 0)
				*idp = htons(ifp->if_index);
		}

		/* Callee returns a locked route upon success */
		if ((rt = nd6_lookup(&nb_addr, 0, ifp, 0)) == NULL) {
			error = EINVAL;
			break;
		}
		RT_LOCK_ASSERT_HELD(rt);
		ln = rt->rt_llinfo;
		nbi_32.state = ln->ln_state;
		nbi_32.asked = ln->ln_asked;
		nbi_32.isrouter = ln->ln_router;
		nbi_32.expire = ln->ln_expire;
		RT_REMREF_LOCKED(rt);
		RT_UNLOCK(rt);
		bcopy(&nbi_32, data, sizeof (nbi_32));
		break;
	}

	case SIOCGNBRINFO_IN6_64: {	/* struct in6_nbrinfo_64 */
		struct llinfo_nd6 *ln;
		struct in6_nbrinfo_64 nbi_64;
		struct in6_addr nb_addr;	/* make local for safety */

		bcopy(data, &nbi_64, sizeof (nbi_64));
		nb_addr = nbi_64.addr;
		/*
		 * XXX: KAME specific hack for scoped addresses
		 * XXXX: for other scopes than link-local?
		 */
		if (IN6_IS_ADDR_LINKLOCAL(&nbi_64.addr) ||
		    IN6_IS_ADDR_MC_LINKLOCAL(&nbi_64.addr)) {
			u_int16_t *idp =
			    (u_int16_t *)(void *)&nb_addr.s6_addr[2];

			/* Embed the interface index as the scope id. */
			if (*idp == 0)
				*idp = htons(ifp->if_index);
		}

		/* Callee returns a locked route upon success */
		if ((rt = nd6_lookup(&nb_addr, 0, ifp, 0)) == NULL) {
			error = EINVAL;
			break;
		}
		RT_LOCK_ASSERT_HELD(rt);
		ln = rt->rt_llinfo;
		nbi_64.state = ln->ln_state;
		nbi_64.asked = ln->ln_asked;
		nbi_64.isrouter = ln->ln_router;
		nbi_64.expire = ln->ln_expire;
		RT_REMREF_LOCKED(rt);
		RT_UNLOCK(rt);
		bcopy(&nbi_64, data, sizeof (nbi_64));
		break;
	}

	case SIOCGDEFIFACE_IN6_32:	/* struct in6_ndifreq_32 */
	case SIOCGDEFIFACE_IN6_64: {	/* struct in6_ndifreq_64 */
		struct in6_ndifreq_64 *ndif_64 =
		    (struct in6_ndifreq_64 *)(void *)data;
		struct in6_ndifreq_32 *ndif_32 =
		    (struct in6_ndifreq_32 *)(void *)data;

		if (cmd == SIOCGDEFIFACE_IN6_64) {
			/* Widen to the 64-bit userland field. */
			u_int64_t j = nd6_defifindex;
			bcopy(&j, &ndif_64->ifindex, sizeof (j));
		} else {
			bcopy(&nd6_defifindex, &ndif_32->ifindex,
			    sizeof (u_int32_t));
		}
		break;
	}

	case SIOCSDEFIFACE_IN6_32:	/* struct in6_ndifreq_32 */
	case SIOCSDEFIFACE_IN6_64: {	/* struct in6_ndifreq_64 */
		struct in6_ndifreq_64 *ndif_64 =
		    (struct in6_ndifreq_64 *)(void *)data;
		struct in6_ndifreq_32 *ndif_32 =
		    (struct in6_ndifreq_32 *)(void *)data;
		u_int32_t idx;

		if (cmd == SIOCSDEFIFACE_IN6_64) {
			/* Narrow the 64-bit userland field. */
			u_int64_t j;
			bcopy(&ndif_64->ifindex, &j, sizeof (j));
			idx = (u_int32_t)j;
		} else {
			bcopy(&ndif_32->ifindex, &idx, sizeof (idx));
		}

		/* Returns directly, bypassing the common exit path. */
		error = nd6_setdefaultiface(idx);
		return (error);
		/* NOTREACHED */
	}
	}
	return (error);
}
2642 | ||
/*
 * Create a neighbor cache entry and cache the link-layer address,
 * on reception of inbound ND6 packets (RS/RA/NS/redirect).
 *
 * ifp:       interface the ND packet arrived on; must not be NULL.
 * from:      IPv6 source address of the sender; must not be NULL.
 * lladdr:    source link-layer address option from the packet, or NULL
 *            if the packet carried none.
 * lladdrlen: length of lladdr; validated by the caller (see below).
 * type:      ICMP6 message type (only the low 8 bits are examined).
 * code:      type-dependent information (used only for ND_REDIRECT).
 *
 * Returns with no route locks held.  May trigger default router
 * selection when a router entry was updated.
 */
void
nd6_cache_lladdr(
	struct ifnet *ifp,
	struct in6_addr *from,
	char *lladdr,
	__unused int lladdrlen,
	int type,	/* ICMP6 type */
	int code)	/* type dependent information */
{
	struct rtentry *rt = NULL;
	struct llinfo_nd6 *ln = NULL;
	int is_newentry;
	struct sockaddr_dl *sdl = NULL;
	int do_update;
	int olladdr;		/* entry already had a recorded lladdr? */
	int llchange;		/* recorded lladdr differs from packet's? */
	int newstate = 0;
	struct timeval timenow;

	if (ifp == NULL)
		panic("ifp == NULL in nd6_cache_lladdr");
	if (from == NULL)
		panic("from == NULL in nd6_cache_lladdr");

	/* nothing must be updated for unspecified address */
	if (IN6_IS_ADDR_UNSPECIFIED(from))
		return;

	/*
	 * Validation about ifp->if_addrlen and lladdrlen must be done in
	 * the caller.
	 *
	 * XXX If the link does not have link-layer address, what should
	 * we do? (ifp->if_addrlen == 0)
	 * Spec says nothing in sections for RA, RS and NA.  There's small
	 * description on it in NS section (RFC 2461 7.2.3).
	 */
	getmicrotime(&timenow);

	/*
	 * First try a plain lookup; if the entry doesn't exist, retry with
	 * the "create" flag set.  Either way, a non-NULL result is returned
	 * locked with a reference held (asserted below, released at exit).
	 */
	rt = nd6_lookup(from, 0, ifp, 0);
	if (rt == NULL) {
		if ((rt = nd6_lookup(from, 1, ifp, 0)) == NULL)
			return;
		RT_LOCK_ASSERT_HELD(rt);
		is_newentry = 1;
	} else {
		RT_LOCK_ASSERT_HELD(rt);
		/* do nothing if static ndp is set */
		if (rt->rt_flags & RTF_STATIC) {
			RT_REMREF_LOCKED(rt);
			RT_UNLOCK(rt);
			return;
		}
		is_newentry = 0;
	}

	if (rt == NULL)
		return;
	if ((rt->rt_flags & (RTF_GATEWAY | RTF_LLINFO)) != RTF_LLINFO) {
fail:
		/*
		 * Entry is unusable as a neighbor cache entry: unlock,
		 * detach its ND state, and drop our reference.
		 */
		RT_UNLOCK(rt);
		nd6_free(rt);
		rtfree(rt);
		return;
	}
	ln = (struct llinfo_nd6 *)rt->rt_llinfo;
	if (ln == NULL)
		goto fail;
	if (rt->rt_gateway == NULL)
		goto fail;
	if (rt->rt_gateway->sa_family != AF_LINK)
		goto fail;
	sdl = SDL(rt->rt_gateway);

	olladdr = (sdl->sdl_alen) ? 1 : 0;
	if (olladdr && lladdr) {
		if (bcmp(lladdr, LLADDR(sdl), ifp->if_addrlen))
			llchange = 1;
		else
			llchange = 0;
	} else
		llchange = 0;

	/*
	 * newentry olladdr  lladdr  llchange	(*=record)
	 *	0	n	n	--	(1)
	 *	0	y	n	--	(2)
	 *	0	n	y	--	(3) * STALE
	 *	0	y	y	n	(4) *
	 *	0	y	y	y	(5) * STALE
	 *	1	--	n	--	(6)   NOSTATE(= PASSIVE)
	 *	1	--	y	--	(7) * STALE
	 */

	if (lladdr) {		/* (3-5) and (7) */
		/*
		 * Record source link-layer address
		 * XXX is it dependent to ifp->if_type?
		 */
		sdl->sdl_alen = ifp->if_addrlen;
		bcopy(lladdr, LLADDR(sdl), ifp->if_addrlen);

		/* cache the gateway (sender HW) address */
		nd6_llreach_alloc(rt, ifp, LLADDR(sdl), sdl->sdl_alen, FALSE);
	}

	if (!is_newentry) {
		if ((!olladdr && lladdr != NULL) ||	/* (3) */
		    (olladdr && lladdr != NULL && llchange)) {	/* (5) */
			do_update = 1;
			newstate = ND6_LLINFO_STALE;
		} else					/* (1-2,4) */
			do_update = 0;
	} else {
		do_update = 1;
		if (lladdr == NULL)			/* (6) */
			newstate = ND6_LLINFO_NOSTATE;
		else					/* (7) */
			newstate = ND6_LLINFO_STALE;
	}

	if (do_update) {
		/*
		 * Update the state of the neighbor cache.
		 */
		ln->ln_state = newstate;

		if (ln->ln_state == ND6_LLINFO_STALE) {
			struct mbuf *m = ln->ln_hold;
			/*
			 * XXX: since nd6_output() below will cause
			 * state transition to DELAY and reset the timer,
			 * we must set the timer now, although it is actually
			 * meaningless.
			 */
			ln->ln_expire = timenow.tv_sec + nd6_gctimer;
			ln->ln_hold = NULL;

			if (m != NULL) {
				struct sockaddr_in6 sin6;

				rtkey_to_sa6(rt, &sin6);
				/*
				 * we assume ifp is not a p2p here, so just
				 * set the 2nd argument as the 1st one.
				 * (nd6_output() must be called unlocked;
				 * we still hold a reference on rt.)
				 */
				RT_UNLOCK(rt);
				nd6_output(ifp, ifp, m, &sin6, rt, NULL);
				RT_LOCK(rt);
			}
		} else if (ln->ln_state == ND6_LLINFO_INCOMPLETE) {
			/* probe right away */
			ln->ln_expire = timenow.tv_sec;
		}
	}

	/*
	 * ICMP6 type dependent behavior.
	 *
	 * NS: clear IsRouter if new entry
	 * RS: clear IsRouter
	 * RA: set IsRouter if there's lladdr
	 * redir: clear IsRouter if new entry
	 *
	 * RA case, (1):
	 * The spec says that we must set IsRouter in the following cases:
	 * - If lladdr exist, set IsRouter.  This means (1-5).
	 * - If it is old entry (!newentry), set IsRouter.  This means (7).
	 * So, based on the spec, in (1-5) and (7) cases we must set IsRouter.
	 * A question arises for (1) case.  (1) case has no lladdr in the
	 * neighbor cache, this is similar to (6).
	 * This case is rare but we figured that we MUST NOT set IsRouter.
	 *
	 * newentry olladdr  lladdr  llchange	    NS  RS  RA	redir
	 *							D R
	 *	0	n	n	--	(1)	c   ?     s
	 *	0	y	n	--	(2)	c   s     s
	 *	0	n	y	--	(3)	c   s     s
	 *	0	y	y	n	(4)	c   s     s
	 *	0	y	y	y	(5)	c   s     s
	 *	1	--	n	--	(6) c	c	c s
	 *	1	--	y	--	(7) c	c   s	c s
	 *
	 *					(c=clear s=set)
	 */
	switch (type & 0xff) {
	case ND_NEIGHBOR_SOLICIT:
		/*
		 * New entry must have is_router flag cleared.
		 */
		if (is_newentry)	/* (6-7) */
			ln->ln_router = 0;
		break;
	case ND_REDIRECT:
		/*
		 * If the icmp is a redirect to a better router, always set the
		 * is_router flag.  Otherwise, if the entry is newly created,
		 * clear the flag.  [RFC 2461, sec 8.3]
		 */
		if (code == ND_REDIRECT_ROUTER)
			ln->ln_router = 1;
		else if (is_newentry) /* (6-7) */
			ln->ln_router = 0;
		break;
	case ND_ROUTER_SOLICIT:
		/*
		 * is_router flag must always be cleared.
		 */
		ln->ln_router = 0;
		break;
	case ND_ROUTER_ADVERT:
		/*
		 * Mark an entry with lladdr as a router.
		 */
		if ((!is_newentry && (olladdr || lladdr)) || /* (2-5) */
		    (is_newentry && lladdr)) {		     /* (7) */
			ln->ln_router = 1;
		}
		break;
	}

	/*
	 * When the link-layer address of a router changes, select the
	 * best router again.  In particular, when the neighbor entry is newly
	 * created, it might affect the selection policy.
	 * Question: can we restrict the first condition to the "is_newentry"
	 * case?
	 *
	 * Note: Perform default router selection even when we are a router,
	 * if Scoped Routing is enabled.
	 */
	if (do_update && ln->ln_router &&
	    (ip6_doscopedroute || !ip6_forwarding)) {
		/* release rt before taking nd6_mutex (lock ordering) */
		RT_REMREF_LOCKED(rt);
		RT_UNLOCK(rt);
		lck_mtx_lock(nd6_mutex);
		defrouter_select(ifp);
		lck_mtx_unlock(nd6_mutex);
	} else {
		RT_REMREF_LOCKED(rt);
		RT_UNLOCK(rt);
	}
}
2890 | ||
2891 | static void | |
2892 | nd6_slowtimo( | |
2893 | __unused void *ignored_arg) | |
2894 | { | |
2895 | int i; | |
2896 | struct nd_ifinfo *nd6if; | |
2897 | ||
2898 | lck_rw_lock_shared(nd_if_rwlock); | |
2899 | for (i = 1; i < if_index + 1; i++) { | |
2900 | if (!nd_ifinfo || i >= nd_ifinfo_indexlim) | |
2901 | break; | |
2902 | nd6if = &nd_ifinfo[i]; | |
2903 | if (!nd6if->initialized) | |
2904 | break; | |
2905 | lck_mtx_lock(&nd6if->lock); | |
2906 | if (nd6if->basereachable && /* already initialized */ | |
2907 | (nd6if->recalctm -= ND6_SLOWTIMER_INTERVAL) <= 0) { | |
2908 | /* | |
2909 | * Since reachable time rarely changes by router | |
2910 | * advertisements, we SHOULD insure that a new random | |
2911 | * value gets recomputed at least once every few hours. | |
2912 | * (RFC 2461, 6.3.4) | |
2913 | */ | |
2914 | nd6if->recalctm = nd6_recalc_reachtm_interval; | |
2915 | nd6if->reachable = ND_COMPUTE_RTIME(nd6if->basereachable); | |
2916 | } | |
2917 | lck_mtx_unlock(&nd6if->lock); | |
2918 | } | |
2919 | lck_rw_done(nd_if_rwlock); | |
2920 | timeout(nd6_slowtimo, (caddr_t)0, ND6_SLOWTIMER_INTERVAL * hz); | |
2921 | } | |
2922 | ||
#define senderr(e) { error = (e); goto bad;}
/*
 * ND6 output routine: determine the next hop for "dst", perform
 * link-layer address resolution / Neighbor Unreachability Detection on
 * it as needed, and hand the packet to dlil_output().  If the neighbor
 * is still INCOMPLETE, the packet is parked in ln_hold and an NS is
 * sent instead.
 *
 * ifp:     outgoing interface.
 * origifp: originating interface (used for loopback output).
 * m0:      the packet; consumed on both success and failure.
 * dst:     final IPv6 destination.
 * hint0:   optional route hint from the caller; never freed via
 *          rtfree() directly (see comment below), only unreferenced.
 * adv:     flow advisory result from dlil_output(), may be NULL.
 *
 * Returns 0 or an errno; on error the packet has been freed.
 */
int
nd6_output(struct ifnet *ifp, struct ifnet *origifp, struct mbuf *m0,
    struct sockaddr_in6 *dst, struct rtentry *hint0, struct flowadv *adv)
{
	struct mbuf *m = m0;
	struct rtentry *rt = hint0, *hint = hint0;
	struct llinfo_nd6 *ln = NULL;
	int error = 0;
	struct timeval timenow;
	struct rtentry *rtrele = NULL;	/* extra route to release at the end */
	struct nd_ifinfo *ndi;

	if (rt != NULL) {
		RT_LOCK_SPIN(rt);
		RT_ADDREF_LOCKED(rt);	/* extra ref, dropped in "release" */
	}

	/* multicast and non-ND links skip neighbor resolution entirely */
	if (IN6_IS_ADDR_MULTICAST(&dst->sin6_addr) || !nd6_need_cache(ifp)) {
		if (rt != NULL)
			RT_UNLOCK(rt);
		goto sendpkt;
	}

	/*
	 * Next hop determination.  Because we may involve the gateway route
	 * in addition to the original route, locking is rather complicated.
	 * The general concept is that regardless of whether the route points
	 * to the original route or to the gateway route, this routine takes
	 * an extra reference on such a route.  This extra reference will be
	 * released at the end.
	 *
	 * Care must be taken to ensure that the "hint0" route never gets freed
	 * via rtfree(), since the caller may have stored it inside a struct
	 * route with a reference held for that placeholder.
	 *
	 * This logic is similar to, though not exactly the same as the one
	 * used by route_to_gwroute().
	 */
	if (rt != NULL) {
		/*
		 * We have a reference to "rt" by now (or below via rtalloc1),
		 * which will either be released or freed at the end of this
		 * routine.
		 */
		RT_LOCK_ASSERT_HELD(rt);
		if (!(rt->rt_flags & RTF_UP)) {
			/* hint is down; do a fresh scoped lookup instead */
			RT_REMREF_LOCKED(rt);
			RT_UNLOCK(rt);
			if ((hint = rt = rtalloc1_scoped((struct sockaddr *)dst,
			    1, 0, ifp->if_index)) != NULL) {
				RT_LOCK_SPIN(rt);
				if (rt->rt_ifp != ifp) {
					/* XXX: loop care? */
					RT_UNLOCK(rt);
					error = nd6_output(ifp, origifp, m0,
					    dst, rt, adv);
					rtfree(rt);
					return (error);
				}
			} else {
				senderr(EHOSTUNREACH);
			}
		}

		if (rt->rt_flags & RTF_GATEWAY) {
			struct rtentry *gwrt;
			struct in6_ifaddr *ia6 = NULL;
			struct sockaddr_in6 gw6;

			rtgw_to_sa6(rt, &gw6);
			/*
			 * Must drop rt_lock since nd6_is_addr_neighbor()
			 * calls nd6_lookup() and acquires rnh_lock.
			 */
			RT_UNLOCK(rt);

			/*
			 * We skip link-layer address resolution and NUD
			 * if the gateway is not a neighbor from ND point
			 * of view, regardless of the value of nd_ifinfo.flags.
			 * The second condition is a bit tricky; we skip
			 * if the gateway is our own address, which is
			 * sometimes used to install a route to a p2p link.
			 */
			if (!nd6_is_addr_neighbor(&gw6, ifp, 0) ||
			    (ia6 = in6ifa_ifpwithaddr(ifp, &gw6.sin6_addr))) {
				/*
				 * We allow this kind of tricky route only
				 * when the outgoing interface is p2p.
				 * XXX: we may need a more generic rule here.
				 */
				if (ia6 != NULL)
					IFA_REMREF(&ia6->ia_ifa);
				if ((ifp->if_flags & IFF_POINTOPOINT) == 0)
					senderr(EHOSTUNREACH);
				goto sendpkt;
			}

			RT_LOCK_SPIN(rt);
			/* re-read gateway; it may have changed while unlocked */
			gw6 = *((struct sockaddr_in6 *)(void *)rt->rt_gateway);

			/* If hint is now down, give up */
			if (!(rt->rt_flags & RTF_UP)) {
				RT_UNLOCK(rt);
				senderr(EHOSTUNREACH);
			}

			/* If there's no gateway route, look it up */
			if ((gwrt = rt->rt_gwroute) == NULL) {
				RT_UNLOCK(rt);
				goto lookup;
			}
			/* Become a regular mutex */
			RT_CONVERT_LOCK(rt);

			/*
			 * Take gwrt's lock while holding route's lock;
			 * this is okay since gwrt never points back
			 * to rt, so no lock ordering issues.
			 */
			RT_LOCK_SPIN(gwrt);
			if (!(gwrt->rt_flags & RTF_UP)) {
				/* cached gateway route is dead; look up anew */
				rt->rt_gwroute = NULL;
				RT_UNLOCK(gwrt);
				RT_UNLOCK(rt);
				rtfree(gwrt);
lookup:
				lck_mtx_lock(rnh_lock);
				gwrt = rtalloc1_scoped_locked(
				    (struct sockaddr *)&gw6, 1, 0,
				    ifp->if_index);

				RT_LOCK(rt);
				/*
				 * Bail out if the route is down, no route
				 * to gateway, circular route, or if the
				 * gateway portion of "rt" has changed.
				 */
				if (!(rt->rt_flags & RTF_UP) ||
				    gwrt == NULL || gwrt == rt ||
				    !equal(SA(&gw6), rt->rt_gateway)) {
					if (gwrt == rt) {
						RT_REMREF_LOCKED(gwrt);
						gwrt = NULL;
					}
					RT_UNLOCK(rt);
					if (gwrt != NULL)
						rtfree_locked(gwrt);
					lck_mtx_unlock(rnh_lock);
					senderr(EHOSTUNREACH);
				}
				VERIFY(gwrt != NULL);
				/*
				 * Set gateway route; callee adds ref to gwrt;
				 * gwrt has an extra ref from rtalloc1() for
				 * this routine.
				 */
				rt_set_gwroute(rt, rt_key(rt), gwrt);
				RT_UNLOCK(rt);
				lck_mtx_unlock(rnh_lock);
				/* Remember to release/free "rt" at the end */
				rtrele = rt;
				rt = gwrt;
			} else {
				RT_ADDREF_LOCKED(gwrt);
				RT_UNLOCK(gwrt);
				RT_UNLOCK(rt);
				/* Remember to release/free "rt" at the end */
				rtrele = rt;
				rt = gwrt;
			}
			VERIFY(rt == gwrt);

			/*
			 * This is an opportunity to revalidate the parent
			 * route's gwroute, in case it now points to a dead
			 * route entry.  Parent route won't go away since the
			 * clone (hint) holds a reference to it.  rt == gwrt.
			 */
			RT_LOCK_SPIN(hint);
			if ((hint->rt_flags & (RTF_WASCLONED | RTF_UP)) ==
			    (RTF_WASCLONED | RTF_UP)) {
				struct rtentry *prt = hint->rt_parent;
				VERIFY(prt != NULL);

				RT_CONVERT_LOCK(hint);
				RT_ADDREF(prt);
				RT_UNLOCK(hint);
				rt_revalidate_gwroute(prt, rt);
				RT_REMREF(prt);
			} else {
				RT_UNLOCK(hint);
			}

			RT_LOCK_SPIN(rt);
			/* rt == gwrt; if it is now down, give up */
			if (!(rt->rt_flags & RTF_UP)) {
				RT_UNLOCK(rt);
				rtfree(rt);
				rt = NULL;
				/* "rtrele" == original "rt" */
				senderr(EHOSTUNREACH);
			}
		}

		/* Become a regular mutex */
		RT_CONVERT_LOCK(rt);
	}

	/*
	 * Address resolution or Neighbor Unreachability Detection
	 * for the next hop.
	 * At this point, the destination of the packet must be a unicast
	 * or an anycast address(i.e. not a multicast).
	 */

	/* Look up the neighbor cache for the nexthop */
	if (rt && (rt->rt_flags & RTF_LLINFO) != 0) {
		ln = rt->rt_llinfo;
	} else {
		struct sockaddr_in6 sin6;
		/*
		 * Clear out Scope ID field in case it is set.
		 */
		sin6 = *dst;
		sin6.sin6_scope_id = 0;
		/*
		 * Since nd6_is_addr_neighbor() internally calls nd6_lookup(),
		 * the condition below is not very efficient.  But we believe
		 * it is tolerable, because this should be a rare case.
		 * Must drop rt_lock since nd6_is_addr_neighbor() calls
		 * nd6_lookup() and acquires rnh_lock.
		 */
		if (rt != NULL)
			RT_UNLOCK(rt);
		if (nd6_is_addr_neighbor(&sin6, ifp, 0)) {
			/* "rtrele" may have been used, so clean up "rt" now */
			if (rt != NULL) {
				/* Don't free "hint0" */
				if (rt == hint0)
					RT_REMREF(rt);
				else
					rtfree(rt);
			}
			/* Callee returns a locked route upon success */
			rt = nd6_lookup(&dst->sin6_addr, 1, ifp, 0);
			if (rt != NULL) {
				RT_LOCK_ASSERT_HELD(rt);
				ln = rt->rt_llinfo;
			}
		} else if (rt != NULL) {
			RT_LOCK(rt);
		}
	}

	if (!ln || !rt) {
		/* no neighbor cache entry could be made/found */
		if (rt != NULL)
			RT_UNLOCK(rt);
		lck_rw_lock_shared(nd_if_rwlock);
		ndi = ND_IFINFO(ifp);
		VERIFY(ndi != NULL && ndi->initialized);
		lck_mtx_lock(&ndi->lock);
		if ((ifp->if_flags & IFF_POINTOPOINT) == 0 &&
		    !(ndi->flags & ND6_IFF_PERFORMNUD)) {
			lck_mtx_unlock(&ndi->lock);
			lck_rw_done(nd_if_rwlock);
			log(LOG_DEBUG,
			    "nd6_output: can't allocate llinfo for %s "
			    "(ln=%p, rt=%p)\n",
			    ip6_sprintf(&dst->sin6_addr), ln, rt);
			senderr(EIO);	/* XXX: good error? */
		}
		lck_mtx_unlock(&ndi->lock);
		lck_rw_done(nd_if_rwlock);

		goto sendpkt;	/* send anyway */
	}

	getmicrotime(&timenow);

	/* We don't have to do link-layer address resolution on a p2p link. */
	if ((ifp->if_flags & IFF_POINTOPOINT) != 0 &&
	    ln->ln_state < ND6_LLINFO_REACHABLE) {
		ln->ln_state = ND6_LLINFO_STALE;
		ln->ln_expire = rt_expiry(rt, timenow.tv_sec, nd6_gctimer);
	}

	/*
	 * The first time we send a packet to a neighbor whose entry is
	 * STALE, we have to change the state to DELAY and set a timer to
	 * expire in DELAY_FIRST_PROBE_TIME seconds to ensure we do
	 * neighbor unreachability detection on expiration.
	 * (RFC 2461 7.3.3)
	 */
	if (ln->ln_state == ND6_LLINFO_STALE) {
		ln->ln_asked = 0;
		ln->ln_state = ND6_LLINFO_DELAY;
		ln->ln_expire = rt_expiry(rt, timenow.tv_sec, nd6_delay);
	}

	/*
	 * If the neighbor cache entry has a state other than INCOMPLETE
	 * (i.e. its link-layer address is already resolved), just
	 * send the packet.
	 */
	if (ln->ln_state > ND6_LLINFO_INCOMPLETE) {
		RT_UNLOCK(rt);
		/*
		 * Move this entry to the head of the queue so that it is
		 * less likely for this entry to be a target of forced
		 * garbage collection (see nd6_rtrequest()).
		 * (rnh_lock must be taken before rt_lock; hence the
		 * unlock/relock dance above.)
		 */
		lck_mtx_lock(rnh_lock);
		RT_LOCK_SPIN(rt);
		if (ln->ln_flags & ND6_LNF_IN_USE) {
			LN_DEQUEUE(ln);
			LN_INSERTHEAD(ln);
		}
		RT_UNLOCK(rt);
		lck_mtx_unlock(rnh_lock);
		goto sendpkt;
	}

	/*
	 * There is a neighbor cache entry, but no ethernet address
	 * response yet.  Replace the held mbuf (if any) with this
	 * latest one.
	 *
	 * This code conforms to the rate-limiting rule described in Section
	 * 7.2.2 of RFC 2461, because the timer is set correctly after sending
	 * an NS below.
	 */
	if (ln->ln_state == ND6_LLINFO_NOSTATE)
		ln->ln_state = ND6_LLINFO_INCOMPLETE;
	if (ln->ln_hold)
		m_freem(ln->ln_hold);
	ln->ln_hold = m;
	if (ln->ln_expire && ln->ln_asked < nd6_mmaxtries &&
	    ln->ln_expire < timenow.tv_sec) {
		ln->ln_asked++;
		lck_rw_lock_shared(nd_if_rwlock);
		ndi = ND_IFINFO(ifp);
		VERIFY(ndi != NULL && ndi->initialized);
		lck_mtx_lock(&ndi->lock);
		ln->ln_expire = timenow.tv_sec + ndi->retrans / 1000;
		lck_mtx_unlock(&ndi->lock);
		lck_rw_done(nd_if_rwlock);
		RT_UNLOCK(rt);
		/* We still have a reference on rt (for ln) */
		if (ip6_forwarding)
			nd6_prproxy_ns_output(ifp, NULL, &dst->sin6_addr, ln);
		else
			nd6_ns_output(ifp, NULL, &dst->sin6_addr, ln, 0);
	} else {
		RT_UNLOCK(rt);
	}
	/*
	 * Move this entry to the head of the queue so that it is
	 * less likely for this entry to be a target of forced
	 * garbage collection (see nd6_rtrequest()).
	 */
	lck_mtx_lock(rnh_lock);
	RT_LOCK_SPIN(rt);
	if (ln->ln_flags & ND6_LNF_IN_USE) {
		LN_DEQUEUE(ln);
		LN_INSERTHEAD(ln);
	}
	/* Clean up "rt" now while we can */
	if (rt == hint0) {
		RT_REMREF_LOCKED(rt);
		RT_UNLOCK(rt);
	} else {
		RT_UNLOCK(rt);
		rtfree_locked(rt);
	}
	rt = NULL;	/* "rt" has been taken care of */
	lck_mtx_unlock(rnh_lock);

	error = 0;
	goto release;

sendpkt:
	if (rt != NULL)
		RT_LOCK_ASSERT_NOTHELD(rt);

	/* discard the packet if IPv6 operation is disabled on the interface */
	lck_rw_lock_shared(nd_if_rwlock);
	ndi = ND_IFINFO(ifp);
	VERIFY(ndi != NULL && ndi->initialized);
	/* test is done here without holding ndi lock, for performance */
	if (ndi->flags & ND6_IFF_IFDISABLED) {
		lck_rw_done(nd_if_rwlock);
		error = ENETDOWN;	/* better error? */
		goto bad;
	}
	lck_rw_done(nd_if_rwlock);

	if ((ifp->if_flags & IFF_LOOPBACK) != 0) {
		/* forwarding rules require the original scope_id */
		m->m_pkthdr.rcvif = origifp;
		error = dlil_output(origifp, PF_INET6, m, (caddr_t)rt,
		    (struct sockaddr *)dst, 0, adv);
		goto release;
	} else {
		/* Do not allow loopback address to wind up on a wire */
		struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);

		if ((IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src) ||
		    IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst))) {
			ip6stat.ip6s_badscope++;
			/*
			 * Do not simply drop the packet just like a
			 * firewall -- we want the application to feel
			 * the pain.  Return ENETUNREACH like ip6_output
			 * does in some similar cases.  This can startle
			 * the otherwise clueless process that specifies
			 * loopback as the source address.
			 */
			error = ENETUNREACH;
			goto bad;
		}
	}

	if (rt != NULL) {
		RT_LOCK_SPIN(rt);
		/* Mark use timestamp */
		if (rt->rt_llinfo != NULL)
			nd6_llreach_use(rt->rt_llinfo);
		RT_UNLOCK(rt);
	}

	if (hint && nstat_collect)
		nstat_route_tx(hint, 1, m->m_pkthdr.len, 0);

	m->m_pkthdr.rcvif = NULL;
	error = dlil_output(ifp, PF_INET6, m, (caddr_t)rt,
	    (struct sockaddr *)dst, 0, adv);
	goto release;

bad:
	if (m != NULL)
		m_freem(m);

release:
	/* Clean up "rt" unless it's already been done */
	if (rt != NULL) {
		RT_LOCK_SPIN(rt);
		if (rt == hint0) {
			/* never rtfree() the caller's hint placeholder */
			RT_REMREF_LOCKED(rt);
			RT_UNLOCK(rt);
		} else {
			RT_UNLOCK(rt);
			rtfree(rt);
		}
	}
	/* And now clean up "rtrele" if there is any */
	if (rtrele != NULL) {
		RT_LOCK_SPIN(rtrele);
		if (rtrele == hint0) {
			RT_REMREF_LOCKED(rtrele);
			RT_UNLOCK(rtrele);
		} else {
			RT_UNLOCK(rtrele);
			rtfree(rtrele);
		}
	}
	return (error);
}
#undef senderr
3393 | ||
3394 | int | |
3395 | nd6_need_cache( | |
3396 | struct ifnet *ifp) | |
3397 | { | |
3398 | /* | |
3399 | * XXX: we currently do not make neighbor cache on any interface | |
3400 | * other than ARCnet, Ethernet, FDDI and GIF. | |
3401 | * | |
3402 | * RFC2893 says: | |
3403 | * - unidirectional tunnels needs no ND | |
3404 | */ | |
3405 | switch (ifp->if_type) { | |
3406 | case IFT_ARCNET: | |
3407 | case IFT_ETHER: | |
3408 | case IFT_FDDI: | |
3409 | case IFT_IEEE1394: | |
3410 | case IFT_L2VLAN: | |
3411 | case IFT_IEEE8023ADLAG: | |
3412 | #if IFT_IEEE80211 | |
3413 | case IFT_IEEE80211: | |
3414 | #endif | |
3415 | case IFT_GIF: /* XXX need more cases? */ | |
3416 | case IFT_PPP: | |
3417 | #if IFT_TUNNEL | |
3418 | case IFT_TUNNEL: | |
3419 | #endif | |
3420 | case IFT_BRIDGE: | |
3421 | case IFT_CELLULAR: | |
3422 | return(1); | |
3423 | default: | |
3424 | return(0); | |
3425 | } | |
3426 | } | |
3427 | ||
3428 | int | |
3429 | nd6_storelladdr( | |
3430 | struct ifnet *ifp, | |
3431 | struct rtentry *rt, | |
3432 | struct mbuf *m, | |
3433 | struct sockaddr *dst, | |
3434 | u_char *desten) | |
3435 | { | |
3436 | int i; | |
3437 | struct sockaddr_dl *sdl; | |
3438 | ||
3439 | if (m->m_flags & M_MCAST) { | |
3440 | switch (ifp->if_type) { | |
3441 | case IFT_ETHER: | |
3442 | case IFT_FDDI: | |
3443 | case IFT_L2VLAN: | |
3444 | case IFT_IEEE8023ADLAG: | |
3445 | #if IFT_IEEE80211 | |
3446 | case IFT_IEEE80211: | |
3447 | #endif | |
3448 | case IFT_BRIDGE: | |
3449 | ETHER_MAP_IPV6_MULTICAST(&SIN6(dst)->sin6_addr, | |
3450 | desten); | |
3451 | return(1); | |
3452 | case IFT_IEEE1394: | |
3453 | for (i = 0; i < ifp->if_addrlen; i++) | |
3454 | desten[i] = ~0; | |
3455 | return(1); | |
3456 | case IFT_ARCNET: | |
3457 | *desten = 0; | |
3458 | return(1); | |
3459 | default: | |
3460 | return(0); /* caller will free mbuf */ | |
3461 | } | |
3462 | } | |
3463 | ||
3464 | if (rt == NULL) { | |
3465 | /* this could happen, if we could not allocate memory */ | |
3466 | return(0); /* caller will free mbuf */ | |
3467 | } | |
3468 | RT_LOCK(rt); | |
3469 | if (rt->rt_gateway->sa_family != AF_LINK) { | |
3470 | printf("nd6_storelladdr: something odd happens\n"); | |
3471 | RT_UNLOCK(rt); | |
3472 | return(0); /* caller will free mbuf */ | |
3473 | } | |
3474 | sdl = SDL(rt->rt_gateway); | |
3475 | if (sdl->sdl_alen == 0) { | |
3476 | /* this should be impossible, but we bark here for debugging */ | |
3477 | printf("nd6_storelladdr: sdl_alen == 0\n"); | |
3478 | RT_UNLOCK(rt); | |
3479 | return(0); /* caller will free mbuf */ | |
3480 | } | |
3481 | ||
3482 | bcopy(LLADDR(sdl), desten, sdl->sdl_alen); | |
3483 | RT_UNLOCK(rt); | |
3484 | return(1); | |
3485 | } | |
3486 | ||
/*
 * This is the ND pre-output routine; care must be taken to ensure that
 * the "hint" route never gets freed via rtfree(), since the caller may
 * have stored it inside a struct route with a reference held for that
 * placeholder.
 *
 * ifp:         outgoing interface; must be UP and RUNNING.
 * ip6_dest:    destination; must have sin6_family == AF_INET6.
 * ll_dest:     output buffer receiving the link-layer sockaddr_dl.
 * ll_dest_len: size of ll_dest in bytes.
 * hint:        optional route hint from the caller (reference released
 *              here, never freed).
 * packet:      the packet being sent (used only for the M_MCAST flag).
 *
 * Returns 0 on success or an errno_t describing the failure.
 */
errno_t
nd6_lookup_ipv6(ifnet_t ifp, const struct sockaddr_in6 *ip6_dest,
    struct sockaddr_dl *ll_dest, size_t ll_dest_len, route_t hint,
    mbuf_t packet)
{
	route_t route = hint;
	errno_t result = 0;
	struct sockaddr_dl *sdl = NULL;
	size_t copy_len;

	if (ip6_dest->sin6_family != AF_INET6)
		return (EAFNOSUPPORT);

	if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
		return (ENETDOWN);

	if (hint != NULL) {
		/*
		 * Callee holds a reference on the route and returns
		 * with the route entry locked, upon success.
		 */
		result = route_to_gwroute((const struct sockaddr *)ip6_dest,
		    hint, &route);
		if (result != 0)
			return (result);
		if (route != NULL)
			RT_LOCK_ASSERT_HELD(route);
	}

	if ((packet->m_flags & M_MCAST) != 0) {
		/*
		 * Multicast: resolve algorithmically via the DLIL layer;
		 * drop the route lock across the call.
		 */
		if (route != NULL)
			RT_UNLOCK(route);
		result = dlil_resolve_multi(ifp,
		    (const struct sockaddr*)ip6_dest,
		    (struct sockaddr *)ll_dest, ll_dest_len);
		if (route != NULL)
			RT_LOCK(route);
		goto release;
	}

	if (route == NULL) {
		/*
		 * This could happen, if we could not allocate memory or
		 * if route_to_gwroute() didn't return a route.
		 */
		result = ENOBUFS;
		goto release;
	}

	if (route->rt_gateway->sa_family != AF_LINK) {
		printf("nd6_lookup_ipv6: gateway address not AF_LINK\n");
		result = EADDRNOTAVAIL;
		goto release;
	}

	sdl = SDL(route->rt_gateway);
	if (sdl->sdl_alen == 0) {
		/* this should be impossible, but we bark here for debugging */
		printf("nd6_lookup_ipv6: sdl_alen == 0\n");
		result = EHOSTUNREACH;
		goto release;
	}

	/*
	 * NOTE(review): the copy is silently truncated to ll_dest_len when
	 * the caller's buffer is smaller than sdl_len -- confirm callers
	 * always supply a full-sized sockaddr_dl.
	 */
	copy_len = sdl->sdl_len <= ll_dest_len ? sdl->sdl_len : ll_dest_len;
	bcopy(sdl, ll_dest, copy_len);

release:
	/* release/unlock the route; never rtfree() the caller's hint */
	if (route != NULL) {
		if (route == hint) {
			RT_REMREF_LOCKED(route);
			RT_UNLOCK(route);
		} else {
			RT_UNLOCK(route);
			rtfree(route);
		}
	}
	return (result);
}
3571 | ||
3572 | int | |
3573 | nd6_setifinfo(struct ifnet *ifp, u_int32_t before, u_int32_t after) | |
3574 | { | |
3575 | /* | |
3576 | * We only care about ND6_IFF_PROXY_PREFIXES for now. | |
3577 | */ | |
3578 | before &= ND6_IFF_PROXY_PREFIXES; | |
3579 | after &= ND6_IFF_PROXY_PREFIXES; | |
3580 | ||
3581 | if (before == after) | |
3582 | return (0); | |
3583 | ||
3584 | return (nd6_if_prproxy(ifp, ((int32_t)(after - before) > 0))); | |
3585 | } | |
3586 | ||
3587 | SYSCTL_DECL(_net_inet6_icmp6); | |
3588 | ||
3589 | static int | |
3590 | nd6_sysctl_drlist SYSCTL_HANDLER_ARGS | |
3591 | { | |
3592 | #pragma unused(oidp, arg1, arg2) | |
3593 | int error = 0; | |
3594 | char buf[1024]; | |
3595 | struct nd_defrouter *dr; | |
3596 | int p64 = proc_is64bit(req->p); | |
3597 | ||
3598 | if (req->newptr) | |
3599 | return (EPERM); | |
3600 | ||
3601 | lck_mtx_lock(nd6_mutex); | |
3602 | if (p64) { | |
3603 | struct in6_defrouter_64 *d, *de; | |
3604 | ||
3605 | for (dr = TAILQ_FIRST(&nd_defrouter); | |
3606 | dr; | |
3607 | dr = TAILQ_NEXT(dr, dr_entry)) { | |
3608 | d = (struct in6_defrouter_64 *)(void *)buf; | |
3609 | de = (struct in6_defrouter_64 *) | |
3610 | (void *)(buf + sizeof (buf)); | |
3611 | ||
3612 | if (d + 1 <= de) { | |
3613 | bzero(d, sizeof (*d)); | |
3614 | d->rtaddr.sin6_family = AF_INET6; | |
3615 | d->rtaddr.sin6_len = sizeof (d->rtaddr); | |
3616 | if (in6_recoverscope(&d->rtaddr, &dr->rtaddr, | |
3617 | dr->ifp) != 0) | |
3618 | log(LOG_ERR, | |
3619 | "scope error in " | |
3620 | "default router list (%s)\n", | |
3621 | ip6_sprintf(&dr->rtaddr)); | |
3622 | d->flags = dr->flags; | |
3623 | d->stateflags = dr->stateflags; | |
3624 | d->stateflags &= ~NDDRF_PROCESSED; | |
3625 | d->rtlifetime = dr->rtlifetime; | |
3626 | d->expire = dr->expire; | |
3627 | d->if_index = dr->ifp->if_index; | |
3628 | } else { | |
3629 | panic("buffer too short"); | |
3630 | } | |
3631 | error = SYSCTL_OUT(req, buf, sizeof (*d)); | |
3632 | if (error) | |
3633 | break; | |
3634 | } | |
3635 | } else { | |
3636 | struct in6_defrouter_32 *d_32, *de_32; | |
3637 | ||
3638 | for (dr = TAILQ_FIRST(&nd_defrouter); | |
3639 | dr; | |
3640 | dr = TAILQ_NEXT(dr, dr_entry)) { | |
3641 | d_32 = (struct in6_defrouter_32 *)(void *)buf; | |
3642 | de_32 = (struct in6_defrouter_32 *) | |
3643 | (void *)(buf + sizeof (buf)); | |
3644 | ||
3645 | if (d_32 + 1 <= de_32) { | |
3646 | bzero(d_32, sizeof (*d_32)); | |
3647 | d_32->rtaddr.sin6_family = AF_INET6; | |
3648 | d_32->rtaddr.sin6_len = sizeof (d_32->rtaddr); | |
3649 | if (in6_recoverscope(&d_32->rtaddr, &dr->rtaddr, | |
3650 | dr->ifp) != 0) | |
3651 | log(LOG_ERR, | |
3652 | "scope error in " | |
3653 | "default router list (%s)\n", | |
3654 | ip6_sprintf(&dr->rtaddr)); | |
3655 | d_32->flags = dr->flags; | |
3656 | d_32->stateflags = dr->stateflags; | |
3657 | d_32->stateflags &= ~NDDRF_PROCESSED; | |
3658 | d_32->rtlifetime = dr->rtlifetime; | |
3659 | d_32->expire = dr->expire; | |
3660 | d_32->if_index = dr->ifp->if_index; | |
3661 | } else { | |
3662 | panic("buffer too short"); | |
3663 | } | |
3664 | error = SYSCTL_OUT(req, buf, sizeof (*d_32)); | |
3665 | if (error) | |
3666 | break; | |
3667 | } | |
3668 | } | |
3669 | lck_mtx_unlock(nd6_mutex); | |
3670 | return (error); | |
3671 | } | |
3672 | ||
3673 | static int | |
3674 | nd6_sysctl_prlist SYSCTL_HANDLER_ARGS | |
3675 | { | |
3676 | #pragma unused(oidp, arg1, arg2) | |
3677 | int error = 0; | |
3678 | char buf[1024]; | |
3679 | struct nd_prefix *pr; | |
3680 | int p64 = proc_is64bit(req->p); | |
3681 | ||
3682 | if (req->newptr) | |
3683 | return (EPERM); | |
3684 | ||
3685 | lck_mtx_lock(nd6_mutex); | |
3686 | if (p64) { | |
3687 | struct in6_prefix_64 *p, *pe; | |
3688 | ||
3689 | for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) { | |
3690 | u_short advrtrs = 0; | |
3691 | size_t advance; | |
3692 | struct sockaddr_in6 *sin6, *s6; | |
3693 | struct nd_pfxrouter *pfr; | |
3694 | ||
3695 | p = (struct in6_prefix_64 *)(void *)buf; | |
3696 | pe = (struct in6_prefix_64 *) | |
3697 | (void *)(buf + sizeof (buf)); | |
3698 | ||
3699 | if (p + 1 <= pe) { | |
3700 | bzero(p, sizeof (*p)); | |
3701 | sin6 = (struct sockaddr_in6 *)(p + 1); | |
3702 | ||
3703 | NDPR_LOCK(pr); | |
3704 | p->prefix = pr->ndpr_prefix; | |
3705 | if (in6_recoverscope(&p->prefix, | |
3706 | &p->prefix.sin6_addr, pr->ndpr_ifp) != 0) | |
3707 | log(LOG_ERR, | |
3708 | "scope error in prefix list (%s)\n", | |
3709 | ip6_sprintf(&p->prefix.sin6_addr)); | |
3710 | p->raflags = pr->ndpr_raf; | |
3711 | p->prefixlen = pr->ndpr_plen; | |
3712 | p->vltime = pr->ndpr_vltime; | |
3713 | p->pltime = pr->ndpr_pltime; | |
3714 | p->if_index = pr->ndpr_ifp->if_index; | |
3715 | p->expire = pr->ndpr_expire; | |
3716 | p->refcnt = pr->ndpr_addrcnt; | |
3717 | p->flags = pr->ndpr_stateflags; | |
3718 | p->origin = PR_ORIG_RA; | |
3719 | advrtrs = 0; | |
3720 | for (pfr = pr->ndpr_advrtrs.lh_first; | |
3721 | pfr; | |
3722 | pfr = pfr->pfr_next) { | |
3723 | if ((void *)&sin6[advrtrs + 1] > | |
3724 | (void *)pe) { | |
3725 | advrtrs++; | |
3726 | continue; | |
3727 | } | |
3728 | s6 = &sin6[advrtrs]; | |
3729 | bzero(s6, sizeof (*s6)); | |
3730 | s6->sin6_family = AF_INET6; | |
3731 | s6->sin6_len = sizeof (*sin6); | |
3732 | if (in6_recoverscope(s6, | |
3733 | &pfr->router->rtaddr, | |
3734 | pfr->router->ifp) != 0) | |
3735 | log(LOG_ERR, "scope error in " | |
3736 | "prefix list (%s)\n", | |
3737 | ip6_sprintf(&pfr->router-> | |
3738 | rtaddr)); | |
3739 | advrtrs++; | |
3740 | } | |
3741 | p->advrtrs = advrtrs; | |
3742 | NDPR_UNLOCK(pr); | |
3743 | } else { | |
3744 | panic("buffer too short"); | |
3745 | } | |
3746 | advance = sizeof (*p) + sizeof (*sin6) * advrtrs; | |
3747 | error = SYSCTL_OUT(req, buf, advance); | |
3748 | if (error) | |
3749 | break; | |
3750 | } | |
3751 | } else { | |
3752 | struct in6_prefix_32 *p_32, *pe_32; | |
3753 | ||
3754 | for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) { | |
3755 | u_short advrtrs = 0; | |
3756 | size_t advance; | |
3757 | struct sockaddr_in6 *sin6, *s6; | |
3758 | struct nd_pfxrouter *pfr; | |
3759 | ||
3760 | p_32 = (struct in6_prefix_32 *)(void *)buf; | |
3761 | pe_32 = (struct in6_prefix_32 *) | |
3762 | (void *)(buf + sizeof (buf)); | |
3763 | ||
3764 | if (p_32 + 1 <= pe_32) { | |
3765 | bzero(p_32, sizeof (*p_32)); | |
3766 | sin6 = (struct sockaddr_in6 *)(p_32 + 1); | |
3767 | ||
3768 | NDPR_LOCK(pr); | |
3769 | p_32->prefix = pr->ndpr_prefix; | |
3770 | if (in6_recoverscope(&p_32->prefix, | |
3771 | &p_32->prefix.sin6_addr, pr->ndpr_ifp) != 0) | |
3772 | log(LOG_ERR, "scope error in prefix " | |
3773 | "list (%s)\n", ip6_sprintf(&p_32-> | |
3774 | prefix.sin6_addr)); | |
3775 | p_32->raflags = pr->ndpr_raf; | |
3776 | p_32->prefixlen = pr->ndpr_plen; | |
3777 | p_32->vltime = pr->ndpr_vltime; | |
3778 | p_32->pltime = pr->ndpr_pltime; | |
3779 | p_32->if_index = pr->ndpr_ifp->if_index; | |
3780 | p_32->expire = pr->ndpr_expire; | |
3781 | p_32->refcnt = pr->ndpr_addrcnt; | |
3782 | p_32->flags = pr->ndpr_stateflags; | |
3783 | p_32->origin = PR_ORIG_RA; | |
3784 | advrtrs = 0; | |
3785 | for (pfr = pr->ndpr_advrtrs.lh_first; | |
3786 | pfr; | |
3787 | pfr = pfr->pfr_next) { | |
3788 | if ((void *)&sin6[advrtrs + 1] > | |
3789 | (void *)pe_32) { | |
3790 | advrtrs++; | |
3791 | continue; | |
3792 | } | |
3793 | s6 = &sin6[advrtrs]; | |
3794 | bzero(s6, sizeof (*s6)); | |
3795 | s6->sin6_family = AF_INET6; | |
3796 | s6->sin6_len = sizeof (*sin6); | |
3797 | if (in6_recoverscope(s6, | |
3798 | &pfr->router->rtaddr, | |
3799 | pfr->router->ifp) != 0) | |
3800 | log(LOG_ERR, "scope error in " | |
3801 | "prefix list (%s)\n", | |
3802 | ip6_sprintf(&pfr->router-> | |
3803 | rtaddr)); | |
3804 | advrtrs++; | |
3805 | } | |
3806 | p_32->advrtrs = advrtrs; | |
3807 | NDPR_UNLOCK(pr); | |
3808 | } else { | |
3809 | panic("buffer too short"); | |
3810 | } | |
3811 | advance = sizeof (*p_32) + sizeof (*sin6) * advrtrs; | |
3812 | error = SYSCTL_OUT(req, buf, advance); | |
3813 | if (error) | |
3814 | break; | |
3815 | } | |
3816 | } | |
3817 | lck_mtx_unlock(nd6_mutex); | |
3818 | return (error); | |
3819 | } | |
3820 | SYSCTL_PROC(_net_inet6_icmp6, ICMPV6CTL_ND6_DRLIST, nd6_drlist, | |
3821 | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, nd6_sysctl_drlist, "S,in6_defrouter",""); | |
3822 | SYSCTL_PROC(_net_inet6_icmp6, ICMPV6CTL_ND6_PRLIST, nd6_prlist, | |
3823 | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, nd6_sysctl_prlist, "S,in6_defrouter",""); | |
3824 |