1 /*
2 * Copyright (c) 2004-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1989, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 */
61
62 #include <kern/debug.h>
63 #include <netinet/in_arp.h>
64 #include <sys/types.h>
65 #include <sys/param.h>
66 #include <sys/kernel_types.h>
67 #include <sys/syslog.h>
68 #include <sys/systm.h>
69 #include <sys/time.h>
70 #include <sys/kernel.h>
71 #include <sys/mbuf.h>
72 #include <sys/sysctl.h>
73 #include <sys/mcache.h>
74 #include <sys/protosw.h>
75 #include <string.h>
76 #include <net/if_arp.h>
77 #include <net/if_dl.h>
78 #include <net/dlil.h>
79 #include <net/if_types.h>
80 #include <net/if_llreach.h>
81 #include <net/route.h>
82 #include <net/nwk_wq.h>
83
84 #include <netinet/if_ether.h>
85 #include <netinet/in_var.h>
86 #include <netinet/ip.h>
87 #include <netinet/ip6.h>
88 #include <kern/zalloc.h>
89
90 #include <kern/thread.h>
91 #include <kern/sched_prim.h>
92
93 #define CONST_LLADDR(s) ((const u_char*)((s)->sdl_data + (s)->sdl_nlen))
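/*
 * Layout note (added for illustration): sdl_data[] in a sockaddr_dl
 * holds the interface name followed by the link-layer address, so for
 * "en0" with address 00:11:22:33:44:55, sdl_nlen is 3 and
 * CONST_LLADDR() points at the 6 address bytes following the name.
 */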
94
95 static const size_t MAX_HW_LEN = 10;
96
97 /*
98 * Synchronization notes:
99 *
100  * The global list of ARP entries is stored in llinfo_arp; an entry
101 * gets inserted into the list when the route is created and gets
102 * removed from the list when it is deleted; this is done as part
103 * of RTM_ADD/RTM_RESOLVE/RTM_DELETE in arp_rtrequest().
104 *
105 * Because rnh_lock and rt_lock for the entry are held during those
106 * operations, the same locks (and thus lock ordering) must be used
107 * elsewhere to access the relevant data structure fields:
108 *
109 * la_le.{le_next,le_prev}, la_rt
110 *
111 * - Routing lock (rnh_lock)
112 *
113 * la_holdq, la_asked, la_llreach, la_lastused, la_flags
114 *
115 * - Routing entry lock (rt_lock)
116 *
117 * Due to the dependency on rt_lock, llinfo_arp has the same lifetime
118 * as the route entry itself. When a route is deleted (RTM_DELETE),
119 * it is simply removed from the global list but the memory is not
120 * freed until the route itself is freed.
121 */
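/*
 * A minimal sketch (illustrative, not from the original file) of the
 * lock ordering described above, as seen by a hypothetical reader of
 * both the global list and a per-entry hold queue:
 *
 *	lck_mtx_lock(rnh_lock);		// guards la_le linkage
 *	RT_LOCK(la->la_rt);		// guards la_holdq, la_asked, ...
 *	...inspect the entry...
 *	RT_UNLOCK(la->la_rt);
 *	lck_mtx_unlock(rnh_lock);
 */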
122 struct llinfo_arp {
123 /*
124 * The following are protected by rnh_lock
125 */
126 LIST_ENTRY(llinfo_arp) la_le;
127 struct rtentry *la_rt;
128 /*
129 * The following are protected by rt_lock
130 */
131 class_queue_t la_holdq; /* packets awaiting resolution */
132 struct if_llreach *la_llreach; /* link-layer reachability record */
133 u_int64_t la_lastused; /* last used timestamp */
134 u_int32_t la_asked; /* # of requests sent */
135 u_int32_t la_maxtries; /* retry limit */
136 u_int64_t la_probeexp; /* probe deadline timestamp */
137 u_int32_t la_prbreq_cnt; /* probe request count */
138 u_int32_t la_flags;
139 #define LLINFO_RTRFAIL_EVTSENT 0x1 /* sent an ARP event */
140 #define LLINFO_PROBING 0x2 /* waiting for an ARP reply */
141 };
142
143 static LIST_HEAD(, llinfo_arp) llinfo_arp;
144
145 static thread_call_t arp_timeout_tcall;
146 static int arp_timeout_run; /* arp_timeout is scheduled to run */
147 static void arp_timeout(thread_call_param_t arg0, thread_call_param_t arg1);
148 static void arp_sched_timeout(struct timeval *);
149
150 static thread_call_t arp_probe_tcall;
151 static int arp_probe_run; /* arp_probe is scheduled to run */
152 static void arp_probe(thread_call_param_t arg0, thread_call_param_t arg1);
153 static void arp_sched_probe(struct timeval *);
154
155 static void arptfree(struct llinfo_arp *, void *);
156 static errno_t arp_lookup_route(const struct in_addr *, int,
157 int, route_t *, unsigned int);
158 static int arp_getstat SYSCTL_HANDLER_ARGS;
159
160 static struct llinfo_arp *arp_llinfo_alloc(zalloc_flags_t);
161 static void arp_llinfo_free(void *);
162 static uint32_t arp_llinfo_flushq(struct llinfo_arp *);
163 static void arp_llinfo_purge(struct rtentry *);
164 static void arp_llinfo_get_ri(struct rtentry *, struct rt_reach_info *);
165 static void arp_llinfo_get_iflri(struct rtentry *, struct ifnet_llreach_info *);
166 static void arp_llinfo_refresh(struct rtentry *);
167
168 static __inline void arp_llreach_use(struct llinfo_arp *);
169 static __inline int arp_llreach_reachable(struct llinfo_arp *);
170 static void arp_llreach_alloc(struct rtentry *, struct ifnet *, void *,
171 unsigned int, boolean_t, uint32_t *);
172
173 extern int tvtohz(struct timeval *);
174
175 static int arpinit_done;
176
177 SYSCTL_DECL(_net_link_ether);
178 SYSCTL_NODE(_net_link_ether, PF_INET, inet, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
179
180 static int arpt_prune = (5 * 60 * 1); /* walk list every 5 minutes */
181 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, prune_intvl,
182 CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_prune, 0, "");
183
184 #define ARP_PROBE_TIME 7 /* seconds */
185 static u_int32_t arpt_probe = ARP_PROBE_TIME;
186 SYSCTL_UINT(_net_link_ether_inet, OID_AUTO, probe_intvl,
187 CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_probe, 0, "");
188
189 static int arpt_keep = (20 * 60); /* once resolved, good for 20 more minutes */
190 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, max_age,
191 CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_keep, 0, "");
192
193 static int arpt_down = 20; /* once declared down, don't send for 20 sec */
194 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, host_down_time,
195 CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_down, 0, "");
196
197 static int arp_llreach_base = 120; /* seconds */
198 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, arp_llreach_base,
199 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_llreach_base, 0,
200 "default ARP link-layer reachability max lifetime (in seconds)");
201
202 #define ARP_UNICAST_LIMIT 3 /* # of probes until ARP refresh broadcast */
203 static u_int32_t arp_unicast_lim = ARP_UNICAST_LIMIT;
204 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, arp_unicast_lim,
205 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_unicast_lim, ARP_UNICAST_LIMIT,
206 "number of unicast ARP refresh probes before using broadcast");
207
208 static u_int32_t arp_maxtries = 5;
209 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxtries,
210 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxtries, 0, "");
211
212 static u_int32_t arp_maxhold = 16;
213 SYSCTL_UINT(_net_link_ether_inet, OID_AUTO, maxhold,
214 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxhold, 0, "");
215
216 static int useloopback = 1; /* use loopback interface for local traffic */
217 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, useloopback,
218 CTLFLAG_RW | CTLFLAG_LOCKED, &useloopback, 0, "");
219
220 static int arp_proxyall = 0;
221 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, proxyall,
222 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_proxyall, 0, "");
223
224 static int arp_sendllconflict = 0;
225 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, sendllconflict,
226 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_sendllconflict, 0, "");
227
228 static int log_arp_warnings = 0; /* Thread safe: no accumulated state */
229 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_warnings,
230 CTLFLAG_RW | CTLFLAG_LOCKED,
231 &log_arp_warnings, 0,
232 "log arp warning messages");
233
234 static int keep_announcements = 1; /* Thread safe: no aging of state */
235 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, keep_announcements,
236 CTLFLAG_RW | CTLFLAG_LOCKED,
237 &keep_announcements, 0,
238 "keep arp announcements");
239
240 static int send_conflicting_probes = 1; /* Thread safe: no accumulated state */
241 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, send_conflicting_probes,
242 CTLFLAG_RW | CTLFLAG_LOCKED,
243 &send_conflicting_probes, 0,
244 "send conflicting link-local arp probes");
245
246 static int arp_verbose;
247 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, verbose,
248 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_verbose, 0, "");
249
250 static uint32_t arp_maxhold_total = 1024; /* max total packets in the holdq */
251 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxhold_total,
252 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxhold_total, 0, "");
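/*
 * Usage note (illustrative): the knobs above surface under
 * net.link.ether.inet and can be inspected or tuned from userland,
 * for example:
 *
 *	sysctl net.link.ether.inet.max_age	# read arpt_keep
 *	sysctl -w net.link.ether.inet.maxtries=10
 */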
253
254
255 /*
256 * Generally protected by rnh_lock; use atomic operations on fields
257 * that are also modified outside of that lock (if needed).
258 */
259 struct arpstat arpstat __attribute__((aligned(sizeof(uint64_t))));
260 SYSCTL_PROC(_net_link_ether_inet, OID_AUTO, stats,
261 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
262 0, 0, arp_getstat, "S,arpstat",
263 "ARP statistics (struct arpstat, net/if_arp.h)");
264
265 static ZONE_DECLARE(llinfo_arp_zone, "llinfo_arp",
266 sizeof(struct llinfo_arp), ZC_ZFREE_CLEARMEM);
267
268 void
269 arp_init(void)
270 {
271 VERIFY(!arpinit_done);
272
273 LIST_INIT(&llinfo_arp);
274
275 arpinit_done = 1;
276 }
277
278 static struct llinfo_arp *
279 arp_llinfo_alloc(zalloc_flags_t how)
280 {
281 struct llinfo_arp *la = zalloc_flags(llinfo_arp_zone, how | Z_ZERO);
282
283 if (la) {
284 /*
285 * The type of queue (Q_DROPHEAD) here is just a hint;
286 * the actual logic that works on this queue performs
287  * a head drop; see arp_llinfo_addq() for details.
288 */
289 _qinit(&la->la_holdq, Q_DROPHEAD, (arp_maxhold == 0) ?
290 (uint32_t)-1 : arp_maxhold, QP_MBUF);
291 }
292 return la;
293 }
294
295 static void
296 arp_llinfo_free(void *arg)
297 {
298 struct llinfo_arp *la = arg;
299
300 if (la->la_le.le_next != NULL || la->la_le.le_prev != NULL) {
301 panic("%s: trying to free %p when it is in use", __func__, la);
302 /* NOTREACHED */
303 }
304
305 /* Free any held packets */
306 (void) arp_llinfo_flushq(la);
307
308 /* Purge any link-layer info caching */
309 VERIFY(la->la_rt->rt_llinfo == la);
310 if (la->la_rt->rt_llinfo_purge != NULL) {
311 la->la_rt->rt_llinfo_purge(la->la_rt);
312 }
313
314 zfree(llinfo_arp_zone, la);
315 }
316
317 static bool
318 arp_llinfo_addq(struct llinfo_arp *la, struct mbuf *m)
319 {
320 classq_pkt_t pkt = CLASSQ_PKT_INITIALIZER(pkt);
321
322 if (arpstat.held >= arp_maxhold_total) {
323 if (arp_verbose) {
324 log(LOG_DEBUG,
325 "%s: dropping packet due to maxhold_total\n",
326 __func__);
327 }
328 atomic_add_32(&arpstat.dropped, 1);
329 return false;
330 }
331
332 if (qlen(&la->la_holdq) >= qlimit(&la->la_holdq)) {
333 struct mbuf *_m;
334 /* prune less than CTL, else take what's at the head */
335 _getq_scidx_lt(&la->la_holdq, &pkt, SCIDX_CTL);
336 _m = pkt.cp_mbuf;
337 if (_m == NULL) {
338 _getq(&la->la_holdq, &pkt);
339 _m = pkt.cp_mbuf;
340 }
341 VERIFY(_m != NULL);
342 if (arp_verbose) {
343 log(LOG_DEBUG, "%s: dropping packet (scidx %u)\n",
344 __func__, MBUF_SCIDX(mbuf_get_service_class(_m)));
345 }
346 m_freem(_m);
347 atomic_add_32(&arpstat.dropped, 1);
348 atomic_add_32(&arpstat.held, -1);
349 }
350 CLASSQ_PKT_INIT_MBUF(&pkt, m);
351 _addq(&la->la_holdq, &pkt);
352 atomic_add_32(&arpstat.held, 1);
353 if (arp_verbose) {
354 log(LOG_DEBUG, "%s: enqueued packet (scidx %u), qlen now %u\n",
355 __func__, MBUF_SCIDX(mbuf_get_service_class(m)),
356 qlen(&la->la_holdq));
357 }
358
359 return true;
360 }
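/*
 * Behavior sketch (illustrative): with the default arp_maxhold of 16,
 * the 17th packet queued on an unresolved entry first evicts a packet
 * below the CTL service class (or, failing that, the head) before
 * being appended at the tail; arp_maxhold_total (1024) caps the number
 * of packets held across all entries, beyond which new packets are
 * dropped outright.
 */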
361
362 static uint32_t
363 arp_llinfo_flushq(struct llinfo_arp *la)
364 {
365 uint32_t held = qlen(&la->la_holdq);
366
367 if (held != 0) {
368 atomic_add_32(&arpstat.purged, held);
369 atomic_add_32(&arpstat.held, -held);
370 _flushq(&la->la_holdq);
371 }
372 la->la_prbreq_cnt = 0;
373 VERIFY(qempty(&la->la_holdq));
374 return held;
375 }
376
377 static void
378 arp_llinfo_purge(struct rtentry *rt)
379 {
380 struct llinfo_arp *la = rt->rt_llinfo;
381
382 RT_LOCK_ASSERT_HELD(rt);
383 VERIFY(rt->rt_llinfo_purge == arp_llinfo_purge && la != NULL);
384
385 if (la->la_llreach != NULL) {
386 RT_CONVERT_LOCK(rt);
387 ifnet_llreach_free(la->la_llreach);
388 la->la_llreach = NULL;
389 }
390 la->la_lastused = 0;
391 }
392
393 static void
394 arp_llinfo_get_ri(struct rtentry *rt, struct rt_reach_info *ri)
395 {
396 struct llinfo_arp *la = rt->rt_llinfo;
397 struct if_llreach *lr = la->la_llreach;
398
399 if (lr == NULL) {
400 bzero(ri, sizeof(*ri));
401 ri->ri_rssi = IFNET_RSSI_UNKNOWN;
402 ri->ri_lqm = IFNET_LQM_THRESH_OFF;
403 ri->ri_npm = IFNET_NPM_THRESH_UNKNOWN;
404 } else {
405 IFLR_LOCK(lr);
406 /* Export to rt_reach_info structure */
407 ifnet_lr2ri(lr, ri);
408 /* Export ARP send expiration (calendar) time */
409 ri->ri_snd_expire =
410 ifnet_llreach_up2calexp(lr, la->la_lastused);
411 IFLR_UNLOCK(lr);
412 }
413 }
414
415 static void
416 arp_llinfo_get_iflri(struct rtentry *rt, struct ifnet_llreach_info *iflri)
417 {
418 struct llinfo_arp *la = rt->rt_llinfo;
419 struct if_llreach *lr = la->la_llreach;
420
421 if (lr == NULL) {
422 bzero(iflri, sizeof(*iflri));
423 iflri->iflri_rssi = IFNET_RSSI_UNKNOWN;
424 iflri->iflri_lqm = IFNET_LQM_THRESH_OFF;
425 iflri->iflri_npm = IFNET_NPM_THRESH_UNKNOWN;
426 } else {
427 IFLR_LOCK(lr);
428 /* Export to ifnet_llreach_info structure */
429 ifnet_lr2iflri(lr, iflri);
430 /* Export ARP send expiration (uptime) time */
431 iflri->iflri_snd_expire =
432 ifnet_llreach_up2upexp(lr, la->la_lastused);
433 IFLR_UNLOCK(lr);
434 }
435 }
436
437 static void
438 arp_llinfo_refresh(struct rtentry *rt)
439 {
440 uint64_t timenow = net_uptime();
441 /*
442  * If the route entry is permanent, static, or has no
443  * link-layer info attached, we can't expedite the
444  * refresh.
445 */
446 if ((rt->rt_expire == 0) ||
447 (rt->rt_flags & RTF_STATIC) ||
448 !(rt->rt_flags & RTF_LLINFO)) {
449 return;
450 }
451
452 if (rt->rt_expire > timenow) {
453 rt->rt_expire = timenow;
454 }
455 return;
456 }
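/*
 * Note (illustrative): pulling rt_expire back to "now" makes the next
 * lookup treat this entry as expired, so a fresh ARP request goes out
 * the next time the entry is used.
 */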
457
458 void
459 arp_llreach_set_reachable(struct ifnet *ifp, void *addr, unsigned int alen)
460 {
461 /* Nothing more to do if it's disabled */
462 if (arp_llreach_base == 0) {
463 return;
464 }
465
466 ifnet_llreach_set_reachable(ifp, ETHERTYPE_IP, addr, alen);
467 }
468
469 static __inline void
470 arp_llreach_use(struct llinfo_arp *la)
471 {
472 if (la->la_llreach != NULL) {
473 la->la_lastused = net_uptime();
474 }
475 }
476
477 static __inline int
478 arp_llreach_reachable(struct llinfo_arp *la)
479 {
480 struct if_llreach *lr;
481 const char *why = NULL;
482
483 /* Nothing more to do if it's disabled; pretend it's reachable */
484 if (arp_llreach_base == 0) {
485 return 1;
486 }
487
488 if ((lr = la->la_llreach) == NULL) {
489 /*
490 * Link-layer reachability record isn't present for this
491 * ARP entry; pretend it's reachable and use it as is.
492 */
493 return 1;
494 } else if (ifnet_llreach_reachable(lr)) {
495 /*
496 * Record is present, it's not shared with other ARP
497 * entries and a packet has recently been received
498 * from the remote host; consider it reachable.
499 */
500 if (lr->lr_reqcnt == 1) {
501 return 1;
502 }
503
504 /* Prime it up, if this is the first time */
505 if (la->la_lastused == 0) {
506 VERIFY(la->la_llreach != NULL);
507 arp_llreach_use(la);
508 }
509
510 /*
511 * Record is present and shared with one or more ARP
512 * entries, and a packet has recently been received
513 * from the remote host. Since it's shared by more
514  * than one IP address, we can't rely on the link-
515 * layer reachability alone; consider it reachable if
516 * this ARP entry has been used "recently."
517 */
518 if (ifnet_llreach_reachable_delta(lr, la->la_lastused)) {
519 return 1;
520 }
521
522 why = "has alias(es) and hasn't been used in a while";
523 } else {
524 why = "haven't heard from it in a while";
525 }
526
527 if (arp_verbose > 1) {
528 char tmp[MAX_IPv4_STR_LEN];
529 u_int64_t now = net_uptime();
530
531 log(LOG_DEBUG, "%s: ARP probe(s) needed for %s; "
532 "%s [lastused %lld, lastrcvd %lld] secs ago\n",
533 if_name(lr->lr_ifp), inet_ntop(AF_INET,
534 &SIN(rt_key(la->la_rt))->sin_addr, tmp, sizeof(tmp)), why,
535 (la->la_lastused ? (int64_t)(now - la->la_lastused) : -1),
536 (lr->lr_lastrcvd ? (int64_t)(now - lr->lr_lastrcvd) : -1));
537 }
538 return 0;
539 }
540
541 /*
542 * Obtain a link-layer source cache entry for the sender.
543 *
544 * NOTE: This is currently only for ARP/Ethernet.
545 */
546 static void
547 arp_llreach_alloc(struct rtentry *rt, struct ifnet *ifp, void *addr,
548 unsigned int alen, boolean_t solicited, uint32_t *p_rt_event_code)
549 {
550 VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
551 VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);
552
553 if (arp_llreach_base != 0 && rt->rt_expire != 0 &&
554 !(rt->rt_ifp->if_flags & IFF_LOOPBACK) &&
555 ifp->if_addrlen == IF_LLREACH_MAXLEN && /* Ethernet */
556 alen == ifp->if_addrlen) {
557 struct llinfo_arp *la = rt->rt_llinfo;
558 struct if_llreach *lr;
559 const char *why = NULL, *type = "";
560
561 /* Become a regular mutex, just in case */
562 RT_CONVERT_LOCK(rt);
563
564 if ((lr = la->la_llreach) != NULL) {
565 type = (solicited ? "ARP reply" : "ARP announcement");
566 /*
567 * If target has changed, create a new record;
568 * otherwise keep existing record.
569 */
570 IFLR_LOCK(lr);
571 if (bcmp(addr, lr->lr_key.addr, alen) != 0) {
572 IFLR_UNLOCK(lr);
573 /* Purge any link-layer info caching */
574 VERIFY(rt->rt_llinfo_purge != NULL);
575 rt->rt_llinfo_purge(rt);
576 lr = NULL;
577 why = " for different target HW address; "
578 "using new llreach record";
579 *p_rt_event_code = ROUTE_LLENTRY_CHANGED;
580 } else {
581 /*
582 * If we were doing unicast probing, we need to
583 * deliver an event for neighbor cache resolution
584 */
585 if (lr->lr_probes != 0) {
586 *p_rt_event_code = ROUTE_LLENTRY_RESOLVED;
587 }
588
589 lr->lr_probes = 0; /* reset probe count */
590 IFLR_UNLOCK(lr);
591 if (solicited) {
592 why = " for same target HW address; "
593 "keeping existing llreach record";
594 }
595 }
596 }
597
598 if (lr == NULL) {
599 lr = la->la_llreach = ifnet_llreach_alloc(ifp,
600 ETHERTYPE_IP, addr, alen, arp_llreach_base);
601 if (lr != NULL) {
602 lr->lr_probes = 0; /* reset probe count */
603 if (why == NULL) {
604 why = "creating new llreach record";
605 }
606 }
607 *p_rt_event_code = ROUTE_LLENTRY_RESOLVED;
608 }
609
610 if (arp_verbose > 1 && lr != NULL && why != NULL) {
611 char tmp[MAX_IPv4_STR_LEN];
612
613 log(LOG_DEBUG, "%s: %s%s for %s\n", if_name(ifp),
614 type, why, inet_ntop(AF_INET,
615 &SIN(rt_key(rt))->sin_addr, tmp, sizeof(tmp)));
616 }
617 }
618 }
619
620 struct arptf_arg {
621 boolean_t draining;
622 boolean_t probing;
623 uint32_t killed;
624 uint32_t aging;
625 uint32_t sticky;
626 uint32_t found;
627 uint32_t qlen;
628 uint32_t qsize;
629 };
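/*
 * For illustration: the three walkers below fill this in differently.
 * in_arpdrain() sets draining = TRUE (expire aggressively),
 * arp_timeout() leaves both flags clear (normal aging), and
 * arp_probe() sets probing = TRUE (flush held packets whose probe
 * deadline has passed).
 */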
630
631 /*
632 * Free an arp entry.
633 */
634 static void
635 arptfree(struct llinfo_arp *la, void *arg)
636 {
637 struct arptf_arg *ap = arg;
638 struct rtentry *rt = la->la_rt;
639 uint64_t timenow;
640
641 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
642
643 /* rnh_lock acquired by caller protects rt from going away */
644 RT_LOCK(rt);
645
646 VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
647 VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);
648
649 ap->found++;
650 timenow = net_uptime();
651
652 /* If we're probing, flush out held packets upon probe expiration */
653 if (ap->probing && (la->la_flags & LLINFO_PROBING) &&
654 la->la_probeexp <= timenow) {
655 struct sockaddr_dl *sdl = SDL(rt->rt_gateway);
656 if (sdl != NULL) {
657 sdl->sdl_alen = 0;
658 }
659 (void) arp_llinfo_flushq(la);
660 /*
661 * Enqueue work item to invoke callback for this route entry
662 */
663 route_event_enqueue_nwk_wq_entry(rt, NULL,
664 ROUTE_LLENTRY_UNREACH, NULL, TRUE);
665 }
666
667 /*
668  * The following is used mostly to re-arm the timer and
669  * for logging.
670  * qlen is what re-arms the timer; therefore, pure probe
671  * requests can be treated as 0-length packets that
672  * contribute to the queue length but not to its size.
673 */
674 ap->qlen += qlen(&la->la_holdq);
675 ap->qlen += la->la_prbreq_cnt;
676 ap->qsize += qsize(&la->la_holdq);
677
678 if (rt->rt_expire == 0 || (rt->rt_flags & RTF_STATIC)) {
679 ap->sticky++;
680 /* ARP entry is permanent? */
681 if (rt->rt_expire == 0) {
682 RT_UNLOCK(rt);
683 return;
684 }
685 }
686
687 /* ARP entry hasn't expired and we're not draining? */
688 if (!ap->draining && rt->rt_expire > timenow) {
689 RT_UNLOCK(rt);
690 ap->aging++;
691 return;
692 }
693
694 if (rt->rt_refcnt > 0) {
695 /*
696 * ARP entry has expired, with outstanding refcnt.
697 * If we're not draining, force ARP query to be
698 * generated next time this entry is used.
699 */
700 if (!ap->draining && !ap->probing) {
701 struct sockaddr_dl *sdl = SDL(rt->rt_gateway);
702 if (sdl != NULL) {
703 sdl->sdl_alen = 0;
704 }
705 la->la_asked = 0;
706 rt->rt_flags &= ~RTF_REJECT;
707 }
708 RT_UNLOCK(rt);
709 } else if (!(rt->rt_flags & RTF_STATIC) && !ap->probing) {
710 /*
711 * ARP entry has no outstanding refcnt, and we're either
712 * draining or it has expired; delete it from the routing
713 * table. Safe to drop rt_lock and use rt_key, since holding
714 * rnh_lock here prevents another thread from calling
715 * rt_setgate() on this route.
716 */
717 RT_UNLOCK(rt);
718 rtrequest_locked(RTM_DELETE, rt_key(rt), NULL,
719 rt_mask(rt), 0, NULL);
720 arpstat.timeouts++;
721 ap->killed++;
722 } else {
723 /* ARP entry is static; let it linger */
724 RT_UNLOCK(rt);
725 }
726 }
727
728 void
729 in_arpdrain(void *arg)
730 {
731 #pragma unused(arg)
732 struct llinfo_arp *la, *ola;
733 struct arptf_arg farg;
734
735 if (arp_verbose) {
736 log(LOG_DEBUG, "%s: draining ARP entries\n", __func__);
737 }
738
739 lck_mtx_lock(rnh_lock);
740 la = llinfo_arp.lh_first;
741 bzero(&farg, sizeof(farg));
742 farg.draining = TRUE;
743 while ((ola = la) != NULL) {
744 la = la->la_le.le_next;
745 arptfree(ola, &farg);
746 }
747 if (arp_verbose) {
748 log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
749 "%u pkts held (%u bytes)\n", __func__, farg.found,
750 farg.aging, farg.sticky, farg.killed, farg.qlen,
751 farg.qsize);
752 }
753 lck_mtx_unlock(rnh_lock);
754 }
755
756 /*
757 * Timeout routine. Age arp_tab entries periodically.
758 */
759 static void
760 arp_timeout(thread_call_param_t arg0, thread_call_param_t arg1)
761 {
762 #pragma unused(arg0, arg1)
763 struct llinfo_arp *la, *ola;
764 struct timeval atv;
765 struct arptf_arg farg;
766
767 lck_mtx_lock(rnh_lock);
768 la = llinfo_arp.lh_first;
769 bzero(&farg, sizeof(farg));
770 while ((ola = la) != NULL) {
771 la = la->la_le.le_next;
772 arptfree(ola, &farg);
773 }
774 if (arp_verbose) {
775 log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
776 "%u pkts held (%u bytes)\n", __func__, farg.found,
777 farg.aging, farg.sticky, farg.killed, farg.qlen,
778 farg.qsize);
779 }
780 atv.tv_usec = 0;
781 atv.tv_sec = MAX(arpt_prune, 5);
782 /* re-arm the timer if there's work to do */
783 arp_timeout_run = 0;
784 if (farg.aging > 0) {
785 arp_sched_timeout(&atv);
786 } else if (arp_verbose) {
787 log(LOG_DEBUG, "%s: not rescheduling timer\n", __func__);
788 }
789 lck_mtx_unlock(rnh_lock);
790 }
791
792 static void
793 arp_sched_timeout(struct timeval *atv)
794 {
795 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
796
797 if (!arp_timeout_run) {
798 struct timeval tv;
799 uint64_t deadline = 0;
800
801 if (arp_timeout_tcall == NULL) {
802 arp_timeout_tcall =
803 thread_call_allocate(arp_timeout, NULL);
804 VERIFY(arp_timeout_tcall != NULL);
805 }
806
807 if (atv == NULL) {
808 tv.tv_usec = 0;
809 tv.tv_sec = MAX(arpt_prune / 5, 1);
810 atv = &tv;
811 }
812 if (arp_verbose) {
813 log(LOG_DEBUG, "%s: timer scheduled in "
814 "T+%llus.%lluu\n", __func__,
815 (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
816 }
817 arp_timeout_run = 1;
818
819 clock_deadline_for_periodic_event(atv->tv_sec * NSEC_PER_SEC,
820 mach_absolute_time(), &deadline);
821 (void) thread_call_enter_delayed(arp_timeout_tcall, deadline);
822 }
823 }
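/*
 * Worked example (from the defaults above): when no timeout is
 * pending, the idle re-arm interval is MAX(arpt_prune / 5, 1) =
 * 60 seconds with arpt_prune at 300, while arp_timeout() itself
 * re-arms at MAX(arpt_prune, 5) = 300 seconds when aging entries
 * remain.
 */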
824
825 /*
826 * Probe routine.
827 */
828 static void
829 arp_probe(thread_call_param_t arg0, thread_call_param_t arg1)
830 {
831 #pragma unused(arg0, arg1)
832 struct llinfo_arp *la, *ola;
833 struct timeval atv;
834 struct arptf_arg farg;
835
836 lck_mtx_lock(rnh_lock);
837 la = llinfo_arp.lh_first;
838 bzero(&farg, sizeof(farg));
839 farg.probing = TRUE;
840 while ((ola = la) != NULL) {
841 la = la->la_le.le_next;
842 arptfree(ola, &farg);
843 }
844 if (arp_verbose) {
845 log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
846 "%u pkts held (%u bytes)\n", __func__, farg.found,
847 farg.aging, farg.sticky, farg.killed, farg.qlen,
848 farg.qsize);
849 }
850 atv.tv_usec = 0;
851 atv.tv_sec = MAX(arpt_probe, ARP_PROBE_TIME);
852 /* re-arm the probe if there's work to do */
853 arp_probe_run = 0;
854 if (farg.qlen > 0) {
855 arp_sched_probe(&atv);
856 } else if (arp_verbose) {
857 log(LOG_DEBUG, "%s: not rescheduling probe\n", __func__);
858 }
859 lck_mtx_unlock(rnh_lock);
860 }
861
862 static void
863 arp_sched_probe(struct timeval *atv)
864 {
865 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
866
867 if (!arp_probe_run) {
868 struct timeval tv;
869 uint64_t deadline = 0;
870
871 if (arp_probe_tcall == NULL) {
872 arp_probe_tcall =
873 thread_call_allocate(arp_probe, NULL);
874 VERIFY(arp_probe_tcall != NULL);
875 }
876
877 if (atv == NULL) {
878 tv.tv_usec = 0;
879 tv.tv_sec = MAX(arpt_probe, ARP_PROBE_TIME);
880 atv = &tv;
881 }
882 if (arp_verbose) {
883 log(LOG_DEBUG, "%s: probe scheduled in "
884 "T+%llus.%lluu\n", __func__,
885 (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
886 }
887 arp_probe_run = 1;
888
889 clock_deadline_for_periodic_event(atv->tv_sec * NSEC_PER_SEC,
890 mach_absolute_time(), &deadline);
891 (void) thread_call_enter_delayed(arp_probe_tcall, deadline);
892 }
893 }
894
895 /*
896 * ifa_rtrequest() callback
897 */
898 static void
899 arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa)
900 {
901 #pragma unused(sa)
902 struct sockaddr *gate = rt->rt_gateway;
903 struct llinfo_arp *la = rt->rt_llinfo;
904 static struct sockaddr_dl null_sdl =
905 { .sdl_len = sizeof(null_sdl), .sdl_family = AF_LINK };
906 uint64_t timenow;
907 char buf[MAX_IPv4_STR_LEN];
908
909 VERIFY(arpinit_done);
910 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
911 RT_LOCK_ASSERT_HELD(rt);
912
913 if (rt->rt_flags & RTF_GATEWAY) {
914 return;
915 }
916
917 timenow = net_uptime();
918 switch (req) {
919 case RTM_ADD:
920 /*
921  * XXX: If this is a manually added route to an interface,
922  * such as an older version of routed or gated might
923  * provide, restore the cloning bit.
924 */
925 if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL &&
926 SIN(rt_mask(rt))->sin_addr.s_addr != INADDR_BROADCAST) {
927 rt->rt_flags |= RTF_CLONING;
928 }
929
930 if (rt->rt_flags & RTF_CLONING) {
931 /*
932 * Case 1: This route should come from a route to iface.
933 */
934 if (rt_setgate(rt, rt_key(rt), SA(&null_sdl)) == 0) {
935 gate = rt->rt_gateway;
936 SDL(gate)->sdl_type = rt->rt_ifp->if_type;
937 SDL(gate)->sdl_index = rt->rt_ifp->if_index;
938 /*
939 * In case we're called before 1.0 sec.
940 * has elapsed.
941 */
942 rt_setexpire(rt, MAX(timenow, 1));
943 }
944 break;
945 }
946 /* Announce a new entry if requested. */
947 if (rt->rt_flags & RTF_ANNOUNCE) {
948 if (la != NULL) {
949 arp_llreach_use(la); /* Mark use timestamp */
950 }
951 RT_UNLOCK(rt);
952 dlil_send_arp(rt->rt_ifp, ARPOP_REQUEST,
953 SDL(gate), rt_key(rt), NULL, rt_key(rt), 0);
954 RT_LOCK(rt);
955 arpstat.txannounces++;
956 }
957 OS_FALLTHROUGH;
958 case RTM_RESOLVE:
959 if (gate->sa_family != AF_LINK ||
960 gate->sa_len < sizeof(null_sdl)) {
961 arpstat.invalidreqs++;
962 log(LOG_ERR, "%s: route to %s has bad gateway address "
963 "(sa_family %u sa_len %u) on %s\n",
964 __func__, inet_ntop(AF_INET,
965 &SIN(rt_key(rt))->sin_addr.s_addr, buf,
966 sizeof(buf)), gate->sa_family, gate->sa_len,
967 if_name(rt->rt_ifp));
968 break;
969 }
970 SDL(gate)->sdl_type = rt->rt_ifp->if_type;
971 SDL(gate)->sdl_index = rt->rt_ifp->if_index;
972
973 if (la != NULL) {
974 break; /* This happens on a route change */
975 }
976 /*
977 * Case 2: This route may come from cloning, or a manual route
978 * add with a LL address.
979 */
980 rt->rt_llinfo = la = arp_llinfo_alloc(Z_WAITOK);
981
982 rt->rt_llinfo_get_ri = arp_llinfo_get_ri;
983 rt->rt_llinfo_get_iflri = arp_llinfo_get_iflri;
984 rt->rt_llinfo_purge = arp_llinfo_purge;
985 rt->rt_llinfo_free = arp_llinfo_free;
986 rt->rt_llinfo_refresh = arp_llinfo_refresh;
987 rt->rt_flags |= RTF_LLINFO;
988 la->la_rt = rt;
989 LIST_INSERT_HEAD(&llinfo_arp, la, la_le);
990 arpstat.inuse++;
991
992 /* We have at least one entry; arm the timer if not already */
993 arp_sched_timeout(NULL);
994
995 /*
996  * This keeps multicast addresses from showing up in
997  * `arp -a' listings as unresolved; the entry is not
998  * actually functional. The same goes for broadcast.
999  * For IPv4 link-local addresses, keep the entry around
1000  * even after it has expired.
1001 */
1002 if (IN_MULTICAST(ntohl(SIN(rt_key(rt))->sin_addr.s_addr))) {
1003 RT_UNLOCK(rt);
1004 dlil_resolve_multi(rt->rt_ifp, rt_key(rt), gate,
1005 sizeof(struct sockaddr_dl));
1006 RT_LOCK(rt);
1007 rt_setexpire(rt, 0);
1008 } else if (in_broadcast(SIN(rt_key(rt))->sin_addr,
1009 rt->rt_ifp)) {
1010 struct sockaddr_dl *gate_ll = SDL(gate);
1011 size_t broadcast_len;
1012 int ret = ifnet_llbroadcast_copy_bytes(rt->rt_ifp,
1013 LLADDR(gate_ll), sizeof(gate_ll->sdl_data),
1014 &broadcast_len);
1015 if (ret == 0 && broadcast_len <= UINT8_MAX) {
1016 gate_ll->sdl_alen = (u_char)broadcast_len;
1017 gate_ll->sdl_family = AF_LINK;
1018 gate_ll->sdl_len = sizeof(struct sockaddr_dl);
1019 }
1020 /* In case we're called before 1.0 sec. has elapsed */
1021 rt_setexpire(rt, MAX(timenow, 1));
1022 } else if (IN_LINKLOCAL(ntohl(SIN(rt_key(rt))->
1023 sin_addr.s_addr))) {
1024 rt->rt_flags |= RTF_STATIC;
1025 }
1026
1027 /* Set default maximum number of retries */
1028 la->la_maxtries = arp_maxtries;
1029
1030 /* Become a regular mutex, just in case */
1031 RT_CONVERT_LOCK(rt);
1032 IFA_LOCK_SPIN(rt->rt_ifa);
1033 if (SIN(rt_key(rt))->sin_addr.s_addr ==
1034 (IA_SIN(rt->rt_ifa))->sin_addr.s_addr) {
1035 IFA_UNLOCK(rt->rt_ifa);
1036 /*
1037 * This test used to be
1038 * if (loif.if_flags & IFF_UP)
1039 * It allowed local traffic to be forced through the
1040 * hardware by configuring the loopback down. However,
1041 * it causes problems during network configuration
1042 * for boards that can't receive packets they send.
1043 * It is now necessary to clear "useloopback" and
1044 * remove the route to force traffic out to the
1045 * hardware.
1046 */
1047 rt_setexpire(rt, 0);
1048 ifnet_lladdr_copy_bytes(rt->rt_ifp, LLADDR(SDL(gate)),
1049 SDL(gate)->sdl_alen = rt->rt_ifp->if_addrlen);
1050 if (useloopback) {
1051 if (rt->rt_ifp != lo_ifp) {
1052 /*
1053 * Purge any link-layer info caching.
1054 */
1055 if (rt->rt_llinfo_purge != NULL) {
1056 rt->rt_llinfo_purge(rt);
1057 }
1058
1059 /*
1060 * Adjust route ref count for the
1061 * interfaces.
1062 */
1063 if (rt->rt_if_ref_fn != NULL) {
1064 rt->rt_if_ref_fn(lo_ifp, 1);
1065 rt->rt_if_ref_fn(rt->rt_ifp, -1);
1066 }
1067 }
1068 rt->rt_ifp = lo_ifp;
1069 /*
1070 * If rmx_mtu is not locked, update it
1071 * to the MTU used by the new interface.
1072 */
1073 if (!(rt->rt_rmx.rmx_locks & RTV_MTU)) {
1074 rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
1075 }
1076 }
1077 } else {
1078 IFA_UNLOCK(rt->rt_ifa);
1079 }
1080 break;
1081
1082 case RTM_DELETE:
1083 if (la == NULL) {
1084 break;
1085 }
1086 /*
1087 * Unchain it but defer the actual freeing until the route
1088 * itself is to be freed. rt->rt_llinfo still points to
1089 * llinfo_arp, and likewise, la->la_rt still points to this
1090 * route entry, except that RTF_LLINFO is now cleared.
1091 */
1092 LIST_REMOVE(la, la_le);
1093 la->la_le.le_next = NULL;
1094 la->la_le.le_prev = NULL;
1095 arpstat.inuse--;
1096
1097 /*
1098 * Purge any link-layer info caching.
1099 */
1100 if (rt->rt_llinfo_purge != NULL) {
1101 rt->rt_llinfo_purge(rt);
1102 }
1103
1104 rt->rt_flags &= ~RTF_LLINFO;
1105 (void) arp_llinfo_flushq(la);
1106 }
1107 }
1108
1109 /*
1110  * Convert a hardware address to a hex string for logging errors.
1111 */
1112 static const char *
1113 sdl_addr_to_hex(const struct sockaddr_dl *sdl, char *orig_buf, int buflen)
1114 {
1115 char *buf = orig_buf;
1116 int i;
1117 const u_char *lladdr = (u_char *)(size_t)sdl->sdl_data;
1118 int maxbytes = buflen / 3;
1119
1120 if (maxbytes > sdl->sdl_alen) {
1121 maxbytes = sdl->sdl_alen;
1122 }
1123 *buf = '\0';
1124 for (i = 0; i < maxbytes; i++) {
1125 snprintf(buf, 3, "%02x", lladdr[i]);
1126 buf += 2;
1127 *buf = (i == maxbytes - 1) ? '\0' : ':';
1128 buf++;
1129 }
1130 return orig_buf;
1131 }
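/*
 * Example (illustrative): for a 6-byte Ethernet address this fills
 * buf with "00:11:22:33:44:55". Each byte consumes 3 characters (two
 * hex digits plus ':' or NUL), so buflen / 3 bounds the bytes printed
 * and an 18-byte buffer covers a full Ethernet address.
 */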
1132
1133 /*
1134  * arp_lookup_route will look up the route for a given address.
1135 *
1136 * The address must be for a host on a local network on this interface.
1137 * If the returned route is non-NULL, the route is locked and the caller
1138 * is responsible for unlocking it and releasing its reference.
1139 */
1140 static errno_t
1141 arp_lookup_route(const struct in_addr *addr, int create, int proxy,
1142 route_t *route, unsigned int ifscope)
1143 {
1144 struct sockaddr_inarp sin =
1145 { sizeof(sin), AF_INET, 0, { 0 }, { 0 }, 0, 0 };
1146 const char *why = NULL;
1147 errno_t error = 0;
1148 route_t rt;
1149
1150 *route = NULL;
1151
1152 sin.sin_addr.s_addr = addr->s_addr;
1153 sin.sin_other = proxy ? SIN_PROXY : 0;
1154
1155 /*
1156 * If the destination is a link-local address, don't
1157 * constrain the lookup (don't scope it).
1158 */
1159 if (IN_LINKLOCAL(ntohl(addr->s_addr))) {
1160 ifscope = IFSCOPE_NONE;
1161 }
1162
1163 rt = rtalloc1_scoped((struct sockaddr *)&sin, create, 0, ifscope);
1164 if (rt == NULL) {
1165 return ENETUNREACH;
1166 }
1167
1168 RT_LOCK(rt);
1169
1170 if (rt->rt_flags & RTF_GATEWAY) {
1171 why = "host is not on local network";
1172 error = ENETUNREACH;
1173 } else if (!(rt->rt_flags & RTF_LLINFO)) {
1174 why = "could not allocate llinfo";
1175 error = ENOMEM;
1176 } else if (rt->rt_gateway->sa_family != AF_LINK) {
1177 why = "gateway route is not ours";
1178 error = EPROTONOSUPPORT;
1179 }
1180
1181 if (error != 0) {
1182 if (create && (arp_verbose || log_arp_warnings)) {
1183 char tmp[MAX_IPv4_STR_LEN];
1184 log(LOG_DEBUG, "%s: link#%d %s failed: %s\n",
1185 __func__, ifscope, inet_ntop(AF_INET, addr, tmp,
1186 sizeof(tmp)), why);
1187 }
1188
1189 /*
1190 * If there are no references to this route, and it is
1191 * a cloned route, and not static, and ARP had created
1192 * the route, then purge it from the routing table as
1193 * it is probably bogus.
1194 */
1195 if (rt->rt_refcnt == 1 &&
1196 (rt->rt_flags & (RTF_WASCLONED | RTF_STATIC)) ==
1197 RTF_WASCLONED) {
1198 /*
1199  * Prevent another thread from modifying rt_key,
1200 * rt_gateway via rt_setgate() after rt_lock is
1201 * dropped by marking the route as defunct.
1202 */
1203 rt->rt_flags |= RTF_CONDEMNED;
1204 RT_UNLOCK(rt);
1205 rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
1206 rt_mask(rt), rt->rt_flags, NULL);
1207 rtfree(rt);
1208 } else {
1209 RT_REMREF_LOCKED(rt);
1210 RT_UNLOCK(rt);
1211 }
1212 return error;
1213 }
1214
1215 /*
1216 * Caller releases reference and does RT_UNLOCK(rt).
1217 */
1218 *route = rt;
1219 return 0;
1220 }
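/*
 * A minimal caller sketch (hypothetical, for illustration) of the
 * contract above: on success the route is returned locked with a
 * reference held, and the caller must drop both.
 *
 *	route_t rt = NULL;
 *	if (arp_lookup_route(&dst, 1, 0, &rt, ifp->if_index) == 0) {
 *		...use rt with rt_lock held...
 *		RT_REMREF_LOCKED(rt);
 *		RT_UNLOCK(rt);
 *	}
 */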
1221
1222 boolean_t
1223 arp_is_entry_probing(route_t p_route)
1224 {
1225 struct llinfo_arp *llinfo = p_route->rt_llinfo;
1226
1227 if (llinfo != NULL &&
1228 llinfo->la_llreach != NULL &&
1229 llinfo->la_llreach->lr_probes != 0) {
1230 return TRUE;
1231 }
1232
1233 return FALSE;
1234 }
1235
1236 /*
1237 * This is the ARP pre-output routine; care must be taken to ensure that
1238 * the "hint" route never gets freed via rtfree(), since the caller may
1239 * have stored it inside a struct route with a reference held for that
1240 * placeholder.
1241 */
1242 errno_t
1243 arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest,
1244 struct sockaddr_dl *ll_dest, size_t ll_dest_len, route_t hint,
1245 mbuf_t packet)
1246 {
1247 route_t route = NULL; /* output route */
1248 errno_t result = 0;
1249 struct sockaddr_dl *gateway;
1250 struct llinfo_arp *llinfo = NULL;
1251 boolean_t usable, probing = FALSE;
1252 uint64_t timenow;
1253 struct if_llreach *lr;
1254 struct ifaddr *rt_ifa;
1255 struct sockaddr *sa;
1256 uint32_t rtflags;
1257 struct sockaddr_dl sdl;
1258 boolean_t send_probe_notif = FALSE;
1259 boolean_t enqueued = FALSE;
1260
1261 if (ifp == NULL || net_dest == NULL) {
1262 return EINVAL;
1263 }
1264
1265 if (net_dest->sin_family != AF_INET) {
1266 return EAFNOSUPPORT;
1267 }
1268
1269 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING)) {
1270 return ENETDOWN;
1271 }
1272
1273 /*
1274 * If we were given a route, verify the route and grab the gateway
1275 */
1276 if (hint != NULL) {
1277 /*
1278 * Callee holds a reference on the route and returns
1279 * with the route entry locked, upon success.
1280 */
1281 result = route_to_gwroute((const struct sockaddr *)
1282 net_dest, hint, &route);
1283 if (result != 0) {
1284 return result;
1285 }
1286 if (route != NULL) {
1287 RT_LOCK_ASSERT_HELD(route);
1288 }
1289 }
1290
1291 if ((packet != NULL && (packet->m_flags & M_BCAST)) ||
1292 in_broadcast(net_dest->sin_addr, ifp)) {
1293 size_t broadcast_len;
1294 bzero(ll_dest, ll_dest_len);
1295 result = ifnet_llbroadcast_copy_bytes(ifp, LLADDR(ll_dest),
1296 ll_dest_len - offsetof(struct sockaddr_dl, sdl_data),
1297 &broadcast_len);
1298 if (result == 0 && broadcast_len <= UINT8_MAX) {
1299 ll_dest->sdl_alen = (u_char)broadcast_len;
1300 ll_dest->sdl_family = AF_LINK;
1301 ll_dest->sdl_len = sizeof(struct sockaddr_dl);
1302 }
1303 goto release;
1304 }
1305 if ((packet != NULL && (packet->m_flags & M_MCAST)) ||
1306 ((ifp->if_flags & IFF_MULTICAST) &&
1307 IN_MULTICAST(ntohl(net_dest->sin_addr.s_addr)))) {
1308 if (route != NULL) {
1309 RT_UNLOCK(route);
1310 }
1311 result = dlil_resolve_multi(ifp,
1312 (const struct sockaddr *)net_dest,
1313 (struct sockaddr *)ll_dest, ll_dest_len);
1314 if (route != NULL) {
1315 RT_LOCK(route);
1316 }
1317 goto release;
1318 }
1319
1320 /*
1321 * If we didn't find a route, or the route doesn't have
1322 * link layer information, trigger the creation of the
1323 * route and link layer information.
1324 */
1325 if (route == NULL || route->rt_llinfo == NULL) {
1326 /* Clean up now while we can */
1327 if (route != NULL) {
1328 if (route == hint) {
1329 RT_REMREF_LOCKED(route);
1330 RT_UNLOCK(route);
1331 } else {
1332 RT_UNLOCK(route);
1333 rtfree(route);
1334 }
1335 }
1336 /*
1337 * Callee holds a reference on the route and returns
1338 * with the route entry locked, upon success.
1339 */
1340 result = arp_lookup_route(&net_dest->sin_addr, 1, 0, &route,
1341 ifp->if_index);
1342 if (result == 0) {
1343 RT_LOCK_ASSERT_HELD(route);
1344 }
1345 }
1346
1347 if (result || route == NULL || (llinfo = route->rt_llinfo) == NULL) {
1348 /* In case result is 0 but no route, return an error */
1349 if (result == 0) {
1350 result = EHOSTUNREACH;
1351 }
1352
1353 if (route != NULL && route->rt_llinfo == NULL) {
1354 char tmp[MAX_IPv4_STR_LEN];
1355 log(LOG_ERR, "%s: can't allocate llinfo for %s\n",
1356 __func__, inet_ntop(AF_INET, &net_dest->sin_addr,
1357 tmp, sizeof(tmp)));
1358 }
1359 goto release;
1360 }
1361
1362 /*
1363 * Now that we have the right route, is it filled in?
1364 */
1365 gateway = SDL(route->rt_gateway);
1366 timenow = net_uptime();
1367 VERIFY(route->rt_expire == 0 || route->rt_rmx.rmx_expire != 0);
1368 VERIFY(route->rt_expire != 0 || route->rt_rmx.rmx_expire == 0);
1369
1370 usable = ((route->rt_expire == 0 || route->rt_expire > timenow) &&
1371 gateway != NULL && gateway->sdl_family == AF_LINK &&
1372 gateway->sdl_alen != 0);
1373
1374 if (usable) {
1375 boolean_t unreachable = !arp_llreach_reachable(llinfo);
1376
1377 /* Entry is usable, so fill in info for caller */
1378 bcopy(gateway, ll_dest, MIN(gateway->sdl_len, ll_dest_len));
1379 result = 0;
1380 arp_llreach_use(llinfo); /* Mark use timestamp */
1381
1382 lr = llinfo->la_llreach;
1383 if (lr == NULL) {
1384 goto release;
1385 }
1386 rt_ifa = route->rt_ifa;
1387
1388 /* Become a regular mutex, just in case */
1389 RT_CONVERT_LOCK(route);
1390 IFLR_LOCK_SPIN(lr);
1391
1392 if ((unreachable || (llinfo->la_flags & LLINFO_PROBING)) &&
1393 lr->lr_probes < arp_unicast_lim) {
1394 /*
1395  * Mark the entry with the la_probeexp deadline to
1396 * trigger the probe timer to be scheduled (if not
1397 * already). This gets cleared the moment we get
1398 * an ARP reply.
1399 */
1400 probing = TRUE;
1401 if (lr->lr_probes == 0) {
1402 llinfo->la_probeexp = (timenow + arpt_probe);
1403 llinfo->la_flags |= LLINFO_PROBING;
1404 /*
1405 * Provide notification that ARP unicast
1406 * probing has started.
1407 * We only do it for the first unicast probe
1408 * attempt.
1409 */
1410 send_probe_notif = TRUE;
1411 }
1412
1413 /*
1414 * Start the unicast probe and anticipate a reply;
1415 * afterwards, return existing entry to caller and
1416 * let it be used anyway. If peer is non-existent
1417 * we'll broadcast ARP next time around.
1418 */
1419 lr->lr_probes++;
1420 bzero(&sdl, sizeof(sdl));
1421 sdl.sdl_alen = ifp->if_addrlen;
1422 bcopy(&lr->lr_key.addr, LLADDR(&sdl),
1423 ifp->if_addrlen);
1424 IFLR_UNLOCK(lr);
1425 IFA_LOCK_SPIN(rt_ifa);
1426 IFA_ADDREF_LOCKED(rt_ifa);
1427 sa = rt_ifa->ifa_addr;
1428 IFA_UNLOCK(rt_ifa);
1429 rtflags = route->rt_flags;
1430 RT_UNLOCK(route);
1431 dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa,
1432 (const struct sockaddr_dl *)&sdl,
1433 (const struct sockaddr *)net_dest, rtflags);
1434 IFA_REMREF(rt_ifa);
1435 RT_LOCK(route);
1436 goto release;
1437 } else {
1438 IFLR_UNLOCK(lr);
1439 if (!unreachable &&
1440 !(llinfo->la_flags & LLINFO_PROBING)) {
1441 /*
1442 * Normal case where peer is still reachable,
1443 * we're not probing and if_addrlen is anything
1444 * but IF_LLREACH_MAXLEN.
1445 */
1446 goto release;
1447 }
1448 }
1449 }
1450
1451 if (ifp->if_flags & IFF_NOARP) {
1452 result = ENOTSUP;
1453 goto release;
1454 }
1455
1456 /*
1457 * Route wasn't complete/valid; we need to send out ARP request.
1458 * If we've exceeded the limit of la_holdq, drop from the head
1459 * of queue and add this packet to the tail. If we end up with
1460 * RTF_REJECT below, we'll dequeue this from tail and have the
1461 * caller free the packet instead. It's safe to do that since
1462 * we still hold the route's rt_lock.
1463 */
1464 if (packet != NULL) {
1465 enqueued = arp_llinfo_addq(llinfo, packet);
1466 } else {
1467 llinfo->la_prbreq_cnt++;
1468 }
1469 /*
1470 * Regardless of permanent vs. expirable entry, we need to
1471 * avoid having packets sit in la_holdq forever; thus mark the
1472 * entry with la_probeexp deadline to trigger the probe timer
1473 * to be scheduled (if not already). This gets cleared the
1474 * moment we get an ARP reply.
1475 */
1476 probing = TRUE;
1477 if ((qlen(&llinfo->la_holdq) + llinfo->la_prbreq_cnt) == 1) {
1478 llinfo->la_probeexp = (timenow + arpt_probe);
1479 llinfo->la_flags |= LLINFO_PROBING;
1480 }
1481
1482 if (route->rt_expire) {
1483 route->rt_flags &= ~RTF_REJECT;
1484 if (llinfo->la_asked == 0 || route->rt_expire != timenow) {
1485 rt_setexpire(route, timenow);
1486 if (llinfo->la_asked++ < llinfo->la_maxtries) {
1487 struct kev_msg ev_msg;
1488 struct kev_in_arpfailure in_arpfailure;
1489 boolean_t sendkev = FALSE;
1490
1491 rt_ifa = route->rt_ifa;
1492 lr = llinfo->la_llreach;
1493 /* Become a regular mutex, just in case */
1494 RT_CONVERT_LOCK(route);
1495 /* Update probe count, if applicable */
1496 if (lr != NULL) {
1497 IFLR_LOCK_SPIN(lr);
1498 lr->lr_probes++;
1499 IFLR_UNLOCK(lr);
1500 }
1501 if (ifp->if_addrlen == IF_LLREACH_MAXLEN &&
1502 route->rt_flags & RTF_ROUTER &&
1503 llinfo->la_asked > 1) {
1504 sendkev = TRUE;
1505 llinfo->la_flags |= LLINFO_RTRFAIL_EVTSENT;
1506 }
1507 IFA_LOCK_SPIN(rt_ifa);
1508 IFA_ADDREF_LOCKED(rt_ifa);
1509 sa = rt_ifa->ifa_addr;
1510 IFA_UNLOCK(rt_ifa);
1511 arp_llreach_use(llinfo); /* Mark use tstamp */
1512 rtflags = route->rt_flags;
1513 RT_UNLOCK(route);
1514 dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa,
1515 NULL, (const struct sockaddr *)net_dest,
1516 rtflags);
1517 IFA_REMREF(rt_ifa);
1518 if (sendkev) {
1519 bzero(&ev_msg, sizeof(ev_msg));
1520 bzero(&in_arpfailure,
1521 sizeof(in_arpfailure));
1522 in_arpfailure.link_data.if_family =
1523 ifp->if_family;
1524 in_arpfailure.link_data.if_unit =
1525 ifp->if_unit;
1526 strlcpy(in_arpfailure.link_data.if_name,
1527 ifp->if_name, IFNAMSIZ);
1528 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1529 ev_msg.kev_class = KEV_NETWORK_CLASS;
1530 ev_msg.kev_subclass = KEV_INET_SUBCLASS;
1531 ev_msg.event_code =
1532 KEV_INET_ARPRTRFAILURE;
1533 ev_msg.dv[0].data_ptr = &in_arpfailure;
1534 ev_msg.dv[0].data_length =
1535 sizeof(struct
1536 kev_in_arpfailure);
1537 dlil_post_complete_msg(NULL, &ev_msg);
1538 }
1539 result = EJUSTRETURN;
1540 RT_LOCK(route);
1541 goto release;
1542 } else {
1543 route->rt_flags |= RTF_REJECT;
1544 rt_setexpire(route,
1545 route->rt_expire + arpt_down);
1546 llinfo->la_asked = 0;
1547 /*
1548 * Remove the packet that was just added above;
1549 * don't free it since we're not returning
1550 * EJUSTRETURN. The caller will handle the
1551 * freeing. Since we haven't dropped rt_lock
1552 * from the time of _addq() above, this packet
1553 * must be at the tail.
1554 */
1555 if (packet != NULL && enqueued) {
1556 classq_pkt_t pkt =
1557 CLASSQ_PKT_INITIALIZER(pkt);
1558
1559 _getq_tail(&llinfo->la_holdq, &pkt);
1560 atomic_add_32(&arpstat.held, -1);
1561 VERIFY(pkt.cp_mbuf == packet);
1562 }
1563 result = EHOSTUNREACH;
1564 /*
1565 * Enqueue work item to invoke callback for this route entry
1566 */
1567 route_event_enqueue_nwk_wq_entry(route, NULL,
1568 ROUTE_LLENTRY_UNREACH, NULL, TRUE);
1569 goto release;
1570 }
1571 }
1572 }
1573
1574 /* The packet is now held inside la_holdq or dropped */
1575 result = EJUSTRETURN;
1576 if (packet != NULL && !enqueued) {
1577 mbuf_free(packet);
1578 packet = NULL;
1579 }
1580
1581 release:
1582 if (result == EHOSTUNREACH) {
1583 atomic_add_32(&arpstat.dropped, 1);
1584 }
1585
1586 if (route != NULL) {
1587 if (send_probe_notif) {
1588 route_event_enqueue_nwk_wq_entry(route, NULL,
1589 ROUTE_LLENTRY_PROBED, NULL, TRUE);
1590
1591 if (route->rt_flags & RTF_ROUTER) {
1592 struct radix_node_head *rnh = NULL;
1593 struct route_event rt_ev;
1594 route_event_init(&rt_ev, route, NULL, ROUTE_LLENTRY_PROBED);
1595 /*
1596 * We already have a reference on rt. The function
1597 * frees it before returning.
1598 */
1599 RT_UNLOCK(route);
1600 lck_mtx_lock(rnh_lock);
1601 rnh = rt_tables[AF_INET];
1602
1603 if (rnh != NULL) {
1604 (void) rnh->rnh_walktree(rnh,
1605 route_event_walktree, (void *)&rt_ev);
1606 }
1607 lck_mtx_unlock(rnh_lock);
1608 RT_LOCK(route);
1609 }
1610 }
1611
1612 if (route == hint) {
1613 RT_REMREF_LOCKED(route);
1614 RT_UNLOCK(route);
1615 } else {
1616 RT_UNLOCK(route);
1617 rtfree(route);
1618 }
1619 }
1620 if (probing) {
1621 /* Do this after we drop rt_lock to preserve ordering */
1622 lck_mtx_lock(rnh_lock);
1623 arp_sched_probe(NULL);
1624 lck_mtx_unlock(rnh_lock);
1625 }
1626 return result;
1627 }
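/*
 * Usage note (illustrative): callers treat EJUSTRETURN as "the packet
 * was queued (or consumed) awaiting resolution" and must not free the
 * mbuf, whereas EHOSTUNREACH leaves ownership of the packet with the
 * caller, which is expected to free it.
 */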
1628
1629 errno_t
1630 arp_ip_handle_input(ifnet_t ifp, u_short arpop,
1631 const struct sockaddr_dl *sender_hw, const struct sockaddr_in *sender_ip,
1632 const struct sockaddr_in *target_ip)
1633 {
1634 char ipv4str[MAX_IPv4_STR_LEN];
1635 struct sockaddr_dl proxied;
1636 struct sockaddr_dl *gateway, *target_hw = NULL;
1637 struct ifaddr *ifa;
1638 struct in_ifaddr *ia;
1639 struct in_ifaddr *best_ia = NULL;
1640 struct sockaddr_in best_ia_sin;
1641 route_t route = NULL;
1642 char buf[3 * MAX_HW_LEN]; /* enough for MAX_HW_LEN byte hw address */
1643 struct llinfo_arp *llinfo;
1644 errno_t error;
1645 int created_announcement = 0;
1646 int bridged = 0, is_bridge = 0;
1647 uint32_t rt_evcode = 0;
1648
1649 /*
1650 * Here and other places within this routine where we don't hold
1651 * rnh_lock, trade accuracy for speed for the common scenarios
1652 * and avoid the use of atomic updates.
1653 */
1654 arpstat.received++;
1655
1656 /* Do not respond to requests for 0.0.0.0 */
1657 if (target_ip->sin_addr.s_addr == INADDR_ANY && arpop == ARPOP_REQUEST) {
1658 goto done;
1659 }
1660
1661 if (ifp->if_bridge) {
1662 bridged = 1;
1663 }
1664 if (ifp->if_type == IFT_BRIDGE) {
1665 is_bridge = 1;
1666 }
1667
1668 if (arpop == ARPOP_REPLY) {
1669 arpstat.rxreplies++;
1670 }
1671
1672 /*
1673 * Determine if this ARP is for us
1674 */
1675 lck_rw_lock_shared(in_ifaddr_rwlock);
1676 TAILQ_FOREACH(ia, INADDR_HASH(target_ip->sin_addr.s_addr), ia_hash) {
1677 IFA_LOCK_SPIN(&ia->ia_ifa);
1678 if (ia->ia_ifp == ifp &&
1679 ia->ia_addr.sin_addr.s_addr == target_ip->sin_addr.s_addr) {
1680 best_ia = ia;
1681 best_ia_sin = best_ia->ia_addr;
1682 IFA_ADDREF_LOCKED(&ia->ia_ifa);
1683 IFA_UNLOCK(&ia->ia_ifa);
1684 lck_rw_done(in_ifaddr_rwlock);
1685 goto match;
1686 }
1687 IFA_UNLOCK(&ia->ia_ifa);
1688 }
1689
1690 TAILQ_FOREACH(ia, INADDR_HASH(sender_ip->sin_addr.s_addr), ia_hash) {
1691 IFA_LOCK_SPIN(&ia->ia_ifa);
1692 if (ia->ia_ifp == ifp &&
1693 ia->ia_addr.sin_addr.s_addr == sender_ip->sin_addr.s_addr) {
1694 best_ia = ia;
1695 best_ia_sin = best_ia->ia_addr;
1696 IFA_ADDREF_LOCKED(&ia->ia_ifa);
1697 IFA_UNLOCK(&ia->ia_ifa);
1698 lck_rw_done(in_ifaddr_rwlock);
1699 goto match;
1700 }
1701 IFA_UNLOCK(&ia->ia_ifa);
1702 }
1703
1704 #define BDG_MEMBER_MATCHES_ARP(addr, ifp, ia) \
1705 (ia->ia_ifp->if_bridge == ifp->if_softc && \
1706 bcmp(IF_LLADDR(ia->ia_ifp), IF_LLADDR(ifp), ifp->if_addrlen) == 0 && \
1707 addr == ia->ia_addr.sin_addr.s_addr)
1708 /*
1709  * Check the case where the bridge shares its MAC address
1710  * with some of its children: packets get claimed by the
1711  * bridge itself (bridge_input() does it first), but they
1712  * are really destined to the bridge member.
1713 */
1714 if (is_bridge) {
1715 TAILQ_FOREACH(ia, INADDR_HASH(target_ip->sin_addr.s_addr),
1716 ia_hash) {
1717 IFA_LOCK_SPIN(&ia->ia_ifa);
1718 if (BDG_MEMBER_MATCHES_ARP(target_ip->sin_addr.s_addr,
1719 ifp, ia)) {
1720 ifp = ia->ia_ifp;
1721 best_ia = ia;
1722 best_ia_sin = best_ia->ia_addr;
1723 IFA_ADDREF_LOCKED(&ia->ia_ifa);
1724 IFA_UNLOCK(&ia->ia_ifa);
1725 lck_rw_done(in_ifaddr_rwlock);
1726 goto match;
1727 }
1728 IFA_UNLOCK(&ia->ia_ifa);
1729 }
1730 }
1731 #undef BDG_MEMBER_MATCHES_ARP
1732 lck_rw_done(in_ifaddr_rwlock);
1733
1734 /*
1735 * No match, use the first inet address on the receive interface
1736 * as a dummy address for the rest of the function; we may be
1737 * proxying for another address.
1738 */
1739 ifnet_lock_shared(ifp);
1740 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1741 IFA_LOCK_SPIN(ifa);
1742 if (ifa->ifa_addr->sa_family != AF_INET) {
1743 IFA_UNLOCK(ifa);
1744 continue;
1745 }
1746 best_ia = (struct in_ifaddr *)ifa;
1747 best_ia_sin = best_ia->ia_addr;
1748 IFA_ADDREF_LOCKED(ifa);
1749 IFA_UNLOCK(ifa);
1750 ifnet_lock_done(ifp);
1751 goto match;
1752 }
1753 ifnet_lock_done(ifp);
1754
1755 /*
1756 * If we're not a bridge member, or if we are but there's no
1757 * IPv4 address to use for the interface, drop the packet.
1758 */
1759 if (!bridged || best_ia == NULL) {
1760 goto done;
1761 }
1762
1763 match:
1764 /* If the packet is from this interface, ignore the packet */
1765 if (bcmp(CONST_LLADDR(sender_hw), IF_LLADDR(ifp),
1766 sender_hw->sdl_alen) == 0) {
1767 goto done;
1768 }
1769
1770 /* Check for a conflict */
1771 if (!bridged &&
1772 sender_ip->sin_addr.s_addr == best_ia_sin.sin_addr.s_addr) {
1773 struct kev_msg ev_msg;
1774 struct kev_in_collision *in_collision;
1775 u_char storage[sizeof(struct kev_in_collision) + MAX_HW_LEN];
1776
1777 bzero(&ev_msg, sizeof(struct kev_msg));
1778 bzero(storage, (sizeof(struct kev_in_collision) + MAX_HW_LEN));
1779 in_collision = (struct kev_in_collision *)(void *)storage;
1780 log(LOG_ERR, "%s duplicate IP address %s sent from "
1781 "address %s\n", if_name(ifp),
1782 inet_ntop(AF_INET, &sender_ip->sin_addr, ipv4str,
1783 sizeof(ipv4str)), sdl_addr_to_hex(sender_hw, buf,
1784 sizeof(buf)));
1785
1786 /* Send a kernel event so anyone can learn of the conflict */
1787 in_collision->link_data.if_family = ifp->if_family;
1788 in_collision->link_data.if_unit = ifp->if_unit;
1789 strlcpy(&in_collision->link_data.if_name[0],
1790 ifp->if_name, IFNAMSIZ);
1791 in_collision->ia_ipaddr = sender_ip->sin_addr;
1792 in_collision->hw_len = (sender_hw->sdl_alen < MAX_HW_LEN) ?
1793 sender_hw->sdl_alen : MAX_HW_LEN;
1794 bcopy(CONST_LLADDR(sender_hw), (caddr_t)in_collision->hw_addr,
1795 in_collision->hw_len);
1796 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1797 ev_msg.kev_class = KEV_NETWORK_CLASS;
1798 ev_msg.kev_subclass = KEV_INET_SUBCLASS;
1799 ev_msg.event_code = KEV_INET_ARPCOLLISION;
1800 ev_msg.dv[0].data_ptr = in_collision;
1801 ev_msg.dv[0].data_length =
1802 sizeof(struct kev_in_collision) + in_collision->hw_len;
1803 ev_msg.dv[1].data_length = 0;
1804 dlil_post_complete_msg(NULL, &ev_msg);
1805 atomic_add_32(&arpstat.dupips, 1);
1806 goto respond;
1807 }
1808
1809 /*
1810 * Look up the routing entry. If it doesn't exist and we are the
1811 * target, and the sender isn't 0.0.0.0, go ahead and create one.
1812 * Callee holds a reference on the route and returns with the route
1813 * entry locked, upon success.
1814 */
1815 error = arp_lookup_route(&sender_ip->sin_addr,
1816 (target_ip->sin_addr.s_addr == best_ia_sin.sin_addr.s_addr &&
1817 sender_ip->sin_addr.s_addr != 0), 0, &route, ifp->if_index);
1818
1819 if (error == 0) {
1820 RT_LOCK_ASSERT_HELD(route);
1821 }
1822
1823 if (error || route == NULL || route->rt_gateway == NULL) {
1824 if (arpop != ARPOP_REQUEST) {
1825 goto respond;
1826 }
1827
1828 if (arp_sendllconflict && send_conflicting_probes != 0 &&
1829 (ifp->if_eflags & IFEF_ARPLL) &&
1830 IN_LINKLOCAL(ntohl(target_ip->sin_addr.s_addr)) &&
1831 sender_ip->sin_addr.s_addr == INADDR_ANY) {
1832 /*
1833 * Verify this ARP probe doesn't conflict with
1834 * an IPv4LL we know of on another interface.
1835 */
1836 if (route != NULL) {
1837 RT_REMREF_LOCKED(route);
1838 RT_UNLOCK(route);
1839 route = NULL;
1840 }
1841 /*
1842 * Callee holds a reference on the route and returns
1843 * with the route entry locked, upon success.
1844 */
1845 error = arp_lookup_route(&target_ip->sin_addr, 0, 0,
1846 &route, ifp->if_index);
1847
1848 if (error != 0 || route == NULL ||
1849 route->rt_gateway == NULL) {
1850 goto respond;
1851 }
1852
1853 RT_LOCK_ASSERT_HELD(route);
1854
1855 gateway = SDL(route->rt_gateway);
1856 if (route->rt_ifp != ifp && gateway->sdl_alen != 0 &&
1857 (gateway->sdl_alen != sender_hw->sdl_alen ||
1858 bcmp(CONST_LLADDR(gateway), CONST_LLADDR(sender_hw),
1859 gateway->sdl_alen) != 0)) {
1860 /*
1861 * A node is probing for an IPv4LL we know
1862 * exists on a different interface. We respond
1863 * with a conflicting probe to force the new
1864 * device to pick a different IPv4LL address.
1865 */
1866 if (arp_verbose || log_arp_warnings) {
1867 log(LOG_INFO, "arp: %s on %s sent "
1868 "probe for %s, already on %s\n",
1869 sdl_addr_to_hex(sender_hw, buf,
1870 sizeof(buf)), if_name(ifp),
1871 inet_ntop(AF_INET,
1872 &target_ip->sin_addr, ipv4str,
1873 sizeof(ipv4str)),
1874 if_name(route->rt_ifp));
1875 log(LOG_INFO, "arp: sending "
1876 "conflicting probe to %s on %s\n",
1877 sdl_addr_to_hex(sender_hw, buf,
1878 sizeof(buf)), if_name(ifp));
1879 }
1880 /* Mark use timestamp */
1881 if (route->rt_llinfo != NULL) {
1882 arp_llreach_use(route->rt_llinfo);
1883 }
1884 /* We're done with the route */
1885 RT_REMREF_LOCKED(route);
1886 RT_UNLOCK(route);
1887 route = NULL;
1888 /*
1889 * Send a conservative unicast "ARP probe".
1890 * This should prompt the other device to pick
1891 * a new number. It will not do so if the
1892 * device has already committed to that
1893 * number, and the probe does not imply to
1894 * the device that we own the address. The
1895 * link address is always present; it's
1896 * never freed.
1897 */
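/*
 * For reference, the conflicting probe sent below looks like this on
 * the wire (unicast to the prober):
 *
 *   op        = ARPOP_REQUEST            (a probe, not a claim)
 *   sender hw = our link-layer address
 *   sender ip = 0.0.0.0                  (copied from the probe)
 *   target hw = the prober's address (sender_hw)
 *   target ip = the contested IPv4LL address (target_ip)
 */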
1898 ifnet_lock_shared(ifp);
1899 ifa = ifp->if_lladdr;
1900 IFA_ADDREF(ifa);
1901 ifnet_lock_done(ifp);
1902 dlil_send_arp_internal(ifp, ARPOP_REQUEST,
1903 SDL(ifa->ifa_addr),
1904 (const struct sockaddr *)sender_ip,
1905 sender_hw,
1906 (const struct sockaddr *)target_ip);
1907 IFA_REMREF(ifa);
1908 ifa = NULL;
1909 atomic_add_32(&arpstat.txconflicts, 1);
1910 }
1911 goto respond;
1912 } else if (keep_announcements != 0 &&
1913 target_ip->sin_addr.s_addr == sender_ip->sin_addr.s_addr) {
1914 /*
1915 * Don't create an entry if the address is
1916 * link-local and ARP for link-local is disabled
1917 */
1918 if (!IN_LINKLOCAL(ntohl(sender_ip->sin_addr.s_addr)) ||
1919 (ifp->if_eflags & IFEF_ARPLL)) {
1920 if (route != NULL) {
1921 RT_REMREF_LOCKED(route);
1922 RT_UNLOCK(route);
1923 route = NULL;
1924 }
1925 /*
1926 * Callee holds a reference on the route and
1927 * returns with the route entry locked, upon
1928 * success.
1929 */
1930 error = arp_lookup_route(&sender_ip->sin_addr,
1931 1, 0, &route, ifp->if_index);
1932
1933 if (error == 0) {
1934 RT_LOCK_ASSERT_HELD(route);
1935 }
1936
1937 if (error == 0 && route != NULL &&
1938 route->rt_gateway != NULL) {
1939 created_announcement = 1;
1940 }
1941 }
1942 if (created_announcement == 0) {
1943 goto respond;
1944 }
1945 } else {
1946 goto respond;
1947 }
1948 }
1949
1950 RT_LOCK_ASSERT_HELD(route);
1951 VERIFY(route->rt_expire == 0 || route->rt_rmx.rmx_expire != 0);
1952 VERIFY(route->rt_expire != 0 || route->rt_rmx.rmx_expire == 0);
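/*
 * rt_expire shadows rt_rmx.rmx_expire, so the two must be zero or
 * nonzero together.  A zero expiration marks a permanent entry, e.g.
 * one created from user space with "arp -s <ip> <ether>", which the
 * checks below refuse to overwrite.
 */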
1953
1954 gateway = SDL(route->rt_gateway);
1955 if (!bridged && route->rt_ifp != ifp) {
1956 if (!IN_LINKLOCAL(ntohl(sender_ip->sin_addr.s_addr)) ||
1957 !(ifp->if_eflags & IFEF_ARPLL)) {
1958 if (arp_verbose || log_arp_warnings) {
1959 log(LOG_ERR, "arp: %s is on %s but got "
1960 "reply from %s on %s\n",
1961 inet_ntop(AF_INET, &sender_ip->sin_addr,
1962 ipv4str, sizeof(ipv4str)),
1963 if_name(route->rt_ifp),
1964 sdl_addr_to_hex(sender_hw, buf,
1965 sizeof(buf)), if_name(ifp));
1966 }
1967 goto respond;
1968 } else {
1969 /* Don't change a permanent address */
1970 if (route->rt_expire == 0) {
1971 goto respond;
1972 }
1973
1974 /*
1975 * We're about to check and/or change the route's ifp
1976 * and ifa, so do the lock dance: drop rt_lock, hold
1977 * rnh_lock and re-hold rt_lock to avoid violating the
1978 * lock ordering. We have an extra reference on the
1979 * route, so it won't go away while we do this.
1980 */
1981 RT_UNLOCK(route);
1982 lck_mtx_lock(rnh_lock);
1983 RT_LOCK(route);
1984 /*
1985 * Don't change the cloned route away from the
1986 * parent's interface if the address did resolve
1987 * or if the route is defunct. rt_ifp on both
1988 * the parent and the clone can be freely
1989 * accessed now that we have acquired rnh_lock.
1990 */
1991 gateway = SDL(route->rt_gateway);
1992 if ((gateway->sdl_alen != 0 &&
1993 route->rt_parent != NULL &&
1994 route->rt_parent->rt_ifp == route->rt_ifp) ||
1995 (route->rt_flags & RTF_CONDEMNED)) {
1996 RT_REMREF_LOCKED(route);
1997 RT_UNLOCK(route);
1998 route = NULL;
1999 lck_mtx_unlock(rnh_lock);
2000 goto respond;
2001 }
2002 if (route->rt_ifp != ifp) {
2003 /*
2004 * Purge any link-layer info caching.
2005 */
2006 if (route->rt_llinfo_purge != NULL) {
2007 route->rt_llinfo_purge(route);
2008 }
2009
2010 /* Adjust route ref count for the interfaces */
2011 if (route->rt_if_ref_fn != NULL) {
2012 route->rt_if_ref_fn(ifp, 1);
2013 route->rt_if_ref_fn(route->rt_ifp, -1);
2014 }
2015 }
2016 /* Change the interface that the existing route is on */
2017 route->rt_ifp = ifp;
2018 /*
2019 * If rmx_mtu is not locked, update it
2020 * to the MTU used by the new interface.
2021 */
2022 if (!(route->rt_rmx.rmx_locks & RTV_MTU)) {
2023 route->rt_rmx.rmx_mtu = route->rt_ifp->if_mtu;
2024 if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
2025 route->rt_rmx.rmx_mtu = IN6_LINKMTU(route->rt_ifp);
2026 /* Further adjust the size for CLAT46 expansion */
2027 route->rt_rmx.rmx_mtu -= CLAT46_HDR_EXPANSION_OVERHD;
2028 }
2029 }
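/*
 * Worked example (assuming CLAT46_HDR_EXPANSION_OVERHD is the 20-byte
 * difference between the IPv6 and IPv4 headers): on a CLAT46 interface
 * with an IPv6 link MTU of 1500, the route MTU becomes 1500 - 20 =
 * 1480, leaving room for each translated packet to grow by one
 * header's worth of bytes.
 */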
2030
2031 rtsetifa(route, &best_ia->ia_ifa);
2032 gateway->sdl_index = ifp->if_index;
2033 RT_UNLOCK(route);
2034 lck_mtx_unlock(rnh_lock);
2035 RT_LOCK(route);
2036 /* Don't bother if the route is down */
2037 if (!(route->rt_flags & RTF_UP)) {
2038 goto respond;
2039 }
2040 /* Refresh gateway pointer */
2041 gateway = SDL(route->rt_gateway);
2042 }
2043 RT_LOCK_ASSERT_HELD(route);
2044 }
2045
2046 if (gateway->sdl_alen != 0 && bcmp(LLADDR(gateway),
2047 CONST_LLADDR(sender_hw), gateway->sdl_alen) != 0) {
2048 if (route->rt_expire != 0 &&
2049 (arp_verbose || log_arp_warnings)) {
2050 char buf2[3 * MAX_HW_LEN];
2051 log(LOG_INFO, "arp: %s moved from %s to %s on %s\n",
2052 inet_ntop(AF_INET, &sender_ip->sin_addr, ipv4str,
2053 sizeof(ipv4str)),
2054 sdl_addr_to_hex(gateway, buf, sizeof(buf)),
2055 sdl_addr_to_hex(sender_hw, buf2, sizeof(buf2)),
2056 if_name(ifp));
2057 } else if (route->rt_expire == 0) {
2058 if (arp_verbose || log_arp_warnings) {
2059 log(LOG_ERR, "arp: %s attempts to modify "
2060 "permanent entry for %s on %s\n",
2061 sdl_addr_to_hex(sender_hw, buf,
2062 sizeof(buf)),
2063 inet_ntop(AF_INET, &sender_ip->sin_addr,
2064 ipv4str, sizeof(ipv4str)),
2065 if_name(ifp));
2066 }
2067 goto respond;
2068 }
2069 }
2070
2071 /* Copy the sender hardware address into the route's gateway address */
2072 gateway->sdl_alen = sender_hw->sdl_alen;
2073 bcopy(CONST_LLADDR(sender_hw), LLADDR(gateway), gateway->sdl_alen);
2074
2075 /* Update the expire time for the route and clear the reject flag */
2076 if (route->rt_expire != 0) {
2077 rt_setexpire(route, net_uptime() + arpt_keep);
2078 }
2079 route->rt_flags &= ~RTF_REJECT;
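/*
 * A confirmed entry is pushed out another arpt_keep seconds (20
 * minutes by default); permanent entries (rt_expire == 0) are left
 * alone.  Clearing RTF_REJECT re-enables output that an earlier
 * failed resolution may have short-circuited.
 */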
2080
2081 /* cache the gateway (sender HW) address */
2082 arp_llreach_alloc(route, ifp, LLADDR(gateway), gateway->sdl_alen,
2083 (arpop == ARPOP_REPLY), &rt_evcode);
2084
2085 llinfo = route->rt_llinfo;
2086 /* send a notification that the route is back up */
2087 if (ifp->if_addrlen == IF_LLREACH_MAXLEN &&
2088 route->rt_flags & RTF_ROUTER &&
2089 llinfo->la_flags & LLINFO_RTRFAIL_EVTSENT) {
2090 struct kev_msg ev_msg;
2091 struct kev_in_arpalive in_arpalive;
2092
2093 llinfo->la_flags &= ~LLINFO_RTRFAIL_EVTSENT;
2094 RT_UNLOCK(route);
2095 bzero(&ev_msg, sizeof(ev_msg));
2096 bzero(&in_arpalive, sizeof(in_arpalive));
2097 in_arpalive.link_data.if_family = ifp->if_family;
2098 in_arpalive.link_data.if_unit = ifp->if_unit;
2099 strlcpy(in_arpalive.link_data.if_name, ifp->if_name, IFNAMSIZ);
2100 ev_msg.vendor_code = KEV_VENDOR_APPLE;
2101 ev_msg.kev_class = KEV_NETWORK_CLASS;
2102 ev_msg.kev_subclass = KEV_INET_SUBCLASS;
2103 ev_msg.event_code = KEV_INET_ARPRTRALIVE;
2104 ev_msg.dv[0].data_ptr = &in_arpalive;
2105 ev_msg.dv[0].data_length = sizeof(struct kev_in_arpalive);
2106 dlil_post_complete_msg(NULL, &ev_msg);
2107 RT_LOCK(route);
2108 }
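/*
 * KEV_INET_ARPRTRALIVE is the recovery-side counterpart of the
 * router-failure event: it is sent once per failure episode (gated by
 * LLINFO_RTRFAIL_EVTSENT) and can be observed from user space with
 * the same kernel event socket sketched earlier, by matching
 * ev->event_code against KEV_INET_ARPRTRALIVE.
 */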
2109 /* Update the llinfo, send out all queued packets at once */
2110 llinfo->la_asked = 0;
2111 llinfo->la_flags &= ~LLINFO_PROBING;
2112 llinfo->la_prbreq_cnt = 0;
2113
2114 if (rt_evcode) {
2115 /*
2116 * Enqueue work item to invoke callback for this route entry
2117 */
2118 route_event_enqueue_nwk_wq_entry(route, NULL, rt_evcode, NULL, TRUE);
2119
2120 if (route->rt_flags & RTF_ROUTER) {
2121 struct radix_node_head *rnh = NULL;
2122 struct route_event rt_ev;
2123 route_event_init(&rt_ev, route, NULL, rt_evcode);
2124 /*
2125 * We already have a reference on rt. The function
2126 * frees it before returning.
2127 */
2128 RT_UNLOCK(route);
2129 lck_mtx_lock(rnh_lock);
2130 rnh = rt_tables[AF_INET];
2131
2132 if (rnh != NULL) {
2133 (void) rnh->rnh_walktree(rnh, route_event_walktree,
2134 (void *)&rt_ev);
2135 }
2136 lck_mtx_unlock(rnh_lock);
2137 RT_LOCK(route);
2138 }
2139 }
2140
2141 if (!qempty(&llinfo->la_holdq)) {
2142 uint32_t held;
2143 struct mbuf *m0;
2144 classq_pkt_t pkt = CLASSQ_PKT_INITIALIZER(pkt);
2145
2146 _getq_all(&llinfo->la_holdq, &pkt, NULL, &held, NULL);
2147 m0 = pkt.cp_mbuf;
2148 if (arp_verbose) {
2149 log(LOG_DEBUG, "%s: sending %u held packets\n",
2150 __func__, held);
2151 }
2152 atomic_add_32(&arpstat.held, -held);
2153 VERIFY(qempty(&llinfo->la_holdq));
2154 RT_UNLOCK(route);
2155 dlil_output(ifp, PF_INET, m0, (caddr_t)route,
2156 rt_key(route), 0, NULL);
2157 RT_REMREF(route);
2158 route = NULL;
2159 }
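/*
 * The packets parked on la_holdq while the address was being resolved
 * come back from _getq_all() as a single m_nextpkt chain, so one
 * dlil_output() call transmits every held packet now that the
 * gateway's link-layer address is known.
 */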
2160
2161 respond:
2162 if (route != NULL) {
2163 /* Mark use timestamp if we're going to send a reply */
2164 if (arpop == ARPOP_REQUEST && route->rt_llinfo != NULL) {
2165 arp_llreach_use(route->rt_llinfo);
2166 }
2167 RT_REMREF_LOCKED(route);
2168 RT_UNLOCK(route);
2169 route = NULL;
2170 }
2171
2172 if (arpop != ARPOP_REQUEST) {
2173 goto done;
2174 }
2175
2176 /* See comments at the beginning of this routine */
2177 arpstat.rxrequests++;
2178
2179 /* If we are not the target, check if we should proxy */
2180 if (target_ip->sin_addr.s_addr != best_ia_sin.sin_addr.s_addr) {
2181 /*
2182 * Find a proxy route; callee holds a reference on the
2183 * route and returns with the route entry locked, upon
2184 * success.
2185 */
2186 error = arp_lookup_route(&target_ip->sin_addr, 0, SIN_PROXY,
2187 &route, ifp->if_index);
2188
2189 if (error == 0) {
2190 RT_LOCK_ASSERT_HELD(route);
2191 /*
2192 * Return proxied ARP replies only on the interface
2193 * or bridge cluster where this network resides.
2194 * Otherwise we may conflict with the host we are
2195 * proxying for.
2196 */
2197 if (route->rt_ifp != ifp &&
2198 (route->rt_ifp->if_bridge != ifp->if_bridge ||
2199 ifp->if_bridge == NULL)) {
2200 RT_REMREF_LOCKED(route);
2201 RT_UNLOCK(route);
2202 goto done;
2203 }
2204 proxied = *SDL(route->rt_gateway);
2205 target_hw = &proxied;
2206 } else {
2207 /*
2208 * We don't have a route entry marking this target
2209 * as proxied (published). Unless we're supposed to
2210 * proxy for all targets (arp_proxyall), we are done.
2211 */
2212 if (!arp_proxyall) {
2213 goto done;
2214 }
2215
2216 /*
2217 * See if we have a route to the target ip before
2218 * we proxy it.
2219 */
2220 route = rtalloc1_scoped((struct sockaddr *)
2221 (size_t)target_ip, 0, 0, ifp->if_index);
2222 if (!route) {
2223 goto done;
2224 }
2225
2226 /*
2227 * Don't proxy for hosts already on the same interface.
2228 */
2229 RT_LOCK(route);
2230 if (route->rt_ifp == ifp) {
2231 RT_UNLOCK(route);
2232 rtfree(route);
2233 goto done;
2234 }
2235 }
2236 /* Mark use timestamp */
2237 if (route->rt_llinfo != NULL) {
2238 arp_llreach_use(route->rt_llinfo);
2239 }
2240 RT_REMREF_LOCKED(route);
2241 RT_UNLOCK(route);
2242 }
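/*
 * Two configurations reach this reply for a non-local target (sketch;
 * the address and MAC below are placeholders): a published entry such
 * as
 *
 *   arp -s 192.0.2.5 00:11:22:33:44:55 pub
 *
 * which creates the SIN_PROXY route found by arp_lookup_route() above,
 * or the net.link.ether.inet.proxyall sysctl (arp_proxyall), which
 * answers for any target we have a route to on another interface.
 */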
2243
2244 dlil_send_arp(ifp, ARPOP_REPLY,
2245 target_hw, (const struct sockaddr *)target_ip,
2246 sender_hw, (const struct sockaddr *)sender_ip, 0);
2247
2248 done:
2249 if (best_ia != NULL) {
2250 IFA_REMREF(&best_ia->ia_ifa);
2251 }
2252 return 0;
2253 }
2254
2255 void
2256 arp_ifinit(struct ifnet *ifp, struct ifaddr *ifa)
2257 {
2258 struct sockaddr *sa;
2259
2260 IFA_LOCK(ifa);
2261 ifa->ifa_rtrequest = arp_rtrequest;
2262 ifa->ifa_flags |= RTF_CLONING;
2263 sa = ifa->ifa_addr;
2264 IFA_UNLOCK(ifa);
2265 dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa, NULL, sa, 0);
2266 }
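/*
 * The request sent by arp_ifinit() is a gratuitous ARP: the sender and
 * target protocol addresses are both the interface's own address and
 * no target hardware address is supplied, so on the wire it looks
 * roughly like:
 *
 *   op        = ARPOP_REQUEST
 *   sender ip = target ip = our address
 *
 * Neighbors that already cache the address update their entries, and
 * any answer reveals an address conflict.
 */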
2267
2268 static int
2269 arp_getstat SYSCTL_HANDLER_ARGS
2270 {
2271 #pragma unused(oidp, arg1, arg2)
2272 if (req->oldptr == USER_ADDR_NULL) {
2273 req->oldlen = (size_t)sizeof(struct arpstat);
2274 }
2275
2276 return SYSCTL_OUT(req, &arpstat, MIN(sizeof(arpstat), req->oldlen));
2277 }
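/*
 * User-space sketch (assuming this handler is wired up as the
 * net.link.ether.inet.stats sysctl): the NULL-oldptr probe above lets
 * callers size their buffer first, which sysctlbyname() handles
 * internally:
 *
 *   #include <sys/sysctl.h>
 *   #include <net/if_arp.h>          // struct arpstat
 *
 *   struct arpstat st;
 *   size_t len = sizeof(st);
 *   if (sysctlbyname("net.link.ether.inet.stats", &st, &len,
 *       NULL, 0) == 0) {
 *       // st.rxrequests, st.dupips, st.held, ...
 *   }
 */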