]> git.saurik.com Git - apple/xnu.git/blame_incremental - bsd/netinet/in_arp.c
xnu-6153.81.5.tar.gz
[apple/xnu.git] / bsd / netinet / in_arp.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2004-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * Copyright (c) 1982, 1989, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 */
61
62#include <kern/debug.h>
63#include <netinet/in_arp.h>
64#include <sys/types.h>
65#include <sys/param.h>
66#include <sys/kernel_types.h>
67#include <sys/syslog.h>
68#include <sys/systm.h>
69#include <sys/time.h>
70#include <sys/kernel.h>
71#include <sys/mbuf.h>
72#include <sys/sysctl.h>
73#include <sys/mcache.h>
74#include <sys/protosw.h>
75#include <string.h>
76#include <net/if_arp.h>
77#include <net/if_dl.h>
78#include <net/dlil.h>
79#include <net/if_types.h>
80#include <net/if_llreach.h>
81#include <net/route.h>
82#include <net/nwk_wq.h>
83
84#include <netinet/if_ether.h>
85#include <netinet/in_var.h>
86#include <netinet/ip.h>
87#include <netinet/ip6.h>
88#include <kern/zalloc.h>
89
90#include <kern/thread.h>
91#include <kern/sched_prim.h>
92
93#define CONST_LLADDR(s) ((const u_char*)((s)->sdl_data + (s)->sdl_nlen))
94
95static const size_t MAX_HW_LEN = 10;
96
97/*
98 * Synchronization notes:
99 *
100 * The global list of ARP entries are stored in llinfo_arp; an entry
101 * gets inserted into the list when the route is created and gets
102 * removed from the list when it is deleted; this is done as part
103 * of RTM_ADD/RTM_RESOLVE/RTM_DELETE in arp_rtrequest().
104 *
105 * Because rnh_lock and rt_lock for the entry are held during those
106 * operations, the same locks (and thus lock ordering) must be used
107 * elsewhere to access the relevant data structure fields:
108 *
109 * la_le.{le_next,le_prev}, la_rt
110 *
111 * - Routing lock (rnh_lock)
112 *
113 * la_holdq, la_asked, la_llreach, la_lastused, la_flags
114 *
115 * - Routing entry lock (rt_lock)
116 *
117 * Due to the dependency on rt_lock, llinfo_arp has the same lifetime
118 * as the route entry itself. When a route is deleted (RTM_DELETE),
119 * it is simply removed from the global list but the memory is not
120 * freed until the route itself is freed.
121 */
122struct llinfo_arp {
123 /*
124 * The following are protected by rnh_lock
125 */
126 LIST_ENTRY(llinfo_arp) la_le;
127 struct rtentry *la_rt;
128 /*
129 * The following are protected by rt_lock
130 */
131 class_queue_t la_holdq; /* packets awaiting resolution */
132 struct if_llreach *la_llreach; /* link-layer reachability record */
133 u_int64_t la_lastused; /* last used timestamp */
134 u_int32_t la_asked; /* # of requests sent */
135 u_int32_t la_maxtries; /* retry limit */
136 u_int64_t la_probeexp; /* probe deadline timestamp */
137 u_int32_t la_prbreq_cnt; /* probe request count */
138 u_int32_t la_flags;
139#define LLINFO_RTRFAIL_EVTSENT 0x1 /* sent an ARP event */
140#define LLINFO_PROBING 0x2 /* waiting for an ARP reply */
141};
142
143static LIST_HEAD(, llinfo_arp) llinfo_arp;
144
145static thread_call_t arp_timeout_tcall;
146static int arp_timeout_run; /* arp_timeout is scheduled to run */
147static void arp_timeout(thread_call_param_t arg0, thread_call_param_t arg1);
148static void arp_sched_timeout(struct timeval *);
149
150static thread_call_t arp_probe_tcall;
151static int arp_probe_run; /* arp_probe is scheduled to run */
152static void arp_probe(thread_call_param_t arg0, thread_call_param_t arg1);
153static void arp_sched_probe(struct timeval *);
154
155static void arptfree(struct llinfo_arp *, void *);
156static errno_t arp_lookup_route(const struct in_addr *, int,
157 int, route_t *, unsigned int);
158static int arp_getstat SYSCTL_HANDLER_ARGS;
159
160static struct llinfo_arp *arp_llinfo_alloc(int);
161static void arp_llinfo_free(void *);
162static uint32_t arp_llinfo_flushq(struct llinfo_arp *);
163static void arp_llinfo_purge(struct rtentry *);
164static void arp_llinfo_get_ri(struct rtentry *, struct rt_reach_info *);
165static void arp_llinfo_get_iflri(struct rtentry *, struct ifnet_llreach_info *);
166static void arp_llinfo_refresh(struct rtentry *);
167
168static __inline void arp_llreach_use(struct llinfo_arp *);
169static __inline int arp_llreach_reachable(struct llinfo_arp *);
170static void arp_llreach_alloc(struct rtentry *, struct ifnet *, void *,
171 unsigned int, boolean_t, uint32_t *);
172
173extern int tvtohz(struct timeval *);
174
175static int arpinit_done;
176
177SYSCTL_DECL(_net_link_ether);
178SYSCTL_NODE(_net_link_ether, PF_INET, inet, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
179
180static int arpt_prune = (5 * 60 * 1); /* walk list every 5 minutes */
181SYSCTL_INT(_net_link_ether_inet, OID_AUTO, prune_intvl,
182 CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_prune, 0, "");
183
184#define ARP_PROBE_TIME 7 /* seconds */
185static u_int32_t arpt_probe = ARP_PROBE_TIME;
186SYSCTL_UINT(_net_link_ether_inet, OID_AUTO, probe_intvl,
187 CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_probe, 0, "");
188
189static int arpt_keep = (20 * 60); /* once resolved, good for 20 more minutes */
190SYSCTL_INT(_net_link_ether_inet, OID_AUTO, max_age,
191 CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_keep, 0, "");
192
193static int arpt_down = 20; /* once declared down, don't send for 20 sec */
194SYSCTL_INT(_net_link_ether_inet, OID_AUTO, host_down_time,
195 CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_down, 0, "");
196
197static int arp_llreach_base = 120; /* seconds */
198SYSCTL_INT(_net_link_ether_inet, OID_AUTO, arp_llreach_base,
199 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_llreach_base, 0,
200 "default ARP link-layer reachability max lifetime (in seconds)");
201
202#define ARP_UNICAST_LIMIT 3 /* # of probes until ARP refresh broadcast */
203static u_int32_t arp_unicast_lim = ARP_UNICAST_LIMIT;
204SYSCTL_INT(_net_link_ether_inet, OID_AUTO, arp_unicast_lim,
205 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_unicast_lim, ARP_UNICAST_LIMIT,
206 "number of unicast ARP refresh probes before using broadcast");
207
208static u_int32_t arp_maxtries = 5;
209SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxtries,
210 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxtries, 0, "");
211
212static u_int32_t arp_maxhold = 16;
213SYSCTL_UINT(_net_link_ether_inet, OID_AUTO, maxhold,
214 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxhold, 0, "");
215
216static int useloopback = 1; /* use loopback interface for local traffic */
217SYSCTL_INT(_net_link_ether_inet, OID_AUTO, useloopback,
218 CTLFLAG_RW | CTLFLAG_LOCKED, &useloopback, 0, "");
219
220static int arp_proxyall = 0;
221SYSCTL_INT(_net_link_ether_inet, OID_AUTO, proxyall,
222 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_proxyall, 0, "");
223
224static int arp_sendllconflict = 0;
225SYSCTL_INT(_net_link_ether_inet, OID_AUTO, sendllconflict,
226 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_sendllconflict, 0, "");
227
228static int log_arp_warnings = 0; /* Thread safe: no accumulated state */
229SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_warnings,
230 CTLFLAG_RW | CTLFLAG_LOCKED,
231 &log_arp_warnings, 0,
232 "log arp warning messages");
233
234static int keep_announcements = 1; /* Thread safe: no aging of state */
235SYSCTL_INT(_net_link_ether_inet, OID_AUTO, keep_announcements,
236 CTLFLAG_RW | CTLFLAG_LOCKED,
237 &keep_announcements, 0,
238 "keep arp announcements");
239
240static int send_conflicting_probes = 1; /* Thread safe: no accumulated state */
241SYSCTL_INT(_net_link_ether_inet, OID_AUTO, send_conflicting_probes,
242 CTLFLAG_RW | CTLFLAG_LOCKED,
243 &send_conflicting_probes, 0,
244 "send conflicting link-local arp probes");
245
246static int arp_verbose;
247SYSCTL_INT(_net_link_ether_inet, OID_AUTO, verbose,
248 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_verbose, 0, "");
249
250static uint32_t arp_maxhold_total = 1024; /* max total packets in the holdq */
251SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxhold_total,
252 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxhold_total, 0, "");
253
254
255/*
256 * Generally protected by rnh_lock; use atomic operations on fields
257 * that are also modified outside of that lock (if needed).
258 */
259struct arpstat arpstat __attribute__((aligned(sizeof(uint64_t))));
260SYSCTL_PROC(_net_link_ether_inet, OID_AUTO, stats,
261 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
262 0, 0, arp_getstat, "S,arpstat",
263 "ARP statistics (struct arpstat, net/if_arp.h)");
264
265static struct zone *llinfo_arp_zone;
266#define LLINFO_ARP_ZONE_MAX 256 /* maximum elements in zone */
267#define LLINFO_ARP_ZONE_NAME "llinfo_arp" /* name for zone */
268
269void
270arp_init(void)
271{
272 VERIFY(!arpinit_done);
273
274 LIST_INIT(&llinfo_arp);
275
276 llinfo_arp_zone = zinit(sizeof(struct llinfo_arp),
277 LLINFO_ARP_ZONE_MAX * sizeof(struct llinfo_arp), 0,
278 LLINFO_ARP_ZONE_NAME);
279 if (llinfo_arp_zone == NULL) {
280 panic("%s: failed allocating llinfo_arp_zone", __func__);
281 }
282
283 zone_change(llinfo_arp_zone, Z_EXPAND, TRUE);
284 zone_change(llinfo_arp_zone, Z_CALLERACCT, FALSE);
285
286 arpinit_done = 1;
287}
288
289static struct llinfo_arp *
290arp_llinfo_alloc(int how)
291{
292 struct llinfo_arp *la;
293
294 la = (how == M_WAITOK) ? zalloc(llinfo_arp_zone) :
295 zalloc_noblock(llinfo_arp_zone);
296 if (la != NULL) {
297 bzero(la, sizeof(*la));
298 /*
299 * The type of queue (Q_DROPHEAD) here is just a hint;
300 * the actual logic that works on this queue performs
301 * a head drop, details in arp_llinfo_addq().
302 */
303 _qinit(&la->la_holdq, Q_DROPHEAD, (arp_maxhold == 0) ?
304 (uint32_t)-1 : arp_maxhold, QP_MBUF);
305 }
306
307 return la;
308}
309
310static void
311arp_llinfo_free(void *arg)
312{
313 struct llinfo_arp *la = arg;
314
315 if (la->la_le.le_next != NULL || la->la_le.le_prev != NULL) {
316 panic("%s: trying to free %p when it is in use", __func__, la);
317 /* NOTREACHED */
318 }
319
320 /* Free any held packets */
321 (void) arp_llinfo_flushq(la);
322
323 /* Purge any link-layer info caching */
324 VERIFY(la->la_rt->rt_llinfo == la);
325 if (la->la_rt->rt_llinfo_purge != NULL) {
326 la->la_rt->rt_llinfo_purge(la->la_rt);
327 }
328
329 zfree(llinfo_arp_zone, la);
330}
331
332static bool
333arp_llinfo_addq(struct llinfo_arp *la, struct mbuf *m)
334{
335 classq_pkt_t pkt = CLASSQ_PKT_INITIALIZER(pkt);
336
337 if (arpstat.held >= arp_maxhold_total) {
338 if (arp_verbose) {
339 log(LOG_DEBUG,
340 "%s: dropping packet due to maxhold_total\n",
341 __func__);
342 }
343 atomic_add_32(&arpstat.dropped, 1);
344 return false;
345 }
346
347 if (qlen(&la->la_holdq) >= qlimit(&la->la_holdq)) {
348 struct mbuf *_m;
349 /* prune less than CTL, else take what's at the head */
350 _getq_scidx_lt(&la->la_holdq, &pkt, SCIDX_CTL);
351 _m = pkt.cp_mbuf;
352 if (_m == NULL) {
353 _getq(&la->la_holdq, &pkt);
354 _m = pkt.cp_mbuf;
355 }
356 VERIFY(_m != NULL);
357 if (arp_verbose) {
358 log(LOG_DEBUG, "%s: dropping packet (scidx %u)\n",
359 __func__, MBUF_SCIDX(mbuf_get_service_class(_m)));
360 }
361 m_freem(_m);
362 atomic_add_32(&arpstat.dropped, 1);
363 atomic_add_32(&arpstat.held, -1);
364 }
365 CLASSQ_PKT_INIT_MBUF(&pkt, m);
366 _addq(&la->la_holdq, &pkt);
367 atomic_add_32(&arpstat.held, 1);
368 if (arp_verbose) {
369 log(LOG_DEBUG, "%s: enqueued packet (scidx %u), qlen now %u\n",
370 __func__, MBUF_SCIDX(mbuf_get_service_class(m)),
371 qlen(&la->la_holdq));
372 }
373
374 return true;
375}
376
377static uint32_t
378arp_llinfo_flushq(struct llinfo_arp *la)
379{
380 uint32_t held = qlen(&la->la_holdq);
381
382 if (held != 0) {
383 atomic_add_32(&arpstat.purged, held);
384 atomic_add_32(&arpstat.held, -held);
385 _flushq(&la->la_holdq);
386 }
387 la->la_prbreq_cnt = 0;
388 VERIFY(qempty(&la->la_holdq));
389 return held;
390}
391
392static void
393arp_llinfo_purge(struct rtentry *rt)
394{
395 struct llinfo_arp *la = rt->rt_llinfo;
396
397 RT_LOCK_ASSERT_HELD(rt);
398 VERIFY(rt->rt_llinfo_purge == arp_llinfo_purge && la != NULL);
399
400 if (la->la_llreach != NULL) {
401 RT_CONVERT_LOCK(rt);
402 ifnet_llreach_free(la->la_llreach);
403 la->la_llreach = NULL;
404 }
405 la->la_lastused = 0;
406}
407
408static void
409arp_llinfo_get_ri(struct rtentry *rt, struct rt_reach_info *ri)
410{
411 struct llinfo_arp *la = rt->rt_llinfo;
412 struct if_llreach *lr = la->la_llreach;
413
414 if (lr == NULL) {
415 bzero(ri, sizeof(*ri));
416 ri->ri_rssi = IFNET_RSSI_UNKNOWN;
417 ri->ri_lqm = IFNET_LQM_THRESH_OFF;
418 ri->ri_npm = IFNET_NPM_THRESH_UNKNOWN;
419 } else {
420 IFLR_LOCK(lr);
421 /* Export to rt_reach_info structure */
422 ifnet_lr2ri(lr, ri);
423 /* Export ARP send expiration (calendar) time */
424 ri->ri_snd_expire =
425 ifnet_llreach_up2calexp(lr, la->la_lastused);
426 IFLR_UNLOCK(lr);
427 }
428}
429
430static void
431arp_llinfo_get_iflri(struct rtentry *rt, struct ifnet_llreach_info *iflri)
432{
433 struct llinfo_arp *la = rt->rt_llinfo;
434 struct if_llreach *lr = la->la_llreach;
435
436 if (lr == NULL) {
437 bzero(iflri, sizeof(*iflri));
438 iflri->iflri_rssi = IFNET_RSSI_UNKNOWN;
439 iflri->iflri_lqm = IFNET_LQM_THRESH_OFF;
440 iflri->iflri_npm = IFNET_NPM_THRESH_UNKNOWN;
441 } else {
442 IFLR_LOCK(lr);
443 /* Export to ifnet_llreach_info structure */
444 ifnet_lr2iflri(lr, iflri);
445 /* Export ARP send expiration (uptime) time */
446 iflri->iflri_snd_expire =
447 ifnet_llreach_up2upexp(lr, la->la_lastused);
448 IFLR_UNLOCK(lr);
449 }
450}
451
452static void
453arp_llinfo_refresh(struct rtentry *rt)
454{
455 uint64_t timenow = net_uptime();
456 /*
457 * If route entry is permanent or if expiry is less
458 * than timenow and extra time taken for unicast probe
459 * we can't expedite the refresh
460 */
461 if ((rt->rt_expire == 0) ||
462 (rt->rt_flags & RTF_STATIC) ||
463 !(rt->rt_flags & RTF_LLINFO)) {
464 return;
465 }
466
467 if (rt->rt_expire > timenow) {
468 rt->rt_expire = timenow;
469 }
470 return;
471}
472
473void
474arp_llreach_set_reachable(struct ifnet *ifp, void *addr, unsigned int alen)
475{
476 /* Nothing more to do if it's disabled */
477 if (arp_llreach_base == 0) {
478 return;
479 }
480
481 ifnet_llreach_set_reachable(ifp, ETHERTYPE_IP, addr, alen);
482}
483
484static __inline void
485arp_llreach_use(struct llinfo_arp *la)
486{
487 if (la->la_llreach != NULL) {
488 la->la_lastused = net_uptime();
489 }
490}
491
492static __inline int
493arp_llreach_reachable(struct llinfo_arp *la)
494{
495 struct if_llreach *lr;
496 const char *why = NULL;
497
498 /* Nothing more to do if it's disabled; pretend it's reachable */
499 if (arp_llreach_base == 0) {
500 return 1;
501 }
502
503 if ((lr = la->la_llreach) == NULL) {
504 /*
505 * Link-layer reachability record isn't present for this
506 * ARP entry; pretend it's reachable and use it as is.
507 */
508 return 1;
509 } else if (ifnet_llreach_reachable(lr)) {
510 /*
511 * Record is present, it's not shared with other ARP
512 * entries and a packet has recently been received
513 * from the remote host; consider it reachable.
514 */
515 if (lr->lr_reqcnt == 1) {
516 return 1;
517 }
518
519 /* Prime it up, if this is the first time */
520 if (la->la_lastused == 0) {
521 VERIFY(la->la_llreach != NULL);
522 arp_llreach_use(la);
523 }
524
525 /*
526 * Record is present and shared with one or more ARP
527 * entries, and a packet has recently been received
528 * from the remote host. Since it's shared by more
529 * than one IP addresses, we can't rely on the link-
530 * layer reachability alone; consider it reachable if
531 * this ARP entry has been used "recently."
532 */
533 if (ifnet_llreach_reachable_delta(lr, la->la_lastused)) {
534 return 1;
535 }
536
537 why = "has alias(es) and hasn't been used in a while";
538 } else {
539 why = "haven't heard from it in a while";
540 }
541
542 if (arp_verbose > 1) {
543 char tmp[MAX_IPv4_STR_LEN];
544 u_int64_t now = net_uptime();
545
546 log(LOG_DEBUG, "%s: ARP probe(s) needed for %s; "
547 "%s [lastused %lld, lastrcvd %lld] secs ago\n",
548 if_name(lr->lr_ifp), inet_ntop(AF_INET,
549 &SIN(rt_key(la->la_rt))->sin_addr, tmp, sizeof(tmp)), why,
550 (la->la_lastused ? (int64_t)(now - la->la_lastused) : -1),
551 (lr->lr_lastrcvd ? (int64_t)(now - lr->lr_lastrcvd) : -1));
552 }
553 return 0;
554}
555
556/*
557 * Obtain a link-layer source cache entry for the sender.
558 *
559 * NOTE: This is currently only for ARP/Ethernet.
560 */
561static void
562arp_llreach_alloc(struct rtentry *rt, struct ifnet *ifp, void *addr,
563 unsigned int alen, boolean_t solicited, uint32_t *p_rt_event_code)
564{
565 VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
566 VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);
567
568 if (arp_llreach_base != 0 && rt->rt_expire != 0 &&
569 !(rt->rt_ifp->if_flags & IFF_LOOPBACK) &&
570 ifp->if_addrlen == IF_LLREACH_MAXLEN && /* Ethernet */
571 alen == ifp->if_addrlen) {
572 struct llinfo_arp *la = rt->rt_llinfo;
573 struct if_llreach *lr;
574 const char *why = NULL, *type = "";
575
576 /* Become a regular mutex, just in case */
577 RT_CONVERT_LOCK(rt);
578
579 if ((lr = la->la_llreach) != NULL) {
580 type = (solicited ? "ARP reply" : "ARP announcement");
581 /*
582 * If target has changed, create a new record;
583 * otherwise keep existing record.
584 */
585 IFLR_LOCK(lr);
586 if (bcmp(addr, lr->lr_key.addr, alen) != 0) {
587 IFLR_UNLOCK(lr);
588 /* Purge any link-layer info caching */
589 VERIFY(rt->rt_llinfo_purge != NULL);
590 rt->rt_llinfo_purge(rt);
591 lr = NULL;
592 why = " for different target HW address; "
593 "using new llreach record";
594 *p_rt_event_code = ROUTE_LLENTRY_CHANGED;
595 } else {
596 /*
597 * If we were doing unicast probing, we need to
598 * deliver an event for neighbor cache resolution
599 */
600 if (lr->lr_probes != 0) {
601 *p_rt_event_code = ROUTE_LLENTRY_RESOLVED;
602 }
603
604 lr->lr_probes = 0; /* reset probe count */
605 IFLR_UNLOCK(lr);
606 if (solicited) {
607 why = " for same target HW address; "
608 "keeping existing llreach record";
609 }
610 }
611 }
612
613 if (lr == NULL) {
614 lr = la->la_llreach = ifnet_llreach_alloc(ifp,
615 ETHERTYPE_IP, addr, alen, arp_llreach_base);
616 if (lr != NULL) {
617 lr->lr_probes = 0; /* reset probe count */
618 if (why == NULL) {
619 why = "creating new llreach record";
620 }
621 }
622 *p_rt_event_code = ROUTE_LLENTRY_RESOLVED;
623 }
624
625 if (arp_verbose > 1 && lr != NULL && why != NULL) {
626 char tmp[MAX_IPv4_STR_LEN];
627
628 log(LOG_DEBUG, "%s: %s%s for %s\n", if_name(ifp),
629 type, why, inet_ntop(AF_INET,
630 &SIN(rt_key(rt))->sin_addr, tmp, sizeof(tmp)));
631 }
632 }
633}
634
635struct arptf_arg {
636 boolean_t draining;
637 boolean_t probing;
638 uint32_t killed;
639 uint32_t aging;
640 uint32_t sticky;
641 uint32_t found;
642 uint32_t qlen;
643 uint32_t qsize;
644};
645
646/*
647 * Free an arp entry.
648 */
649static void
650arptfree(struct llinfo_arp *la, void *arg)
651{
652 struct arptf_arg *ap = arg;
653 struct rtentry *rt = la->la_rt;
654 uint64_t timenow;
655
656 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
657
658 /* rnh_lock acquired by caller protects rt from going away */
659 RT_LOCK(rt);
660
661 VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
662 VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);
663
664 ap->found++;
665 timenow = net_uptime();
666
667 /* If we're probing, flush out held packets upon probe expiration */
668 if (ap->probing && (la->la_flags & LLINFO_PROBING) &&
669 la->la_probeexp <= timenow) {
670 struct sockaddr_dl *sdl = SDL(rt->rt_gateway);
671 if (sdl != NULL) {
672 sdl->sdl_alen = 0;
673 }
674 (void) arp_llinfo_flushq(la);
675 /*
676 * Enqueue work item to invoke callback for this route entry
677 */
678 route_event_enqueue_nwk_wq_entry(rt, NULL,
679 ROUTE_LLENTRY_UNREACH, NULL, TRUE);
680 }
681
682 /*
683 * The following is mostly being used to arm the timer
684 * again and for logging.
685 * qlen is used to re-arm the timer. Therefore, pure probe
686 * requests can be considered as 0 length packets
687 * contributing only to length but not to the size.
688 */
689 ap->qlen += qlen(&la->la_holdq);
690 ap->qlen += la->la_prbreq_cnt;
691 ap->qsize += qsize(&la->la_holdq);
692
693 if (rt->rt_expire == 0 || (rt->rt_flags & RTF_STATIC)) {
694 ap->sticky++;
695 /* ARP entry is permanent? */
696 if (rt->rt_expire == 0) {
697 RT_UNLOCK(rt);
698 return;
699 }
700 }
701
702 /* ARP entry hasn't expired and we're not draining? */
703 if (!ap->draining && rt->rt_expire > timenow) {
704 RT_UNLOCK(rt);
705 ap->aging++;
706 return;
707 }
708
709 if (rt->rt_refcnt > 0) {
710 /*
711 * ARP entry has expired, with outstanding refcnt.
712 * If we're not draining, force ARP query to be
713 * generated next time this entry is used.
714 */
715 if (!ap->draining && !ap->probing) {
716 struct sockaddr_dl *sdl = SDL(rt->rt_gateway);
717 if (sdl != NULL) {
718 sdl->sdl_alen = 0;
719 }
720 la->la_asked = 0;
721 rt->rt_flags &= ~RTF_REJECT;
722 }
723 RT_UNLOCK(rt);
724 } else if (!(rt->rt_flags & RTF_STATIC) && !ap->probing) {
725 /*
726 * ARP entry has no outstanding refcnt, and we're either
727 * draining or it has expired; delete it from the routing
728 * table. Safe to drop rt_lock and use rt_key, since holding
729 * rnh_lock here prevents another thread from calling
730 * rt_setgate() on this route.
731 */
732 RT_UNLOCK(rt);
733 rtrequest_locked(RTM_DELETE, rt_key(rt), NULL,
734 rt_mask(rt), 0, NULL);
735 arpstat.timeouts++;
736 ap->killed++;
737 } else {
738 /* ARP entry is static; let it linger */
739 RT_UNLOCK(rt);
740 }
741}
742
743void
744in_arpdrain(void *arg)
745{
746#pragma unused(arg)
747 struct llinfo_arp *la, *ola;
748 struct arptf_arg farg;
749
750 if (arp_verbose) {
751 log(LOG_DEBUG, "%s: draining ARP entries\n", __func__);
752 }
753
754 lck_mtx_lock(rnh_lock);
755 la = llinfo_arp.lh_first;
756 bzero(&farg, sizeof(farg));
757 farg.draining = TRUE;
758 while ((ola = la) != NULL) {
759 la = la->la_le.le_next;
760 arptfree(ola, &farg);
761 }
762 if (arp_verbose) {
763 log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
764 "%u pkts held (%u bytes)\n", __func__, farg.found,
765 farg.aging, farg.sticky, farg.killed, farg.qlen,
766 farg.qsize);
767 }
768 lck_mtx_unlock(rnh_lock);
769}
770
771/*
772 * Timeout routine. Age arp_tab entries periodically.
773 */
774static void
775arp_timeout(thread_call_param_t arg0, thread_call_param_t arg1)
776{
777#pragma unused(arg0, arg1)
778 struct llinfo_arp *la, *ola;
779 struct timeval atv;
780 struct arptf_arg farg;
781
782 lck_mtx_lock(rnh_lock);
783 la = llinfo_arp.lh_first;
784 bzero(&farg, sizeof(farg));
785 while ((ola = la) != NULL) {
786 la = la->la_le.le_next;
787 arptfree(ola, &farg);
788 }
789 if (arp_verbose) {
790 log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
791 "%u pkts held (%u bytes)\n", __func__, farg.found,
792 farg.aging, farg.sticky, farg.killed, farg.qlen,
793 farg.qsize);
794 }
795 atv.tv_usec = 0;
796 atv.tv_sec = MAX(arpt_prune, 5);
797 /* re-arm the timer if there's work to do */
798 arp_timeout_run = 0;
799 if (farg.aging > 0) {
800 arp_sched_timeout(&atv);
801 } else if (arp_verbose) {
802 log(LOG_DEBUG, "%s: not rescheduling timer\n", __func__);
803 }
804 lck_mtx_unlock(rnh_lock);
805}
806
807static void
808arp_sched_timeout(struct timeval *atv)
809{
810 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
811
812 if (!arp_timeout_run) {
813 struct timeval tv;
814 uint64_t deadline = 0;
815
816 if (arp_timeout_tcall == NULL) {
817 arp_timeout_tcall =
818 thread_call_allocate(arp_timeout, NULL);
819 VERIFY(arp_timeout_tcall != NULL);
820 }
821
822 if (atv == NULL) {
823 tv.tv_usec = 0;
824 tv.tv_sec = MAX(arpt_prune / 5, 1);
825 atv = &tv;
826 }
827 if (arp_verbose) {
828 log(LOG_DEBUG, "%s: timer scheduled in "
829 "T+%llus.%lluu\n", __func__,
830 (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
831 }
832 arp_timeout_run = 1;
833
834 clock_deadline_for_periodic_event(atv->tv_sec * NSEC_PER_SEC,
835 mach_absolute_time(), &deadline);
836 (void) thread_call_enter_delayed(arp_timeout_tcall, deadline);
837 }
838}
839
840/*
841 * Probe routine.
842 */
843static void
844arp_probe(thread_call_param_t arg0, thread_call_param_t arg1)
845{
846#pragma unused(arg0, arg1)
847 struct llinfo_arp *la, *ola;
848 struct timeval atv;
849 struct arptf_arg farg;
850
851 lck_mtx_lock(rnh_lock);
852 la = llinfo_arp.lh_first;
853 bzero(&farg, sizeof(farg));
854 farg.probing = TRUE;
855 while ((ola = la) != NULL) {
856 la = la->la_le.le_next;
857 arptfree(ola, &farg);
858 }
859 if (arp_verbose) {
860 log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
861 "%u pkts held (%u bytes)\n", __func__, farg.found,
862 farg.aging, farg.sticky, farg.killed, farg.qlen,
863 farg.qsize);
864 }
865 atv.tv_usec = 0;
866 atv.tv_sec = MAX(arpt_probe, ARP_PROBE_TIME);
867 /* re-arm the probe if there's work to do */
868 arp_probe_run = 0;
869 if (farg.qlen > 0) {
870 arp_sched_probe(&atv);
871 } else if (arp_verbose) {
872 log(LOG_DEBUG, "%s: not rescheduling probe\n", __func__);
873 }
874 lck_mtx_unlock(rnh_lock);
875}
876
877static void
878arp_sched_probe(struct timeval *atv)
879{
880 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
881
882 if (!arp_probe_run) {
883 struct timeval tv;
884 uint64_t deadline = 0;
885
886 if (arp_probe_tcall == NULL) {
887 arp_probe_tcall =
888 thread_call_allocate(arp_probe, NULL);
889 VERIFY(arp_probe_tcall != NULL);
890 }
891
892 if (atv == NULL) {
893 tv.tv_usec = 0;
894 tv.tv_sec = MAX(arpt_probe, ARP_PROBE_TIME);
895 atv = &tv;
896 }
897 if (arp_verbose) {
898 log(LOG_DEBUG, "%s: probe scheduled in "
899 "T+%llus.%lluu\n", __func__,
900 (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
901 }
902 arp_probe_run = 1;
903
904 clock_deadline_for_periodic_event(atv->tv_sec * NSEC_PER_SEC,
905 mach_absolute_time(), &deadline);
906 (void) thread_call_enter_delayed(arp_probe_tcall, deadline);
907 }
908}
909
910/*
911 * ifa_rtrequest() callback
912 */
913static void
914arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa)
915{
916#pragma unused(sa)
917 struct sockaddr *gate = rt->rt_gateway;
918 struct llinfo_arp *la = rt->rt_llinfo;
919 static struct sockaddr_dl null_sdl =
920 { .sdl_len = sizeof(null_sdl), .sdl_family = AF_LINK };
921 uint64_t timenow;
922 char buf[MAX_IPv4_STR_LEN];
923
924 VERIFY(arpinit_done);
925 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
926 RT_LOCK_ASSERT_HELD(rt);
927
928 if (rt->rt_flags & RTF_GATEWAY) {
929 return;
930 }
931
932 timenow = net_uptime();
933 switch (req) {
934 case RTM_ADD:
935 /*
936 * XXX: If this is a manually added route to interface
937 * such as older version of routed or gated might provide,
938 * restore cloning bit.
939 */
940 if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL &&
941 SIN(rt_mask(rt))->sin_addr.s_addr != INADDR_BROADCAST) {
942 rt->rt_flags |= RTF_CLONING;
943 }
944
945 if (rt->rt_flags & RTF_CLONING) {
946 /*
947 * Case 1: This route should come from a route to iface.
948 */
949 if (rt_setgate(rt, rt_key(rt), SA(&null_sdl)) == 0) {
950 gate = rt->rt_gateway;
951 SDL(gate)->sdl_type = rt->rt_ifp->if_type;
952 SDL(gate)->sdl_index = rt->rt_ifp->if_index;
953 /*
954 * In case we're called before 1.0 sec.
955 * has elapsed.
956 */
957 rt_setexpire(rt, MAX(timenow, 1));
958 }
959 break;
960 }
961 /* Announce a new entry if requested. */
962 if (rt->rt_flags & RTF_ANNOUNCE) {
963 if (la != NULL) {
964 arp_llreach_use(la); /* Mark use timestamp */
965 }
966 RT_UNLOCK(rt);
967 dlil_send_arp(rt->rt_ifp, ARPOP_REQUEST,
968 SDL(gate), rt_key(rt), NULL, rt_key(rt), 0);
969 RT_LOCK(rt);
970 arpstat.txannounces++;
971 }
972 /* FALLTHRU */
973 case RTM_RESOLVE:
974 if (gate->sa_family != AF_LINK ||
975 gate->sa_len < sizeof(null_sdl)) {
976 arpstat.invalidreqs++;
977 log(LOG_ERR, "%s: route to %s has bad gateway address "
978 "(sa_family %u sa_len %u) on %s\n",
979 __func__, inet_ntop(AF_INET,
980 &SIN(rt_key(rt))->sin_addr.s_addr, buf,
981 sizeof(buf)), gate->sa_family, gate->sa_len,
982 if_name(rt->rt_ifp));
983 break;
984 }
985 SDL(gate)->sdl_type = rt->rt_ifp->if_type;
986 SDL(gate)->sdl_index = rt->rt_ifp->if_index;
987
988 if (la != NULL) {
989 break; /* This happens on a route change */
990 }
991 /*
992 * Case 2: This route may come from cloning, or a manual route
993 * add with a LL address.
994 */
995 rt->rt_llinfo = la = arp_llinfo_alloc(M_WAITOK);
996 if (la == NULL) {
997 arpstat.reqnobufs++;
998 break;
999 }
1000 rt->rt_llinfo_get_ri = arp_llinfo_get_ri;
1001 rt->rt_llinfo_get_iflri = arp_llinfo_get_iflri;
1002 rt->rt_llinfo_purge = arp_llinfo_purge;
1003 rt->rt_llinfo_free = arp_llinfo_free;
1004 rt->rt_llinfo_refresh = arp_llinfo_refresh;
1005 rt->rt_flags |= RTF_LLINFO;
1006 la->la_rt = rt;
1007 LIST_INSERT_HEAD(&llinfo_arp, la, la_le);
1008 arpstat.inuse++;
1009
1010 /* We have at least one entry; arm the timer if not already */
1011 arp_sched_timeout(NULL);
1012
1013 /*
1014 * This keeps the multicast addresses from showing up
1015 * in `arp -a' listings as unresolved. It's not actually
1016 * functional. Then the same for broadcast. For IPv4
1017 * link-local address, keep the entry around even after
1018 * it has expired.
1019 */
1020 if (IN_MULTICAST(ntohl(SIN(rt_key(rt))->sin_addr.s_addr))) {
1021 RT_UNLOCK(rt);
1022 dlil_resolve_multi(rt->rt_ifp, rt_key(rt), gate,
1023 sizeof(struct sockaddr_dl));
1024 RT_LOCK(rt);
1025 rt_setexpire(rt, 0);
1026 } else if (in_broadcast(SIN(rt_key(rt))->sin_addr,
1027 rt->rt_ifp)) {
1028 struct sockaddr_dl *gate_ll = SDL(gate);
1029 size_t broadcast_len;
1030 ifnet_llbroadcast_copy_bytes(rt->rt_ifp,
1031 LLADDR(gate_ll), sizeof(gate_ll->sdl_data),
1032 &broadcast_len);
1033 gate_ll->sdl_alen = broadcast_len;
1034 gate_ll->sdl_family = AF_LINK;
1035 gate_ll->sdl_len = sizeof(struct sockaddr_dl);
1036 /* In case we're called before 1.0 sec. has elapsed */
1037 rt_setexpire(rt, MAX(timenow, 1));
1038 } else if (IN_LINKLOCAL(ntohl(SIN(rt_key(rt))->
1039 sin_addr.s_addr))) {
1040 rt->rt_flags |= RTF_STATIC;
1041 }
1042
1043 /* Set default maximum number of retries */
1044 la->la_maxtries = arp_maxtries;
1045
1046 /* Become a regular mutex, just in case */
1047 RT_CONVERT_LOCK(rt);
1048 IFA_LOCK_SPIN(rt->rt_ifa);
1049 if (SIN(rt_key(rt))->sin_addr.s_addr ==
1050 (IA_SIN(rt->rt_ifa))->sin_addr.s_addr) {
1051 IFA_UNLOCK(rt->rt_ifa);
1052 /*
1053 * This test used to be
1054 * if (loif.if_flags & IFF_UP)
1055 * It allowed local traffic to be forced through the
1056 * hardware by configuring the loopback down. However,
1057 * it causes problems during network configuration
1058 * for boards that can't receive packets they send.
1059 * It is now necessary to clear "useloopback" and
1060 * remove the route to force traffic out to the
1061 * hardware.
1062 */
1063 rt_setexpire(rt, 0);
1064 ifnet_lladdr_copy_bytes(rt->rt_ifp, LLADDR(SDL(gate)),
1065 SDL(gate)->sdl_alen = rt->rt_ifp->if_addrlen);
1066 if (useloopback) {
1067 if (rt->rt_ifp != lo_ifp) {
1068 /*
1069 * Purge any link-layer info caching.
1070 */
1071 if (rt->rt_llinfo_purge != NULL) {
1072 rt->rt_llinfo_purge(rt);
1073 }
1074
1075 /*
1076 * Adjust route ref count for the
1077 * interfaces.
1078 */
1079 if (rt->rt_if_ref_fn != NULL) {
1080 rt->rt_if_ref_fn(lo_ifp, 1);
1081 rt->rt_if_ref_fn(rt->rt_ifp, -1);
1082 }
1083 }
1084 rt->rt_ifp = lo_ifp;
1085 /*
1086 * If rmx_mtu is not locked, update it
1087 * to the MTU used by the new interface.
1088 */
1089 if (!(rt->rt_rmx.rmx_locks & RTV_MTU)) {
1090 rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
1091 }
1092 }
1093 } else {
1094 IFA_UNLOCK(rt->rt_ifa);
1095 }
1096 break;
1097
1098 case RTM_DELETE:
1099 if (la == NULL) {
1100 break;
1101 }
1102 /*
1103 * Unchain it but defer the actual freeing until the route
1104 * itself is to be freed. rt->rt_llinfo still points to
1105 * llinfo_arp, and likewise, la->la_rt still points to this
1106 * route entry, except that RTF_LLINFO is now cleared.
1107 */
1108 LIST_REMOVE(la, la_le);
1109 la->la_le.le_next = NULL;
1110 la->la_le.le_prev = NULL;
1111 arpstat.inuse--;
1112
1113 /*
1114 * Purge any link-layer info caching.
1115 */
1116 if (rt->rt_llinfo_purge != NULL) {
1117 rt->rt_llinfo_purge(rt);
1118 }
1119
1120 rt->rt_flags &= ~RTF_LLINFO;
1121 (void) arp_llinfo_flushq(la);
1122 }
1123}
1124
1125/*
1126 * convert hardware address to hex string for logging errors.
1127 */
1128static const char *
1129sdl_addr_to_hex(const struct sockaddr_dl *sdl, char *orig_buf, int buflen)
1130{
1131 char *buf = orig_buf;
1132 int i;
1133 const u_char *lladdr = (u_char *)(size_t)sdl->sdl_data;
1134 int maxbytes = buflen / 3;
1135
1136 if (maxbytes > sdl->sdl_alen) {
1137 maxbytes = sdl->sdl_alen;
1138 }
1139 *buf = '\0';
1140 for (i = 0; i < maxbytes; i++) {
1141 snprintf(buf, 3, "%02x", lladdr[i]);
1142 buf += 2;
1143 *buf = (i == maxbytes - 1) ? '\0' : ':';
1144 buf++;
1145 }
1146 return orig_buf;
1147}
1148
1149/*
1150 * arp_lookup_route will lookup the route for a given address.
1151 *
1152 * The address must be for a host on a local network on this interface.
1153 * If the returned route is non-NULL, the route is locked and the caller
1154 * is responsible for unlocking it and releasing its reference.
1155 */
1156static errno_t
1157arp_lookup_route(const struct in_addr *addr, int create, int proxy,
1158 route_t *route, unsigned int ifscope)
1159{
1160 struct sockaddr_inarp sin =
1161 { sizeof(sin), AF_INET, 0, { 0 }, { 0 }, 0, 0 };
1162 const char *why = NULL;
1163 errno_t error = 0;
1164 route_t rt;
1165
1166 *route = NULL;
1167
1168 sin.sin_addr.s_addr = addr->s_addr;
1169 sin.sin_other = proxy ? SIN_PROXY : 0;
1170
1171 /*
1172 * If the destination is a link-local address, don't
1173 * constrain the lookup (don't scope it).
1174 */
1175 if (IN_LINKLOCAL(ntohl(addr->s_addr))) {
1176 ifscope = IFSCOPE_NONE;
1177 }
1178
1179 rt = rtalloc1_scoped((struct sockaddr *)&sin, create, 0, ifscope);
1180 if (rt == NULL) {
1181 return ENETUNREACH;
1182 }
1183
1184 RT_LOCK(rt);
1185
1186 if (rt->rt_flags & RTF_GATEWAY) {
1187 why = "host is not on local network";
1188 error = ENETUNREACH;
1189 } else if (!(rt->rt_flags & RTF_LLINFO)) {
1190 why = "could not allocate llinfo";
1191 error = ENOMEM;
1192 } else if (rt->rt_gateway->sa_family != AF_LINK) {
1193 why = "gateway route is not ours";
1194 error = EPROTONOSUPPORT;
1195 }
1196
1197 if (error != 0) {
1198 if (create && (arp_verbose || log_arp_warnings)) {
1199 char tmp[MAX_IPv4_STR_LEN];
1200 log(LOG_DEBUG, "%s: link#%d %s failed: %s\n",
1201 __func__, ifscope, inet_ntop(AF_INET, addr, tmp,
1202 sizeof(tmp)), why);
1203 }
1204
1205 /*
1206 * If there are no references to this route, and it is
1207 * a cloned route, and not static, and ARP had created
1208 * the route, then purge it from the routing table as
1209 * it is probably bogus.
1210 */
1211 if (rt->rt_refcnt == 1 &&
1212 (rt->rt_flags & (RTF_WASCLONED | RTF_STATIC)) ==
1213 RTF_WASCLONED) {
1214 /*
1215 * Prevent another thread from modiying rt_key,
1216 * rt_gateway via rt_setgate() after rt_lock is
1217 * dropped by marking the route as defunct.
1218 */
1219 rt->rt_flags |= RTF_CONDEMNED;
1220 RT_UNLOCK(rt);
1221 rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
1222 rt_mask(rt), rt->rt_flags, NULL);
1223 rtfree(rt);
1224 } else {
1225 RT_REMREF_LOCKED(rt);
1226 RT_UNLOCK(rt);
1227 }
1228 return error;
1229 }
1230
1231 /*
1232 * Caller releases reference and does RT_UNLOCK(rt).
1233 */
1234 *route = rt;
1235 return 0;
1236}
1237
1238boolean_t
1239arp_is_entry_probing(route_t p_route)
1240{
1241 struct llinfo_arp *llinfo = p_route->rt_llinfo;
1242
1243 if (llinfo != NULL &&
1244 llinfo->la_llreach != NULL &&
1245 llinfo->la_llreach->lr_probes != 0) {
1246 return TRUE;
1247 }
1248
1249 return FALSE;
1250}
1251
1252/*
1253 * This is the ARP pre-output routine; care must be taken to ensure that
1254 * the "hint" route never gets freed via rtfree(), since the caller may
1255 * have stored it inside a struct route with a reference held for that
1256 * placeholder.
1257 */
1258errno_t
1259arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest,
1260 struct sockaddr_dl *ll_dest, size_t ll_dest_len, route_t hint,
1261 mbuf_t packet)
1262{
1263 route_t route = NULL; /* output route */
1264 errno_t result = 0;
1265 struct sockaddr_dl *gateway;
1266 struct llinfo_arp *llinfo = NULL;
1267 boolean_t usable, probing = FALSE;
1268 uint64_t timenow;
1269 struct if_llreach *lr;
1270 struct ifaddr *rt_ifa;
1271 struct sockaddr *sa;
1272 uint32_t rtflags;
1273 struct sockaddr_dl sdl;
1274 boolean_t send_probe_notif = FALSE;
1275 boolean_t enqueued = FALSE;
1276
1277 if (ifp == NULL || net_dest == NULL) {
1278 return EINVAL;
1279 }
1280
1281 if (net_dest->sin_family != AF_INET) {
1282 return EAFNOSUPPORT;
1283 }
1284
1285 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING)) {
1286 return ENETDOWN;
1287 }
1288
1289 /*
1290 * If we were given a route, verify the route and grab the gateway
1291 */
1292 if (hint != NULL) {
1293 /*
1294 * Callee holds a reference on the route and returns
1295 * with the route entry locked, upon success.
1296 */
1297 result = route_to_gwroute((const struct sockaddr *)
1298 net_dest, hint, &route);
1299 if (result != 0) {
1300 return result;
1301 }
1302 if (route != NULL) {
1303 RT_LOCK_ASSERT_HELD(route);
1304 }
1305 }
1306
1307 if ((packet != NULL && (packet->m_flags & M_BCAST)) ||
1308 in_broadcast(net_dest->sin_addr, ifp)) {
1309 size_t broadcast_len;
1310 bzero(ll_dest, ll_dest_len);
1311 result = ifnet_llbroadcast_copy_bytes(ifp, LLADDR(ll_dest),
1312 ll_dest_len - offsetof(struct sockaddr_dl, sdl_data),
1313 &broadcast_len);
1314 if (result == 0) {
1315 ll_dest->sdl_alen = broadcast_len;
1316 ll_dest->sdl_family = AF_LINK;
1317 ll_dest->sdl_len = sizeof(struct sockaddr_dl);
1318 }
1319 goto release;
1320 }
1321 if ((packet != NULL && (packet->m_flags & M_MCAST)) ||
1322 ((ifp->if_flags & IFF_MULTICAST) &&
1323 IN_MULTICAST(ntohl(net_dest->sin_addr.s_addr)))) {
1324 if (route != NULL) {
1325 RT_UNLOCK(route);
1326 }
1327 result = dlil_resolve_multi(ifp,
1328 (const struct sockaddr *)net_dest,
1329 (struct sockaddr *)ll_dest, ll_dest_len);
1330 if (route != NULL) {
1331 RT_LOCK(route);
1332 }
1333 goto release;
1334 }
1335
1336 /*
1337 * If we didn't find a route, or the route doesn't have
1338 * link layer information, trigger the creation of the
1339 * route and link layer information.
1340 */
1341 if (route == NULL || route->rt_llinfo == NULL) {
1342 /* Clean up now while we can */
1343 if (route != NULL) {
1344 if (route == hint) {
1345 RT_REMREF_LOCKED(route);
1346 RT_UNLOCK(route);
1347 } else {
1348 RT_UNLOCK(route);
1349 rtfree(route);
1350 }
1351 }
1352 /*
1353 * Callee holds a reference on the route and returns
1354 * with the route entry locked, upon success.
1355 */
1356 result = arp_lookup_route(&net_dest->sin_addr, 1, 0, &route,
1357 ifp->if_index);
1358 if (result == 0) {
1359 RT_LOCK_ASSERT_HELD(route);
1360 }
1361 }
1362
1363 if (result || route == NULL || (llinfo = route->rt_llinfo) == NULL) {
1364 /* In case result is 0 but no route, return an error */
1365 if (result == 0) {
1366 result = EHOSTUNREACH;
1367 }
1368
1369 if (route != NULL && route->rt_llinfo == NULL) {
1370 char tmp[MAX_IPv4_STR_LEN];
1371 log(LOG_ERR, "%s: can't allocate llinfo for %s\n",
1372 __func__, inet_ntop(AF_INET, &net_dest->sin_addr,
1373 tmp, sizeof(tmp)));
1374 }
1375 goto release;
1376 }
1377
1378 /*
1379 * Now that we have the right route, is it filled in?
1380 */
1381 gateway = SDL(route->rt_gateway);
1382 timenow = net_uptime();
1383 VERIFY(route->rt_expire == 0 || route->rt_rmx.rmx_expire != 0);
1384 VERIFY(route->rt_expire != 0 || route->rt_rmx.rmx_expire == 0);
1385
1386 usable = ((route->rt_expire == 0 || route->rt_expire > timenow) &&
1387 gateway != NULL && gateway->sdl_family == AF_LINK &&
1388 gateway->sdl_alen != 0);
1389
1390 if (usable) {
1391 boolean_t unreachable = !arp_llreach_reachable(llinfo);
1392
1393 /* Entry is usable, so fill in info for caller */
1394 bcopy(gateway, ll_dest, MIN(gateway->sdl_len, ll_dest_len));
1395 result = 0;
1396 arp_llreach_use(llinfo); /* Mark use timestamp */
1397
1398 lr = llinfo->la_llreach;
1399 if (lr == NULL) {
1400 goto release;
1401 }
1402 rt_ifa = route->rt_ifa;
1403
1404 /* Become a regular mutex, just in case */
1405 RT_CONVERT_LOCK(route);
1406 IFLR_LOCK_SPIN(lr);
1407
1408 if ((unreachable || (llinfo->la_flags & LLINFO_PROBING)) &&
1409 lr->lr_probes < arp_unicast_lim) {
1410 /*
1411 * Thus mark the entry with la_probeexp deadline to
1412 * trigger the probe timer to be scheduled (if not
1413 * already). This gets cleared the moment we get
1414 * an ARP reply.
1415 */
1416 probing = TRUE;
1417 if (lr->lr_probes == 0) {
1418 llinfo->la_probeexp = (timenow + arpt_probe);
1419 llinfo->la_flags |= LLINFO_PROBING;
1420 /*
1421 * Provide notification that ARP unicast
1422 * probing has started.
1423 * We only do it for the first unicast probe
1424 * attempt.
1425 */
1426 send_probe_notif = TRUE;
1427 }
1428
1429 /*
1430 * Start the unicast probe and anticipate a reply;
1431 * afterwards, return existing entry to caller and
1432 * let it be used anyway. If peer is non-existent
1433 * we'll broadcast ARP next time around.
1434 */
1435 lr->lr_probes++;
1436 bzero(&sdl, sizeof(sdl));
1437 sdl.sdl_alen = ifp->if_addrlen;
1438 bcopy(&lr->lr_key.addr, LLADDR(&sdl),
1439 ifp->if_addrlen);
1440 IFLR_UNLOCK(lr);
1441 IFA_LOCK_SPIN(rt_ifa);
1442 IFA_ADDREF_LOCKED(rt_ifa);
1443 sa = rt_ifa->ifa_addr;
1444 IFA_UNLOCK(rt_ifa);
1445 rtflags = route->rt_flags;
1446 RT_UNLOCK(route);
1447 dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa,
1448 (const struct sockaddr_dl *)&sdl,
1449 (const struct sockaddr *)net_dest, rtflags);
1450 IFA_REMREF(rt_ifa);
1451 RT_LOCK(route);
1452 goto release;
1453 } else {
1454 IFLR_UNLOCK(lr);
1455 if (!unreachable &&
1456 !(llinfo->la_flags & LLINFO_PROBING)) {
1457 /*
1458 * Normal case where peer is still reachable,
1459 * we're not probing and if_addrlen is anything
1460 * but IF_LLREACH_MAXLEN.
1461 */
1462 goto release;
1463 }
1464 }
1465 }
1466
1467 if (ifp->if_flags & IFF_NOARP) {
1468 result = ENOTSUP;
1469 goto release;
1470 }
1471
1472 /*
1473 * Route wasn't complete/valid; we need to send out ARP request.
1474 * If we've exceeded the limit of la_holdq, drop from the head
1475 * of queue and add this packet to the tail. If we end up with
1476 * RTF_REJECT below, we'll dequeue this from tail and have the
1477 * caller free the packet instead. It's safe to do that since
1478 * we still hold the route's rt_lock.
1479 */
1480 if (packet != NULL) {
1481 enqueued = arp_llinfo_addq(llinfo, packet);
1482 } else {
1483 llinfo->la_prbreq_cnt++;
1484 }
1485 /*
1486 * Regardless of permanent vs. expirable entry, we need to
1487 * avoid having packets sit in la_holdq forever; thus mark the
1488 * entry with la_probeexp deadline to trigger the probe timer
1489 * to be scheduled (if not already). This gets cleared the
1490 * moment we get an ARP reply.
1491 */
1492 probing = TRUE;
1493 if ((qlen(&llinfo->la_holdq) + llinfo->la_prbreq_cnt) == 1) {
1494 llinfo->la_probeexp = (timenow + arpt_probe);
1495 llinfo->la_flags |= LLINFO_PROBING;
1496 }
1497
1498 if (route->rt_expire) {
1499 route->rt_flags &= ~RTF_REJECT;
1500 if (llinfo->la_asked == 0 || route->rt_expire != timenow) {
1501 rt_setexpire(route, timenow);
1502 if (llinfo->la_asked++ < llinfo->la_maxtries) {
1503 struct kev_msg ev_msg;
1504 struct kev_in_arpfailure in_arpfailure;
1505 boolean_t sendkev = FALSE;
1506
1507 rt_ifa = route->rt_ifa;
1508 lr = llinfo->la_llreach;
1509 /* Become a regular mutex, just in case */
1510 RT_CONVERT_LOCK(route);
1511 /* Update probe count, if applicable */
1512 if (lr != NULL) {
1513 IFLR_LOCK_SPIN(lr);
1514 lr->lr_probes++;
1515 IFLR_UNLOCK(lr);
1516 }
1517 if (ifp->if_addrlen == IF_LLREACH_MAXLEN &&
1518 route->rt_flags & RTF_ROUTER &&
1519 llinfo->la_asked > 1) {
1520 sendkev = TRUE;
1521 llinfo->la_flags |= LLINFO_RTRFAIL_EVTSENT;
1522 }
1523 IFA_LOCK_SPIN(rt_ifa);
1524 IFA_ADDREF_LOCKED(rt_ifa);
1525 sa = rt_ifa->ifa_addr;
1526 IFA_UNLOCK(rt_ifa);
1527 arp_llreach_use(llinfo); /* Mark use tstamp */
1528 rtflags = route->rt_flags;
1529 RT_UNLOCK(route);
1530 dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa,
1531 NULL, (const struct sockaddr *)net_dest,
1532 rtflags);
1533 IFA_REMREF(rt_ifa);
1534 if (sendkev) {
1535 bzero(&ev_msg, sizeof(ev_msg));
1536 bzero(&in_arpfailure,
1537 sizeof(in_arpfailure));
1538 in_arpfailure.link_data.if_family =
1539 ifp->if_family;
1540 in_arpfailure.link_data.if_unit =
1541 ifp->if_unit;
1542 strlcpy(in_arpfailure.link_data.if_name,
1543 ifp->if_name, IFNAMSIZ);
1544 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1545 ev_msg.kev_class = KEV_NETWORK_CLASS;
1546 ev_msg.kev_subclass = KEV_INET_SUBCLASS;
1547 ev_msg.event_code =
1548 KEV_INET_ARPRTRFAILURE;
1549 ev_msg.dv[0].data_ptr = &in_arpfailure;
1550 ev_msg.dv[0].data_length =
1551 sizeof(struct
1552 kev_in_arpfailure);
1553 dlil_post_complete_msg(NULL, &ev_msg);
1554 }
1555 result = EJUSTRETURN;
1556 RT_LOCK(route);
1557 goto release;
1558 } else {
1559 route->rt_flags |= RTF_REJECT;
1560 rt_setexpire(route,
1561 route->rt_expire + arpt_down);
1562 llinfo->la_asked = 0;
1563 /*
1564 * Remove the packet that was just added above;
1565 * don't free it since we're not returning
1566 * EJUSTRETURN. The caller will handle the
1567 * freeing. Since we haven't dropped rt_lock
1568 * from the time of _addq() above, this packet
1569 * must be at the tail.
1570 */
1571 if (packet != NULL && enqueued) {
1572 classq_pkt_t pkt =
1573 CLASSQ_PKT_INITIALIZER(pkt);
1574
1575 _getq_tail(&llinfo->la_holdq, &pkt);
1576 atomic_add_32(&arpstat.held, -1);
1577 VERIFY(pkt.cp_mbuf == packet);
1578 }
1579 result = EHOSTUNREACH;
1580 /*
1581 * Enqueue work item to invoke callback for this route entry
1582 */
1583 route_event_enqueue_nwk_wq_entry(route, NULL,
1584 ROUTE_LLENTRY_UNREACH, NULL, TRUE);
1585 goto release;
1586 }
1587 }
1588 }
1589
1590 /* The packet is now held inside la_holdq or dropped */
1591 result = EJUSTRETURN;
1592 if (packet != NULL && !enqueued) {
1593 mbuf_free(packet);
1594 packet = NULL;
1595 }
1596
1597release:
1598 if (result == EHOSTUNREACH) {
1599 atomic_add_32(&arpstat.dropped, 1);
1600 }
1601
1602 if (route != NULL) {
1603 if (send_probe_notif) {
1604 route_event_enqueue_nwk_wq_entry(route, NULL,
1605 ROUTE_LLENTRY_PROBED, NULL, TRUE);
1606
1607 if (route->rt_flags & RTF_ROUTER) {
1608 struct radix_node_head *rnh = NULL;
1609 struct route_event rt_ev;
1610 route_event_init(&rt_ev, route, NULL, ROUTE_LLENTRY_PROBED);
1611 /*
1612 * We already have a reference on rt. The function
1613 * frees it before returning.
1614 */
1615 RT_UNLOCK(route);
1616 lck_mtx_lock(rnh_lock);
1617 rnh = rt_tables[AF_INET];
1618
1619 if (rnh != NULL) {
1620 (void) rnh->rnh_walktree(rnh,
1621 route_event_walktree, (void *)&rt_ev);
1622 }
1623 lck_mtx_unlock(rnh_lock);
1624 RT_LOCK(route);
1625 }
1626 }
1627
1628 if (route == hint) {
1629 RT_REMREF_LOCKED(route);
1630 RT_UNLOCK(route);
1631 } else {
1632 RT_UNLOCK(route);
1633 rtfree(route);
1634 }
1635 }
1636 if (probing) {
1637 /* Do this after we drop rt_lock to preserve ordering */
1638 lck_mtx_lock(rnh_lock);
1639 arp_sched_probe(NULL);
1640 lck_mtx_unlock(rnh_lock);
1641 }
1642 return result;
1643}
1644
1645errno_t
1646arp_ip_handle_input(ifnet_t ifp, u_short arpop,
1647 const struct sockaddr_dl *sender_hw, const struct sockaddr_in *sender_ip,
1648 const struct sockaddr_in *target_ip)
1649{
1650 char ipv4str[MAX_IPv4_STR_LEN];
1651 struct sockaddr_dl proxied;
1652 struct sockaddr_dl *gateway, *target_hw = NULL;
1653 struct ifaddr *ifa;
1654 struct in_ifaddr *ia;
1655 struct in_ifaddr *best_ia = NULL;
1656 struct sockaddr_in best_ia_sin;
1657 route_t route = NULL;
1658 char buf[3 * MAX_HW_LEN]; /* enough for MAX_HW_LEN byte hw address */
1659 struct llinfo_arp *llinfo;
1660 errno_t error;
1661 int created_announcement = 0;
1662 int bridged = 0, is_bridge = 0;
1663 uint32_t rt_evcode = 0;
1664
1665 /*
1666 * Here and other places within this routine where we don't hold
1667 * rnh_lock, trade accuracy for speed for the common scenarios
1668 * and avoid the use of atomic updates.
1669 */
1670 arpstat.received++;
1671
1672 /* Do not respond to requests for 0.0.0.0 */
1673 if (target_ip->sin_addr.s_addr == INADDR_ANY && arpop == ARPOP_REQUEST) {
1674 goto done;
1675 }
1676
1677 if (ifp->if_bridge) {
1678 bridged = 1;
1679 }
1680 if (ifp->if_type == IFT_BRIDGE) {
1681 is_bridge = 1;
1682 }
1683
1684 if (arpop == ARPOP_REPLY) {
1685 arpstat.rxreplies++;
1686 }
1687
1688 /*
1689 * Determine if this ARP is for us
1690 */
1691 lck_rw_lock_shared(in_ifaddr_rwlock);
1692 TAILQ_FOREACH(ia, INADDR_HASH(target_ip->sin_addr.s_addr), ia_hash) {
1693 IFA_LOCK_SPIN(&ia->ia_ifa);
1694 if (ia->ia_ifp == ifp &&
1695 ia->ia_addr.sin_addr.s_addr == target_ip->sin_addr.s_addr) {
1696 best_ia = ia;
1697 best_ia_sin = best_ia->ia_addr;
1698 IFA_ADDREF_LOCKED(&ia->ia_ifa);
1699 IFA_UNLOCK(&ia->ia_ifa);
1700 lck_rw_done(in_ifaddr_rwlock);
1701 goto match;
1702 }
1703 IFA_UNLOCK(&ia->ia_ifa);
1704 }
1705
1706 TAILQ_FOREACH(ia, INADDR_HASH(sender_ip->sin_addr.s_addr), ia_hash) {
1707 IFA_LOCK_SPIN(&ia->ia_ifa);
1708 if (ia->ia_ifp == ifp &&
1709 ia->ia_addr.sin_addr.s_addr == sender_ip->sin_addr.s_addr) {
1710 best_ia = ia;
1711 best_ia_sin = best_ia->ia_addr;
1712 IFA_ADDREF_LOCKED(&ia->ia_ifa);
1713 IFA_UNLOCK(&ia->ia_ifa);
1714 lck_rw_done(in_ifaddr_rwlock);
1715 goto match;
1716 }
1717 IFA_UNLOCK(&ia->ia_ifa);
1718 }
1719
1720#define BDG_MEMBER_MATCHES_ARP(addr, ifp, ia) \
1721 (ia->ia_ifp->if_bridge == ifp->if_softc && \
1722 bcmp(IF_LLADDR(ia->ia_ifp), IF_LLADDR(ifp), ifp->if_addrlen) == 0 && \
1723 addr == ia->ia_addr.sin_addr.s_addr)
1724 /*
1725 * Check the case when bridge shares its MAC address with
1726 * some of its children, so packets are claimed by bridge
1727 * itself (bridge_input() does it first), but they are really
1728 * meant to be destined to the bridge member.
1729 */
1730 if (is_bridge) {
1731 TAILQ_FOREACH(ia, INADDR_HASH(target_ip->sin_addr.s_addr),
1732 ia_hash) {
1733 IFA_LOCK_SPIN(&ia->ia_ifa);
1734 if (BDG_MEMBER_MATCHES_ARP(target_ip->sin_addr.s_addr,
1735 ifp, ia)) {
1736 ifp = ia->ia_ifp;
1737 best_ia = ia;
1738 best_ia_sin = best_ia->ia_addr;
1739 IFA_ADDREF_LOCKED(&ia->ia_ifa);
1740 IFA_UNLOCK(&ia->ia_ifa);
1741 lck_rw_done(in_ifaddr_rwlock);
1742 goto match;
1743 }
1744 IFA_UNLOCK(&ia->ia_ifa);
1745 }
1746 }
1747#undef BDG_MEMBER_MATCHES_ARP
1748 lck_rw_done(in_ifaddr_rwlock);
1749
1750 /*
1751 * No match, use the first inet address on the receive interface
1752 * as a dummy address for the rest of the function; we may be
1753 * proxying for another address.
1754 */
1755 ifnet_lock_shared(ifp);
1756 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1757 IFA_LOCK_SPIN(ifa);
1758 if (ifa->ifa_addr->sa_family != AF_INET) {
1759 IFA_UNLOCK(ifa);
1760 continue;
1761 }
1762 best_ia = (struct in_ifaddr *)ifa;
1763 best_ia_sin = best_ia->ia_addr;
1764 IFA_ADDREF_LOCKED(ifa);
1765 IFA_UNLOCK(ifa);
1766 ifnet_lock_done(ifp);
1767 goto match;
1768 }
1769 ifnet_lock_done(ifp);
1770
1771 /*
1772 * If we're not a bridge member, or if we are but there's no
1773 * IPv4 address to use for the interface, drop the packet.
1774 */
1775 if (!bridged || best_ia == NULL) {
1776 goto done;
1777 }
1778
1779match:
1780 /* If the packet is from this interface, ignore the packet */
1781 if (bcmp(CONST_LLADDR(sender_hw), IF_LLADDR(ifp),
1782 sender_hw->sdl_alen) == 0) {
1783 goto done;
1784 }
1785
1786 /* Check for a conflict */
1787 if (!bridged &&
1788 sender_ip->sin_addr.s_addr == best_ia_sin.sin_addr.s_addr) {
1789 struct kev_msg ev_msg;
1790 struct kev_in_collision *in_collision;
1791 u_char storage[sizeof(struct kev_in_collision) + MAX_HW_LEN];
1792
1793 bzero(&ev_msg, sizeof(struct kev_msg));
1794 bzero(storage, (sizeof(struct kev_in_collision) + MAX_HW_LEN));
1795 in_collision = (struct kev_in_collision *)(void *)storage;
1796 log(LOG_ERR, "%s duplicate IP address %s sent from "
1797 "address %s\n", if_name(ifp),
1798 inet_ntop(AF_INET, &sender_ip->sin_addr, ipv4str,
1799 sizeof(ipv4str)), sdl_addr_to_hex(sender_hw, buf,
1800 sizeof(buf)));
1801
1802 /* Send a kernel event so anyone can learn of the conflict */
1803 in_collision->link_data.if_family = ifp->if_family;
1804 in_collision->link_data.if_unit = ifp->if_unit;
1805 strlcpy(&in_collision->link_data.if_name[0],
1806 ifp->if_name, IFNAMSIZ);
1807 in_collision->ia_ipaddr = sender_ip->sin_addr;
1808 in_collision->hw_len = (sender_hw->sdl_alen < MAX_HW_LEN) ?
1809 sender_hw->sdl_alen : MAX_HW_LEN;
1810 bcopy(CONST_LLADDR(sender_hw), (caddr_t)in_collision->hw_addr,
1811 in_collision->hw_len);
1812 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1813 ev_msg.kev_class = KEV_NETWORK_CLASS;
1814 ev_msg.kev_subclass = KEV_INET_SUBCLASS;
1815 ev_msg.event_code = KEV_INET_ARPCOLLISION;
1816 ev_msg.dv[0].data_ptr = in_collision;
1817 ev_msg.dv[0].data_length =
1818 sizeof(struct kev_in_collision) + in_collision->hw_len;
1819 ev_msg.dv[1].data_length = 0;
1820 dlil_post_complete_msg(NULL, &ev_msg);
1821 atomic_add_32(&arpstat.dupips, 1);
1822 goto respond;
1823 }
1824
1825 /*
1826 * Look up the routing entry. If it doesn't exist and we are the
1827 * target, and the sender isn't 0.0.0.0, go ahead and create one.
1828 * Callee holds a reference on the route and returns with the route
1829 * entry locked, upon success.
1830 */
1831 error = arp_lookup_route(&sender_ip->sin_addr,
1832 (target_ip->sin_addr.s_addr == best_ia_sin.sin_addr.s_addr &&
1833 sender_ip->sin_addr.s_addr != 0), 0, &route, ifp->if_index);
1834
1835 if (error == 0) {
1836 RT_LOCK_ASSERT_HELD(route);
1837 }
1838
1839 if (error || route == NULL || route->rt_gateway == NULL) {
1840 if (arpop != ARPOP_REQUEST) {
1841 goto respond;
1842 }
1843
1844 if (arp_sendllconflict && send_conflicting_probes != 0 &&
1845 (ifp->if_eflags & IFEF_ARPLL) &&
1846 IN_LINKLOCAL(ntohl(target_ip->sin_addr.s_addr)) &&
1847 sender_ip->sin_addr.s_addr == INADDR_ANY) {
1848 /*
1849 * Verify this ARP probe doesn't conflict with
1850 * an IPv4LL we know of on another interface.
1851 */
1852 if (route != NULL) {
1853 RT_REMREF_LOCKED(route);
1854 RT_UNLOCK(route);
1855 route = NULL;
1856 }
1857 /*
1858 * Callee holds a reference on the route and returns
1859 * with the route entry locked, upon success.
1860 */
1861 error = arp_lookup_route(&target_ip->sin_addr, 0, 0,
1862 &route, ifp->if_index);
1863
1864 if (error != 0 || route == NULL ||
1865 route->rt_gateway == NULL) {
1866 goto respond;
1867 }
1868
1869 RT_LOCK_ASSERT_HELD(route);
1870
1871 gateway = SDL(route->rt_gateway);
1872 if (route->rt_ifp != ifp && gateway->sdl_alen != 0 &&
1873 (gateway->sdl_alen != sender_hw->sdl_alen ||
1874 bcmp(CONST_LLADDR(gateway), CONST_LLADDR(sender_hw),
1875 gateway->sdl_alen) != 0)) {
1876 /*
1877 * A node is probing for an IPv4LL we know
1878 * exists on a different interface. We respond
1879 * with a conflicting probe to force the new
1880 * device to pick a different IPv4LL address.
1881 */
1882 if (arp_verbose || log_arp_warnings) {
1883 log(LOG_INFO, "arp: %s on %s sent "
1884 "probe for %s, already on %s\n",
1885 sdl_addr_to_hex(sender_hw, buf,
1886 sizeof(buf)), if_name(ifp),
1887 inet_ntop(AF_INET,
1888 &target_ip->sin_addr, ipv4str,
1889 sizeof(ipv4str)),
1890 if_name(route->rt_ifp));
1891 log(LOG_INFO, "arp: sending "
1892 "conflicting probe to %s on %s\n",
1893 sdl_addr_to_hex(sender_hw, buf,
1894 sizeof(buf)), if_name(ifp));
1895 }
1896 /* Mark use timestamp */
1897 if (route->rt_llinfo != NULL) {
1898 arp_llreach_use(route->rt_llinfo);
1899 }
1900 /* We're done with the route */
1901 RT_REMREF_LOCKED(route);
1902 RT_UNLOCK(route);
1903 route = NULL;
1904 /*
1905 * Send a conservative unicast "ARP probe".
1906 * This should force the other device to pick
1907 * a new address, though it cannot do so if
1908 * the device has already assigned that
1909 * address; nor does it imply to the device
1910 * that we own the address. The link
1911 * address is always present; it's never
1912 * freed.
1913 */
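/*
 * In the dlil_send_arp_internal() call below, our own link
 * address goes out as the sender hardware address, the prober's
 * all-zero sender IP is echoed back unchanged, and the prober's
 * MAC is passed as the target hardware address, which should
 * keep the probe unicast to the offending node rather than
 * disturbing the whole segment.
 */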
1914 ifnet_lock_shared(ifp);
1915 ifa = ifp->if_lladdr;
1916 IFA_ADDREF(ifa);
1917 ifnet_lock_done(ifp);
1918 dlil_send_arp_internal(ifp, ARPOP_REQUEST,
1919 SDL(ifa->ifa_addr),
1920 (const struct sockaddr *)sender_ip,
1921 sender_hw,
1922 (const struct sockaddr *)target_ip);
1923 IFA_REMREF(ifa);
1924 ifa = NULL;
1925 atomic_add_32(&arpstat.txconflicts, 1);
1926 }
1927 goto respond;
1928 } else if (keep_announcements != 0 &&
1929 target_ip->sin_addr.s_addr == sender_ip->sin_addr.s_addr) {
1930 /*
1931 * Don't create an entry for a link-local address
1932 * if link-local ARP is disabled on the interface.
1933 */
1934 if (!IN_LINKLOCAL(ntohl(sender_ip->sin_addr.s_addr)) ||
1935 (ifp->if_eflags & IFEF_ARPLL)) {
1936 if (route != NULL) {
1937 RT_REMREF_LOCKED(route);
1938 RT_UNLOCK(route);
1939 route = NULL;
1940 }
1941 /*
1942 * Callee holds a reference on the route and
1943 * returns with the route entry locked, upon
1944 * success.
1945 */
1946 error = arp_lookup_route(&sender_ip->sin_addr,
1947 1, 0, &route, ifp->if_index);
1948
1949 if (error == 0) {
1950 RT_LOCK_ASSERT_HELD(route);
1951 }
1952
1953 if (error == 0 && route != NULL &&
1954 route->rt_gateway != NULL) {
1955 created_announcement = 1;
1956 }
1957 }
1958 if (created_announcement == 0) {
1959 goto respond;
1960 }
1961 } else {
1962 goto respond;
1963 }
1964 }
1965
1966 RT_LOCK_ASSERT_HELD(route);
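/*
 * rt_setexpire() keeps rt_expire and rt_rmx.rmx_expire in sync;
 * the two VERIFYs below assert that invariant: both zero for a
 * permanent entry, both non-zero for one subject to aging.
 */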
1967 VERIFY(route->rt_expire == 0 || route->rt_rmx.rmx_expire != 0);
1968 VERIFY(route->rt_expire != 0 || route->rt_rmx.rmx_expire == 0);
1969
1970 gateway = SDL(route->rt_gateway);
1971 if (!bridged && route->rt_ifp != ifp) {
1972 if (!IN_LINKLOCAL(ntohl(sender_ip->sin_addr.s_addr)) ||
1973 !(ifp->if_eflags & IFEF_ARPLL)) {
1974 if (arp_verbose || log_arp_warnings) {
1975 log(LOG_ERR, "arp: %s is on %s but got "
1976 "reply from %s on %s\n",
1977 inet_ntop(AF_INET, &sender_ip->sin_addr,
1978 ipv4str, sizeof(ipv4str)),
1979 if_name(route->rt_ifp),
1980 sdl_addr_to_hex(sender_hw, buf,
1981 sizeof(buf)), if_name(ifp));
1982 }
1983 goto respond;
1984 } else {
1985 /* Don't change a permanent address */
1986 if (route->rt_expire == 0) {
1987 goto respond;
1988 }
1989
1990 /*
1991 * We're about to check and/or change the route's ifp
1992 * and ifa, so do the lock dance: drop rt_lock, hold
1993 * rnh_lock and re-hold rt_lock to avoid violating the
1994 * lock ordering. We have an extra reference on the
1995 * route, so it won't go away while we do this.
1996 */
1997 RT_UNLOCK(route);
1998 lck_mtx_lock(rnh_lock);
1999 RT_LOCK(route);
2000 /*
2001 * Don't change the cloned route away from the
2002 * parent's interface if the address did resolve
2003 * or if the route is defunct. rt_ifp on both
2004 * the parent and the clone can be freely
2005 * accessed now that we have acquired rnh_lock.
2006 */
2007 gateway = SDL(route->rt_gateway);
2008 if ((gateway->sdl_alen != 0 &&
2009 route->rt_parent != NULL &&
2010 route->rt_parent->rt_ifp == route->rt_ifp) ||
2011 (route->rt_flags & RTF_CONDEMNED)) {
2012 RT_REMREF_LOCKED(route);
2013 RT_UNLOCK(route);
2014 route = NULL;
2015 lck_mtx_unlock(rnh_lock);
2016 goto respond;
2017 }
2018 if (route->rt_ifp != ifp) {
2019 /*
2020 * Purge any link-layer info caching.
2021 */
2022 if (route->rt_llinfo_purge != NULL) {
2023 route->rt_llinfo_purge(route);
2024 }
2025
2026 /* Adjust route ref count for the interfaces */
2027 if (route->rt_if_ref_fn != NULL) {
2028 route->rt_if_ref_fn(ifp, 1);
2029 route->rt_if_ref_fn(route->rt_ifp, -1);
2030 }
2031 }
2032 /* Change the interface the existing route is on */
2033 route->rt_ifp = ifp;
2034 /*
2035 * If rmx_mtu is not locked, update it
2036 * to the MTU used by the new interface.
2037 */
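/*
 * With CLAT46 (RFC 6877 464XLAT) active, outbound IPv4 traffic
 * is translated to IPv6, whose base header is 20 bytes larger;
 * starting from the IPv6 link MTU and subtracting the expansion
 * overhead leaves room for that growth.
 */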
2038 if (!(route->rt_rmx.rmx_locks & RTV_MTU)) {
2039 route->rt_rmx.rmx_mtu = route->rt_ifp->if_mtu;
2040 if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
2041 route->rt_rmx.rmx_mtu = IN6_LINKMTU(route->rt_ifp);
2042 /* Further adjust the size for CLAT46 expansion */
2043 route->rt_rmx.rmx_mtu -= CLAT46_HDR_EXPANSION_OVERHD;
2044 }
2045 }
2046
2047 rtsetifa(route, &best_ia->ia_ifa);
2048 gateway->sdl_index = ifp->if_index;
2049 RT_UNLOCK(route);
2050 lck_mtx_unlock(rnh_lock);
2051 RT_LOCK(route);
2052 /* Don't bother if the route is down */
2053 if (!(route->rt_flags & RTF_UP)) {
2054 goto respond;
2055 }
2056 /* Refresh gateway pointer */
2057 gateway = SDL(route->rt_gateway);
2058 }
2059 RT_LOCK_ASSERT_HELD(route);
2060 }
2061
2062 if (gateway->sdl_alen != 0 && bcmp(LLADDR(gateway),
2063 CONST_LLADDR(sender_hw), gateway->sdl_alen) != 0) {
2064 if (route->rt_expire != 0 &&
2065 (arp_verbose || log_arp_warnings)) {
2066 char buf2[3 * MAX_HW_LEN];
2067 log(LOG_INFO, "arp: %s moved from %s to %s on %s\n",
2068 inet_ntop(AF_INET, &sender_ip->sin_addr, ipv4str,
2069 sizeof(ipv4str)),
2070 sdl_addr_to_hex(gateway, buf, sizeof(buf)),
2071 sdl_addr_to_hex(sender_hw, buf2, sizeof(buf2)),
2072 if_name(ifp));
2073 } else if (route->rt_expire == 0) {
2074 if (arp_verbose || log_arp_warnings) {
2075 log(LOG_ERR, "arp: %s attempts to modify "
2076 "permanent entry for %s on %s\n",
2077 sdl_addr_to_hex(sender_hw, buf,
2078 sizeof(buf)),
2079 inet_ntop(AF_INET, &sender_ip->sin_addr,
2080 ipv4str, sizeof(ipv4str)),
2081 if_name(ifp));
2082 }
2083 goto respond;
2084 }
2085 }
2086
2087 /* Copy the sender hardware address into the route's gateway address */
2088 gateway->sdl_alen = sender_hw->sdl_alen;
2089 bcopy(CONST_LLADDR(sender_hw), LLADDR(gateway), gateway->sdl_alen);
2090
2091 /* Update the expire time for the route and clear the reject flag */
2092 if (route->rt_expire != 0) {
2093 rt_setexpire(route, net_uptime() + arpt_keep);
2094 }
2095 route->rt_flags &= ~RTF_REJECT;
2096
2097 /* Cache the gateway (sender HW) address */
2098 arp_llreach_alloc(route, ifp, LLADDR(gateway), gateway->sdl_alen,
2099 (arpop == ARPOP_REPLY), &rt_evcode);
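/*
 * arp_llreach_alloc() records link-layer reachability state for
 * this neighbor; (arpop == ARPOP_REPLY) distinguishes a solicited
 * reply from an unsolicited announcement, and rt_evcode is filled
 * in with a route event code (e.g. the entry's MAC changed) that
 * is acted upon below.
 */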
2100
2101 llinfo = route->rt_llinfo;
2102 /* Send a notification that the route is back up */
2103 if (ifp->if_addrlen == IF_LLREACH_MAXLEN &&
2104 route->rt_flags & RTF_ROUTER &&
2105 llinfo->la_flags & LLINFO_RTRFAIL_EVTSENT) {
2106 struct kev_msg ev_msg;
2107 struct kev_in_arpalive in_arpalive;
2108
2109 llinfo->la_flags &= ~LLINFO_RTRFAIL_EVTSENT;
2110 RT_UNLOCK(route);
2111 bzero(&ev_msg, sizeof(ev_msg));
2112 bzero(&in_arpalive, sizeof(in_arpalive));
2113 in_arpalive.link_data.if_family = ifp->if_family;
2114 in_arpalive.link_data.if_unit = ifp->if_unit;
2115 strlcpy(in_arpalive.link_data.if_name, ifp->if_name, IFNAMSIZ);
2116 ev_msg.vendor_code = KEV_VENDOR_APPLE;
2117 ev_msg.kev_class = KEV_NETWORK_CLASS;
2118 ev_msg.kev_subclass = KEV_INET_SUBCLASS;
2119 ev_msg.event_code = KEV_INET_ARPRTRALIVE;
2120 ev_msg.dv[0].data_ptr = &in_arpalive;
2121 ev_msg.dv[0].data_length = sizeof(struct kev_in_arpalive);
2122 dlil_post_complete_msg(NULL, &ev_msg);
2123 RT_LOCK(route);
2124 }
2125 /* Update the llinfo and send out all queued packets at once */
2126 llinfo->la_asked = 0;
2127 llinfo->la_flags &= ~LLINFO_PROBING;
2128 llinfo->la_prbreq_cnt = 0;
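/*
 * The traffic just received is positive proof of reachability,
 * so any pending probe state is reset above.
 */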
2129
2130 if (rt_evcode) {
2131 /*
2132 * Enqueue a work item to invoke the callback for this route entry
2133 */
2134 route_event_enqueue_nwk_wq_entry(route, NULL, rt_evcode, NULL, TRUE);
2135
2136 if (route->rt_flags & RTF_ROUTER) {
2137 struct radix_node_head *rnh = NULL;
2138 struct route_event rt_ev;
2139 route_event_init(&rt_ev, route, NULL, rt_evcode);
2140 /*
2141 * We already have a reference on rt. The function
2142 * frees it before returning.
2143 */
2144 RT_UNLOCK(route);
2145 lck_mtx_lock(rnh_lock);
2146 rnh = rt_tables[AF_INET];
2147
2148 if (rnh != NULL) {
2149 (void) rnh->rnh_walktree(rnh, route_event_walktree,
2150 (void *)&rt_ev);
2151 }
2152 lck_mtx_unlock(rnh_lock);
2153 RT_LOCK(route);
2154 }
2155 }
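/*
 * For a router entry, the walk above propagates the event across
 * the IPv4 routing table, presumably so that routes using this
 * entry as their gateway can revalidate what they have cached.
 */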
2156
2157 if (!qempty(&llinfo->la_holdq)) {
2158 uint32_t held;
2159 struct mbuf *m0;
2160 classq_pkt_t pkt = CLASSQ_PKT_INITIALIZER(pkt);
2161
2162 _getq_all(&llinfo->la_holdq, &pkt, NULL, &held, NULL);
2163 m0 = pkt.cp_mbuf;
2164 if (arp_verbose) {
2165 log(LOG_DEBUG, "%s: sending %u held packets\n",
2166 __func__, held);
2167 }
2168 atomic_add_32(&arpstat.held, -held);
2169 VERIFY(qempty(&llinfo->la_holdq));
2170 RT_UNLOCK(route);
2171 dlil_output(ifp, PF_INET, m0, (caddr_t)route,
2172 rt_key(route), 0, NULL);
2173 RT_REMREF(route);
2174 route = NULL;
2175 }
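/*
 * dlil_output() takes ownership of the entire mbuf chain m0, so
 * once the held packets are handed off, the route reference is
 * dropped and the local pointer cleared.
 */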
2176
2177respond:
2178 if (route != NULL) {
2179 /* Mark use timestamp if we're going to send a reply */
2180 if (arpop == ARPOP_REQUEST && route->rt_llinfo != NULL) {
2181 arp_llreach_use(route->rt_llinfo);
2182 }
2183 RT_REMREF_LOCKED(route);
2184 RT_UNLOCK(route);
2185 route = NULL;
2186 }
2187
2188 if (arpop != ARPOP_REQUEST) {
2189 goto done;
2190 }
2191
2192 /* See comments at the beginning of this routine */
2193 arpstat.rxrequests++;
2194
2195 /* If we are not the target, check if we should proxy */
2196 if (target_ip->sin_addr.s_addr != best_ia_sin.sin_addr.s_addr) {
2197 /*
2198 * Find a proxy route; callee holds a reference on the
2199 * route and returns with the route entry locked, upon
2200 * success.
2201 */
2202 error = arp_lookup_route(&target_ip->sin_addr, 0, SIN_PROXY,
2203 &route, ifp->if_index);
2204
2205 if (error == 0) {
2206 RT_LOCK_ASSERT_HELD(route);
2207 /*
2208 * Return proxied ARP replies only on the interface
2209 * or bridge cluster where this network resides.
2210 * Otherwise we may conflict with the host we are
2211 * proxying for.
2212 */
2213 if (route->rt_ifp != ifp &&
2214 (route->rt_ifp->if_bridge != ifp->if_bridge ||
2215 ifp->if_bridge == NULL)) {
2216 RT_REMREF_LOCKED(route);
2217 RT_UNLOCK(route);
2218 goto done;
2219 }
2220 proxied = *SDL(route->rt_gateway);
2221 target_hw = &proxied;
2222 } else {
2223 /*
2224 * We don't have a route entry indicating we should
2225 * use proxy; unless we're configured to proxy for
2226 * all addresses (arp_proxyall), we are done.
2227 */
2228 if (!arp_proxyall) {
2229 goto done;
2230 }
2231
2232 /*
2233 * See if we have a route to the target IP before
2234 * we proxy it.
2235 */
2236 route = rtalloc1_scoped((struct sockaddr *)
2237 (size_t)target_ip, 0, 0, ifp->if_index);
2238 if (!route) {
2239 goto done;
2240 }
2241
2242 /*
2243 * Don't proxy for hosts already on the same interface.
2244 */
2245 RT_LOCK(route);
2246 if (route->rt_ifp == ifp) {
2247 RT_UNLOCK(route);
2248 rtfree(route);
2249 goto done;
2250 }
2251 }
2252 /* Mark use timestamp */
2253 if (route->rt_llinfo != NULL) {
2254 arp_llreach_use(route->rt_llinfo);
2255 }
2256 RT_REMREF_LOCKED(route);
2257 RT_UNLOCK(route);
2258 }
2259
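/*
 * The reply's source address pair is (target_hw, target_ip); in
 * the non-proxy case target_hw is still NULL, and dlil_send_arp()
 * is expected to substitute this interface's own link-layer
 * address for it.
 */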
2260 dlil_send_arp(ifp, ARPOP_REPLY,
2261 target_hw, (const struct sockaddr *)target_ip,
2262 sender_hw, (const struct sockaddr *)sender_ip, 0);
2263
2264done:
2265 if (best_ia != NULL) {
2266 IFA_REMREF(&best_ia->ia_ifa);
2267 }
2268 return 0;
2269}
2270
2271void
2272arp_ifinit(struct ifnet *ifp, struct ifaddr *ifa)
2273{
2274 struct sockaddr *sa;
2275
2276 IFA_LOCK(ifa);
2277 ifa->ifa_rtrequest = arp_rtrequest;
2278 ifa->ifa_flags |= RTF_CLONING;
2279 sa = ifa->ifa_addr;
2280 IFA_UNLOCK(ifa);
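/*
 * Announce the address: with the same protocol address as both
 * sender and target and no target hardware address, this goes out
 * as a broadcast gratuitous ARP, letting neighbors update any
 * stale mapping for the address.
 */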
2281 dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa, NULL, sa, 0);
2282}
2283
2284static int
2285arp_getstat SYSCTL_HANDLER_ARGS
2286{
2287#pragma unused(oidp, arg1, arg2)
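/*
 * Standard two-phase sysctl protocol: a NULL oldptr is a size
 * probe, so report how many bytes the caller must allocate
 * before the real fetch.
 */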
2288 if (req->oldptr == USER_ADDR_NULL) {
2289 req->oldlen = (size_t)sizeof(struct arpstat);
2290 }
2291
2292 return SYSCTL_OUT(req, &arpstat, MIN(sizeof(arpstat), req->oldlen));
2293}
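/*
 * A userspace consumer would read these counters with the usual
 * sysctl two-step; a minimal sketch, assuming this handler is
 * registered elsewhere in this file under the OID name
 * "net.link.ether.inet.stats":
 *
 *	struct arpstat st;
 *	size_t len = sizeof (st);
 *
 *	if (sysctlbyname("net.link.ether.inet.stats",
 *	    &st, &len, NULL, 0) == 0)
 *		printf("%u ARP requests received\n", st.rxrequests);
 */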