]> git.saurik.com Git - apple/xnu.git/blame - bsd/netinet/in_arp.c
xnu-6153.81.5.tar.gz
[apple/xnu.git] / bsd / netinet / in_arp.c
CommitLineData
91447636 1/*
5ba3f43e 2 * Copyright (c) 2004-2017 Apple Inc. All rights reserved.
5d5c5d0d 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
39236c6e 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
39236c6e 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
39236c6e 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
39236c6e 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
91447636
A
27 */
28/*
29 * Copyright (c) 1982, 1989, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 */
61
62#include <kern/debug.h>
63#include <netinet/in_arp.h>
64#include <sys/types.h>
65#include <sys/param.h>
66#include <sys/kernel_types.h>
67#include <sys/syslog.h>
68#include <sys/systm.h>
69#include <sys/time.h>
70#include <sys/kernel.h>
71#include <sys/mbuf.h>
72#include <sys/sysctl.h>
6d2010ae
A
73#include <sys/mcache.h>
74#include <sys/protosw.h>
91447636
A
75#include <string.h>
76#include <net/if_arp.h>
77#include <net/if_dl.h>
78#include <net/dlil.h>
b7266188 79#include <net/if_types.h>
6d2010ae 80#include <net/if_llreach.h>
91447636 81#include <net/route.h>
5ba3f43e 82#include <net/nwk_wq.h>
39236c6e 83
91447636
A
84#include <netinet/if_ether.h>
85#include <netinet/in_var.h>
d9a64523
A
86#include <netinet/ip.h>
87#include <netinet/ip6.h>
b0d623f7 88#include <kern/zalloc.h>
91447636 89
39037602
A
90#include <kern/thread.h>
91#include <kern/sched_prim.h>
92
/* Pointer to the link-layer address bytes inside a sockaddr_dl */
#define CONST_LLADDR(s) ((const u_char*)((s)->sdl_data + (s)->sdl_nlen))

/* Upper bound on hardware (link-layer) address length we handle */
static const size_t MAX_HW_LEN = 10;
96
b0d623f7
A
/*
 * Synchronization notes:
 *
 * The global list of ARP entries are stored in llinfo_arp; an entry
 * gets inserted into the list when the route is created and gets
 * removed from the list when it is deleted; this is done as part
 * of RTM_ADD/RTM_RESOLVE/RTM_DELETE in arp_rtrequest().
 *
 * Because rnh_lock and rt_lock for the entry are held during those
 * operations, the same locks (and thus lock ordering) must be used
 * elsewhere to access the relevant data structure fields:
 *
 *	la_le.{le_next,le_prev}, la_rt
 *
 *	- Routing lock (rnh_lock)
 *
 *	la_holdq, la_asked, la_llreach, la_lastused, la_flags
 *
 *	- Routing entry lock (rt_lock)
 *
 * Due to the dependency on rt_lock, llinfo_arp has the same lifetime
 * as the route entry itself.  When a route is deleted (RTM_DELETE),
 * it is simply removed from the global list but the memory is not
 * freed until the route itself is freed.
 */
struct llinfo_arp {
	/*
	 * The following are protected by rnh_lock
	 */
	LIST_ENTRY(llinfo_arp) la_le;           /* global list linkage */
	struct rtentry *la_rt;                  /* back-pointer to route */
	/*
	 * The following are protected by rt_lock
	 */
	class_queue_t la_holdq;                 /* packets awaiting resolution */
	struct if_llreach *la_llreach;          /* link-layer reachability record */
	u_int64_t la_lastused;                  /* last used timestamp */
	u_int32_t la_asked;                     /* # of requests sent */
	u_int32_t la_maxtries;                  /* retry limit */
	u_int64_t la_probeexp;                  /* probe deadline timestamp */
	u_int32_t la_prbreq_cnt;                /* probe request count */
	u_int32_t la_flags;
#define LLINFO_RTRFAIL_EVTSENT  0x1             /* sent an ARP event */
#define LLINFO_PROBING          0x2             /* waiting for an ARP reply */
};
5ba3f43e 142
91447636
A
/* Global list of all ARP entries; protected by rnh_lock */
static LIST_HEAD(, llinfo_arp) llinfo_arp;

/* Aging timer state (see arp_timeout/arp_sched_timeout) */
static thread_call_t arp_timeout_tcall;
static int arp_timeout_run;             /* arp_timeout is scheduled to run */
static void arp_timeout(thread_call_param_t arg0, thread_call_param_t arg1);
static void arp_sched_timeout(struct timeval *);

/* Probe timer state (see arp_probe/arp_sched_probe) */
static thread_call_t arp_probe_tcall;
static int arp_probe_run;               /* arp_probe is scheduled to run */
static void arp_probe(thread_call_param_t arg0, thread_call_param_t arg1);
static void arp_sched_probe(struct timeval *);

static void arptfree(struct llinfo_arp *, void *);
static errno_t arp_lookup_route(const struct in_addr *, int,
    int, route_t *, unsigned int);
static int arp_getstat SYSCTL_HANDLER_ARGS;

static struct llinfo_arp *arp_llinfo_alloc(int);
static void arp_llinfo_free(void *);
static uint32_t arp_llinfo_flushq(struct llinfo_arp *);
static void arp_llinfo_purge(struct rtentry *);
static void arp_llinfo_get_ri(struct rtentry *, struct rt_reach_info *);
static void arp_llinfo_get_iflri(struct rtentry *, struct ifnet_llreach_info *);
static void arp_llinfo_refresh(struct rtentry *);

static __inline void arp_llreach_use(struct llinfo_arp *);
static __inline int arp_llreach_reachable(struct llinfo_arp *);
static void arp_llreach_alloc(struct rtentry *, struct ifnet *, void *,
    unsigned int, boolean_t, uint32_t *);

extern int tvtohz(struct timeval *);

/* Set once by arp_init(); guards against double initialization */
static int arpinit_done;

SYSCTL_DECL(_net_link_ether);
SYSCTL_NODE(_net_link_ether, PF_INET, inet, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");

/* Tunables exported under net.link.ether.inet.* */
static int arpt_prune = (5 * 60 * 1);   /* walk list every 5 minutes */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, prune_intvl,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_prune, 0, "");

#define ARP_PROBE_TIME 7                /* seconds */
static u_int32_t arpt_probe = ARP_PROBE_TIME;
SYSCTL_UINT(_net_link_ether_inet, OID_AUTO, probe_intvl,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_probe, 0, "");

static int arpt_keep = (20 * 60);       /* once resolved, good for 20 more minutes */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, max_age,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_keep, 0, "");

static int arpt_down = 20;      /* once declared down, don't send for 20 sec */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, host_down_time,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_down, 0, "");

static int arp_llreach_base = 120;      /* seconds; 0 disables llreach */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, arp_llreach_base,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_llreach_base, 0,
    "default ARP link-layer reachability max lifetime (in seconds)");

#define ARP_UNICAST_LIMIT 3     /* # of probes until ARP refresh broadcast */
static u_int32_t arp_unicast_lim = ARP_UNICAST_LIMIT;
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, arp_unicast_lim,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_unicast_lim, ARP_UNICAST_LIMIT,
    "number of unicast ARP refresh probes before using broadcast");

static u_int32_t arp_maxtries = 5;
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxtries,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxtries, 0, "");

static u_int32_t arp_maxhold = 16;      /* per-entry hold queue limit; 0 = unlimited */
SYSCTL_UINT(_net_link_ether_inet, OID_AUTO, maxhold,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxhold, 0, "");

static int useloopback = 1;     /* use loopback interface for local traffic */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, useloopback,
    CTLFLAG_RW | CTLFLAG_LOCKED, &useloopback, 0, "");

static int arp_proxyall = 0;
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, proxyall,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_proxyall, 0, "");

static int arp_sendllconflict = 0;
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, sendllconflict,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_sendllconflict, 0, "");

static int log_arp_warnings = 0;        /* Thread safe: no accumulated state */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_warnings,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &log_arp_warnings, 0,
    "log arp warning messages");

static int keep_announcements = 1;      /* Thread safe: no aging of state */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, keep_announcements,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &keep_announcements, 0,
    "keep arp announcements");

static int send_conflicting_probes = 1; /* Thread safe: no accumulated state */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, send_conflicting_probes,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &send_conflicting_probes, 0,
    "send conflicting link-local arp probes");

static int arp_verbose;
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, verbose,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_verbose, 0, "");

/* NOTE(review): variable is uint32_t but exported via SYSCTL_INT — confirm intended */
static uint32_t arp_maxhold_total = 1024; /* max total packets in the holdq */
SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxhold_total,
    CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxhold_total, 0, "");


/*
 * Generally protected by rnh_lock; use atomic operations on fields
 * that are also modified outside of that lock (if needed).
 */
struct arpstat arpstat __attribute__((aligned(sizeof(uint64_t))));
SYSCTL_PROC(_net_link_ether_inet, OID_AUTO, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, arp_getstat, "S,arpstat",
    "ARP statistics (struct arpstat, net/if_arp.h)");

/* Zone from which struct llinfo_arp entries are allocated */
static struct zone *llinfo_arp_zone;
#define LLINFO_ARP_ZONE_MAX     256             /* maximum elements in zone */
#define LLINFO_ARP_ZONE_NAME    "llinfo_arp"    /* name for zone */
b0d623f7
A
268
269void
270arp_init(void)
271{
39236c6e 272 VERIFY(!arpinit_done);
b0d623f7
A
273
274 LIST_INIT(&llinfo_arp);
275
0a7de745
A
276 llinfo_arp_zone = zinit(sizeof(struct llinfo_arp),
277 LLINFO_ARP_ZONE_MAX * sizeof(struct llinfo_arp), 0,
b0d623f7 278 LLINFO_ARP_ZONE_NAME);
0a7de745 279 if (llinfo_arp_zone == NULL) {
b0d623f7 280 panic("%s: failed allocating llinfo_arp_zone", __func__);
0a7de745 281 }
b0d623f7
A
282
283 zone_change(llinfo_arp_zone, Z_EXPAND, TRUE);
6d2010ae 284 zone_change(llinfo_arp_zone, Z_CALLERACCT, FALSE);
b0d623f7
A
285
286 arpinit_done = 1;
b0d623f7
A
287}
288
289static struct llinfo_arp *
39236c6e 290arp_llinfo_alloc(int how)
b0d623f7 291{
39236c6e
A
292 struct llinfo_arp *la;
293
294 la = (how == M_WAITOK) ? zalloc(llinfo_arp_zone) :
295 zalloc_noblock(llinfo_arp_zone);
39037602 296 if (la != NULL) {
0a7de745 297 bzero(la, sizeof(*la));
39037602
A
298 /*
299 * The type of queue (Q_DROPHEAD) here is just a hint;
300 * the actual logic that works on this queue performs
301 * a head drop, details in arp_llinfo_addq().
302 */
303 _qinit(&la->la_holdq, Q_DROPHEAD, (arp_maxhold == 0) ?
5ba3f43e 304 (uint32_t)-1 : arp_maxhold, QP_MBUF);
39037602 305 }
39236c6e 306
0a7de745 307 return la;
b0d623f7
A
308}
309
/*
 * Release an llinfo_arp back to its zone.  The entry must already be
 * unlinked from the global list (panics otherwise); any packets still
 * held awaiting resolution are purged and any cached link-layer info
 * is released via the route's purge callback before freeing.
 */
static void
arp_llinfo_free(void *arg)
{
	struct llinfo_arp *la = arg;

	if (la->la_le.le_next != NULL || la->la_le.le_prev != NULL) {
		panic("%s: trying to free %p when it is in use", __func__, la);
		/* NOTREACHED */
	}

	/* Free any held packets */
	(void) arp_llinfo_flushq(la);

	/* Purge any link-layer info caching */
	VERIFY(la->la_rt->rt_llinfo == la);
	if (la->la_rt->rt_llinfo_purge != NULL) {
		la->la_rt->rt_llinfo_purge(la->la_rt);
	}

	zfree(llinfo_arp_zone, la);
}
331
/*
 * Enqueue a packet on the entry's hold queue to await ARP resolution.
 *
 * Returns true if the packet was enqueued, false if it was dropped
 * because the global held-packet budget (arp_maxhold_total) is
 * exhausted.  When the per-entry queue is at its limit, one packet
 * already queued (lowest service class below CTL, else the head) is
 * dropped to make room.  Statistics are maintained with atomics.
 */
static bool
arp_llinfo_addq(struct llinfo_arp *la, struct mbuf *m)
{
	classq_pkt_t pkt = CLASSQ_PKT_INITIALIZER(pkt);

	/* Global cap across all ARP entries: refuse rather than enqueue */
	if (arpstat.held >= arp_maxhold_total) {
		if (arp_verbose) {
			log(LOG_DEBUG,
			    "%s: dropping packet due to maxhold_total\n",
			    __func__);
		}
		atomic_add_32(&arpstat.dropped, 1);
		return false;
	}

	/* Per-entry queue full: evict one packet to make room */
	if (qlen(&la->la_holdq) >= qlimit(&la->la_holdq)) {
		struct mbuf *_m;
		/* prune less than CTL, else take what's at the head */
		_getq_scidx_lt(&la->la_holdq, &pkt, SCIDX_CTL);
		_m = pkt.cp_mbuf;
		if (_m == NULL) {
			_getq(&la->la_holdq, &pkt);
			_m = pkt.cp_mbuf;
		}
		VERIFY(_m != NULL);
		if (arp_verbose) {
			log(LOG_DEBUG, "%s: dropping packet (scidx %u)\n",
			    __func__, MBUF_SCIDX(mbuf_get_service_class(_m)));
		}
		m_freem(_m);
		atomic_add_32(&arpstat.dropped, 1);
		atomic_add_32(&arpstat.held, -1);
	}
	CLASSQ_PKT_INIT_MBUF(&pkt, m);
	_addq(&la->la_holdq, &pkt);
	atomic_add_32(&arpstat.held, 1);
	if (arp_verbose) {
		log(LOG_DEBUG, "%s: enqueued packet (scidx %u), qlen now %u\n",
		    __func__, MBUF_SCIDX(mbuf_get_service_class(m)),
		    qlen(&la->la_holdq));
	}

	return true;
}
376
/*
 * Drop every packet held on the entry's queue awaiting resolution and
 * reset the pure-probe request count.  Returns the number of packets
 * purged (statistics are updated atomically).
 */
static uint32_t
arp_llinfo_flushq(struct llinfo_arp *la)
{
	uint32_t held = qlen(&la->la_holdq);

	if (held != 0) {
		atomic_add_32(&arpstat.purged, held);
		atomic_add_32(&arpstat.held, -held);
		_flushq(&la->la_holdq);
	}
	la->la_prbreq_cnt = 0;
	VERIFY(qempty(&la->la_holdq));
	return held;
}
391
6d2010ae
A
/*
 * rt_llinfo_purge callback: release the link-layer reachability
 * record cached on this route's ARP entry and clear its last-used
 * timestamp.  Called with rt_lock held; the lock is converted to a
 * regular mutex before ifnet_llreach_free().
 */
static void
arp_llinfo_purge(struct rtentry *rt)
{
	struct llinfo_arp *la = rt->rt_llinfo;

	RT_LOCK_ASSERT_HELD(rt);
	VERIFY(rt->rt_llinfo_purge == arp_llinfo_purge && la != NULL);

	if (la->la_llreach != NULL) {
		RT_CONVERT_LOCK(rt);
		ifnet_llreach_free(la->la_llreach);
		la->la_llreach = NULL;
	}
	la->la_lastused = 0;
}
407
408static void
409arp_llinfo_get_ri(struct rtentry *rt, struct rt_reach_info *ri)
410{
411 struct llinfo_arp *la = rt->rt_llinfo;
412 struct if_llreach *lr = la->la_llreach;
413
414 if (lr == NULL) {
0a7de745 415 bzero(ri, sizeof(*ri));
316670eb
A
416 ri->ri_rssi = IFNET_RSSI_UNKNOWN;
417 ri->ri_lqm = IFNET_LQM_THRESH_OFF;
418 ri->ri_npm = IFNET_NPM_THRESH_UNKNOWN;
6d2010ae
A
419 } else {
420 IFLR_LOCK(lr);
421 /* Export to rt_reach_info structure */
422 ifnet_lr2ri(lr, ri);
316670eb
A
423 /* Export ARP send expiration (calendar) time */
424 ri->ri_snd_expire =
425 ifnet_llreach_up2calexp(lr, la->la_lastused);
426 IFLR_UNLOCK(lr);
427 }
428}
429
430static void
431arp_llinfo_get_iflri(struct rtentry *rt, struct ifnet_llreach_info *iflri)
432{
433 struct llinfo_arp *la = rt->rt_llinfo;
434 struct if_llreach *lr = la->la_llreach;
435
436 if (lr == NULL) {
0a7de745 437 bzero(iflri, sizeof(*iflri));
316670eb
A
438 iflri->iflri_rssi = IFNET_RSSI_UNKNOWN;
439 iflri->iflri_lqm = IFNET_LQM_THRESH_OFF;
440 iflri->iflri_npm = IFNET_NPM_THRESH_UNKNOWN;
441 } else {
442 IFLR_LOCK(lr);
443 /* Export to ifnet_llreach_info structure */
444 ifnet_lr2iflri(lr, iflri);
445 /* Export ARP send expiration (uptime) time */
446 iflri->iflri_snd_expire =
447 ifnet_llreach_up2upexp(lr, la->la_lastused);
6d2010ae
A
448 IFLR_UNLOCK(lr);
449 }
450}
451
3e170ce0
A
452static void
453arp_llinfo_refresh(struct rtentry *rt)
454{
455 uint64_t timenow = net_uptime();
456 /*
457 * If route entry is permanent or if expiry is less
458 * than timenow and extra time taken for unicast probe
459 * we can't expedite the refresh
460 */
461 if ((rt->rt_expire == 0) ||
462 (rt->rt_flags & RTF_STATIC) ||
463 !(rt->rt_flags & RTF_LLINFO)) {
464 return;
465 }
466
0a7de745 467 if (rt->rt_expire > timenow) {
39037602 468 rt->rt_expire = timenow;
0a7de745 469 }
3e170ce0
A
470 return;
471}
472
6d2010ae
A
473void
474arp_llreach_set_reachable(struct ifnet *ifp, void *addr, unsigned int alen)
475{
476 /* Nothing more to do if it's disabled */
0a7de745 477 if (arp_llreach_base == 0) {
6d2010ae 478 return;
0a7de745 479 }
6d2010ae
A
480
481 ifnet_llreach_set_reachable(ifp, ETHERTYPE_IP, addr, alen);
482}
483
484static __inline void
485arp_llreach_use(struct llinfo_arp *la)
486{
0a7de745 487 if (la->la_llreach != NULL) {
6d2010ae 488 la->la_lastused = net_uptime();
0a7de745 489 }
6d2010ae
A
490}
491
/*
 * Decide whether the host behind this ARP entry is still considered
 * reachable at the link layer.  Returns 1 (reachable, no probe
 * needed) or 0 (probe needed).  Reachability is assumed when the
 * feature is disabled or when no llreach record exists.
 */
static __inline int
arp_llreach_reachable(struct llinfo_arp *la)
{
	struct if_llreach *lr;
	const char *why = NULL;

	/* Nothing more to do if it's disabled; pretend it's reachable */
	if (arp_llreach_base == 0) {
		return 1;
	}

	if ((lr = la->la_llreach) == NULL) {
		/*
		 * Link-layer reachability record isn't present for this
		 * ARP entry; pretend it's reachable and use it as is.
		 */
		return 1;
	} else if (ifnet_llreach_reachable(lr)) {
		/*
		 * Record is present, it's not shared with other ARP
		 * entries and a packet has recently been received
		 * from the remote host; consider it reachable.
		 */
		if (lr->lr_reqcnt == 1) {
			return 1;
		}

		/* Prime it up, if this is the first time */
		if (la->la_lastused == 0) {
			VERIFY(la->la_llreach != NULL);
			arp_llreach_use(la);
		}

		/*
		 * Record is present and shared with one or more ARP
		 * entries, and a packet has recently been received
		 * from the remote host.  Since it's shared by more
		 * than one IP addresses, we can't rely on the link-
		 * layer reachability alone; consider it reachable if
		 * this ARP entry has been used "recently."
		 */
		if (ifnet_llreach_reachable_delta(lr, la->la_lastused)) {
			return 1;
		}

		why = "has alias(es) and hasn't been used in a while";
	} else {
		why = "haven't heard from it in a while";
	}

	/* Falling through here means a probe is needed; maybe log why */
	if (arp_verbose > 1) {
		char tmp[MAX_IPv4_STR_LEN];
		u_int64_t now = net_uptime();

		log(LOG_DEBUG, "%s: ARP probe(s) needed for %s; "
		    "%s [lastused %lld, lastrcvd %lld] secs ago\n",
		    if_name(lr->lr_ifp), inet_ntop(AF_INET,
		    &SIN(rt_key(la->la_rt))->sin_addr, tmp, sizeof(tmp)), why,
		    (la->la_lastused ? (int64_t)(now - la->la_lastused) : -1),
		    (lr->lr_lastrcvd ? (int64_t)(now - lr->lr_lastrcvd) : -1));
	}
	return 0;
}
555
/*
 * Obtain a link-layer source cache entry for the sender.
 *
 * NOTE: This is currently only for ARP/Ethernet.
 *
 * Attaches (or refreshes) an if_llreach record on the route's ARP
 * entry and reports what happened via *p_rt_event_code
 * (ROUTE_LLENTRY_CHANGED when the target hardware address changed,
 * ROUTE_LLENTRY_RESOLVED when a record was created or a unicast
 * probe was answered).  Called with rt_lock held; converts it to a
 * regular mutex.
 */
static void
arp_llreach_alloc(struct rtentry *rt, struct ifnet *ifp, void *addr,
    unsigned int alen, boolean_t solicited, uint32_t *p_rt_event_code)
{
	VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
	VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);

	if (arp_llreach_base != 0 && rt->rt_expire != 0 &&
	    !(rt->rt_ifp->if_flags & IFF_LOOPBACK) &&
	    ifp->if_addrlen == IF_LLREACH_MAXLEN &&     /* Ethernet */
	    alen == ifp->if_addrlen) {
		struct llinfo_arp *la = rt->rt_llinfo;
		struct if_llreach *lr;
		const char *why = NULL, *type = "";

		/* Become a regular mutex, just in case */
		RT_CONVERT_LOCK(rt);

		if ((lr = la->la_llreach) != NULL) {
			type = (solicited ? "ARP reply" : "ARP announcement");
			/*
			 * If target has changed, create a new record;
			 * otherwise keep existing record.
			 */
			IFLR_LOCK(lr);
			if (bcmp(addr, lr->lr_key.addr, alen) != 0) {
				IFLR_UNLOCK(lr);
				/* Purge any link-layer info caching */
				VERIFY(rt->rt_llinfo_purge != NULL);
				rt->rt_llinfo_purge(rt);
				lr = NULL;
				why = " for different target HW address; "
				    "using new llreach record";
				*p_rt_event_code = ROUTE_LLENTRY_CHANGED;
			} else {
				/*
				 * If we were doing unicast probing, we need to
				 * deliver an event for neighbor cache resolution
				 */
				if (lr->lr_probes != 0) {
					*p_rt_event_code = ROUTE_LLENTRY_RESOLVED;
				}

				lr->lr_probes = 0;      /* reset probe count */
				IFLR_UNLOCK(lr);
				if (solicited) {
					why = " for same target HW address; "
					    "keeping existing llreach record";
				}
			}
		}

		if (lr == NULL) {
			lr = la->la_llreach = ifnet_llreach_alloc(ifp,
			    ETHERTYPE_IP, addr, alen, arp_llreach_base);
			if (lr != NULL) {
				lr->lr_probes = 0;      /* reset probe count */
				if (why == NULL) {
					why = "creating new llreach record";
				}
			}
			*p_rt_event_code = ROUTE_LLENTRY_RESOLVED;
		}

		if (arp_verbose > 1 && lr != NULL && why != NULL) {
			char tmp[MAX_IPv4_STR_LEN];

			log(LOG_DEBUG, "%s: %s%s for %s\n", if_name(ifp),
			    type, why, inet_ntop(AF_INET,
			    &SIN(rt_key(rt))->sin_addr, tmp, sizeof(tmp)));
		}
	}
}
634
/*
 * Accumulator passed to arptfree() by the drain/timeout/probe
 * walkers; carries the walk mode in and statistics out.
 */
struct arptf_arg {
	boolean_t draining;     /* in: delete expired entries aggressively */
	boolean_t probing;      /* in: flush holdq of entries whose probes expired */
	uint32_t killed;        /* out: entries deleted */
	uint32_t aging;         /* out: entries still aging */
	uint32_t sticky;        /* out: permanent/static entries */
	uint32_t found;         /* out: total entries visited */
	uint32_t qlen;          /* out: held packets + pure probe requests */
	uint32_t qsize;         /* out: total held bytes */
};
645
/*
 * Free an arp entry.
 *
 * Per-entry worker for the drain/timeout/probe walkers.  Depending
 * on the mode flags in `arg' (struct arptf_arg) it flushes expired
 * probe queues, forces re-resolution of expired-but-referenced
 * entries, or deletes unreferenced expired entries outright, while
 * accumulating statistics.  Called with rnh_lock held; takes rt_lock.
 */
static void
arptfree(struct llinfo_arp *la, void *arg)
{
	struct arptf_arg *ap = arg;
	struct rtentry *rt = la->la_rt;
	uint64_t timenow;

	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	/* rnh_lock acquired by caller protects rt from going away */
	RT_LOCK(rt);

	VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
	VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);

	ap->found++;
	timenow = net_uptime();

	/* If we're probing, flush out held packets upon probe expiration */
	if (ap->probing && (la->la_flags & LLINFO_PROBING) &&
	    la->la_probeexp <= timenow) {
		struct sockaddr_dl *sdl = SDL(rt->rt_gateway);
		if (sdl != NULL) {
			/* invalidate the cached link-layer address */
			sdl->sdl_alen = 0;
		}
		(void) arp_llinfo_flushq(la);
		/*
		 * Enqueue work item to invoke callback for this route entry
		 */
		route_event_enqueue_nwk_wq_entry(rt, NULL,
		    ROUTE_LLENTRY_UNREACH, NULL, TRUE);
	}

	/*
	 * The following is mostly being used to arm the timer
	 * again and for logging.
	 * qlen is used to re-arm the timer. Therefore, pure probe
	 * requests can be considered as 0 length packets
	 * contributing only to length but not to the size.
	 */
	ap->qlen += qlen(&la->la_holdq);
	ap->qlen += la->la_prbreq_cnt;
	ap->qsize += qsize(&la->la_holdq);

	if (rt->rt_expire == 0 || (rt->rt_flags & RTF_STATIC)) {
		ap->sticky++;
		/* ARP entry is permanent? */
		if (rt->rt_expire == 0) {
			RT_UNLOCK(rt);
			return;
		}
	}

	/* ARP entry hasn't expired and we're not draining? */
	if (!ap->draining && rt->rt_expire > timenow) {
		RT_UNLOCK(rt);
		ap->aging++;
		return;
	}

	if (rt->rt_refcnt > 0) {
		/*
		 * ARP entry has expired, with outstanding refcnt.
		 * If we're not draining, force ARP query to be
		 * generated next time this entry is used.
		 */
		if (!ap->draining && !ap->probing) {
			struct sockaddr_dl *sdl = SDL(rt->rt_gateway);
			if (sdl != NULL) {
				sdl->sdl_alen = 0;
			}
			la->la_asked = 0;
			rt->rt_flags &= ~RTF_REJECT;
		}
		RT_UNLOCK(rt);
	} else if (!(rt->rt_flags & RTF_STATIC) && !ap->probing) {
		/*
		 * ARP entry has no outstanding refcnt, and we're either
		 * draining or it has expired; delete it from the routing
		 * table.  Safe to drop rt_lock and use rt_key, since holding
		 * rnh_lock here prevents another thread from calling
		 * rt_setgate() on this route.
		 */
		RT_UNLOCK(rt);
		rtrequest_locked(RTM_DELETE, rt_key(rt), NULL,
		    rt_mask(rt), 0, NULL);
		arpstat.timeouts++;
		ap->killed++;
	} else {
		/* ARP entry is static; let it linger */
		RT_UNLOCK(rt);
	}
}
742
d1ecb069 743void
39236c6e 744in_arpdrain(void *arg)
91447636 745{
39236c6e 746#pragma unused(arg)
0c530ab8 747 struct llinfo_arp *la, *ola;
39236c6e
A
748 struct arptf_arg farg;
749
0a7de745 750 if (arp_verbose) {
39236c6e 751 log(LOG_DEBUG, "%s: draining ARP entries\n", __func__);
0a7de745 752 }
91447636 753
b0d623f7 754 lck_mtx_lock(rnh_lock);
0c530ab8 755 la = llinfo_arp.lh_first;
0a7de745 756 bzero(&farg, sizeof(farg));
39037602 757 farg.draining = TRUE;
39236c6e 758 while ((ola = la) != NULL) {
91447636 759 la = la->la_le.le_next;
39236c6e
A
760 arptfree(ola, &farg);
761 }
762 if (arp_verbose) {
39037602
A
763 log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
764 "%u pkts held (%u bytes)\n", __func__, farg.found,
765 farg.aging, farg.sticky, farg.killed, farg.qlen,
766 farg.qsize);
91447636 767 }
b0d623f7 768 lck_mtx_unlock(rnh_lock);
d1ecb069
A
769}
770
39236c6e
A
771/*
772 * Timeout routine. Age arp_tab entries periodically.
773 */
774static void
39037602 775arp_timeout(thread_call_param_t arg0, thread_call_param_t arg1)
6d2010ae 776{
39037602 777#pragma unused(arg0, arg1)
39236c6e
A
778 struct llinfo_arp *la, *ola;
779 struct timeval atv;
780 struct arptf_arg farg;
6d2010ae 781
39236c6e
A
782 lck_mtx_lock(rnh_lock);
783 la = llinfo_arp.lh_first;
0a7de745 784 bzero(&farg, sizeof(farg));
39236c6e
A
785 while ((ola = la) != NULL) {
786 la = la->la_le.le_next;
787 arptfree(ola, &farg);
788 }
789 if (arp_verbose) {
39037602
A
790 log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
791 "%u pkts held (%u bytes)\n", __func__, farg.found,
792 farg.aging, farg.sticky, farg.killed, farg.qlen,
793 farg.qsize);
39236c6e
A
794 }
795 atv.tv_usec = 0;
39037602 796 atv.tv_sec = MAX(arpt_prune, 5);
39236c6e
A
797 /* re-arm the timer if there's work to do */
798 arp_timeout_run = 0;
0a7de745 799 if (farg.aging > 0) {
39236c6e 800 arp_sched_timeout(&atv);
0a7de745 801 } else if (arp_verbose) {
39236c6e 802 log(LOG_DEBUG, "%s: not rescheduling timer\n", __func__);
0a7de745 803 }
39236c6e 804 lck_mtx_unlock(rnh_lock);
6d2010ae
A
805}
806
/*
 * Arm the ARP aging timer (arp_timeout) unless it is already
 * scheduled.  Lazily allocates the thread call on first use; when
 * `atv' is NULL a default interval of arpt_prune/5 (min 1s) is used.
 * Called with rnh_lock held (serializes arp_timeout_run).
 */
static void
arp_sched_timeout(struct timeval *atv)
{
	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	if (!arp_timeout_run) {
		struct timeval tv;
		uint64_t deadline = 0;

		if (arp_timeout_tcall == NULL) {
			arp_timeout_tcall =
			    thread_call_allocate(arp_timeout, NULL);
			VERIFY(arp_timeout_tcall != NULL);
		}

		if (atv == NULL) {
			tv.tv_usec = 0;
			tv.tv_sec = MAX(arpt_prune / 5, 1);
			atv = &tv;
		}
		if (arp_verbose) {
			log(LOG_DEBUG, "%s: timer scheduled in "
			    "T+%llus.%lluu\n", __func__,
			    (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
		}
		arp_timeout_run = 1;

		clock_deadline_for_periodic_event(atv->tv_sec * NSEC_PER_SEC,
		    mach_absolute_time(), &deadline);
		(void) thread_call_enter_delayed(arp_timeout_tcall, deadline);
	}
}
839
/*
 * Probe routine.
 *
 * Thread-call handler: walks every ARP entry in probing mode (so
 * entries whose probe deadline passed get their hold queues flushed
 * and an unreachability event enqueued), then re-arms itself while
 * any held packets or probe requests remain.
 */
static void
arp_probe(thread_call_param_t arg0, thread_call_param_t arg1)
{
#pragma unused(arg0, arg1)
	struct llinfo_arp *la, *ola;
	struct timeval atv;
	struct arptf_arg farg;

	lck_mtx_lock(rnh_lock);
	la = llinfo_arp.lh_first;
	bzero(&farg, sizeof(farg));
	farg.probing = TRUE;
	/* arptfree() may delete the entry, so advance before visiting */
	while ((ola = la) != NULL) {
		la = la->la_le.le_next;
		arptfree(ola, &farg);
	}
	if (arp_verbose) {
		log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u; "
		    "%u pkts held (%u bytes)\n", __func__, farg.found,
		    farg.aging, farg.sticky, farg.killed, farg.qlen,
		    farg.qsize);
	}
	atv.tv_usec = 0;
	atv.tv_sec = MAX(arpt_probe, ARP_PROBE_TIME);
	/* re-arm the probe if there's work to do */
	arp_probe_run = 0;
	if (farg.qlen > 0) {
		arp_sched_probe(&atv);
	} else if (arp_verbose) {
		log(LOG_DEBUG, "%s: not rescheduling probe\n", __func__);
	}
	lck_mtx_unlock(rnh_lock);
}
876
/*
 * Arm the ARP probe timer (arp_probe) unless it is already
 * scheduled.  Lazily allocates the thread call on first use; when
 * `atv' is NULL a default interval of arpt_probe (min ARP_PROBE_TIME)
 * is used.  Called with rnh_lock held (serializes arp_probe_run).
 */
static void
arp_sched_probe(struct timeval *atv)
{
	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	if (!arp_probe_run) {
		struct timeval tv;
		uint64_t deadline = 0;

		if (arp_probe_tcall == NULL) {
			arp_probe_tcall =
			    thread_call_allocate(arp_probe, NULL);
			VERIFY(arp_probe_tcall != NULL);
		}

		if (atv == NULL) {
			tv.tv_usec = 0;
			tv.tv_sec = MAX(arpt_probe, ARP_PROBE_TIME);
			atv = &tv;
		}
		if (arp_verbose) {
			log(LOG_DEBUG, "%s: probe scheduled in "
			    "T+%llus.%lluu\n", __func__,
			    (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
		}
		arp_probe_run = 1;

		clock_deadline_for_periodic_event(atv->tv_sec * NSEC_PER_SEC,
		    mach_absolute_time(), &deadline);
		(void) thread_call_enter_delayed(arp_probe_tcall, deadline);
	}
}
909
/*
 * ifa_rtrequest() callback
 *
 * Invoked by the routing layer for RTM_ADD / RTM_RESOLVE / RTM_DELETE on
 * AF_INET routes to attach/detach the per-route ARP state (llinfo_arp)
 * and shape the AF_LINK gateway sockaddr.  Called with rnh_lock and the
 * route entry lock held; routes marked RTF_GATEWAY are ignored since
 * ARP only resolves directly-attached hosts.
 */
static void
arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa)
{
#pragma unused(sa)
	struct sockaddr *gate = rt->rt_gateway;
	struct llinfo_arp *la = rt->rt_llinfo;
	static struct sockaddr_dl null_sdl =
	{ .sdl_len = sizeof(null_sdl), .sdl_family = AF_LINK };
	uint64_t timenow;
	char buf[MAX_IPv4_STR_LEN];

	VERIFY(arpinit_done);
	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
	RT_LOCK_ASSERT_HELD(rt);

	if (rt->rt_flags & RTF_GATEWAY) {
		return;
	}

	timenow = net_uptime();
	switch (req) {
	case RTM_ADD:
		/*
		 * XXX: If this is a manually added route to interface
		 * such as older version of routed or gated might provide,
		 * restore cloning bit.
		 */
		if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL &&
		    SIN(rt_mask(rt))->sin_addr.s_addr != INADDR_BROADCAST) {
			rt->rt_flags |= RTF_CLONING;
		}

		if (rt->rt_flags & RTF_CLONING) {
			/*
			 * Case 1: This route should come from a route to iface.
			 */
			if (rt_setgate(rt, rt_key(rt), SA(&null_sdl)) == 0) {
				gate = rt->rt_gateway;
				SDL(gate)->sdl_type = rt->rt_ifp->if_type;
				SDL(gate)->sdl_index = rt->rt_ifp->if_index;
				/*
				 * In case we're called before 1.0 sec.
				 * has elapsed.
				 */
				rt_setexpire(rt, MAX(timenow, 1));
			}
			break;
		}
		/* Announce a new entry if requested. */
		if (rt->rt_flags & RTF_ANNOUNCE) {
			if (la != NULL) {
				arp_llreach_use(la); /* Mark use timestamp */
			}
			/* dlil_send_arp() may block; drop rt_lock around it */
			RT_UNLOCK(rt);
			dlil_send_arp(rt->rt_ifp, ARPOP_REQUEST,
			    SDL(gate), rt_key(rt), NULL, rt_key(rt), 0);
			RT_LOCK(rt);
			arpstat.txannounces++;
		}
		/* FALLTHRU */
	case RTM_RESOLVE:
		if (gate->sa_family != AF_LINK ||
		    gate->sa_len < sizeof(null_sdl)) {
			arpstat.invalidreqs++;
			log(LOG_ERR, "%s: route to %s has bad gateway address "
			    "(sa_family %u sa_len %u) on %s\n",
			    __func__, inet_ntop(AF_INET,
			    &SIN(rt_key(rt))->sin_addr.s_addr, buf,
			    sizeof(buf)), gate->sa_family, gate->sa_len,
			    if_name(rt->rt_ifp));
			break;
		}
		SDL(gate)->sdl_type = rt->rt_ifp->if_type;
		SDL(gate)->sdl_index = rt->rt_ifp->if_index;

		if (la != NULL) {
			break; /* This happens on a route change */
		}

		/*
		 * Case 2: This route may come from cloning, or a manual route
		 * add with a LL address.
		 */
		rt->rt_llinfo = la = arp_llinfo_alloc(M_WAITOK);
		if (la == NULL) {
			arpstat.reqnobufs++;
			break;
		}

		/* Wire up the per-route llinfo callbacks */
		rt->rt_llinfo_get_ri = arp_llinfo_get_ri;
		rt->rt_llinfo_get_iflri = arp_llinfo_get_iflri;
		rt->rt_llinfo_purge = arp_llinfo_purge;
		rt->rt_llinfo_free = arp_llinfo_free;
		rt->rt_llinfo_refresh = arp_llinfo_refresh;
		rt->rt_flags |= RTF_LLINFO;
		la->la_rt = rt;
		LIST_INSERT_HEAD(&llinfo_arp, la, la_le);
		arpstat.inuse++;

		/* We have at least one entry; arm the timer if not already */
		arp_sched_timeout(NULL);

		/*
		 * This keeps the multicast addresses from showing up
		 * in `arp -a' listings as unresolved.  It's not actually
		 * functional.  Then the same for broadcast.  For IPv4
		 * link-local address, keep the entry around even after
		 * it has expired.
		 */
		if (IN_MULTICAST(ntohl(SIN(rt_key(rt))->sin_addr.s_addr))) {
			RT_UNLOCK(rt);
			dlil_resolve_multi(rt->rt_ifp, rt_key(rt), gate,
			    sizeof(struct sockaddr_dl));
			RT_LOCK(rt);
			rt_setexpire(rt, 0);
		} else if (in_broadcast(SIN(rt_key(rt))->sin_addr,
		    rt->rt_ifp)) {
			struct sockaddr_dl *gate_ll = SDL(gate);
			size_t broadcast_len;
			ifnet_llbroadcast_copy_bytes(rt->rt_ifp,
			    LLADDR(gate_ll), sizeof(gate_ll->sdl_data),
			    &broadcast_len);
			gate_ll->sdl_alen = broadcast_len;
			gate_ll->sdl_family = AF_LINK;
			gate_ll->sdl_len = sizeof(struct sockaddr_dl);
			/* In case we're called before 1.0 sec. has elapsed */
			rt_setexpire(rt, MAX(timenow, 1));
		} else if (IN_LINKLOCAL(ntohl(SIN(rt_key(rt))->
		    sin_addr.s_addr))) {
			rt->rt_flags |= RTF_STATIC;
		}

		/* Set default maximum number of retries */
		la->la_maxtries = arp_maxtries;

		/* Become a regular mutex, just in case */
		RT_CONVERT_LOCK(rt);
		IFA_LOCK_SPIN(rt->rt_ifa);
		/* Route to one of our own addresses?  Redirect it to loopback. */
		if (SIN(rt_key(rt))->sin_addr.s_addr ==
		    (IA_SIN(rt->rt_ifa))->sin_addr.s_addr) {
			IFA_UNLOCK(rt->rt_ifa);
			/*
			 * This test used to be
			 *	if (loif.if_flags & IFF_UP)
			 * It allowed local traffic to be forced through the
			 * hardware by configuring the loopback down.  However,
			 * it causes problems during network configuration
			 * for boards that can't receive packets they send.
			 * It is now necessary to clear "useloopback" and
			 * remove the route to force traffic out to the
			 * hardware.
			 */
			rt_setexpire(rt, 0);
			ifnet_lladdr_copy_bytes(rt->rt_ifp, LLADDR(SDL(gate)),
			    SDL(gate)->sdl_alen = rt->rt_ifp->if_addrlen);
			if (useloopback) {
				if (rt->rt_ifp != lo_ifp) {
					/*
					 * Purge any link-layer info caching.
					 */
					if (rt->rt_llinfo_purge != NULL) {
						rt->rt_llinfo_purge(rt);
					}

					/*
					 * Adjust route ref count for the
					 * interfaces.
					 */
					if (rt->rt_if_ref_fn != NULL) {
						rt->rt_if_ref_fn(lo_ifp, 1);
						rt->rt_if_ref_fn(rt->rt_ifp, -1);
					}
				}
				rt->rt_ifp = lo_ifp;
				/*
				 * If rmx_mtu is not locked, update it
				 * to the MTU used by the new interface.
				 */
				if (!(rt->rt_rmx.rmx_locks & RTV_MTU)) {
					rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
				}
			}
		} else {
			IFA_UNLOCK(rt->rt_ifa);
		}
		break;

	case RTM_DELETE:
		if (la == NULL) {
			break;
		}
		/*
		 * Unchain it but defer the actual freeing until the route
		 * itself is to be freed.  rt->rt_llinfo still points to
		 * llinfo_arp, and likewise, la->la_rt still points to this
		 * route entry, except that RTF_LLINFO is now cleared.
		 */
		LIST_REMOVE(la, la_le);
		la->la_le.le_next = NULL;
		la->la_le.le_prev = NULL;
		arpstat.inuse--;

		/*
		 * Purge any link-layer info caching.
		 */
		if (rt->rt_llinfo_purge != NULL) {
			rt->rt_llinfo_purge(rt);
		}

		rt->rt_flags &= ~RTF_LLINFO;
		(void) arp_llinfo_flushq(la);
	}
}
1124
1125/*
1126 * convert hardware address to hex string for logging errors.
1127 */
1128static const char *
39236c6e 1129sdl_addr_to_hex(const struct sockaddr_dl *sdl, char *orig_buf, int buflen)
91447636 1130{
39236c6e
A
1131 char *buf = orig_buf;
1132 int i;
1133 const u_char *lladdr = (u_char *)(size_t)sdl->sdl_data;
1134 int maxbytes = buflen / 3;
1135
91447636
A
1136 if (maxbytes > sdl->sdl_alen) {
1137 maxbytes = sdl->sdl_alen;
39236c6e 1138 }
91447636
A
1139 *buf = '\0';
1140 for (i = 0; i < maxbytes; i++) {
1141 snprintf(buf, 3, "%02x", lladdr[i]);
1142 buf += 2;
1143 *buf = (i == maxbytes - 1) ? '\0' : ':';
1144 buf++;
1145 }
0a7de745 1146 return orig_buf;
91447636
A
1147}
1148
/*
 * arp_lookup_route will lookup the route for a given address.
 *
 * The address must be for a host on a local network on this interface.
 * If the returned route is non-NULL, the route is locked and the caller
 * is responsible for unlocking it and releasing its reference.
 *
 * Returns 0 with *route set on success; otherwise ENETUNREACH, ENOMEM or
 * EPROTONOSUPPORT with *route left NULL (and a bogus ARP-cloned route,
 * if one was found, purged from the table).
 */
static errno_t
arp_lookup_route(const struct in_addr *addr, int create, int proxy,
    route_t *route, unsigned int ifscope)
{
	struct sockaddr_inarp sin =
	{ sizeof(sin), AF_INET, 0, { 0 }, { 0 }, 0, 0 };
	const char *why = NULL;
	errno_t error = 0;
	route_t rt;

	*route = NULL;

	sin.sin_addr.s_addr = addr->s_addr;
	sin.sin_other = proxy ? SIN_PROXY : 0;

	/*
	 * If the destination is a link-local address, don't
	 * constrain the lookup (don't scope it).
	 */
	if (IN_LINKLOCAL(ntohl(addr->s_addr))) {
		ifscope = IFSCOPE_NONE;
	}

	rt = rtalloc1_scoped((struct sockaddr *)&sin, create, 0, ifscope);
	if (rt == NULL) {
		return ENETUNREACH;
	}

	RT_LOCK(rt);

	/* Validate that this is a resolvable, ARP-owned host route */
	if (rt->rt_flags & RTF_GATEWAY) {
		why = "host is not on local network";
		error = ENETUNREACH;
	} else if (!(rt->rt_flags & RTF_LLINFO)) {
		why = "could not allocate llinfo";
		error = ENOMEM;
	} else if (rt->rt_gateway->sa_family != AF_LINK) {
		why = "gateway route is not ours";
		error = EPROTONOSUPPORT;
	}

	if (error != 0) {
		if (create && (arp_verbose || log_arp_warnings)) {
			char tmp[MAX_IPv4_STR_LEN];
			log(LOG_DEBUG, "%s: link#%d %s failed: %s\n",
			    __func__, ifscope, inet_ntop(AF_INET, addr, tmp,
			    sizeof(tmp)), why);
		}

		/*
		 * If there are no references to this route, and it is
		 * a cloned route, and not static, and ARP had created
		 * the route, then purge it from the routing table as
		 * it is probably bogus.
		 */
		if (rt->rt_refcnt == 1 &&
		    (rt->rt_flags & (RTF_WASCLONED | RTF_STATIC)) ==
		    RTF_WASCLONED) {
			/*
			 * Prevent another thread from modifying rt_key,
			 * rt_gateway via rt_setgate() after rt_lock is
			 * dropped by marking the route as defunct.
			 */
			rt->rt_flags |= RTF_CONDEMNED;
			RT_UNLOCK(rt);
			rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
			    rt_mask(rt), rt->rt_flags, NULL);
			rtfree(rt);
		} else {
			RT_REMREF_LOCKED(rt);
			RT_UNLOCK(rt);
		}
		return error;
	}

	/*
	 * Caller releases reference and does RT_UNLOCK(rt).
	 */
	*route = rt;
	return 0;
}
91447636 1237
5ba3f43e 1238boolean_t
0a7de745 1239arp_is_entry_probing(route_t p_route)
5ba3f43e
A
1240{
1241 struct llinfo_arp *llinfo = p_route->rt_llinfo;
1242
1243 if (llinfo != NULL &&
1244 llinfo->la_llreach != NULL &&
0a7de745
A
1245 llinfo->la_llreach->lr_probes != 0) {
1246 return TRUE;
1247 }
5ba3f43e 1248
0a7de745 1249 return FALSE;
5ba3f43e
A
1250}
1251
b0d623f7
A
/*
 * This is the ARP pre-output routine; care must be taken to ensure that
 * the "hint" route never gets freed via rtfree(), since the caller may
 * have stored it inside a struct route with a reference held for that
 * placeholder.
 *
 * Resolves net_dest on ifp into a link-layer address in ll_dest.
 * Return values:
 *   0           - ll_dest filled in (unicast, broadcast or multicast).
 *   EJUSTRETURN - packet was queued on the entry (or dropped by
 *                 arp_llinfo_addq / freed here); caller must not free it.
 *   other errno - failure; caller retains ownership of the packet.
 */
errno_t
arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest,
    struct sockaddr_dl *ll_dest, size_t ll_dest_len, route_t hint,
    mbuf_t packet)
{
	route_t route = NULL;   /* output route */
	errno_t result = 0;
	struct sockaddr_dl *gateway;
	struct llinfo_arp *llinfo = NULL;
	boolean_t usable, probing = FALSE;
	uint64_t timenow;
	struct if_llreach *lr;
	struct ifaddr *rt_ifa;
	struct sockaddr *sa;
	uint32_t rtflags;
	struct sockaddr_dl sdl;
	boolean_t send_probe_notif = FALSE;
	boolean_t enqueued = FALSE;

	if (ifp == NULL || net_dest == NULL) {
		return EINVAL;
	}

	if (net_dest->sin_family != AF_INET) {
		return EAFNOSUPPORT;
	}

	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING)) {
		return ENETDOWN;
	}

	/*
	 * If we were given a route, verify the route and grab the gateway
	 */
	if (hint != NULL) {
		/*
		 * Callee holds a reference on the route and returns
		 * with the route entry locked, upon success.
		 */
		result = route_to_gwroute((const struct sockaddr *)
		    net_dest, hint, &route);
		if (result != 0) {
			return result;
		}
		if (route != NULL) {
			RT_LOCK_ASSERT_HELD(route);
		}
	}

	/* Broadcast destination: copy the interface broadcast address */
	if ((packet != NULL && (packet->m_flags & M_BCAST)) ||
	    in_broadcast(net_dest->sin_addr, ifp)) {
		size_t broadcast_len;
		bzero(ll_dest, ll_dest_len);
		result = ifnet_llbroadcast_copy_bytes(ifp, LLADDR(ll_dest),
		    ll_dest_len - offsetof(struct sockaddr_dl, sdl_data),
		    &broadcast_len);
		if (result == 0) {
			ll_dest->sdl_alen = broadcast_len;
			ll_dest->sdl_family = AF_LINK;
			ll_dest->sdl_len = sizeof(struct sockaddr_dl);
		}
		goto release;
	}
	/* Multicast destination: let the interface map it */
	if ((packet != NULL && (packet->m_flags & M_MCAST)) ||
	    ((ifp->if_flags & IFF_MULTICAST) &&
	    IN_MULTICAST(ntohl(net_dest->sin_addr.s_addr)))) {
		if (route != NULL) {
			RT_UNLOCK(route);
		}
		result = dlil_resolve_multi(ifp,
		    (const struct sockaddr *)net_dest,
		    (struct sockaddr *)ll_dest, ll_dest_len);
		if (route != NULL) {
			RT_LOCK(route);
		}
		goto release;
	}

	/*
	 * If we didn't find a route, or the route doesn't have
	 * link layer information, trigger the creation of the
	 * route and link layer information.
	 */
	if (route == NULL || route->rt_llinfo == NULL) {
		/* Clean up now while we can */
		if (route != NULL) {
			if (route == hint) {
				RT_REMREF_LOCKED(route);
				RT_UNLOCK(route);
			} else {
				RT_UNLOCK(route);
				rtfree(route);
			}
		}
		/*
		 * Callee holds a reference on the route and returns
		 * with the route entry locked, upon success.
		 */
		result = arp_lookup_route(&net_dest->sin_addr, 1, 0, &route,
		    ifp->if_index);
		if (result == 0) {
			RT_LOCK_ASSERT_HELD(route);
		}
	}

	if (result || route == NULL || (llinfo = route->rt_llinfo) == NULL) {
		/* In case result is 0 but no route, return an error */
		if (result == 0) {
			result = EHOSTUNREACH;
		}

		if (route != NULL && route->rt_llinfo == NULL) {
			char tmp[MAX_IPv4_STR_LEN];
			log(LOG_ERR, "%s: can't allocate llinfo for %s\n",
			    __func__, inet_ntop(AF_INET, &net_dest->sin_addr,
			    tmp, sizeof(tmp)));
		}
		goto release;
	}

	/*
	 * Now that we have the right route, is it filled in?
	 */
	gateway = SDL(route->rt_gateway);
	timenow = net_uptime();
	/* rt_expire and rmx_expire must agree on zero-ness */
	VERIFY(route->rt_expire == 0 || route->rt_rmx.rmx_expire != 0);
	VERIFY(route->rt_expire != 0 || route->rt_rmx.rmx_expire == 0);

	usable = ((route->rt_expire == 0 || route->rt_expire > timenow) &&
	    gateway != NULL && gateway->sdl_family == AF_LINK &&
	    gateway->sdl_alen != 0);

	if (usable) {
		boolean_t unreachable = !arp_llreach_reachable(llinfo);

		/* Entry is usable, so fill in info for caller */
		bcopy(gateway, ll_dest, MIN(gateway->sdl_len, ll_dest_len));
		result = 0;
		arp_llreach_use(llinfo);        /* Mark use timestamp */

		lr = llinfo->la_llreach;
		if (lr == NULL) {
			goto release;
		}
		rt_ifa = route->rt_ifa;

		/* Become a regular mutex, just in case */
		RT_CONVERT_LOCK(route);
		IFLR_LOCK_SPIN(lr);

		if ((unreachable || (llinfo->la_flags & LLINFO_PROBING)) &&
		    lr->lr_probes < arp_unicast_lim) {
			/*
			 * Thus mark the entry with la_probeexp deadline to
			 * trigger the probe timer to be scheduled (if not
			 * already).  This gets cleared the moment we get
			 * an ARP reply.
			 */
			probing = TRUE;
			if (lr->lr_probes == 0) {
				llinfo->la_probeexp = (timenow + arpt_probe);
				llinfo->la_flags |= LLINFO_PROBING;
				/*
				 * Provide notification that ARP unicast
				 * probing has started.
				 * We only do it for the first unicast probe
				 * attempt.
				 */
				send_probe_notif = TRUE;
			}

			/*
			 * Start the unicast probe and anticipate a reply;
			 * afterwards, return existing entry to caller and
			 * let it be used anyway.  If peer is non-existent
			 * we'll broadcast ARP next time around.
			 */
			lr->lr_probes++;
			bzero(&sdl, sizeof(sdl));
			sdl.sdl_alen = ifp->if_addrlen;
			bcopy(&lr->lr_key.addr, LLADDR(&sdl),
			    ifp->if_addrlen);
			IFLR_UNLOCK(lr);
			IFA_LOCK_SPIN(rt_ifa);
			IFA_ADDREF_LOCKED(rt_ifa);
			sa = rt_ifa->ifa_addr;
			IFA_UNLOCK(rt_ifa);
			rtflags = route->rt_flags;
			/* Drop rt_lock across the (possibly blocking) send */
			RT_UNLOCK(route);
			dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa,
			    (const struct sockaddr_dl *)&sdl,
			    (const struct sockaddr *)net_dest, rtflags);
			IFA_REMREF(rt_ifa);
			RT_LOCK(route);
			goto release;
		} else {
			IFLR_UNLOCK(lr);
			if (!unreachable &&
			    !(llinfo->la_flags & LLINFO_PROBING)) {
				/*
				 * Normal case where peer is still reachable,
				 * we're not probing and if_addrlen is anything
				 * but IF_LLREACH_MAXLEN.
				 */
				goto release;
			}
		}
	}

	if (ifp->if_flags & IFF_NOARP) {
		result = ENOTSUP;
		goto release;
	}

	/*
	 * Route wasn't complete/valid; we need to send out ARP request.
	 * If we've exceeded the limit of la_holdq, drop from the head
	 * of queue and add this packet to the tail.  If we end up with
	 * RTF_REJECT below, we'll dequeue this from tail and have the
	 * caller free the packet instead.  It's safe to do that since
	 * we still hold the route's rt_lock.
	 */
	if (packet != NULL) {
		enqueued = arp_llinfo_addq(llinfo, packet);
	} else {
		llinfo->la_prbreq_cnt++;
	}
	/*
	 * Regardless of permanent vs. expirable entry, we need to
	 * avoid having packets sit in la_holdq forever; thus mark the
	 * entry with la_probeexp deadline to trigger the probe timer
	 * to be scheduled (if not already).  This gets cleared the
	 * moment we get an ARP reply.
	 */
	probing = TRUE;
	if ((qlen(&llinfo->la_holdq) + llinfo->la_prbreq_cnt) == 1) {
		llinfo->la_probeexp = (timenow + arpt_probe);
		llinfo->la_flags |= LLINFO_PROBING;
	}

	if (route->rt_expire) {
		route->rt_flags &= ~RTF_REJECT;
		if (llinfo->la_asked == 0 || route->rt_expire != timenow) {
			rt_setexpire(route, timenow);
			if (llinfo->la_asked++ < llinfo->la_maxtries) {
				struct kev_msg ev_msg;
				struct kev_in_arpfailure in_arpfailure;
				boolean_t sendkev = FALSE;

				rt_ifa = route->rt_ifa;
				lr = llinfo->la_llreach;
				/* Become a regular mutex, just in case */
				RT_CONVERT_LOCK(route);
				/* Update probe count, if applicable */
				if (lr != NULL) {
					IFLR_LOCK_SPIN(lr);
					lr->lr_probes++;
					IFLR_UNLOCK(lr);
				}
				/* Router not answering after >1 tries: raise event */
				if (ifp->if_addrlen == IF_LLREACH_MAXLEN &&
				    route->rt_flags & RTF_ROUTER &&
				    llinfo->la_asked > 1) {
					sendkev = TRUE;
					llinfo->la_flags |= LLINFO_RTRFAIL_EVTSENT;
				}
				IFA_LOCK_SPIN(rt_ifa);
				IFA_ADDREF_LOCKED(rt_ifa);
				sa = rt_ifa->ifa_addr;
				IFA_UNLOCK(rt_ifa);
				arp_llreach_use(llinfo); /* Mark use tstamp */
				rtflags = route->rt_flags;
				RT_UNLOCK(route);
				/* Broadcast ARP request for the target */
				dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa,
				    NULL, (const struct sockaddr *)net_dest,
				    rtflags);
				IFA_REMREF(rt_ifa);
				if (sendkev) {
					bzero(&ev_msg, sizeof(ev_msg));
					bzero(&in_arpfailure,
					    sizeof(in_arpfailure));
					in_arpfailure.link_data.if_family =
					    ifp->if_family;
					in_arpfailure.link_data.if_unit =
					    ifp->if_unit;
					strlcpy(in_arpfailure.link_data.if_name,
					    ifp->if_name, IFNAMSIZ);
					ev_msg.vendor_code = KEV_VENDOR_APPLE;
					ev_msg.kev_class = KEV_NETWORK_CLASS;
					ev_msg.kev_subclass = KEV_INET_SUBCLASS;
					ev_msg.event_code =
					    KEV_INET_ARPRTRFAILURE;
					ev_msg.dv[0].data_ptr = &in_arpfailure;
					ev_msg.dv[0].data_length =
					    sizeof(struct
					    kev_in_arpfailure);
					dlil_post_complete_msg(NULL, &ev_msg);
				}
				result = EJUSTRETURN;
				RT_LOCK(route);
				goto release;
			} else {
				/* Retries exhausted; hold the REJECT for arpt_down */
				route->rt_flags |= RTF_REJECT;
				rt_setexpire(route,
				    route->rt_expire + arpt_down);
				llinfo->la_asked = 0;
				/*
				 * Remove the packet that was just added above;
				 * don't free it since we're not returning
				 * EJUSTRETURN.  The caller will handle the
				 * freeing.  Since we haven't dropped rt_lock
				 * from the time of _addq() above, this packet
				 * must be at the tail.
				 */
				if (packet != NULL && enqueued) {
					classq_pkt_t pkt =
					    CLASSQ_PKT_INITIALIZER(pkt);

					_getq_tail(&llinfo->la_holdq, &pkt);
					atomic_add_32(&arpstat.held, -1);
					VERIFY(pkt.cp_mbuf == packet);
				}
				result = EHOSTUNREACH;
				/*
				 * Enqueue work item to invoke callback for
				 * this route entry
				 */
				route_event_enqueue_nwk_wq_entry(route, NULL,
				    ROUTE_LLENTRY_UNREACH, NULL, TRUE);
				goto release;
			}
		}
	}

	/* The packet is now held inside la_holdq or dropped */
	result = EJUSTRETURN;
	if (packet != NULL && !enqueued) {
		mbuf_free(packet);
		packet = NULL;
	}

release:
	if (result == EHOSTUNREACH) {
		atomic_add_32(&arpstat.dropped, 1);
	}

	if (route != NULL) {
		if (send_probe_notif) {
			route_event_enqueue_nwk_wq_entry(route, NULL,
			    ROUTE_LLENTRY_PROBED, NULL, TRUE);

			if (route->rt_flags & RTF_ROUTER) {
				struct radix_node_head *rnh = NULL;
				struct route_event rt_ev;
				route_event_init(&rt_ev, route, NULL,
				    ROUTE_LLENTRY_PROBED);
				/*
				 * We already have a reference on rt. The
				 * function frees it before returning.
				 */
				RT_UNLOCK(route);
				lck_mtx_lock(rnh_lock);
				rnh = rt_tables[AF_INET];

				if (rnh != NULL) {
					(void) rnh->rnh_walktree(rnh,
					    route_event_walktree,
					    (void *)&rt_ev);
				}
				lck_mtx_unlock(rnh_lock);
				RT_LOCK(route);
			}
		}

		if (route == hint) {
			RT_REMREF_LOCKED(route);
			RT_UNLOCK(route);
		} else {
			RT_UNLOCK(route);
			rtfree(route);
		}
	}
	if (probing) {
		/* Do this after we drop rt_lock to preserve ordering */
		lck_mtx_lock(rnh_lock);
		arp_sched_probe(NULL);
		lck_mtx_unlock(rnh_lock);
	}
	return result;
}
1644
1645errno_t
39236c6e
A
1646arp_ip_handle_input(ifnet_t ifp, u_short arpop,
1647 const struct sockaddr_dl *sender_hw, const struct sockaddr_in *sender_ip,
1648 const struct sockaddr_in *target_ip)
91447636 1649{
39236c6e 1650 char ipv4str[MAX_IPv4_STR_LEN];
b0d623f7
A
1651 struct sockaddr_dl proxied;
1652 struct sockaddr_dl *gateway, *target_hw = NULL;
1653 struct ifaddr *ifa;
91447636
A
1654 struct in_ifaddr *ia;
1655 struct in_ifaddr *best_ia = NULL;
6d2010ae 1656 struct sockaddr_in best_ia_sin;
0a7de745 1657 route_t route = NULL;
39236c6e 1658 char buf[3 * MAX_HW_LEN]; /* enough for MAX_HW_LEN byte hw address */
91447636 1659 struct llinfo_arp *llinfo;
0a7de745 1660 errno_t error;
2d21ac55 1661 int created_announcement = 0;
b7266188 1662 int bridged = 0, is_bridge = 0;
5ba3f43e 1663 uint32_t rt_evcode = 0;
6d2010ae 1664
39037602
A
1665 /*
1666 * Here and other places within this routine where we don't hold
1667 * rnh_lock, trade accuracy for speed for the common scenarios
1668 * and avoid the use of atomic updates.
1669 */
39236c6e
A
1670 arpstat.received++;
1671
91447636 1672 /* Do not respond to requests for 0.0.0.0 */
0a7de745 1673 if (target_ip->sin_addr.s_addr == INADDR_ANY && arpop == ARPOP_REQUEST) {
b0d623f7 1674 goto done;
0a7de745 1675 }
6d2010ae 1676
0a7de745 1677 if (ifp->if_bridge) {
b7266188 1678 bridged = 1;
0a7de745
A
1679 }
1680 if (ifp->if_type == IFT_BRIDGE) {
b7266188 1681 is_bridge = 1;
0a7de745 1682 }
b0d623f7 1683
0a7de745 1684 if (arpop == ARPOP_REPLY) {
39236c6e 1685 arpstat.rxreplies++;
0a7de745 1686 }
39236c6e 1687
91447636
A
1688 /*
1689 * Determine if this ARP is for us
1690 */
b0d623f7
A
1691 lck_rw_lock_shared(in_ifaddr_rwlock);
1692 TAILQ_FOREACH(ia, INADDR_HASH(target_ip->sin_addr.s_addr), ia_hash) {
6d2010ae 1693 IFA_LOCK_SPIN(&ia->ia_ifa);
cb323159 1694 if (ia->ia_ifp == ifp &&
b0d623f7 1695 ia->ia_addr.sin_addr.s_addr == target_ip->sin_addr.s_addr) {
6d2010ae
A
1696 best_ia = ia;
1697 best_ia_sin = best_ia->ia_addr;
1698 IFA_ADDREF_LOCKED(&ia->ia_ifa);
1699 IFA_UNLOCK(&ia->ia_ifa);
1700 lck_rw_done(in_ifaddr_rwlock);
1701 goto match;
91447636 1702 }
6d2010ae 1703 IFA_UNLOCK(&ia->ia_ifa);
91447636 1704 }
b0d623f7
A
1705
1706 TAILQ_FOREACH(ia, INADDR_HASH(sender_ip->sin_addr.s_addr), ia_hash) {
6d2010ae 1707 IFA_LOCK_SPIN(&ia->ia_ifa);
cb323159 1708 if (ia->ia_ifp == ifp &&
b0d623f7 1709 ia->ia_addr.sin_addr.s_addr == sender_ip->sin_addr.s_addr) {
6d2010ae
A
1710 best_ia = ia;
1711 best_ia_sin = best_ia->ia_addr;
1712 IFA_ADDREF_LOCKED(&ia->ia_ifa);
1713 IFA_UNLOCK(&ia->ia_ifa);
1714 lck_rw_done(in_ifaddr_rwlock);
1715 goto match;
b7266188 1716 }
6d2010ae 1717 IFA_UNLOCK(&ia->ia_ifa);
b7266188
A
1718 }
1719
0a7de745
A
1720#define BDG_MEMBER_MATCHES_ARP(addr, ifp, ia) \
1721 (ia->ia_ifp->if_bridge == ifp->if_softc && \
39236c6e 1722 bcmp(IF_LLADDR(ia->ia_ifp), IF_LLADDR(ifp), ifp->if_addrlen) == 0 && \
b7266188
A
1723 addr == ia->ia_addr.sin_addr.s_addr)
1724 /*
1725 * Check the case when bridge shares its MAC address with
1726 * some of its children, so packets are claimed by bridge
1727 * itself (bridge_input() does it first), but they are really
1728 * meant to be destined to the bridge member.
1729 */
1730 if (is_bridge) {
6d2010ae
A
1731 TAILQ_FOREACH(ia, INADDR_HASH(target_ip->sin_addr.s_addr),
1732 ia_hash) {
1733 IFA_LOCK_SPIN(&ia->ia_ifa);
1734 if (BDG_MEMBER_MATCHES_ARP(target_ip->sin_addr.s_addr,
1735 ifp, ia)) {
b7266188
A
1736 ifp = ia->ia_ifp;
1737 best_ia = ia;
6d2010ae
A
1738 best_ia_sin = best_ia->ia_addr;
1739 IFA_ADDREF_LOCKED(&ia->ia_ifa);
1740 IFA_UNLOCK(&ia->ia_ifa);
b7266188
A
1741 lck_rw_done(in_ifaddr_rwlock);
1742 goto match;
1743 }
6d2010ae 1744 IFA_UNLOCK(&ia->ia_ifa);
b0d623f7 1745 }
91447636 1746 }
39236c6e 1747#undef BDG_MEMBER_MATCHES_ARP
b0d623f7
A
1748 lck_rw_done(in_ifaddr_rwlock);
1749
1750 /*
1751 * No match, use the first inet address on the receive interface
1752 * as a dummy address for the rest of the function; we may be
1753 * proxying for another address.
1754 */
1755 ifnet_lock_shared(ifp);
1756 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
6d2010ae
A
1757 IFA_LOCK_SPIN(ifa);
1758 if (ifa->ifa_addr->sa_family != AF_INET) {
1759 IFA_UNLOCK(ifa);
b0d623f7 1760 continue;
6d2010ae 1761 }
b0d623f7 1762 best_ia = (struct in_ifaddr *)ifa;
6d2010ae
A
1763 best_ia_sin = best_ia->ia_addr;
1764 IFA_ADDREF_LOCKED(ifa);
1765 IFA_UNLOCK(ifa);
b7266188
A
1766 ifnet_lock_done(ifp);
1767 goto match;
b0d623f7
A
1768 }
1769 ifnet_lock_done(ifp);
1770
b7266188
A
1771 /*
1772 * If we're not a bridge member, or if we are but there's no
1773 * IPv4 address to use for the interface, drop the packet.
1774 */
0a7de745 1775 if (!bridged || best_ia == NULL) {
b0d623f7 1776 goto done;
0a7de745 1777 }
b0d623f7
A
1778
1779match:
91447636 1780 /* If the packet is from this interface, ignore the packet */
39236c6e 1781 if (bcmp(CONST_LLADDR(sender_hw), IF_LLADDR(ifp),
0a7de745 1782 sender_hw->sdl_alen) == 0) {
b0d623f7 1783 goto done;
0a7de745 1784 }
b0d623f7 1785
91447636 1786 /* Check for a conflict */
39236c6e
A
1787 if (!bridged &&
1788 sender_ip->sin_addr.s_addr == best_ia_sin.sin_addr.s_addr) {
1789 struct kev_msg ev_msg;
0a7de745
A
1790 struct kev_in_collision *in_collision;
1791 u_char storage[sizeof(struct kev_in_collision) + MAX_HW_LEN];
39236c6e 1792
0a7de745
A
1793 bzero(&ev_msg, sizeof(struct kev_msg));
1794 bzero(storage, (sizeof(struct kev_in_collision) + MAX_HW_LEN));
39236c6e
A
1795 in_collision = (struct kev_in_collision *)(void *)storage;
1796 log(LOG_ERR, "%s duplicate IP address %s sent from "
1797 "address %s\n", if_name(ifp),
1798 inet_ntop(AF_INET, &sender_ip->sin_addr, ipv4str,
0a7de745
A
1799 sizeof(ipv4str)), sdl_addr_to_hex(sender_hw, buf,
1800 sizeof(buf)));
b0d623f7 1801
91447636
A
1802 /* Send a kernel event so anyone can learn of the conflict */
1803 in_collision->link_data.if_family = ifp->if_family;
1804 in_collision->link_data.if_unit = ifp->if_unit;
fe8ab488 1805 strlcpy(&in_collision->link_data.if_name[0],
39236c6e 1806 ifp->if_name, IFNAMSIZ);
91447636 1807 in_collision->ia_ipaddr = sender_ip->sin_addr;
39236c6e
A
1808 in_collision->hw_len = (sender_hw->sdl_alen < MAX_HW_LEN) ?
1809 sender_hw->sdl_alen : MAX_HW_LEN;
1810 bcopy(CONST_LLADDR(sender_hw), (caddr_t)in_collision->hw_addr,
1811 in_collision->hw_len);
91447636
A
1812 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1813 ev_msg.kev_class = KEV_NETWORK_CLASS;
1814 ev_msg.kev_subclass = KEV_INET_SUBCLASS;
1815 ev_msg.event_code = KEV_INET_ARPCOLLISION;
1816 ev_msg.dv[0].data_ptr = in_collision;
39236c6e 1817 ev_msg.dv[0].data_length =
0a7de745 1818 sizeof(struct kev_in_collision) + in_collision->hw_len;
91447636 1819 ev_msg.dv[1].data_length = 0;
39037602
A
1820 dlil_post_complete_msg(NULL, &ev_msg);
1821 atomic_add_32(&arpstat.dupips, 1);
91447636
A
1822 goto respond;
1823 }
b0d623f7 1824
91447636
A
1825 /*
1826 * Look up the routing entry. If it doesn't exist and we are the
c910b4d9 1827 * target, and the sender isn't 0.0.0.0, go ahead and create one.
b0d623f7
A
1828 * Callee holds a reference on the route and returns with the route
1829 * entry locked, upon success.
91447636 1830 */
c910b4d9 1831 error = arp_lookup_route(&sender_ip->sin_addr,
6d2010ae 1832 (target_ip->sin_addr.s_addr == best_ia_sin.sin_addr.s_addr &&
c910b4d9 1833 sender_ip->sin_addr.s_addr != 0), 0, &route, ifp->if_index);
b0d623f7 1834
0a7de745 1835 if (error == 0) {
b0d623f7 1836 RT_LOCK_ASSERT_HELD(route);
0a7de745 1837 }
b0d623f7 1838
39236c6e 1839 if (error || route == NULL || route->rt_gateway == NULL) {
0a7de745 1840 if (arpop != ARPOP_REQUEST) {
2d21ac55 1841 goto respond;
0a7de745 1842 }
39236c6e
A
1843
1844 if (arp_sendllconflict && send_conflicting_probes != 0 &&
1845 (ifp->if_eflags & IFEF_ARPLL) &&
1846 IN_LINKLOCAL(ntohl(target_ip->sin_addr.s_addr)) &&
1847 sender_ip->sin_addr.s_addr == INADDR_ANY) {
91447636 1848 /*
39236c6e
A
1849 * Verify this ARP probe doesn't conflict with
1850 * an IPv4LL we know of on another interface.
91447636 1851 */
b0d623f7
A
1852 if (route != NULL) {
1853 RT_REMREF_LOCKED(route);
1854 RT_UNLOCK(route);
1855 route = NULL;
1856 }
1857 /*
1858 * Callee holds a reference on the route and returns
1859 * with the route entry locked, upon success.
1860 */
c910b4d9
A
1861 error = arp_lookup_route(&target_ip->sin_addr, 0, 0,
1862 &route, ifp->if_index);
b0d623f7 1863
39236c6e 1864 if (error != 0 || route == NULL ||
0a7de745 1865 route->rt_gateway == NULL) {
39236c6e 1866 goto respond;
0a7de745 1867 }
b0d623f7 1868
39236c6e
A
1869 RT_LOCK_ASSERT_HELD(route);
1870
1871 gateway = SDL(route->rt_gateway);
1872 if (route->rt_ifp != ifp && gateway->sdl_alen != 0 &&
1873 (gateway->sdl_alen != sender_hw->sdl_alen ||
1874 bcmp(CONST_LLADDR(gateway), CONST_LLADDR(sender_hw),
1875 gateway->sdl_alen) != 0)) {
1876 /*
1877 * A node is probing for an IPv4LL we know
1878 * exists on a different interface. We respond
1879 * with a conflicting probe to force the new
1880 * device to pick a different IPv4LL address.
1881 */
1882 if (arp_verbose || log_arp_warnings) {
1883 log(LOG_INFO, "arp: %s on %s sent "
1884 "probe for %s, already on %s\n",
1885 sdl_addr_to_hex(sender_hw, buf,
0a7de745 1886 sizeof(buf)), if_name(ifp),
39236c6e
A
1887 inet_ntop(AF_INET,
1888 &target_ip->sin_addr, ipv4str,
0a7de745 1889 sizeof(ipv4str)),
39236c6e
A
1890 if_name(route->rt_ifp));
1891 log(LOG_INFO, "arp: sending "
1892 "conflicting probe to %s on %s\n",
1893 sdl_addr_to_hex(sender_hw, buf,
0a7de745 1894 sizeof(buf)), if_name(ifp));
6d2010ae 1895 }
39236c6e 1896 /* Mark use timestamp */
0a7de745 1897 if (route->rt_llinfo != NULL) {
39236c6e 1898 arp_llreach_use(route->rt_llinfo);
0a7de745 1899 }
39236c6e
A
1900 /* We're done with the route */
1901 RT_REMREF_LOCKED(route);
1902 RT_UNLOCK(route);
1903 route = NULL;
1904 /*
1905 * Send a conservative unicast "ARP probe".
1906 * This should force the other device to pick
1907 * a new number. This will not force the
1908 * device to pick a new number if the device
1909 * has already assigned that number. This will
1910 * not imply to the device that we own that
1911 * address. The link address is always
1912 * present; it's never freed.
1913 */
1914 ifnet_lock_shared(ifp);
1915 ifa = ifp->if_lladdr;
1916 IFA_ADDREF(ifa);
1917 ifnet_lock_done(ifp);
1918 dlil_send_arp_internal(ifp, ARPOP_REQUEST,
1919 SDL(ifa->ifa_addr),
1920 (const struct sockaddr *)sender_ip,
1921 sender_hw,
1922 (const struct sockaddr *)target_ip);
1923 IFA_REMREF(ifa);
1924 ifa = NULL;
39037602 1925 atomic_add_32(&arpstat.txconflicts, 1);
91447636 1926 }
2d21ac55 1927 goto respond;
39236c6e
A
1928 } else if (keep_announcements != 0 &&
1929 target_ip->sin_addr.s_addr == sender_ip->sin_addr.s_addr) {
1930 /*
1931 * Don't create entry if link-local address and
1932 * link-local is disabled
1933 */
1934 if (!IN_LINKLOCAL(ntohl(sender_ip->sin_addr.s_addr)) ||
1935 (ifp->if_eflags & IFEF_ARPLL)) {
b0d623f7
A
1936 if (route != NULL) {
1937 RT_REMREF_LOCKED(route);
1938 RT_UNLOCK(route);
1939 route = NULL;
1940 }
1941 /*
1942 * Callee holds a reference on the route and
1943 * returns with the route entry locked, upon
1944 * success.
1945 */
c910b4d9
A
1946 error = arp_lookup_route(&sender_ip->sin_addr,
1947 1, 0, &route, ifp->if_index);
b0d623f7 1948
0a7de745 1949 if (error == 0) {
b0d623f7 1950 RT_LOCK_ASSERT_HELD(route);
0a7de745 1951 }
b0d623f7 1952
39236c6e 1953 if (error == 0 && route != NULL &&
0a7de745 1954 route->rt_gateway != NULL) {
2d21ac55 1955 created_announcement = 1;
0a7de745 1956 }
2d21ac55 1957 }
0a7de745 1958 if (created_announcement == 0) {
2d21ac55 1959 goto respond;
0a7de745 1960 }
2d21ac55
A
1961 } else {
1962 goto respond;
91447636 1963 }
91447636 1964 }
b0d623f7
A
1965
1966 RT_LOCK_ASSERT_HELD(route);
6d2010ae
A
1967 VERIFY(route->rt_expire == 0 || route->rt_rmx.rmx_expire != 0);
1968 VERIFY(route->rt_expire != 0 || route->rt_rmx.rmx_expire == 0);
39236c6e 1969
91447636 1970 gateway = SDL(route->rt_gateway);
b7266188 1971 if (!bridged && route->rt_ifp != ifp) {
39236c6e
A
1972 if (!IN_LINKLOCAL(ntohl(sender_ip->sin_addr.s_addr)) ||
1973 !(ifp->if_eflags & IFEF_ARPLL)) {
0a7de745 1974 if (arp_verbose || log_arp_warnings) {
39236c6e
A
1975 log(LOG_ERR, "arp: %s is on %s but got "
1976 "reply from %s on %s\n",
1977 inet_ntop(AF_INET, &sender_ip->sin_addr,
0a7de745 1978 ipv4str, sizeof(ipv4str)),
39236c6e
A
1979 if_name(route->rt_ifp),
1980 sdl_addr_to_hex(sender_hw, buf,
0a7de745
A
1981 sizeof(buf)), if_name(ifp));
1982 }
91447636 1983 goto respond;
39236c6e 1984 } else {
91447636 1985 /* Don't change a permanent address */
0a7de745 1986 if (route->rt_expire == 0) {
91447636 1987 goto respond;
0a7de745 1988 }
b0d623f7
A
1989
1990 /*
1991 * We're about to check and/or change the route's ifp
1992 * and ifa, so do the lock dance: drop rt_lock, hold
1993 * rnh_lock and re-hold rt_lock to avoid violating the
1994 * lock ordering. We have an extra reference on the
1995 * route, so it won't go away while we do this.
1996 */
1997 RT_UNLOCK(route);
1998 lck_mtx_lock(rnh_lock);
1999 RT_LOCK(route);
91447636 2000 /*
b0d623f7
A
2001 * Don't change the cloned route away from the
2002 * parent's interface if the address did resolve
2003 * or if the route is defunct. rt_ifp on both
2004 * the parent and the clone can now be freely
2005 * accessed now that we have acquired rnh_lock.
91447636 2006 */
b0d623f7 2007 gateway = SDL(route->rt_gateway);
39236c6e
A
2008 if ((gateway->sdl_alen != 0 &&
2009 route->rt_parent != NULL &&
b0d623f7
A
2010 route->rt_parent->rt_ifp == route->rt_ifp) ||
2011 (route->rt_flags & RTF_CONDEMNED)) {
2012 RT_REMREF_LOCKED(route);
2013 RT_UNLOCK(route);
2014 route = NULL;
2015 lck_mtx_unlock(rnh_lock);
91447636
A
2016 goto respond;
2017 }
6d2010ae
A
2018 if (route->rt_ifp != ifp) {
2019 /*
2020 * Purge any link-layer info caching.
2021 */
0a7de745 2022 if (route->rt_llinfo_purge != NULL) {
6d2010ae 2023 route->rt_llinfo_purge(route);
0a7de745 2024 }
6d2010ae
A
2025
2026 /* Adjust route ref count for the interfaces */
2027 if (route->rt_if_ref_fn != NULL) {
2028 route->rt_if_ref_fn(ifp, 1);
2029 route->rt_if_ref_fn(route->rt_ifp, -1);
2030 }
d1ecb069 2031 }
91447636
A
2032 /* Change the interface when the existing route is on */
2033 route->rt_ifp = ifp;
39236c6e
A
2034 /*
2035 * If rmx_mtu is not locked, update it
2036 * to the MTU used by the new interface.
2037 */
d9a64523 2038 if (!(route->rt_rmx.rmx_locks & RTV_MTU)) {
39236c6e 2039 route->rt_rmx.rmx_mtu = route->rt_ifp->if_mtu;
d9a64523
A
2040 if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
2041 route->rt_rmx.rmx_mtu = IN6_LINKMTU(route->rt_ifp);
2042 /* Further adjust the size for CLAT46 expansion */
2043 route->rt_rmx.rmx_mtu -= CLAT46_HDR_EXPANSION_OVERHD;
2044 }
2045 }
39236c6e 2046
91447636
A
2047 rtsetifa(route, &best_ia->ia_ifa);
2048 gateway->sdl_index = ifp->if_index;
b0d623f7
A
2049 RT_UNLOCK(route);
2050 lck_mtx_unlock(rnh_lock);
2051 RT_LOCK(route);
2052 /* Don't bother if the route is down */
0a7de745 2053 if (!(route->rt_flags & RTF_UP)) {
b0d623f7 2054 goto respond;
0a7de745 2055 }
b0d623f7
A
2056 /* Refresh gateway pointer */
2057 gateway = SDL(route->rt_gateway);
91447636 2058 }
b0d623f7 2059 RT_LOCK_ASSERT_HELD(route);
91447636 2060 }
b0d623f7 2061
39236c6e
A
2062 if (gateway->sdl_alen != 0 && bcmp(LLADDR(gateway),
2063 CONST_LLADDR(sender_hw), gateway->sdl_alen) != 0) {
2064 if (route->rt_expire != 0 &&
2065 (arp_verbose || log_arp_warnings)) {
91447636 2066 char buf2[3 * MAX_HW_LEN];
39236c6e 2067 log(LOG_INFO, "arp: %s moved from %s to %s on %s\n",
2d21ac55 2068 inet_ntop(AF_INET, &sender_ip->sin_addr, ipv4str,
0a7de745
A
2069 sizeof(ipv4str)),
2070 sdl_addr_to_hex(gateway, buf, sizeof(buf)),
2071 sdl_addr_to_hex(sender_hw, buf2, sizeof(buf2)),
39236c6e
A
2072 if_name(ifp));
2073 } else if (route->rt_expire == 0) {
2074 if (arp_verbose || log_arp_warnings) {
2d21ac55 2075 log(LOG_ERR, "arp: %s attempts to modify "
39236c6e 2076 "permanent entry for %s on %s\n",
2d21ac55 2077 sdl_addr_to_hex(sender_hw, buf,
0a7de745 2078 sizeof(buf)),
2d21ac55 2079 inet_ntop(AF_INET, &sender_ip->sin_addr,
0a7de745 2080 ipv4str, sizeof(ipv4str)),
39236c6e 2081 if_name(ifp));
2d21ac55 2082 }
91447636
A
2083 goto respond;
2084 }
2085 }
b0d623f7 2086
91447636
A
2087 /* Copy the sender hardware address in to the route's gateway address */
2088 gateway->sdl_alen = sender_hw->sdl_alen;
2089 bcopy(CONST_LLADDR(sender_hw), LLADDR(gateway), gateway->sdl_alen);
b0d623f7 2090
91447636 2091 /* Update the expire time for the route and clear the reject flag */
0a7de745 2092 if (route->rt_expire != 0) {
39236c6e 2093 rt_setexpire(route, net_uptime() + arpt_keep);
0a7de745 2094 }
91447636 2095 route->rt_flags &= ~RTF_REJECT;
b0d623f7 2096
6d2010ae
A
2097 /* cache the gateway (sender HW) address */
2098 arp_llreach_alloc(route, ifp, LLADDR(gateway), gateway->sdl_alen,
5ba3f43e 2099 (arpop == ARPOP_REPLY), &rt_evcode);
6d2010ae 2100
b0d623f7 2101 llinfo = route->rt_llinfo;
fe8ab488
A
2102 /* send a notification that the route is back up */
2103 if (ifp->if_addrlen == IF_LLREACH_MAXLEN &&
39037602 2104 route->rt_flags & RTF_ROUTER &&
fe8ab488
A
2105 llinfo->la_flags & LLINFO_RTRFAIL_EVTSENT) {
2106 struct kev_msg ev_msg;
3e170ce0 2107 struct kev_in_arpalive in_arpalive;
fe8ab488
A
2108
2109 llinfo->la_flags &= ~LLINFO_RTRFAIL_EVTSENT;
2110 RT_UNLOCK(route);
2111 bzero(&ev_msg, sizeof(ev_msg));
2112 bzero(&in_arpalive, sizeof(in_arpalive));
2113 in_arpalive.link_data.if_family = ifp->if_family;
2114 in_arpalive.link_data.if_unit = ifp->if_unit;
2115 strlcpy(in_arpalive.link_data.if_name, ifp->if_name, IFNAMSIZ);
2116 ev_msg.vendor_code = KEV_VENDOR_APPLE;
2117 ev_msg.kev_class = KEV_NETWORK_CLASS;
2118 ev_msg.kev_subclass = KEV_INET_SUBCLASS;
2119 ev_msg.event_code = KEV_INET_ARPRTRALIVE;
2120 ev_msg.dv[0].data_ptr = &in_arpalive;
39037602
A
2121 ev_msg.dv[0].data_length = sizeof(struct kev_in_arpalive);
2122 dlil_post_complete_msg(NULL, &ev_msg);
fe8ab488
A
2123 RT_LOCK(route);
2124 }
39037602 2125 /* Update the llinfo, send out all queued packets at once */
91447636 2126 llinfo->la_asked = 0;
39037602 2127 llinfo->la_flags &= ~LLINFO_PROBING;
5ba3f43e
A
2128 llinfo->la_prbreq_cnt = 0;
2129
2130 if (rt_evcode) {
2131 /*
2132 * Enqueue work item to invoke callback for this route entry
2133 */
2134 route_event_enqueue_nwk_wq_entry(route, NULL, rt_evcode, NULL, TRUE);
2135
2136 if (route->rt_flags & RTF_ROUTER) {
2137 struct radix_node_head *rnh = NULL;
2138 struct route_event rt_ev;
2139 route_event_init(&rt_ev, route, NULL, rt_evcode);
2140 /*
2141 * We already have a reference on rt. The function
2142 * frees it before returning.
2143 */
2144 RT_UNLOCK(route);
2145 lck_mtx_lock(rnh_lock);
2146 rnh = rt_tables[AF_INET];
2147
0a7de745 2148 if (rnh != NULL) {
5ba3f43e
A
2149 (void) rnh->rnh_walktree(rnh, route_event_walktree,
2150 (void *)&rt_ev);
0a7de745 2151 }
5ba3f43e
A
2152 lck_mtx_unlock(rnh_lock);
2153 RT_LOCK(route);
2154 }
2155 }
2156
39037602
A
2157 if (!qempty(&llinfo->la_holdq)) {
2158 uint32_t held;
cb323159
A
2159 struct mbuf *m0;
2160 classq_pkt_t pkt = CLASSQ_PKT_INITIALIZER(pkt);
2161
2162 _getq_all(&llinfo->la_holdq, &pkt, NULL, &held, NULL);
2163 m0 = pkt.cp_mbuf;
39037602
A
2164 if (arp_verbose) {
2165 log(LOG_DEBUG, "%s: sending %u held packets\n",
2166 __func__, held);
2167 }
2168 atomic_add_32(&arpstat.held, -held);
2169 VERIFY(qempty(&llinfo->la_holdq));
b0d623f7 2170 RT_UNLOCK(route);
39236c6e
A
2171 dlil_output(ifp, PF_INET, m0, (caddr_t)route,
2172 rt_key(route), 0, NULL);
b0d623f7
A
2173 RT_REMREF(route);
2174 route = NULL;
91447636 2175 }
b0d623f7 2176
91447636 2177respond:
b0d623f7 2178 if (route != NULL) {
6d2010ae 2179 /* Mark use timestamp if we're going to send a reply */
0a7de745 2180 if (arpop == ARPOP_REQUEST && route->rt_llinfo != NULL) {
6d2010ae 2181 arp_llreach_use(route->rt_llinfo);
0a7de745 2182 }
b0d623f7
A
2183 RT_REMREF_LOCKED(route);
2184 RT_UNLOCK(route);
2185 route = NULL;
91447636 2186 }
b0d623f7 2187
0a7de745 2188 if (arpop != ARPOP_REQUEST) {
b0d623f7 2189 goto done;
0a7de745 2190 }
b0d623f7 2191
39037602 2192 /* See comments at the beginning of this routine */
39236c6e
A
2193 arpstat.rxrequests++;
2194
91447636 2195 /* If we are not the target, check if we should proxy */
6d2010ae 2196 if (target_ip->sin_addr.s_addr != best_ia_sin.sin_addr.s_addr) {
b0d623f7
A
2197 /*
2198 * Find a proxy route; callee holds a reference on the
2199 * route and returns with the route entry locked, upon
2200 * success.
2201 */
c910b4d9
A
2202 error = arp_lookup_route(&target_ip->sin_addr, 0, SIN_PROXY,
2203 &route, ifp->if_index);
b0d623f7
A
2204
2205 if (error == 0) {
2206 RT_LOCK_ASSERT_HELD(route);
b7266188
A
2207 /*
2208 * Return proxied ARP replies only on the interface
2209 * or bridge cluster where this network resides.
2210 * Otherwise we may conflict with the host we are
2211 * proxying for.
2212 */
2213 if (route->rt_ifp != ifp &&
39236c6e
A
2214 (route->rt_ifp->if_bridge != ifp->if_bridge ||
2215 ifp->if_bridge == NULL)) {
2216 RT_REMREF_LOCKED(route);
2217 RT_UNLOCK(route);
2218 goto done;
2219 }
b0d623f7
A
2220 proxied = *SDL(route->rt_gateway);
2221 target_hw = &proxied;
2222 } else {
2223 /*
2224 * We don't have a route entry indicating we should
2225 * use proxy. If we aren't supposed to proxy all,
2226 * we are done.
2227 */
0a7de745 2228 if (!arp_proxyall) {
b0d623f7 2229 goto done;
0a7de745 2230 }
b0d623f7
A
2231
2232 /*
2233 * See if we have a route to the target ip before
2234 * we proxy it.
2235 */
2236 route = rtalloc1_scoped((struct sockaddr *)
2237 (size_t)target_ip, 0, 0, ifp->if_index);
0a7de745 2238 if (!route) {
b0d623f7 2239 goto done;
0a7de745 2240 }
b0d623f7 2241
91447636
A
2242 /*
2243 * Don't proxy for hosts already on the same interface.
2244 */
b0d623f7 2245 RT_LOCK(route);
91447636 2246 if (route->rt_ifp == ifp) {
b0d623f7
A
2247 RT_UNLOCK(route);
2248 rtfree(route);
2249 goto done;
91447636
A
2250 }
2251 }
6d2010ae 2252 /* Mark use timestamp */
0a7de745 2253 if (route->rt_llinfo != NULL) {
6d2010ae 2254 arp_llreach_use(route->rt_llinfo);
0a7de745 2255 }
b0d623f7
A
2256 RT_REMREF_LOCKED(route);
2257 RT_UNLOCK(route);
91447636 2258 }
b0d623f7
A
2259
2260 dlil_send_arp(ifp, ARPOP_REPLY,
39236c6e
A
2261 target_hw, (const struct sockaddr *)target_ip,
2262 sender_hw, (const struct sockaddr *)sender_ip, 0);
b0d623f7
A
2263
2264done:
0a7de745 2265 if (best_ia != NULL) {
6d2010ae 2266 IFA_REMREF(&best_ia->ia_ifa);
0a7de745
A
2267 }
2268 return 0;
91447636
A
2269}
2270
2271void
6d2010ae 2272arp_ifinit(struct ifnet *ifp, struct ifaddr *ifa)
91447636 2273{
6d2010ae
A
2274 struct sockaddr *sa;
2275
2276 IFA_LOCK(ifa);
91447636
A
2277 ifa->ifa_rtrequest = arp_rtrequest;
2278 ifa->ifa_flags |= RTF_CLONING;
6d2010ae
A
2279 sa = ifa->ifa_addr;
2280 IFA_UNLOCK(ifa);
316670eb 2281 dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa, NULL, sa, 0);
91447636 2282}
39236c6e
A
2283
2284static int
2285arp_getstat SYSCTL_HANDLER_ARGS
2286{
2287#pragma unused(oidp, arg1, arg2)
0a7de745
A
2288 if (req->oldptr == USER_ADDR_NULL) {
2289 req->oldlen = (size_t)sizeof(struct arpstat);
2290 }
39236c6e 2291
0a7de745 2292 return SYSCTL_OUT(req, &arpstat, MIN(sizeof(arpstat), req->oldlen));
39236c6e 2293}