1 /*
2 * Copyright (c) 2004-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1989, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 */
61
62 #include <kern/debug.h>
63 #include <netinet/in_arp.h>
64 #include <sys/types.h>
65 #include <sys/param.h>
66 #include <sys/kernel_types.h>
67 #include <sys/syslog.h>
68 #include <sys/systm.h>
69 #include <sys/time.h>
70 #include <sys/kernel.h>
71 #include <sys/mbuf.h>
72 #include <sys/sysctl.h>
73 #include <sys/mcache.h>
74 #include <sys/protosw.h>
75 #include <string.h>
76 #include <net/if_arp.h>
77 #include <net/if_dl.h>
78 #include <net/dlil.h>
79 #include <net/if_types.h>
80 #include <net/if_llreach.h>
81 #include <net/route.h>
82
83 #include <netinet/if_ether.h>
84 #include <netinet/in_var.h>
85 #include <kern/zalloc.h>
86
87 #define CONST_LLADDR(s) ((const u_char*)((s)->sdl_data + (s)->sdl_nlen))
88
89 static const size_t MAX_HW_LEN = 10;
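/*
 * Illustrative sketch (not part of the code below; "sdl" is a placeholder
 * name): CONST_LLADDR() returns the link-layer address stored after the
 * interface name inside sdl_data.  For a sockaddr_dl describing "en0"
 * followed by a 6-byte MAC, sdl_nlen is 3, so the macro yields
 * &sdl_data[3], the first byte of the MAC:
 *
 *      const struct sockaddr_dl *sdl;           (e.g. an ARP sender_hw)
 *      const u_char *mac = CONST_LLADDR(sdl);   (sdl_data + sdl_nlen)
 */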
90
91 /*
92 * Synchronization notes:
93 *
94 * The global list of ARP entries is stored in llinfo_arp; an entry
95 * gets inserted into the list when the route is created and gets
96 * removed from the list when it is deleted; this is done as part
97 * of RTM_ADD/RTM_RESOLVE/RTM_DELETE in arp_rtrequest().
98 *
99 * Because rnh_lock and rt_lock for the entry are held during those
100 * operations, the same locks (and thus lock ordering) must be used
101 * elsewhere to access the relevant data structure fields:
102 *
103 * la_le.{le_next,le_prev}, la_rt
104 *
105 * - Routing lock (rnh_lock)
106 *
107 * la_hold, la_asked, la_llreach, la_lastused, la_flags
108 *
109 * - Routing entry lock (rt_lock)
110 *
111 * Due to the dependency on rt_lock, llinfo_arp has the same lifetime
112 * as the route entry itself. When a route is deleted (RTM_DELETE),
113 * it is simply removed from the global list but the memory is not
114 * freed until the route itself is freed.
115 */
116 struct llinfo_arp {
117 /*
118 * The following are protected by rnh_lock
119 */
120 LIST_ENTRY(llinfo_arp) la_le;
121 struct rtentry *la_rt;
122 /*
123 * The following are protected by rt_lock
124 */
125 struct mbuf *la_hold; /* last packet until resolved/timeout */
126 struct if_llreach *la_llreach; /* link-layer reachability record */
127 u_int64_t la_lastused; /* last used timestamp */
128 u_int32_t la_asked; /* # of requests sent */
129 u_int32_t la_maxtries; /* retry limit */
130 uint32_t la_flags;
131 #define LLINFO_RTRFAIL_EVTSENT 0x1 /* sent an ARP event */
132 };
133 static LIST_HEAD(, llinfo_arp) llinfo_arp;
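/*
 * Minimal sketch of the lock ordering described above; it mirrors what
 * in_arpdrain() and arp_timeout() below actually do.  Take rnh_lock to
 * walk the list and touch la_le/la_rt, then take rt_lock per entry to
 * touch la_hold, la_asked, la_llreach, la_lastused and la_flags:
 *
 *      struct llinfo_arp *la, *next;
 *
 *      lck_mtx_lock(rnh_lock);
 *      la = llinfo_arp.lh_first;
 *      while ((next = la) != NULL) {
 *              la = la->la_le.le_next;
 *              RT_LOCK(next->la_rt);
 *              (inspect or update la_hold, la_asked, ...)
 *              RT_UNLOCK(next->la_rt);
 *      }
 *      lck_mtx_unlock(rnh_lock);
 */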
134
135 static int arp_timeout_run; /* arp_timeout is scheduled to run */
136 static void arp_timeout(void *);
137 static void arp_sched_timeout(struct timeval *);
138
139 static void arptfree(struct llinfo_arp *, void *);
140 static errno_t arp_lookup_route(const struct in_addr *, int,
141 int, route_t *, unsigned int);
142 static int arp_getstat SYSCTL_HANDLER_ARGS;
143
144 static struct llinfo_arp *arp_llinfo_alloc(int);
145 static void arp_llinfo_free(void *);
146 static void arp_llinfo_purge(struct rtentry *);
147 static void arp_llinfo_get_ri(struct rtentry *, struct rt_reach_info *);
148 static void arp_llinfo_get_iflri(struct rtentry *, struct ifnet_llreach_info *);
149 static void arp_llinfo_refresh(struct rtentry *);
150
151 static __inline void arp_llreach_use(struct llinfo_arp *);
152 static __inline int arp_llreach_reachable(struct llinfo_arp *);
153 static void arp_llreach_alloc(struct rtentry *, struct ifnet *, void *,
154 unsigned int, boolean_t);
155
156 extern int tvtohz(struct timeval *);
157
158 static int arpinit_done;
159
160 SYSCTL_DECL(_net_link_ether);
161 SYSCTL_NODE(_net_link_ether, PF_INET, inet, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "");
162
163 /* timer values */
164 static int arpt_prune = (5*60*1); /* walk list every 5 minutes */
165 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, prune_intvl,
166 CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_prune, 0, "");
167
168 static int arpt_keep = (20*60); /* once resolved, good for 20 more minutes */
169 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, max_age,
170 CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_keep, 0, "");
171
172 static int arpt_down = 20; /* once declared down, don't send for 20 sec */
173 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, host_down_time,
174 CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_down, 0, "");
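/*
 * Illustrative (hypothetical) userland sketch, not part of this file: the
 * timer knobs above are exported under net.link.ether.inet and can be read
 * (or, with privileges, written) via sysctlbyname(3):
 *
 *      #include <sys/types.h>
 *      #include <sys/sysctl.h>
 *      #include <stdio.h>
 *
 *      int
 *      main(void)
 *      {
 *              int max_age;
 *              size_t len = sizeof (max_age);
 *
 *              if (sysctlbyname("net.link.ether.inet.max_age",
 *                  &max_age, &len, NULL, 0) == 0)
 *                      printf("resolved ARP entries live %d secs\n",
 *                          max_age);
 *              return (0);
 *      }
 */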
175
176 static int arp_llreach_base = (LL_BASE_REACHABLE / 1000); /* seconds */
177 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, arp_llreach_base,
178 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_llreach_base, LL_BASE_REACHABLE,
179 "default ARP link-layer reachability max lifetime (in seconds)");
180
181 #define ARP_UNICAST_LIMIT 5 /* # of probes until ARP refresh broadcast */
182 static u_int32_t arp_unicast_lim = ARP_UNICAST_LIMIT;
183 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, arp_unicast_lim,
184 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_unicast_lim, ARP_UNICAST_LIMIT,
185 "number of unicast ARP refresh probes before using broadcast");
186
187 static u_int32_t arp_maxtries = 5;
188 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxtries,
189 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxtries, 0, "");
190
191 static int useloopback = 1; /* use loopback interface for local traffic */
192 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, useloopback,
193 CTLFLAG_RW | CTLFLAG_LOCKED, &useloopback, 0, "");
194
195 static int arp_proxyall = 0;
196 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, proxyall,
197 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_proxyall, 0, "");
198
199 static int arp_sendllconflict = 0;
200 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, sendllconflict,
201 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_sendllconflict, 0, "");
202
203 static int log_arp_warnings = 0; /* Thread safe: no accumulated state */
204 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_warnings,
205 CTLFLAG_RW | CTLFLAG_LOCKED,
206 &log_arp_warnings, 0,
207 "log arp warning messages");
208
209 static int keep_announcements = 1; /* Thread safe: no aging of state */
210 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, keep_announcements,
211 CTLFLAG_RW | CTLFLAG_LOCKED,
212 &keep_announcements, 0,
213 "keep arp announcements");
214
215 static int send_conflicting_probes = 1; /* Thread safe: no accumulated state */
216 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, send_conflicting_probes,
217 CTLFLAG_RW | CTLFLAG_LOCKED,
218 &send_conflicting_probes, 0,
219 "send conflicting link-local arp probes");
220
221 static int arp_verbose;
222 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, verbose,
223 CTLFLAG_RW | CTLFLAG_LOCKED, &arp_verbose, 0, "");
224
225 struct arpstat arpstat;
226 SYSCTL_PROC(_net_link_ether_inet, OID_AUTO, stats,
227 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
228 0, 0, arp_getstat, "S,arpstat",
229 "ARP statistics (struct arpstat, net/if_arp.h)");
230
231 /* these are deprecated (read-only); use net.link.generic.system node instead */
232 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, apple_hwcksum_tx,
233 CTLFLAG_RD | CTLFLAG_LOCKED, &hwcksum_tx, 0, "");
234
235 SYSCTL_INT(_net_link_ether_inet, OID_AUTO, apple_hwcksum_rx,
236 CTLFLAG_RD | CTLFLAG_LOCKED, &hwcksum_rx, 0, "");
237
238 static struct zone *llinfo_arp_zone;
239 #define LLINFO_ARP_ZONE_MAX 256 /* maximum elements in zone */
240 #define LLINFO_ARP_ZONE_NAME "llinfo_arp" /* name for zone */
241
242 void
243 arp_init(void)
244 {
245 VERIFY(!arpinit_done);
246
247 LIST_INIT(&llinfo_arp);
248
249 llinfo_arp_zone = zinit(sizeof (struct llinfo_arp),
250 LLINFO_ARP_ZONE_MAX * sizeof (struct llinfo_arp), 0,
251 LLINFO_ARP_ZONE_NAME);
252 if (llinfo_arp_zone == NULL)
253 panic("%s: failed allocating llinfo_arp_zone", __func__);
254
255 zone_change(llinfo_arp_zone, Z_EXPAND, TRUE);
256 zone_change(llinfo_arp_zone, Z_CALLERACCT, FALSE);
257
258 arpinit_done = 1;
259 }
260
261 static struct llinfo_arp *
262 arp_llinfo_alloc(int how)
263 {
264 struct llinfo_arp *la;
265
266 la = (how == M_WAITOK) ? zalloc(llinfo_arp_zone) :
267 zalloc_noblock(llinfo_arp_zone);
268 if (la != NULL)
269 bzero(la, sizeof (*la));
270
271 return (la);
272 }
273
274 static void
275 arp_llinfo_free(void *arg)
276 {
277 struct llinfo_arp *la = arg;
278
279 if (la->la_le.le_next != NULL || la->la_le.le_prev != NULL) {
280 panic("%s: trying to free %p when it is in use", __func__, la);
281 /* NOTREACHED */
282 }
283
284 /* Just in case there's anything there, free it */
285 if (la->la_hold != NULL) {
286 m_freem(la->la_hold);
287 la->la_hold = NULL;
288 arpstat.purged++;
289 }
290
291 /* Purge any link-layer info caching */
292 VERIFY(la->la_rt->rt_llinfo == la);
293 if (la->la_rt->rt_llinfo_purge != NULL)
294 la->la_rt->rt_llinfo_purge(la->la_rt);
295
296 zfree(llinfo_arp_zone, la);
297 }
298
299 static void
300 arp_llinfo_purge(struct rtentry *rt)
301 {
302 struct llinfo_arp *la = rt->rt_llinfo;
303
304 RT_LOCK_ASSERT_HELD(rt);
305 VERIFY(rt->rt_llinfo_purge == arp_llinfo_purge && la != NULL);
306
307 if (la->la_llreach != NULL) {
308 RT_CONVERT_LOCK(rt);
309 ifnet_llreach_free(la->la_llreach);
310 la->la_llreach = NULL;
311 }
312 la->la_lastused = 0;
313 }
314
315 static void
316 arp_llinfo_get_ri(struct rtentry *rt, struct rt_reach_info *ri)
317 {
318 struct llinfo_arp *la = rt->rt_llinfo;
319 struct if_llreach *lr = la->la_llreach;
320
321 if (lr == NULL) {
322 bzero(ri, sizeof (*ri));
323 ri->ri_rssi = IFNET_RSSI_UNKNOWN;
324 ri->ri_lqm = IFNET_LQM_THRESH_OFF;
325 ri->ri_npm = IFNET_NPM_THRESH_UNKNOWN;
326 } else {
327 IFLR_LOCK(lr);
328 /* Export to rt_reach_info structure */
329 ifnet_lr2ri(lr, ri);
330 /* Export ARP send expiration (calendar) time */
331 ri->ri_snd_expire =
332 ifnet_llreach_up2calexp(lr, la->la_lastused);
333 IFLR_UNLOCK(lr);
334 }
335 }
336
337 static void
338 arp_llinfo_get_iflri(struct rtentry *rt, struct ifnet_llreach_info *iflri)
339 {
340 struct llinfo_arp *la = rt->rt_llinfo;
341 struct if_llreach *lr = la->la_llreach;
342
343 if (lr == NULL) {
344 bzero(iflri, sizeof (*iflri));
345 iflri->iflri_rssi = IFNET_RSSI_UNKNOWN;
346 iflri->iflri_lqm = IFNET_LQM_THRESH_OFF;
347 iflri->iflri_npm = IFNET_NPM_THRESH_UNKNOWN;
348 } else {
349 IFLR_LOCK(lr);
350 /* Export to ifnet_llreach_info structure */
351 ifnet_lr2iflri(lr, iflri);
352 /* Export ARP send expiration (uptime) time */
353 iflri->iflri_snd_expire =
354 ifnet_llreach_up2upexp(lr, la->la_lastused);
355 IFLR_UNLOCK(lr);
356 }
357 }
358
359 static void
360 arp_llinfo_refresh(struct rtentry *rt)
361 {
362 uint64_t timenow = net_uptime();
363 /*
364 * If the route entry is permanent, or if its expiry is already
365 * less than timenow plus the extra time taken for a unicast
366 * probe, we can't expedite the refresh
367 */
368 if ((rt->rt_expire == 0) ||
369 (rt->rt_flags & RTF_STATIC) ||
370 !(rt->rt_flags & RTF_LLINFO)) {
371 return;
372 }
373
374 if (rt->rt_expire > timenow + arp_unicast_lim) {
375 rt->rt_expire = timenow + arp_unicast_lim;
376 }
377 return;
378 }
379
380 void
381 arp_llreach_set_reachable(struct ifnet *ifp, void *addr, unsigned int alen)
382 {
383 /* Nothing more to do if it's disabled */
384 if (arp_llreach_base == 0)
385 return;
386
387 ifnet_llreach_set_reachable(ifp, ETHERTYPE_IP, addr, alen);
388 }
389
390 static __inline void
391 arp_llreach_use(struct llinfo_arp *la)
392 {
393 if (la->la_llreach != NULL)
394 la->la_lastused = net_uptime();
395 }
396
397 static __inline int
398 arp_llreach_reachable(struct llinfo_arp *la)
399 {
400 struct if_llreach *lr;
401 const char *why = NULL;
402
403 /* Nothing more to do if it's disabled; pretend it's reachable */
404 if (arp_llreach_base == 0)
405 return (1);
406
407 if ((lr = la->la_llreach) == NULL) {
408 /*
409 * Link-layer reachability record isn't present for this
410 * ARP entry; pretend it's reachable and use it as is.
411 */
412 return (1);
413 } else if (ifnet_llreach_reachable(lr)) {
414 /*
415 * Record is present, it's not shared with other ARP
416 * entries and a packet has recently been received
417 * from the remote host; consider it reachable.
418 */
419 if (lr->lr_reqcnt == 1)
420 return (1);
421
422 /* Prime it up, if this is the first time */
423 if (la->la_lastused == 0) {
424 VERIFY(la->la_llreach != NULL);
425 arp_llreach_use(la);
426 }
427
428 /*
429 * Record is present and shared with one or more ARP
430 * entries, and a packet has recently been received
431 * from the remote host. Since it's shared by more
432 * than one IP addresses, we can't rely on the link-
433 * layer reachability alone; consider it reachable if
434 * this ARP entry has been used "recently."
435 */
436 if (ifnet_llreach_reachable_delta(lr, la->la_lastused))
437 return (1);
438
439 why = "has alias(es) and hasn't been used in a while";
440 } else {
441 why = "haven't heard from it in a while";
442 }
443
444 if (arp_verbose > 1) {
445 char tmp[MAX_IPv4_STR_LEN];
446 u_int64_t now = net_uptime();
447
448 log(LOG_DEBUG, "%s: ARP probe(s) needed for %s; "
449 "%s [lastused %lld, lastrcvd %lld] secs ago\n",
450 if_name(lr->lr_ifp), inet_ntop(AF_INET,
451 &SIN(rt_key(la->la_rt))->sin_addr, tmp, sizeof (tmp)), why,
452 (la->la_lastused ? (int64_t)(now - la->la_lastused) : -1),
453 (lr->lr_lastrcvd ? (int64_t)(now - lr->lr_lastrcvd) : -1));
454
455 }
456 return (0);
457 }
458
459 /*
460 * Obtain a link-layer source cache entry for the sender.
461 *
462 * NOTE: This is currently only for ARP/Ethernet.
463 */
464 static void
465 arp_llreach_alloc(struct rtentry *rt, struct ifnet *ifp, void *addr,
466 unsigned int alen, boolean_t solicited)
467 {
468 VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
469 VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);
470
471 if (arp_llreach_base != 0 && rt->rt_expire != 0 &&
472 !(rt->rt_ifp->if_flags & IFF_LOOPBACK) &&
473 ifp->if_addrlen == IF_LLREACH_MAXLEN && /* Ethernet */
474 alen == ifp->if_addrlen) {
475 struct llinfo_arp *la = rt->rt_llinfo;
476 struct if_llreach *lr;
477 const char *why = NULL, *type = "";
478
479 /* Become a regular mutex, just in case */
480 RT_CONVERT_LOCK(rt);
481
482 if ((lr = la->la_llreach) != NULL) {
483 type = (solicited ? "ARP reply" : "ARP announcement");
484 /*
485 * If target has changed, create a new record;
486 * otherwise keep existing record.
487 */
488 IFLR_LOCK(lr);
489 if (bcmp(addr, lr->lr_key.addr, alen) != 0) {
490 IFLR_UNLOCK(lr);
491 /* Purge any link-layer info caching */
492 VERIFY(rt->rt_llinfo_purge != NULL);
493 rt->rt_llinfo_purge(rt);
494 lr = NULL;
495 why = " for different target HW address; "
496 "using new llreach record";
497 } else {
498 lr->lr_probes = 0; /* reset probe count */
499 IFLR_UNLOCK(lr);
500 if (solicited) {
501 why = " for same target HW address; "
502 "keeping existing llreach record";
503 }
504 }
505 }
506
507 if (lr == NULL) {
508 lr = la->la_llreach = ifnet_llreach_alloc(ifp,
509 ETHERTYPE_IP, addr, alen, arp_llreach_base);
510 if (lr != NULL) {
511 lr->lr_probes = 0; /* reset probe count */
512 if (why == NULL)
513 why = "creating new llreach record";
514 }
515 }
516
517 /* Bump up retry ceiling to accommodate unicast retries */
518 if (lr != NULL)
519 la->la_maxtries = arp_maxtries + arp_unicast_lim;
520
521 if (arp_verbose > 1 && lr != NULL && why != NULL) {
522 char tmp[MAX_IPv4_STR_LEN];
523
524 log(LOG_DEBUG, "%s: %s%s for %s\n", if_name(ifp),
525 type, why, inet_ntop(AF_INET,
526 &SIN(rt_key(rt))->sin_addr, tmp, sizeof (tmp)));
527 }
528 }
529 }
530
531 struct arptf_arg {
532 int draining;
533 uint32_t killed;
534 uint32_t aging;
535 uint32_t sticky;
536 uint32_t found;
537 };
538
539 /*
540 * Free an arp entry.
541 */
542 static void
543 arptfree(struct llinfo_arp *la, void *arg)
544 {
545 struct arptf_arg *ap = arg;
546 struct rtentry *rt = la->la_rt;
547
548 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
549
550 /* rnh_lock acquired by caller protects rt from going away */
551 RT_LOCK(rt);
552
553 VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0);
554 VERIFY(rt->rt_expire != 0 || rt->rt_rmx.rmx_expire == 0);
555
556 ap->found++;
557 if (rt->rt_expire == 0 || (rt->rt_flags & RTF_STATIC)) {
558 ap->sticky++;
559 /* ARP entry is permanent? */
560 if (rt->rt_expire == 0) {
561 RT_UNLOCK(rt);
562 return;
563 }
564 }
565
566 /* ARP entry hasn't expired and we're not draining? */
567 if (!ap->draining && rt->rt_expire > net_uptime()) {
568 RT_UNLOCK(rt);
569 ap->aging++;
570 return;
571 }
572
573 if (rt->rt_refcnt > 0) {
574 /*
575 * ARP entry has expired, with outstanding refcnt.
576 * If we're not draining, force ARP query to be
577 * generated next time this entry is used.
578 */
579 if (!ap->draining) {
580 struct sockaddr_dl *sdl = SDL(rt->rt_gateway);
581 if (sdl != NULL)
582 sdl->sdl_alen = 0;
583 la->la_asked = 0;
584 rt->rt_flags &= ~RTF_REJECT;
585 }
586 RT_UNLOCK(rt);
587 } else if (!(rt->rt_flags & RTF_STATIC)) {
588 /*
589 * ARP entry has no outstanding refcnt, and we're either
590 * draining or it has expired; delete it from the routing
591 * table. Safe to drop rt_lock and use rt_key, since holding
592 * rnh_lock here prevents another thread from calling
593 * rt_setgate() on this route.
594 */
595 RT_UNLOCK(rt);
596 rtrequest_locked(RTM_DELETE, rt_key(rt), NULL,
597 rt_mask(rt), 0, NULL);
598 arpstat.timeouts++;
599 ap->killed++;
600 } else {
601 /* ARP entry is static; let it linger */
602 RT_UNLOCK(rt);
603 }
604 }
605
606 void
607 in_arpdrain(void *arg)
608 {
609 #pragma unused(arg)
610 struct llinfo_arp *la, *ola;
611 struct arptf_arg farg;
612
613 if (arp_verbose)
614 log(LOG_DEBUG, "%s: draining ARP entries\n", __func__);
615
616 lck_mtx_lock(rnh_lock);
617 la = llinfo_arp.lh_first;
618 bzero(&farg, sizeof (farg));
619 farg.draining = 1;
620 while ((ola = la) != NULL) {
621 la = la->la_le.le_next;
622 arptfree(ola, &farg);
623 }
624 if (arp_verbose) {
625 log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u\n",
626 __func__, farg.found, farg.aging, farg.sticky, farg.killed);
627 }
628 lck_mtx_unlock(rnh_lock);
629 }
630
631 /*
632 * Timeout routine. Age arp_tab entries periodically.
633 */
634 static void
635 arp_timeout(void *arg)
636 {
637 #pragma unused(arg)
638 struct llinfo_arp *la, *ola;
639 struct timeval atv;
640 struct arptf_arg farg;
641
642 lck_mtx_lock(rnh_lock);
643 la = llinfo_arp.lh_first;
644 bzero(&farg, sizeof (farg));
645 while ((ola = la) != NULL) {
646 la = la->la_le.le_next;
647 arptfree(ola, &farg);
648 }
649 if (arp_verbose) {
650 log(LOG_DEBUG, "%s: found %u, aging %u, sticky %u, killed %u\n",
651 __func__, farg.found, farg.aging, farg.sticky, farg.killed);
652 }
653 atv.tv_usec = 0;
654 atv.tv_sec = arpt_prune;
655 /* re-arm the timer if there's work to do */
656 arp_timeout_run = 0;
657 if (farg.aging > 0)
658 arp_sched_timeout(&atv);
659 else if (arp_verbose)
660 log(LOG_DEBUG, "%s: not rescheduling timer\n", __func__);
661 lck_mtx_unlock(rnh_lock);
662 }
663
664 static void
665 arp_sched_timeout(struct timeval *atv)
666 {
667 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
668
669 if (!arp_timeout_run) {
670 struct timeval tv;
671
672 if (atv == NULL) {
673 tv.tv_usec = 0;
674 tv.tv_sec = MAX(arpt_prune / 5, 1);
675 atv = &tv;
676 }
677 if (arp_verbose) {
678 log(LOG_DEBUG, "%s: timer scheduled in "
679 "T+%llus.%lluu\n", __func__,
680 (uint64_t)atv->tv_sec, (uint64_t)atv->tv_usec);
681 }
682 arp_timeout_run = 1;
683 timeout(arp_timeout, NULL, tvtohz(atv));
684 }
685 }
686
687 /*
688 * ifa_rtrequest() callback
689 */
690 static void
691 arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa)
692 {
693 #pragma unused(sa)
694 struct sockaddr *gate = rt->rt_gateway;
695 struct llinfo_arp *la = rt->rt_llinfo;
696 static struct sockaddr_dl null_sdl =
697 { .sdl_len = sizeof (null_sdl), .sdl_family = AF_LINK };
698 uint64_t timenow;
699 char buf[MAX_IPv4_STR_LEN];
700
701 VERIFY(arpinit_done);
702 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
703 RT_LOCK_ASSERT_HELD(rt);
704
705 if (rt->rt_flags & RTF_GATEWAY)
706 return;
707
708 timenow = net_uptime();
709 switch (req) {
710 case RTM_ADD:
711 /*
712 * XXX: If this is a manually added route to an interface,
713 * such as older versions of routed or gated might provide,
714 * restore the cloning bit.
715 */
716 if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL &&
717 SIN(rt_mask(rt))->sin_addr.s_addr != INADDR_BROADCAST)
718 rt->rt_flags |= RTF_CLONING;
719
720 if (rt->rt_flags & RTF_CLONING) {
721 /*
722 * Case 1: This route should come from a route to iface.
723 */
724 if (rt_setgate(rt, rt_key(rt), SA(&null_sdl)) == 0) {
725 gate = rt->rt_gateway;
726 SDL(gate)->sdl_type = rt->rt_ifp->if_type;
727 SDL(gate)->sdl_index = rt->rt_ifp->if_index;
728 /*
729 * In case we're called before 1.0 sec.
730 * has elapsed.
731 */
732 rt_setexpire(rt, MAX(timenow, 1));
733 }
734 break;
735 }
736 /* Announce a new entry if requested. */
737 if (rt->rt_flags & RTF_ANNOUNCE) {
738 if (la != NULL)
739 arp_llreach_use(la); /* Mark use timestamp */
740 RT_UNLOCK(rt);
741 dlil_send_arp(rt->rt_ifp, ARPOP_REQUEST,
742 SDL(gate), rt_key(rt), NULL, rt_key(rt), 0);
743 RT_LOCK(rt);
744 arpstat.txannounces++;
745 }
746 /* FALLTHRU */
747 case RTM_RESOLVE:
748 if (gate->sa_family != AF_LINK ||
749 gate->sa_len < sizeof (null_sdl)) {
750 arpstat.invalidreqs++;
751 log(LOG_ERR, "%s: route to %s has bad gateway address "
752 "(sa_family %u sa_len %u) on %s\n",
753 __func__, inet_ntop(AF_INET,
754 &SIN(rt_key(rt))->sin_addr.s_addr, buf,
755 sizeof (buf)), gate->sa_family, gate->sa_len,
756 if_name(rt->rt_ifp));
757 break;
758 }
759 SDL(gate)->sdl_type = rt->rt_ifp->if_type;
760 SDL(gate)->sdl_index = rt->rt_ifp->if_index;
761
762 if (la != NULL)
763 break; /* This happens on a route change */
764
765 /*
766 * Case 2: This route may come from cloning, or a manual route
767 * add with a LL address.
768 */
769 rt->rt_llinfo = la = arp_llinfo_alloc(M_WAITOK);
770 if (la == NULL) {
771 arpstat.reqnobufs++;
772 break;
773 }
774 rt->rt_llinfo_get_ri = arp_llinfo_get_ri;
775 rt->rt_llinfo_get_iflri = arp_llinfo_get_iflri;
776 rt->rt_llinfo_purge = arp_llinfo_purge;
777 rt->rt_llinfo_free = arp_llinfo_free;
778 rt->rt_llinfo_refresh = arp_llinfo_refresh;
779 rt->rt_flags |= RTF_LLINFO;
780 la->la_rt = rt;
781 LIST_INSERT_HEAD(&llinfo_arp, la, la_le);
782 arpstat.inuse++;
783
784 /* We have at least one entry; arm the timer if not already */
785 arp_sched_timeout(NULL);
786
787 /*
788 * This keeps multicast addresses from showing up in
789 * `arp -a' listings as unresolved; it's not actually
790 * functional. The same applies to broadcast. For IPv4
791 * link-local addresses, keep the entry around even after
792 * it has expired.
793 */
794 if (IN_MULTICAST(ntohl(SIN(rt_key(rt))->sin_addr.s_addr))) {
795 RT_UNLOCK(rt);
796 dlil_resolve_multi(rt->rt_ifp, rt_key(rt), gate,
797 sizeof (struct sockaddr_dl));
798 RT_LOCK(rt);
799 rt_setexpire(rt, 0);
800 } else if (in_broadcast(SIN(rt_key(rt))->sin_addr,
801 rt->rt_ifp)) {
802 struct sockaddr_dl *gate_ll = SDL(gate);
803 size_t broadcast_len;
804 ifnet_llbroadcast_copy_bytes(rt->rt_ifp,
805 LLADDR(gate_ll), sizeof (gate_ll->sdl_data),
806 &broadcast_len);
807 gate_ll->sdl_alen = broadcast_len;
808 gate_ll->sdl_family = AF_LINK;
809 gate_ll->sdl_len = sizeof (struct sockaddr_dl);
810 /* In case we're called before 1.0 sec. has elapsed */
811 rt_setexpire(rt, MAX(timenow, 1));
812 } else if (IN_LINKLOCAL(ntohl(SIN(rt_key(rt))->
813 sin_addr.s_addr))) {
814 rt->rt_flags |= RTF_STATIC;
815 }
816
817 /* Set default maximum number of retries */
818 la->la_maxtries = arp_maxtries;
819
820 /* Become a regular mutex, just in case */
821 RT_CONVERT_LOCK(rt);
822 IFA_LOCK_SPIN(rt->rt_ifa);
823 if (SIN(rt_key(rt))->sin_addr.s_addr ==
824 (IA_SIN(rt->rt_ifa))->sin_addr.s_addr) {
825 IFA_UNLOCK(rt->rt_ifa);
826 /*
827 * This test used to be
828 * if (loif.if_flags & IFF_UP)
829 * It allowed local traffic to be forced through the
830 * hardware by configuring the loopback down. However,
831 * it causes problems during network configuration
832 * for boards that can't receive packets they send.
833 * It is now necessary to clear "useloopback" and
834 * remove the route to force traffic out to the
835 * hardware.
836 */
837 rt_setexpire(rt, 0);
838 ifnet_lladdr_copy_bytes(rt->rt_ifp, LLADDR(SDL(gate)),
839 SDL(gate)->sdl_alen = rt->rt_ifp->if_addrlen);
840 if (useloopback) {
841 if (rt->rt_ifp != lo_ifp) {
842 /*
843 * Purge any link-layer info caching.
844 */
845 if (rt->rt_llinfo_purge != NULL)
846 rt->rt_llinfo_purge(rt);
847
848 /*
849 * Adjust route ref count for the
850 * interfaces.
851 */
852 if (rt->rt_if_ref_fn != NULL) {
853 rt->rt_if_ref_fn(lo_ifp, 1);
854 rt->rt_if_ref_fn(rt->rt_ifp, -1);
855 }
856 }
857 rt->rt_ifp = lo_ifp;
858 /*
859 * If rmx_mtu is not locked, update it
860 * to the MTU used by the new interface.
861 */
862 if (!(rt->rt_rmx.rmx_locks & RTV_MTU))
863 rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
864 }
865 } else {
866 IFA_UNLOCK(rt->rt_ifa);
867 }
868 break;
869
870 case RTM_DELETE:
871 if (la == NULL)
872 break;
873 /*
874 * Unchain it but defer the actual freeing until the route
875 * itself is to be freed. rt->rt_llinfo still points to
876 * llinfo_arp, and likewise, la->la_rt still points to this
877 * route entry, except that RTF_LLINFO is now cleared.
878 */
879 LIST_REMOVE(la, la_le);
880 la->la_le.le_next = NULL;
881 la->la_le.le_prev = NULL;
882 arpstat.inuse--;
883
884 /*
885 * Purge any link-layer info caching.
886 */
887 if (rt->rt_llinfo_purge != NULL)
888 rt->rt_llinfo_purge(rt);
889
890 rt->rt_flags &= ~RTF_LLINFO;
891 if (la->la_hold != NULL) {
892 m_freem(la->la_hold);
893 la->la_hold = NULL;
894 arpstat.purged++;
895 }
896 }
897 }
898
899 /*
900 * convert hardware address to hex string for logging errors.
901 */
902 static const char *
903 sdl_addr_to_hex(const struct sockaddr_dl *sdl, char *orig_buf, int buflen)
904 {
905 char *buf = orig_buf;
906 int i;
907 const u_char *lladdr = (u_char *)(size_t)sdl->sdl_data;
908 int maxbytes = buflen / 3;
909
910 if (maxbytes > sdl->sdl_alen) {
911 maxbytes = sdl->sdl_alen;
912 }
913 *buf = '\0';
914 for (i = 0; i < maxbytes; i++) {
915 snprintf(buf, 3, "%02x", lladdr[i]);
916 buf += 2;
917 *buf = (i == maxbytes - 1) ? '\0' : ':';
918 buf++;
919 }
920 return (orig_buf);
921 }
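/*
 * Usage sketch (this is how the callers below size their buffers): each
 * hardware byte expands to "xx:" and the final ':' is replaced by '\0',
 * so 3 * MAX_HW_LEN bytes is always enough; "sender_hw" is whatever
 * sockaddr_dl is being logged:
 *
 *      char buf[3 * MAX_HW_LEN];
 *
 *      log(LOG_ERR, "arp: conflict from %s\n",
 *          sdl_addr_to_hex(sender_hw, buf, sizeof (buf)));
 */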
922
923 /*
924 * arp_lookup_route will lookup the route for a given address.
925 *
926 * The address must be for a host on a local network on this interface.
927 * If the returned route is non-NULL, the route is locked and the caller
928 * is responsible for unlocking it and releasing its reference.
929 */
930 static errno_t
931 arp_lookup_route(const struct in_addr *addr, int create, int proxy,
932 route_t *route, unsigned int ifscope)
933 {
934 struct sockaddr_inarp sin =
935 { sizeof (sin), AF_INET, 0, { 0 }, { 0 }, 0, 0 };
936 const char *why = NULL;
937 errno_t error = 0;
938 route_t rt;
939
940 *route = NULL;
941
942 sin.sin_addr.s_addr = addr->s_addr;
943 sin.sin_other = proxy ? SIN_PROXY : 0;
944
945 /*
946 * If the destination is a link-local address, don't
947 * constrain the lookup (don't scope it).
948 */
949 if (IN_LINKLOCAL(ntohl(addr->s_addr)))
950 ifscope = IFSCOPE_NONE;
951
952 rt = rtalloc1_scoped((struct sockaddr *)&sin, create, 0, ifscope);
953 if (rt == NULL)
954 return (ENETUNREACH);
955
956 RT_LOCK(rt);
957
958 if (rt->rt_flags & RTF_GATEWAY) {
959 why = "host is not on local network";
960 error = ENETUNREACH;
961 } else if (!(rt->rt_flags & RTF_LLINFO)) {
962 why = "could not allocate llinfo";
963 error = ENOMEM;
964 } else if (rt->rt_gateway->sa_family != AF_LINK) {
965 why = "gateway route is not ours";
966 error = EPROTONOSUPPORT;
967 }
968
969 if (error != 0) {
970 if (create && (arp_verbose || log_arp_warnings)) {
971 char tmp[MAX_IPv4_STR_LEN];
972 log(LOG_DEBUG, "%s: link#%d %s failed: %s\n",
973 __func__, ifscope, inet_ntop(AF_INET, addr, tmp,
974 sizeof (tmp)), why);
975 }
976
977 /*
978 * If there are no references to this route, and it is
979 * a cloned route, and not static, and ARP had created
980 * the route, then purge it from the routing table as
981 * it is probably bogus.
982 */
983 if (rt->rt_refcnt == 1 &&
984 (rt->rt_flags & (RTF_WASCLONED | RTF_STATIC)) ==
985 RTF_WASCLONED) {
986 /*
987 * Prevent another thread from modifying rt_key,
988 * rt_gateway via rt_setgate() after rt_lock is
989 * dropped by marking the route as defunct.
990 */
991 rt->rt_flags |= RTF_CONDEMNED;
992 RT_UNLOCK(rt);
993 rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
994 rt_mask(rt), rt->rt_flags, NULL);
995 rtfree(rt);
996 } else {
997 RT_REMREF_LOCKED(rt);
998 RT_UNLOCK(rt);
999 }
1000 return (error);
1001 }
1002
1003 /*
1004 * Caller releases reference and does RT_UNLOCK(rt).
1005 */
1006 *route = rt;
1007 return (0);
1008 }
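/*
 * Usage sketch (mirrors what arp_ip_handle_input() below does); "sin_addr"
 * and "ifp" are placeholder names.  On success the route comes back locked
 * with a reference held, and the caller must drop both when done:
 *
 *      route_t rt = NULL;
 *
 *      if (arp_lookup_route(&sin_addr, 1, 0, &rt, ifp->if_index) == 0) {
 *              RT_LOCK_ASSERT_HELD(rt);
 *              (use rt, e.g. examine SDL(rt->rt_gateway))
 *              RT_REMREF_LOCKED(rt);
 *              RT_UNLOCK(rt);
 *      }
 */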
1009
1010 /*
1011 * This is the ARP pre-output routine; care must be taken to ensure that
1012 * the "hint" route never gets freed via rtfree(), since the caller may
1013 * have stored it inside a struct route with a reference held for that
1014 * placeholder.
1015 */
1016 errno_t
1017 arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest,
1018 struct sockaddr_dl *ll_dest, size_t ll_dest_len, route_t hint,
1019 mbuf_t packet)
1020 {
1021 route_t route = NULL; /* output route */
1022 errno_t result = 0;
1023 struct sockaddr_dl *gateway;
1024 struct llinfo_arp *llinfo = NULL;
1025 uint64_t timenow;
1026 int unreachable = 0;
1027 struct if_llreach *lr;
1028 struct ifaddr *rt_ifa;
1029 struct sockaddr *sa;
1030 uint32_t rtflags;
1031 struct sockaddr_dl sdl;
1032
1033 if (net_dest->sin_family != AF_INET)
1034 return (EAFNOSUPPORT);
1035
1036 if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING))
1037 return (ENETDOWN);
1038
1039 /*
1040 * If we were given a route, verify the route and grab the gateway
1041 */
1042 if (hint != NULL) {
1043 /*
1044 * Callee holds a reference on the route and returns
1045 * with the route entry locked, upon success.
1046 */
1047 result = route_to_gwroute((const struct sockaddr *)
1048 net_dest, hint, &route);
1049 if (result != 0)
1050 return (result);
1051 if (route != NULL)
1052 RT_LOCK_ASSERT_HELD(route);
1053 }
1054
1055 if (packet->m_flags & M_BCAST) {
1056 size_t broadcast_len;
1057 bzero(ll_dest, ll_dest_len);
1058 result = ifnet_llbroadcast_copy_bytes(ifp, LLADDR(ll_dest),
1059 ll_dest_len - offsetof(struct sockaddr_dl, sdl_data),
1060 &broadcast_len);
1061 if (result == 0) {
1062 ll_dest->sdl_alen = broadcast_len;
1063 ll_dest->sdl_family = AF_LINK;
1064 ll_dest->sdl_len = sizeof (struct sockaddr_dl);
1065 }
1066 goto release;
1067 }
1068 if (packet->m_flags & M_MCAST) {
1069 if (route != NULL)
1070 RT_UNLOCK(route);
1071 result = dlil_resolve_multi(ifp,
1072 (const struct sockaddr *)net_dest,
1073 (struct sockaddr *)ll_dest, ll_dest_len);
1074 if (route != NULL)
1075 RT_LOCK(route);
1076 goto release;
1077 }
1078
1079 /*
1080 * If we didn't find a route, or the route doesn't have
1081 * link layer information, trigger the creation of the
1082 * route and link layer information.
1083 */
1084 if (route == NULL || route->rt_llinfo == NULL) {
1085 /* Clean up now while we can */
1086 if (route != NULL) {
1087 if (route == hint) {
1088 RT_REMREF_LOCKED(route);
1089 RT_UNLOCK(route);
1090 } else {
1091 RT_UNLOCK(route);
1092 rtfree(route);
1093 }
1094 }
1095 /*
1096 * Callee holds a reference on the route and returns
1097 * with the route entry locked, upon success.
1098 */
1099 result = arp_lookup_route(&net_dest->sin_addr, 1, 0, &route,
1100 ifp->if_index);
1101 if (result == 0)
1102 RT_LOCK_ASSERT_HELD(route);
1103 }
1104
1105 if (result || route == NULL || (llinfo = route->rt_llinfo) == NULL) {
1106 /* In case result is 0 but no route, return an error */
1107 if (result == 0)
1108 result = EHOSTUNREACH;
1109
1110 if (route != NULL && route->rt_llinfo == NULL) {
1111 char tmp[MAX_IPv4_STR_LEN];
1112 log(LOG_ERR, "%s: can't allocate llinfo for %s\n",
1113 __func__, inet_ntop(AF_INET, &net_dest->sin_addr,
1114 tmp, sizeof (tmp)));
1115 }
1116 goto release;
1117 }
1118
1119 /*
1120 * Now that we have the right route, is it filled in?
1121 */
1122 gateway = SDL(route->rt_gateway);
1123 timenow = net_uptime();
1124 VERIFY(route->rt_expire == 0 || route->rt_rmx.rmx_expire != 0);
1125 VERIFY(route->rt_expire != 0 || route->rt_rmx.rmx_expire == 0);
1126 if ((route->rt_expire == 0 ||
1127 route->rt_expire > timenow) && gateway != NULL &&
1128 gateway->sdl_family == AF_LINK && gateway->sdl_alen != 0 &&
1129 !(unreachable = !arp_llreach_reachable(llinfo))) {
1130 bcopy(gateway, ll_dest, MIN(gateway->sdl_len, ll_dest_len));
1131 result = 0;
1132 arp_llreach_use(llinfo); /* Mark use timestamp */
1133 /*
1134 * Start the unicast probe right before the entry expires.
1135 */
1136 lr = llinfo->la_llreach;
1137 if (lr == NULL)
1138 goto release;
1139 rt_ifa = route->rt_ifa;
1140 /* Become a regular mutex, just in case */
1141 RT_CONVERT_LOCK(route);
1142 IFLR_LOCK_SPIN(lr);
1143 if (route->rt_expire <= timenow + arp_unicast_lim &&
1144 ifp->if_addrlen == IF_LLREACH_MAXLEN &&
1145 lr->lr_probes <= arp_unicast_lim) {
1146 lr->lr_probes++;
1147 bzero(&sdl, sizeof (sdl));
1148 sdl.sdl_alen = ifp->if_addrlen;
1149 bcopy(&lr->lr_key.addr, LLADDR(&sdl),
1150 ifp->if_addrlen);
1151 IFLR_UNLOCK(lr);
1152 IFA_LOCK_SPIN(rt_ifa);
1153 IFA_ADDREF_LOCKED(rt_ifa);
1154 sa = rt_ifa->ifa_addr;
1155 IFA_UNLOCK(rt_ifa);
1156 rtflags = route->rt_flags;
1157 RT_UNLOCK(route);
1158 dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa,
1159 (const struct sockaddr_dl *)&sdl,
1160 (const struct sockaddr *)net_dest, rtflags);
1161 IFA_REMREF(rt_ifa);
1162 RT_LOCK(route);
1163 } else
1164 IFLR_UNLOCK(lr);
1165 goto release;
1166 } else if (unreachable) {
1167 /*
1168 * Discard existing answer in case we need to probe.
1169 */
1170 gateway->sdl_alen = 0;
1171 }
1172
1173 if (ifp->if_flags & IFF_NOARP) {
1174 result = ENOTSUP;
1175 goto release;
1176 }
1177
1178 /*
1179 * Route wasn't complete/valid. We need to arp.
1180 */
1181 if (packet != NULL) {
1182 if (llinfo->la_hold != NULL) {
1183 m_freem(llinfo->la_hold);
1184 arpstat.dropped++;
1185 }
1186 llinfo->la_hold = packet;
1187 }
1188
1189 if (route->rt_expire) {
1190 route->rt_flags &= ~RTF_REJECT;
1191 if (llinfo->la_asked == 0 || route->rt_expire != timenow) {
1192 rt_setexpire(route, timenow);
1193 if (llinfo->la_asked++ < llinfo->la_maxtries) {
1194 struct kev_msg ev_msg;
1195 struct kev_in_arpfailure in_arpfailure;
1196 boolean_t sendkev = FALSE;
1197
1198 rt_ifa = route->rt_ifa;
1199 lr = llinfo->la_llreach;
1200 /* Become a regular mutex, just in case */
1201 RT_CONVERT_LOCK(route);
1202 /* Update probe count, if applicable */
1203 if (lr != NULL) {
1204 IFLR_LOCK_SPIN(lr);
1205 lr->lr_probes++;
1206 IFLR_UNLOCK(lr);
1207 }
1208 if (ifp->if_addrlen == IF_LLREACH_MAXLEN &&
1209 route->rt_flags & RTF_ROUTER &&
1210 llinfo->la_asked > 1) {
1211 sendkev = TRUE;
1212 llinfo->la_flags |= LLINFO_RTRFAIL_EVTSENT;
1213 }
1214 IFA_LOCK_SPIN(rt_ifa);
1215 IFA_ADDREF_LOCKED(rt_ifa);
1216 sa = rt_ifa->ifa_addr;
1217 IFA_UNLOCK(rt_ifa);
1218 arp_llreach_use(llinfo); /* Mark use tstamp */
1219 rtflags = route->rt_flags;
1220 RT_UNLOCK(route);
1221 dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa,
1222 NULL, (const struct sockaddr *)net_dest,
1223 rtflags);
1224 IFA_REMREF(rt_ifa);
1225 if (sendkev) {
1226 bzero(&ev_msg, sizeof(ev_msg));
1227 bzero(&in_arpfailure,
1228 sizeof(in_arpfailure));
1229 in_arpfailure.link_data.if_family =
1230 ifp->if_family;
1231 in_arpfailure.link_data.if_unit =
1232 ifp->if_unit;
1233 strlcpy(in_arpfailure.link_data.if_name,
1234 ifp->if_name, IFNAMSIZ);
1235 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1236 ev_msg.kev_class = KEV_NETWORK_CLASS;
1237 ev_msg.kev_subclass = KEV_INET_SUBCLASS;
1238 ev_msg.event_code =
1239 KEV_INET_ARPRTRFAILURE;
1240 ev_msg.dv[0].data_ptr = &in_arpfailure;
1241 ev_msg.dv[0].data_length =
1242 sizeof(struct
1243 kev_in_arpfailure);
1244 kev_post_msg(&ev_msg);
1245 }
1246 result = EJUSTRETURN;
1247 RT_LOCK(route);
1248 goto release;
1249 } else {
1250 route->rt_flags |= RTF_REJECT;
1251 rt_setexpire(route,
1252 route->rt_expire + arpt_down);
1253 llinfo->la_asked = 0;
1254 /*
1255 * Clear la_hold; don't free the packet since
1256 * we're not returning EJUSTRETURN; the caller
1257 * will handle the freeing.
1258 */
1259 llinfo->la_hold = NULL;
1260 result = EHOSTUNREACH;
1261 goto release;
1262 }
1263 }
1264 }
1265
1266 /* The packet, if any, is now held inside la_hold */
1267 result = EJUSTRETURN;
1268
1269 release:
1270 if (result == EHOSTUNREACH)
1271 arpstat.dropped++;
1272
1273 if (route != NULL) {
1274 if (route == hint) {
1275 RT_REMREF_LOCKED(route);
1276 RT_UNLOCK(route);
1277 } else {
1278 RT_UNLOCK(route);
1279 rtfree(route);
1280 }
1281 }
1282 return (result);
1283 }
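/*
 * Caller-side sketch (hedged; the real pre-output caller lives in the
 * Ethernet family code, not in this file, and "dst_sin", "hint_rt" and "m"
 * are placeholder names): a return of 0 means ll_dest has been filled in
 * and the frame can be transmitted; EJUSTRETURN means the packet is now
 * queued on la_hold pending resolution and must not be freed by the caller;
 * any other error leaves the packet with the caller.
 *
 *      struct sockaddr_dl ll_dest;
 *      errno_t err;
 *
 *      err = arp_lookup_ip(ifp, dst_sin, &ll_dest, sizeof (ll_dest),
 *          hint_rt, m);
 *      if (err == 0)
 *              (frame and transmit m using ll_dest)
 *      else if (err != EJUSTRETURN)
 *              (caller drops/frees m)
 */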
1284
1285 errno_t
1286 arp_ip_handle_input(ifnet_t ifp, u_short arpop,
1287 const struct sockaddr_dl *sender_hw, const struct sockaddr_in *sender_ip,
1288 const struct sockaddr_in *target_ip)
1289 {
1290 char ipv4str[MAX_IPv4_STR_LEN];
1291 struct sockaddr_dl proxied;
1292 struct sockaddr_dl *gateway, *target_hw = NULL;
1293 struct ifaddr *ifa;
1294 struct in_ifaddr *ia;
1295 struct in_ifaddr *best_ia = NULL;
1296 struct sockaddr_in best_ia_sin;
1297 route_t route = NULL;
1298 char buf[3 * MAX_HW_LEN]; /* enough for MAX_HW_LEN byte hw address */
1299 struct llinfo_arp *llinfo;
1300 errno_t error;
1301 int created_announcement = 0;
1302 int bridged = 0, is_bridge = 0;
1303
1304 arpstat.received++;
1305
1306 /* Do not respond to requests for 0.0.0.0 */
1307 if (target_ip->sin_addr.s_addr == INADDR_ANY && arpop == ARPOP_REQUEST)
1308 goto done;
1309
1310 if (ifp->if_bridge)
1311 bridged = 1;
1312 if (ifp->if_type == IFT_BRIDGE)
1313 is_bridge = 1;
1314
1315 if (arpop == ARPOP_REPLY)
1316 arpstat.rxreplies++;
1317
1318 /*
1319 * Determine if this ARP is for us.
1320 * For a bridge, we want to check the address irrespective
1321 * of the receive interface.
1322 */
1323 lck_rw_lock_shared(in_ifaddr_rwlock);
1324 TAILQ_FOREACH(ia, INADDR_HASH(target_ip->sin_addr.s_addr), ia_hash) {
1325 IFA_LOCK_SPIN(&ia->ia_ifa);
1326 if (((bridged && ia->ia_ifp->if_bridge != NULL) ||
1327 (ia->ia_ifp == ifp)) &&
1328 ia->ia_addr.sin_addr.s_addr == target_ip->sin_addr.s_addr) {
1329 best_ia = ia;
1330 best_ia_sin = best_ia->ia_addr;
1331 IFA_ADDREF_LOCKED(&ia->ia_ifa);
1332 IFA_UNLOCK(&ia->ia_ifa);
1333 lck_rw_done(in_ifaddr_rwlock);
1334 goto match;
1335 }
1336 IFA_UNLOCK(&ia->ia_ifa);
1337 }
1338
1339 TAILQ_FOREACH(ia, INADDR_HASH(sender_ip->sin_addr.s_addr), ia_hash) {
1340 IFA_LOCK_SPIN(&ia->ia_ifa);
1341 if (((bridged && ia->ia_ifp->if_bridge != NULL) ||
1342 (ia->ia_ifp == ifp)) &&
1343 ia->ia_addr.sin_addr.s_addr == sender_ip->sin_addr.s_addr) {
1344 best_ia = ia;
1345 best_ia_sin = best_ia->ia_addr;
1346 IFA_ADDREF_LOCKED(&ia->ia_ifa);
1347 IFA_UNLOCK(&ia->ia_ifa);
1348 lck_rw_done(in_ifaddr_rwlock);
1349 goto match;
1350 }
1351 IFA_UNLOCK(&ia->ia_ifa);
1352 }
1353
1354 #define BDG_MEMBER_MATCHES_ARP(addr, ifp, ia) \
1355 (ia->ia_ifp->if_bridge == ifp->if_softc && \
1356 bcmp(IF_LLADDR(ia->ia_ifp), IF_LLADDR(ifp), ifp->if_addrlen) == 0 && \
1357 addr == ia->ia_addr.sin_addr.s_addr)
1358 /*
1359 * Check the case when bridge shares its MAC address with
1360 * some of its children, so packets are claimed by bridge
1361 * itself (bridge_input() does it first), but they are really
1362 * meant to be destined to the bridge member.
1363 */
1364 if (is_bridge) {
1365 TAILQ_FOREACH(ia, INADDR_HASH(target_ip->sin_addr.s_addr),
1366 ia_hash) {
1367 IFA_LOCK_SPIN(&ia->ia_ifa);
1368 if (BDG_MEMBER_MATCHES_ARP(target_ip->sin_addr.s_addr,
1369 ifp, ia)) {
1370 ifp = ia->ia_ifp;
1371 best_ia = ia;
1372 best_ia_sin = best_ia->ia_addr;
1373 IFA_ADDREF_LOCKED(&ia->ia_ifa);
1374 IFA_UNLOCK(&ia->ia_ifa);
1375 lck_rw_done(in_ifaddr_rwlock);
1376 goto match;
1377 }
1378 IFA_UNLOCK(&ia->ia_ifa);
1379 }
1380 }
1381 #undef BDG_MEMBER_MATCHES_ARP
1382 lck_rw_done(in_ifaddr_rwlock);
1383
1384 /*
1385 * No match, use the first inet address on the receive interface
1386 * as a dummy address for the rest of the function; we may be
1387 * proxying for another address.
1388 */
1389 ifnet_lock_shared(ifp);
1390 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1391 IFA_LOCK_SPIN(ifa);
1392 if (ifa->ifa_addr->sa_family != AF_INET) {
1393 IFA_UNLOCK(ifa);
1394 continue;
1395 }
1396 best_ia = (struct in_ifaddr *)ifa;
1397 best_ia_sin = best_ia->ia_addr;
1398 IFA_ADDREF_LOCKED(ifa);
1399 IFA_UNLOCK(ifa);
1400 ifnet_lock_done(ifp);
1401 goto match;
1402 }
1403 ifnet_lock_done(ifp);
1404
1405 /*
1406 * If we're not a bridge member, or if we are but there's no
1407 * IPv4 address to use for the interface, drop the packet.
1408 */
1409 if (!bridged || best_ia == NULL)
1410 goto done;
1411
1412 match:
1413 /* If the packet is from this interface, ignore the packet */
1414 if (bcmp(CONST_LLADDR(sender_hw), IF_LLADDR(ifp),
1415 sender_hw->sdl_alen) == 0)
1416 goto done;
1417
1418 /* Check for a conflict */
1419 if (!bridged &&
1420 sender_ip->sin_addr.s_addr == best_ia_sin.sin_addr.s_addr) {
1421 struct kev_msg ev_msg;
1422 struct kev_in_collision *in_collision;
1423 u_char storage[sizeof (struct kev_in_collision) + MAX_HW_LEN];
1424
1425 bzero(&ev_msg, sizeof (struct kev_msg));
1426 bzero(storage, (sizeof (struct kev_in_collision) + MAX_HW_LEN));
1427 in_collision = (struct kev_in_collision *)(void *)storage;
1428 log(LOG_ERR, "%s duplicate IP address %s sent from "
1429 "address %s\n", if_name(ifp),
1430 inet_ntop(AF_INET, &sender_ip->sin_addr, ipv4str,
1431 sizeof (ipv4str)), sdl_addr_to_hex(sender_hw, buf,
1432 sizeof (buf)));
1433
1434 /* Send a kernel event so anyone can learn of the conflict */
1435 in_collision->link_data.if_family = ifp->if_family;
1436 in_collision->link_data.if_unit = ifp->if_unit;
1437 strlcpy(&in_collision->link_data.if_name[0],
1438 ifp->if_name, IFNAMSIZ);
1439 in_collision->ia_ipaddr = sender_ip->sin_addr;
1440 in_collision->hw_len = (sender_hw->sdl_alen < MAX_HW_LEN) ?
1441 sender_hw->sdl_alen : MAX_HW_LEN;
1442 bcopy(CONST_LLADDR(sender_hw), (caddr_t)in_collision->hw_addr,
1443 in_collision->hw_len);
1444 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1445 ev_msg.kev_class = KEV_NETWORK_CLASS;
1446 ev_msg.kev_subclass = KEV_INET_SUBCLASS;
1447 ev_msg.event_code = KEV_INET_ARPCOLLISION;
1448 ev_msg.dv[0].data_ptr = in_collision;
1449 ev_msg.dv[0].data_length =
1450 sizeof (struct kev_in_collision) + in_collision->hw_len;
1451 ev_msg.dv[1].data_length = 0;
1452 kev_post_msg(&ev_msg);
1453 arpstat.dupips++;
1454 goto respond;
1455 }
1456
1457 /*
1458 * Look up the routing entry. If it doesn't exist and we are the
1459 * target, and the sender isn't 0.0.0.0, go ahead and create one.
1460 * Callee holds a reference on the route and returns with the route
1461 * entry locked, upon success.
1462 */
1463 error = arp_lookup_route(&sender_ip->sin_addr,
1464 (target_ip->sin_addr.s_addr == best_ia_sin.sin_addr.s_addr &&
1465 sender_ip->sin_addr.s_addr != 0), 0, &route, ifp->if_index);
1466
1467 if (error == 0)
1468 RT_LOCK_ASSERT_HELD(route);
1469
1470 if (error || route == NULL || route->rt_gateway == NULL) {
1471 if (arpop != ARPOP_REQUEST)
1472 goto respond;
1473
1474 if (arp_sendllconflict && send_conflicting_probes != 0 &&
1475 (ifp->if_eflags & IFEF_ARPLL) &&
1476 IN_LINKLOCAL(ntohl(target_ip->sin_addr.s_addr)) &&
1477 sender_ip->sin_addr.s_addr == INADDR_ANY) {
1478 /*
1479 * Verify this ARP probe doesn't conflict with
1480 * an IPv4LL we know of on another interface.
1481 */
1482 if (route != NULL) {
1483 RT_REMREF_LOCKED(route);
1484 RT_UNLOCK(route);
1485 route = NULL;
1486 }
1487 /*
1488 * Callee holds a reference on the route and returns
1489 * with the route entry locked, upon success.
1490 */
1491 error = arp_lookup_route(&target_ip->sin_addr, 0, 0,
1492 &route, ifp->if_index);
1493
1494 if (error != 0 || route == NULL ||
1495 route->rt_gateway == NULL)
1496 goto respond;
1497
1498 RT_LOCK_ASSERT_HELD(route);
1499
1500 gateway = SDL(route->rt_gateway);
1501 if (route->rt_ifp != ifp && gateway->sdl_alen != 0 &&
1502 (gateway->sdl_alen != sender_hw->sdl_alen ||
1503 bcmp(CONST_LLADDR(gateway), CONST_LLADDR(sender_hw),
1504 gateway->sdl_alen) != 0)) {
1505 /*
1506 * A node is probing for an IPv4LL we know
1507 * exists on a different interface. We respond
1508 * with a conflicting probe to force the new
1509 * device to pick a different IPv4LL address.
1510 */
1511 if (arp_verbose || log_arp_warnings) {
1512 log(LOG_INFO, "arp: %s on %s sent "
1513 "probe for %s, already on %s\n",
1514 sdl_addr_to_hex(sender_hw, buf,
1515 sizeof (buf)), if_name(ifp),
1516 inet_ntop(AF_INET,
1517 &target_ip->sin_addr, ipv4str,
1518 sizeof (ipv4str)),
1519 if_name(route->rt_ifp));
1520 log(LOG_INFO, "arp: sending "
1521 "conflicting probe to %s on %s\n",
1522 sdl_addr_to_hex(sender_hw, buf,
1523 sizeof (buf)), if_name(ifp));
1524 }
1525 /* Mark use timestamp */
1526 if (route->rt_llinfo != NULL)
1527 arp_llreach_use(route->rt_llinfo);
1528 /* We're done with the route */
1529 RT_REMREF_LOCKED(route);
1530 RT_UNLOCK(route);
1531 route = NULL;
1532 /*
1533 * Send a conservative unicast "ARP probe".
1534 * This should force the other device to pick
1535 * a new number. This will not force the
1536 * device to pick a new number if the device
1537 * has already assigned that number. This will
1538 * not imply to the device that we own that
1539 * address. The link address is always
1540 * present; it's never freed.
1541 */
1542 ifnet_lock_shared(ifp);
1543 ifa = ifp->if_lladdr;
1544 IFA_ADDREF(ifa);
1545 ifnet_lock_done(ifp);
1546 dlil_send_arp_internal(ifp, ARPOP_REQUEST,
1547 SDL(ifa->ifa_addr),
1548 (const struct sockaddr *)sender_ip,
1549 sender_hw,
1550 (const struct sockaddr *)target_ip);
1551 IFA_REMREF(ifa);
1552 ifa = NULL;
1553 arpstat.txconflicts++;
1554 }
1555 goto respond;
1556 } else if (keep_announcements != 0 &&
1557 target_ip->sin_addr.s_addr == sender_ip->sin_addr.s_addr) {
1558 /*
1559 * Don't create entry if link-local address and
1560 * link-local is disabled
1561 */
1562 if (!IN_LINKLOCAL(ntohl(sender_ip->sin_addr.s_addr)) ||
1563 (ifp->if_eflags & IFEF_ARPLL)) {
1564 if (route != NULL) {
1565 RT_REMREF_LOCKED(route);
1566 RT_UNLOCK(route);
1567 route = NULL;
1568 }
1569 /*
1570 * Callee holds a reference on the route and
1571 * returns with the route entry locked, upon
1572 * success.
1573 */
1574 error = arp_lookup_route(&sender_ip->sin_addr,
1575 1, 0, &route, ifp->if_index);
1576
1577 if (error == 0)
1578 RT_LOCK_ASSERT_HELD(route);
1579
1580 if (error == 0 && route != NULL &&
1581 route->rt_gateway != NULL)
1582 created_announcement = 1;
1583 }
1584 if (created_announcement == 0)
1585 goto respond;
1586 } else {
1587 goto respond;
1588 }
1589 }
1590
1591 RT_LOCK_ASSERT_HELD(route);
1592 VERIFY(route->rt_expire == 0 || route->rt_rmx.rmx_expire != 0);
1593 VERIFY(route->rt_expire != 0 || route->rt_rmx.rmx_expire == 0);
1594
1595 gateway = SDL(route->rt_gateway);
1596 if (!bridged && route->rt_ifp != ifp) {
1597 if (!IN_LINKLOCAL(ntohl(sender_ip->sin_addr.s_addr)) ||
1598 !(ifp->if_eflags & IFEF_ARPLL)) {
1599 if (arp_verbose || log_arp_warnings)
1600 log(LOG_ERR, "arp: %s is on %s but got "
1601 "reply from %s on %s\n",
1602 inet_ntop(AF_INET, &sender_ip->sin_addr,
1603 ipv4str, sizeof (ipv4str)),
1604 if_name(route->rt_ifp),
1605 sdl_addr_to_hex(sender_hw, buf,
1606 sizeof (buf)), if_name(ifp));
1607 goto respond;
1608 } else {
1609 /* Don't change a permanent address */
1610 if (route->rt_expire == 0)
1611 goto respond;
1612
1613 /*
1614 * We're about to check and/or change the route's ifp
1615 * and ifa, so do the lock dance: drop rt_lock, hold
1616 * rnh_lock and re-hold rt_lock to avoid violating the
1617 * lock ordering. We have an extra reference on the
1618 * route, so it won't go away while we do this.
1619 */
1620 RT_UNLOCK(route);
1621 lck_mtx_lock(rnh_lock);
1622 RT_LOCK(route);
1623 /*
1624 * Don't change the cloned route away from the
1625 * parent's interface if the address did resolve
1626 * or if the route is defunct. rt_ifp on both
1627 * the parent and the clone can be freely
1628 * accessed now that we have acquired rnh_lock.
1629 */
1630 gateway = SDL(route->rt_gateway);
1631 if ((gateway->sdl_alen != 0 &&
1632 route->rt_parent != NULL &&
1633 route->rt_parent->rt_ifp == route->rt_ifp) ||
1634 (route->rt_flags & RTF_CONDEMNED)) {
1635 RT_REMREF_LOCKED(route);
1636 RT_UNLOCK(route);
1637 route = NULL;
1638 lck_mtx_unlock(rnh_lock);
1639 goto respond;
1640 }
1641 if (route->rt_ifp != ifp) {
1642 /*
1643 * Purge any link-layer info caching.
1644 */
1645 if (route->rt_llinfo_purge != NULL)
1646 route->rt_llinfo_purge(route);
1647
1648 /* Adjust route ref count for the interfaces */
1649 if (route->rt_if_ref_fn != NULL) {
1650 route->rt_if_ref_fn(ifp, 1);
1651 route->rt_if_ref_fn(route->rt_ifp, -1);
1652 }
1653 }
1654 /* Change the interface that the existing route is on */
1655 route->rt_ifp = ifp;
1656 /*
1657 * If rmx_mtu is not locked, update it
1658 * to the MTU used by the new interface.
1659 */
1660 if (!(route->rt_rmx.rmx_locks & RTV_MTU))
1661 route->rt_rmx.rmx_mtu = route->rt_ifp->if_mtu;
1662
1663 rtsetifa(route, &best_ia->ia_ifa);
1664 gateway->sdl_index = ifp->if_index;
1665 RT_UNLOCK(route);
1666 lck_mtx_unlock(rnh_lock);
1667 RT_LOCK(route);
1668 /* Don't bother if the route is down */
1669 if (!(route->rt_flags & RTF_UP))
1670 goto respond;
1671 /* Refresh gateway pointer */
1672 gateway = SDL(route->rt_gateway);
1673 }
1674 RT_LOCK_ASSERT_HELD(route);
1675 }
1676
1677 if (gateway->sdl_alen != 0 && bcmp(LLADDR(gateway),
1678 CONST_LLADDR(sender_hw), gateway->sdl_alen) != 0) {
1679 if (route->rt_expire != 0 &&
1680 (arp_verbose || log_arp_warnings)) {
1681 char buf2[3 * MAX_HW_LEN];
1682 log(LOG_INFO, "arp: %s moved from %s to %s on %s\n",
1683 inet_ntop(AF_INET, &sender_ip->sin_addr, ipv4str,
1684 sizeof (ipv4str)),
1685 sdl_addr_to_hex(gateway, buf, sizeof (buf)),
1686 sdl_addr_to_hex(sender_hw, buf2, sizeof (buf2)),
1687 if_name(ifp));
1688 } else if (route->rt_expire == 0) {
1689 if (arp_verbose || log_arp_warnings) {
1690 log(LOG_ERR, "arp: %s attempts to modify "
1691 "permanent entry for %s on %s\n",
1692 sdl_addr_to_hex(sender_hw, buf,
1693 sizeof (buf)),
1694 inet_ntop(AF_INET, &sender_ip->sin_addr,
1695 ipv4str, sizeof (ipv4str)),
1696 if_name(ifp));
1697 }
1698 goto respond;
1699 }
1700 }
1701
1702 /* Copy the sender hardware address in to the route's gateway address */
1703 gateway->sdl_alen = sender_hw->sdl_alen;
1704 bcopy(CONST_LLADDR(sender_hw), LLADDR(gateway), gateway->sdl_alen);
1705
1706 /* Update the expire time for the route and clear the reject flag */
1707 if (route->rt_expire != 0)
1708 rt_setexpire(route, net_uptime() + arpt_keep);
1709 route->rt_flags &= ~RTF_REJECT;
1710
1711 /* cache the gateway (sender HW) address */
1712 arp_llreach_alloc(route, ifp, LLADDR(gateway), gateway->sdl_alen,
1713 (arpop == ARPOP_REPLY));
1714
1715 llinfo = route->rt_llinfo;
1716 /* send a notification that the route is back up */
1717 if (ifp->if_addrlen == IF_LLREACH_MAXLEN &&
1718 route->rt_flags & RTF_ROUTER &&
1719 llinfo->la_flags & LLINFO_RTRFAIL_EVTSENT) {
1720 struct kev_msg ev_msg;
1721 struct kev_in_arpalive in_arpalive;
1722
1723 llinfo->la_flags &= ~LLINFO_RTRFAIL_EVTSENT;
1724 RT_UNLOCK(route);
1725 bzero(&ev_msg, sizeof(ev_msg));
1726 bzero(&in_arpalive, sizeof(in_arpalive));
1727 in_arpalive.link_data.if_family = ifp->if_family;
1728 in_arpalive.link_data.if_unit = ifp->if_unit;
1729 strlcpy(in_arpalive.link_data.if_name, ifp->if_name, IFNAMSIZ);
1730 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1731 ev_msg.kev_class = KEV_NETWORK_CLASS;
1732 ev_msg.kev_subclass = KEV_INET_SUBCLASS;
1733 ev_msg.event_code = KEV_INET_ARPRTRALIVE;
1734 ev_msg.dv[0].data_ptr = &in_arpalive;
1735 ev_msg.dv[0].data_length = sizeof(struct kev_in_arpalive);
1736 kev_post_msg(&ev_msg);
1737 RT_LOCK(route);
1738 }
1739 /* update the llinfo, send a queued packet if there is one */
1740 llinfo->la_asked = 0;
1741 if (llinfo->la_hold) {
1742 struct mbuf *m0 = llinfo->la_hold;
1743 llinfo->la_hold = NULL;
1744 RT_UNLOCK(route);
1745 dlil_output(ifp, PF_INET, m0, (caddr_t)route,
1746 rt_key(route), 0, NULL);
1747 RT_REMREF(route);
1748 route = NULL;
1749 }
1750
1751
1752 respond:
1753 if (route != NULL) {
1754 /* Mark use timestamp if we're going to send a reply */
1755 if (arpop == ARPOP_REQUEST && route->rt_llinfo != NULL)
1756 arp_llreach_use(route->rt_llinfo);
1757 RT_REMREF_LOCKED(route);
1758 RT_UNLOCK(route);
1759 route = NULL;
1760 }
1761
1762 if (arpop != ARPOP_REQUEST)
1763 goto done;
1764
1765 arpstat.rxrequests++;
1766
1767 /* If we are not the target, check if we should proxy */
1768 if (target_ip->sin_addr.s_addr != best_ia_sin.sin_addr.s_addr) {
1769 /*
1770 * Find a proxy route; callee holds a reference on the
1771 * route and returns with the route entry locked, upon
1772 * success.
1773 */
1774 error = arp_lookup_route(&target_ip->sin_addr, 0, SIN_PROXY,
1775 &route, ifp->if_index);
1776
1777 if (error == 0) {
1778 RT_LOCK_ASSERT_HELD(route);
1779 /*
1780 * Return proxied ARP replies only on the interface
1781 * or bridge cluster where this network resides.
1782 * Otherwise we may conflict with the host we are
1783 * proxying for.
1784 */
1785 if (route->rt_ifp != ifp &&
1786 (route->rt_ifp->if_bridge != ifp->if_bridge ||
1787 ifp->if_bridge == NULL)) {
1788 RT_REMREF_LOCKED(route);
1789 RT_UNLOCK(route);
1790 goto done;
1791 }
1792 proxied = *SDL(route->rt_gateway);
1793 target_hw = &proxied;
1794 } else {
1795 /*
1796 * We don't have a route entry indicating we should
1797 * use proxy. If we aren't supposed to proxy all,
1798 * we are done.
1799 */
1800 if (!arp_proxyall)
1801 goto done;
1802
1803 /*
1804 * See if we have a route to the target ip before
1805 * we proxy it.
1806 */
1807 route = rtalloc1_scoped((struct sockaddr *)
1808 (size_t)target_ip, 0, 0, ifp->if_index);
1809 if (!route)
1810 goto done;
1811
1812 /*
1813 * Don't proxy for hosts already on the same interface.
1814 */
1815 RT_LOCK(route);
1816 if (route->rt_ifp == ifp) {
1817 RT_UNLOCK(route);
1818 rtfree(route);
1819 goto done;
1820 }
1821 }
1822 /* Mark use timestamp */
1823 if (route->rt_llinfo != NULL)
1824 arp_llreach_use(route->rt_llinfo);
1825 RT_REMREF_LOCKED(route);
1826 RT_UNLOCK(route);
1827 }
1828
1829 dlil_send_arp(ifp, ARPOP_REPLY,
1830 target_hw, (const struct sockaddr *)target_ip,
1831 sender_hw, (const struct sockaddr *)sender_ip, 0);
1832
1833 done:
1834 if (best_ia != NULL)
1835 IFA_REMREF(&best_ia->ia_ifa);
1836 return (0);
1837 }
1838
1839 void
1840 arp_ifinit(struct ifnet *ifp, struct ifaddr *ifa)
1841 {
1842 struct sockaddr *sa;
1843
1844 IFA_LOCK(ifa);
1845 ifa->ifa_rtrequest = arp_rtrequest;
1846 ifa->ifa_flags |= RTF_CLONING;
1847 sa = ifa->ifa_addr;
1848 IFA_UNLOCK(ifa);
1849 dlil_send_arp(ifp, ARPOP_REQUEST, NULL, sa, NULL, sa, 0);
1850 }
1851
1852 static int
1853 arp_getstat SYSCTL_HANDLER_ARGS
1854 {
1855 #pragma unused(oidp, arg1, arg2)
1856 if (req->oldptr == USER_ADDR_NULL)
1857 req->oldlen = (size_t)sizeof (struct arpstat);
1858
1859 return (SYSCTL_OUT(req, &arpstat, MIN(sizeof (arpstat), req->oldlen)));
1860 }