/*
 * bsd/net/if_llatbl.c — from apple/xnu (xnu-6153.11.26).
 */
1 /*
2 * Copyright (c) 2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 2004 Luigi Rizzo, Alessandro Cerri. All rights reserved.
30 * Copyright (c) 2004-2008 Qing Li. All rights reserved.
31 * Copyright (c) 2008 Kip Macy. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 *
42 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 */
54 #include <sys/cdefs.h>
55 #include <sys/param.h>
56 #include <sys/systm.h>
57 #include <sys/malloc.h>
58 #include <sys/mbuf.h>
59 #include <sys/syslog.h>
60 #include <sys/sysctl.h>
61 #include <sys/socket.h>
62 #include <sys/kernel.h>
63 #include <kern/queue.h>
64 #include <kern/locks.h>
65
66 #include <netinet/in.h>
67 #include <net/if_llatbl.h>
68 #include <net/if.h>
69 #include <net/if_dl.h>
70 #include <net/if_var.h>
71 #include <net/dlil.h>
72 #include <net/route.h>
73 #include <netinet/if_ether.h>
74 #include <netinet6/in6_var.h>
75 #include <netinet6/nd6.h>
76
/* Allocation tag for lltable headers, bucket arrays and related memory. */
MALLOC_DEFINE(M_LLTABLE, "lltable", "link level address tables");

/* Global list of all registered link-layer tables (protected by lltable_rwlock). */
static SLIST_HEAD(, lltable) lltables = SLIST_HEAD_INITIALIZER(lltables);

/* Lock group/attributes backing the global lltables rwlock. */
static lck_grp_attr_t *lltable_rwlock_grp_attr;
static lck_grp_t *lltable_rwlock_grp;
static lck_attr_t *lltable_rwlock_attr;

/* Lock group/attributes used by consumers for per-entry (llentry) locks. */
static lck_grp_attr_t *lle_lock_grp_attr = NULL;
lck_grp_t *lle_lock_grp = NULL;
lck_attr_t *lle_lock_attr = NULL;

/* The rwlock protecting the lltables list (see LLTABLE_RLOCK/WLOCK macros). */
decl_lck_rw_data(, lltable_rwlock_data);
lck_rw_t *lltable_rwlock = &lltable_rwlock_data;

#if 0
static void lltable_unlink(struct lltable *llt);
#endif
static void llentries_unlink(struct lltable *llt, struct llentries *head);

/* Default chained-hash implementations installed by lltable_allocate_htbl(). */
static void htable_unlink_entry(struct llentry *lle);
static void htable_link_entry(struct lltable *llt, struct llentry *lle);
static int htable_foreach_lle(struct lltable *llt, llt_foreach_cb_t *f,
    void *farg);
102 void
103 lltable_glbl_init()
104 {
105 lltable_rwlock_grp_attr = lck_grp_attr_alloc_init();
106 lltable_rwlock_grp = lck_grp_alloc_init("lltable_rwlock",
107 lltable_rwlock_grp_attr);
108 lltable_rwlock_attr = lck_attr_alloc_init();
109 lck_rw_init(lltable_rwlock, lltable_rwlock_grp,
110 lltable_rwlock_attr);
111
112 lle_lock_grp_attr = lck_grp_attr_alloc_init();
113 lle_lock_grp = lck_grp_alloc_init("lle locks", lle_lock_grp_attr);
114 lle_lock_attr = lck_attr_alloc_init();
115 }
116
117 /*
118 * Dump lle state for a specific address family.
119 */
120 static int
121 lltable_dump_af(struct lltable *llt, struct sysctl_req *wr)
122 {
123 int error;
124
125 LLTABLE_LOCK_ASSERT();
126
127 if (llt->llt_ifp->if_flags & IFF_LOOPBACK) {
128 return 0;
129 }
130 error = 0;
131
132 IF_AFDATA_RLOCK(llt->llt_ifp, llt->llt_af);
133 error = lltable_foreach_lle(llt,
134 (llt_foreach_cb_t *)llt->llt_dump_entry, wr);
135 IF_AFDATA_RUNLOCK(llt->llt_ifp, llt->llt_af);
136
137 return error;
138 }
139
140 /*
141 * Dump arp state for a specific address family.
142 */
143 int
144 lltable_sysctl_dumparp(int af, struct sysctl_req *wr)
145 {
146 struct lltable *llt = NULL;
147 int error = 0;
148
149 LLTABLE_RLOCK();
150 SLIST_FOREACH(llt, &lltables, llt_link) {
151 if (llt->llt_af == af) {
152 error = lltable_dump_af(llt, wr);
153 if (error != 0) {
154 goto done;
155 }
156 }
157 }
158 done:
159 LLTABLE_RUNLOCK();
160 return error;
161 }
162
163 /*
164 * Common function helpers for chained hash table.
165 */
166
167 /*
168 * Runs specified callback for each entry in @llt.
169 * Caller does the locking.
170 *
171 */
172 static int
173 htable_foreach_lle(struct lltable *llt, llt_foreach_cb_t *f, void *farg)
174 {
175 struct llentry *lle, *next;
176 int i, error;
177
178 error = 0;
179
180 for (i = 0; i < llt->llt_hsize; i++) {
181 LIST_FOREACH_SAFE(lle, &llt->lle_head[i], lle_next, next) {
182 error = f(llt, lle, farg);
183 if (error != 0) {
184 break;
185 }
186 }
187 }
188
189 return error;
190 }
191
192 static void
193 htable_link_entry(struct lltable *llt, struct llentry *lle)
194 {
195 struct llentries *lleh;
196 uint32_t hashidx;
197
198 if ((lle->la_flags & LLE_LINKED) != 0) {
199 return;
200 }
201
202 IF_AFDATA_WLOCK_ASSERT(llt->llt_ifp, llt->llt_af);
203
204 hashidx = llt->llt_hash(lle, llt->llt_hsize);
205 lleh = &llt->lle_head[hashidx];
206
207 lle->lle_tbl = llt;
208 lle->lle_head = lleh;
209 lle->la_flags |= LLE_LINKED;
210 LIST_INSERT_HEAD(lleh, lle, lle_next);
211 }
212
/*
 * Removes @lle from its hash chain and clears its VALID/LINKED flags.
 * No-op when the entry is not currently linked.
 */
static void
htable_unlink_entry(struct llentry *lle)
{
	if ((lle->la_flags & LLE_LINKED) != 0) {
		/* Unlinking requires the owning table's af-data write lock. */
		IF_AFDATA_WLOCK_ASSERT(lle->lle_tbl->llt_ifp, lle->lle_tbl->llt_af);
		LIST_REMOVE(lle, lle_next);
		lle->la_flags &= ~(LLE_VALID | LLE_LINKED);
#if 0
		/*
		 * NOTE(review): clearing the back-pointers is disabled —
		 * lle_tbl/lle_head apparently remain valid after unlink;
		 * confirm downstream consumers rely on that.
		 */
		lle->lle_tbl = NULL;
		lle->lle_head = NULL;
#endif
	}
}
226
/*
 * Argument bundle for htable_prefix_free_cb(): describes the prefix being
 * purged and accumulates the matching entries on @dchain.
 */
struct prefix_match_data {
	const struct sockaddr *addr;	/* prefix address to match */
	const struct sockaddr *mask;	/* prefix mask */
	struct llentries dchain;	/* chain of matched (wlocked) entries */
	u_int flags;			/* match flags passed to llt_match_prefix */
};
233
234 static int
235 htable_prefix_free_cb(struct lltable *llt, struct llentry *lle, void *farg)
236 {
237 struct prefix_match_data *pmd;
238
239 pmd = (struct prefix_match_data *)farg;
240
241 if (llt->llt_match_prefix(pmd->addr, pmd->mask, pmd->flags, lle)) {
242 LLE_WLOCK(lle);
243 LIST_INSERT_HEAD(&pmd->dchain, lle, lle_chain);
244 }
245
246 return 0;
247 }
248
249 static void
250 htable_prefix_free(struct lltable *llt, const struct sockaddr *addr,
251 const struct sockaddr *mask, u_int flags)
252 {
253 struct llentry *lle, *next;
254 struct prefix_match_data pmd;
255
256 bzero(&pmd, sizeof(pmd));
257 pmd.addr = addr;
258 pmd.mask = mask;
259 pmd.flags = flags;
260 LIST_INIT(&pmd.dchain);
261
262 IF_AFDATA_WLOCK(llt->llt_ifp, llt->llt_af);
263 /* Push matching lles to chain */
264 lltable_foreach_lle(llt, htable_prefix_free_cb, &pmd);
265
266 llentries_unlink(llt, &pmd.dchain);
267 IF_AFDATA_WUNLOCK(llt->llt_ifp, llt->llt_af);
268
269 LIST_FOREACH_SAFE(lle, &pmd.dchain, lle_chain, next)
270 lltable_free_entry(llt, lle);
271 }
272
/*
 * Releases the hash-bucket array and the table header itself.
 * All entries must already have been freed by the caller.
 */
static void
htable_free_tbl(struct lltable *llt)
{
	FREE(llt->lle_head, M_LLTABLE);
	FREE(llt, M_LLTABLE);
}
279
280 static void
281 llentries_unlink(struct lltable *llt, struct llentries *head)
282 {
283 struct llentry *lle, *next;
284
285 LIST_FOREACH_SAFE(lle, head, lle_chain, next)
286 llt->llt_unlink_entry(lle);
287 }
288
/*
 * Helper function used to drop all mbufs in hold queue.
 *
 * Returns the number of held packets, if any, that were dropped.
 */
size_t
lltable_drop_entry_queue(struct llentry *lle)
{
	size_t pkts_dropped;
	struct mbuf *next;

	LLE_WLOCK_ASSERT(lle);

	pkts_dropped = 0;
	while ((lle->la_numheld > 0) && (lle->la_hold != NULL)) {
		/* Free one packet chain at a time; la_hold links via m_nextpkt. */
		next = lle->la_hold->m_nextpkt;
		m_freem(lle->la_hold);
		lle->la_hold = next;
		lle->la_numheld--;
		pkts_dropped++;
	}

	/* Counter and queue must agree once the queue is drained. */
	KASSERT(lle->la_numheld == 0,
	    ("%s: la_numheld %d > 0, pkts_droped %zd", __func__,
	    lle->la_numheld, pkts_dropped));

	return pkts_dropped;
}
317
318 void
319 lltable_set_entry_addr(struct ifnet *ifp, struct llentry *lle,
320 const char *ll_addr)
321 {
322 bcopy(ll_addr, &lle->ll_addr, ifp->if_addrlen);
323 lle->la_flags |= LLE_VALID;
324 lle->r_flags |= RLLE_VALID;
325 }
326
#if 0
/*
 * NOTE(review): this entire region is compiled out with #if 0.
 *
 * XXX The following is related to a change to cache destination layer 2
 * header cached in the entry instead of just the destination mac address
 * Right now leaving this code out and just storing the destination's mac
 * information.
 */
/*
 * Tries to update @lle link-level address.
 * Since update requires AFDATA WLOCK, function
 * drops @lle lock, acquires AFDATA lock and then acquires
 * @lle lock to maintain lock order.
 *
 * Returns 1 on success.
 */
int
lltable_try_set_entry_addr(struct ifnet *ifp, struct llentry *lle,
    const char *linkhdr, size_t linkhdrsize, int lladdr_off)
{
	/* Perform real LLE update */
	/* use afdata WLOCK to update fields */
	LLE_WLOCK_ASSERT(lle);
	LLE_ADDREF(lle);
	LLE_WUNLOCK(lle);
	IF_AFDATA_WLOCK(ifp, lle->lle_tbl->llt_af);
	LLE_WLOCK(lle);

	/*
	 * Since we dropped the LLE lock, another thread might have deleted
	 * this lle. Check and return.
	 */
	if ((lle->la_flags & LLE_DELETED) != 0) {
		IF_AFDATA_WUNLOCK(ifp, lle->lle_tbl->llt_af);
		LLE_FREE_LOCKED(lle);
		return 0;
	}

	/*
	 * Update data.
	 * NOTE(review): this 5-argument call does not match the live
	 * 3-argument lltable_set_entry_addr() above — it belongs to the
	 * cached-linkhdr variant this region was written for.
	 */
	lltable_set_entry_addr(ifp, lle, linkhdr, linkhdrsize, lladdr_off);

	IF_AFDATA_WUNLOCK(ifp, lle->lle_tbl->llt_af);

	LLE_REMREF(lle);

	return 1;
}

/*
 * Helper function used to pre-compute full/partial link-layer
 * header data suitable for feeding into if_output().
 */
int
lltable_calc_llheader(struct ifnet *ifp, int family, char *lladdr,
    char *buf, size_t *bufsize, int *lladdr_off)
{
	struct if_encap_req ereq;
	int error;

	bzero(buf, *bufsize);
	bzero(&ereq, sizeof(ereq));
	ereq.buf = buf;
	ereq.bufsize = *bufsize;
	ereq.rtype = IFENCAP_LL;
	ereq.family = family;
	ereq.lladdr = lladdr;
	ereq.lladdr_len = ifp->if_addrlen;
	error = ifp->if_requestencap(ifp, &ereq);
	if (error == 0) {
		*bufsize = ereq.bufsize;
		*lladdr_off = ereq.lladdr_off;
	}

	return error;
}

/*
 * Update link-layer header for given @lle after
 * interface lladdr was changed.
 */
static int
llentry_update_ifaddr(struct lltable *llt, struct llentry *lle, void *farg)
{
	struct ifnet *ifp;
	u_char linkhdr[LLE_MAX_LINKHDR];
	size_t linkhdrsize;
	u_char *lladdr;
	int lladdr_off;

	ifp = (struct ifnet *)farg;

	lladdr = (void *)lle->ll_addr;

	LLE_WLOCK(lle);
	if ((lle->la_flags & LLE_VALID) == 0) {
		LLE_WUNLOCK(lle);
		return 0;
	}

	/* Interface-address entries track the interface's own lladdr. */
	if ((lle->la_flags & LLE_IFADDR) != 0) {
		lladdr = (void *)IF_LLADDR(ifp);
	}

	linkhdrsize = sizeof(linkhdr);
	lltable_calc_llheader(ifp, llt->llt_af, (void *)lladdr, (void *)linkhdr, &linkhdrsize,
	    &lladdr_off);
	memcpy(lle->r_linkdata, linkhdr, linkhdrsize);
	LLE_WUNLOCK(lle);

	return 0;
}

/*
 * Update all calculated headers for given @llt
 */
void
lltable_update_ifaddr(struct lltable *llt)
{
	if (llt->llt_ifp->if_flags & IFF_LOOPBACK) {
		return;
	}

	IF_AFDATA_WLOCK(llt->llt_ifp, llt->llt_af);
	lltable_foreach_lle(llt, llentry_update_ifaddr, llt->llt_ifp);
	IF_AFDATA_WUNLOCK(llt->llt_ifp, llt->llt_af);
}
#endif
453
454 /*
455 *
456 * Performs generic cleanup routines and frees lle.
457 *
458 * Called for non-linked entries, with callouts and
459 * other AF-specific cleanups performed.
460 *
461 * @lle must be passed WLOCK'ed
462 *
463 * Returns the number of held packets, if any, that were dropped.
464 */
465 size_t
466 llentry_free(struct llentry *lle)
467 {
468 size_t pkts_dropped;
469
470 LLE_WLOCK_ASSERT(lle);
471
472 KASSERT((lle->la_flags & LLE_LINKED) == 0, ("freeing linked lle"));
473
474 pkts_dropped = lltable_drop_entry_queue(lle);
475
476 LLE_FREE_LOCKED(lle);
477
478 return pkts_dropped;
479 }
480
/*
 * (al)locate an llentry for address dst (equivalent to rtalloc for new-arp).
 *
 * If found the llentry * is returned referenced and unlocked.
 */
struct llentry *
llentry_alloc(struct ifnet *ifp, struct lltable *lt,
    struct sockaddr_storage *dst)
{
	struct llentry *la, *la_tmp;

	/* Fast path: look up an existing entry under the af-data read lock. */
	IF_AFDATA_RLOCK(ifp, lt->llt_af);
	la = lla_lookup(lt, LLE_EXCLUSIVE, (struct sockaddr *)dst);
	IF_AFDATA_RUNLOCK(ifp, lt->llt_af);

	if (la != NULL) {
		/* lla_lookup(LLE_EXCLUSIVE) returned the entry wlocked. */
		LLE_ADDREF(la);
		LLE_WUNLOCK(la);
		return la;
	}

	if ((ifp->if_flags & IFF_NOARP) == 0) {
		la = lltable_alloc_entry(lt, 0, (struct sockaddr *)dst);
		if (la == NULL) {
			return NULL;
		}
		/*
		 * Re-check under the write lock: another thread may have
		 * inserted an entry between the lookup above and here.
		 */
		IF_AFDATA_WLOCK(ifp, lt->llt_af);
		LLE_WLOCK(la);
		/* Prefer any existing LLE over newly-created one */
		la_tmp = lla_lookup(lt, LLE_EXCLUSIVE, (struct sockaddr *)dst);
		if (la_tmp == NULL) {
			lltable_link_entry(lt, la);
		}
		IF_AFDATA_WUNLOCK(ifp, lt->llt_af);
		if (la_tmp != NULL) {
			/* Lost the race: discard ours, return the winner's. */
			lltable_free_entry(lt, la);
			la = la_tmp;
		}
		LLE_ADDREF(la);
		LLE_WUNLOCK(la);
	}

	return la;
}
525
526 /*
527 * Free all entries from given table and free itself.
528 */
529
530 static int
531 lltable_free_cb(struct lltable *llt, struct llentry *lle, void *farg)
532 {
533 #pragma unused(llt)
534 struct llentries *dchain;
535
536 dchain = (struct llentries *)farg;
537
538 LLE_WLOCK(lle);
539 LIST_INSERT_HEAD(dchain, lle, lle_chain);
540
541 return 0;
542 }
543
/*
 * Free all entries from given table and free itself.
 *
 * Entries are collected on a private chain and unlinked under the
 * af-data write lock, then freed after the lock is dropped.
 *
 * NOTE(review): despite the name, the table itself is NOT freed here —
 * llt_free_tbl is deliberately commented out because interfaces are
 * recycled (see trailing comment).
 */
void
lltable_free(struct lltable *llt)
{
	struct llentry *lle, *next;
	struct llentries dchain;

	KASSERT(llt != NULL, ("%s: llt is NULL", __func__));

	//lltable_unlink(llt);

	LIST_INIT(&dchain);
	IF_AFDATA_WLOCK(llt->llt_ifp, llt->llt_af);
	/* Push all lles to @dchain */
	lltable_foreach_lle(llt, lltable_free_cb, &dchain);
	llentries_unlink(llt, &dchain);
	IF_AFDATA_WUNLOCK(llt->llt_ifp, llt->llt_af);

	LIST_FOREACH_SAFE(lle, &dchain, lle_chain, next) {
#if 0
		/* Disabled: cancel a pending timeout, dropping its ref if armed. */
		if (thread_call_cancel(lle->lle_timer) == TRUE) {
			LLE_REMREF(lle);
		}
#endif
		/* Entries were wlocked by lltable_free_cb(), as llentry_free expects. */
		llentry_free(lle);
	}

	/* XXX We recycle network interfaces so we only purge */
	/* llt->llt_free_tbl(llt); */
}
576
#if 0
/*
 * Compiled out: drops the held mbuf chain of every entry in all tables
 * of address family @af.
 *
 * NOTE(review): frees only the head of la_hold and does not reset
 * la_numheld — confirm before re-enabling.
 */
void
lltable_drain(int af)
{
	struct lltable *llt;
	struct llentry *lle;
	register int i;

	LLTABLE_RLOCK();
	SLIST_FOREACH(llt, &lltables, llt_link) {
		if (llt->llt_af != af) {
			continue;
		}

		for (i = 0; i < llt->llt_hsize; i++) {
			LIST_FOREACH(lle, &llt->lle_head[i], lle_next) {
				LLE_WLOCK(lle);
				if (lle->la_hold) {
					m_freem(lle->la_hold);
					lle->la_hold = NULL;
				}
				LLE_WUNLOCK(lle);
			}
		}
	}
	LLTABLE_RUNLOCK();
}
#endif
605
/*
 * Deletes an address from given lltable.
 * Used for userland interaction to remove
 * individual entries. Skips entries added by OS.
 *
 * Returns 0 on success, ENOENT when no entry matches @l3addr, or EPERM
 * when the entry is an interface-address entry (LLE_IFADDR) and the
 * caller did not pass LLE_IFADDR in @flags.
 */
int
lltable_delete_addr(struct lltable *llt, u_int flags,
    const struct sockaddr *l3addr)
{
	struct llentry *lle;
	struct ifnet *ifp;

	ifp = llt->llt_ifp;
	IF_AFDATA_WLOCK(ifp, llt->llt_af);
	/* lla_lookup(LLE_EXCLUSIVE) returns the entry wlocked. */
	lle = lla_lookup(llt, LLE_EXCLUSIVE, l3addr);

	if (lle == NULL) {
		IF_AFDATA_WUNLOCK(ifp, llt->llt_af);
		return ENOENT;
	}
	if ((lle->la_flags & LLE_IFADDR) != 0 && (flags & LLE_IFADDR) == 0) {
		IF_AFDATA_WUNLOCK(ifp, llt->llt_af);
		LLE_WUNLOCK(lle);
		return EPERM;
	}

	/* Unlink under the af-data lock; the delete callback disposes of lle. */
	lltable_unlink_entry(llt, lle);
	IF_AFDATA_WUNLOCK(ifp, llt->llt_af);

	llt->llt_delete_entry(llt, lle);

	return 0;
}
639
640 void
641 lltable_prefix_free(int af, struct sockaddr *addr, struct sockaddr *mask,
642 u_int flags)
643 {
644 struct lltable *llt;
645
646 LLTABLE_RLOCK();
647 SLIST_FOREACH(llt, &lltables, llt_link) {
648 if (llt->llt_af != af) {
649 continue;
650 }
651
652 llt->llt_prefix_free(llt, addr, mask, flags);
653 }
654 LLTABLE_RUNLOCK();
655 }
656
657 struct lltable *
658 lltable_allocate_htbl(uint32_t hsize)
659 {
660 struct lltable *llt;
661 int i;
662
663 MALLOC(llt, struct lltable *, sizeof(struct lltable), M_LLTABLE, M_WAITOK | M_ZERO);
664 llt->llt_hsize = hsize;
665 MALLOC(llt->lle_head, struct llentries *, sizeof(struct llentries) * hsize,
666 M_LLTABLE, M_WAITOK | M_ZERO);
667
668 for (i = 0; i < llt->llt_hsize; i++) {
669 LIST_INIT(&llt->lle_head[i]);
670 }
671
672 /* Set some default callbacks */
673 llt->llt_link_entry = htable_link_entry;
674 llt->llt_unlink_entry = htable_unlink_entry;
675 llt->llt_prefix_free = htable_prefix_free;
676 llt->llt_foreach_entry = htable_foreach_lle;
677 llt->llt_free_tbl = htable_free_tbl;
678
679 return llt;
680 }
681
/*
 * Links lltable to global llt list.
 * Serialized by the global lltables rwlock.
 */
void
lltable_link(struct lltable *llt)
{
	LLTABLE_WLOCK();
	SLIST_INSERT_HEAD(&lltables, llt, llt_link);
	LLTABLE_WUNLOCK();
}
692
#if 0
/* Compiled out: removes @llt from the global list (counterpart of lltable_link). */
static void
lltable_unlink(struct lltable *llt)
{
	LLTABLE_WLOCK();
	SLIST_REMOVE(&lltables, llt, lltable, llt_link);
	LLTABLE_WUNLOCK();
}
#endif
702
703 /*
704 * External methods used by lltable consumers
705 */
706
/* Dispatch: run @f on every entry of @llt (caller handles locking). */
int
lltable_foreach_lle(struct lltable *llt, llt_foreach_cb_t *f, void *farg)
{
	return llt->llt_foreach_entry(llt, f, farg);
}
712
/* Dispatch: allocate a new entry for @l3addr via the table's allocator. */
struct llentry *
lltable_alloc_entry(struct lltable *llt, u_int flags,
    const struct sockaddr *l3addr)
{
	return llt->llt_alloc_entry(llt, flags, l3addr);
}
719
/* Dispatch: free @lle via the table's free callback. */
void
lltable_free_entry(struct lltable *llt, struct llentry *lle)
{
	llt->llt_free_entry(llt, lle);
}
725
/* Dispatch: insert @lle into @llt via the table's link callback. */
void
lltable_link_entry(struct lltable *llt, struct llentry *lle)
{
	llt->llt_link_entry(llt, lle);
}
731
/* Dispatch: detach @lle from @llt via the table's unlink callback. */
void
lltable_unlink_entry(struct lltable *llt, struct llentry *lle)
{
	llt->llt_unlink_entry(lle);
}
737
738 void
739 lltable_fill_sa_entry(const struct llentry *lle, struct sockaddr *sa)
740 {
741 struct lltable *llt;
742
743 llt = lle->lle_tbl;
744 llt->llt_fill_sa_entry(lle, sa);
745 }
746
/* Accessor: the interface this table is attached to. */
struct ifnet *
lltable_get_ifp(const struct lltable *llt)
{
	return llt->llt_ifp;
}
752
/* Accessor: the address family this table serves. */
int
lltable_get_af(const struct lltable *llt)
{
	return llt->llt_af;
}
758
/* Maps an interface index to its ifnet via the global ifindex2ifnet table. */
#define ifnet_byindex(index) ifindex2ifnet[(index)]
760
/*
 * Called in route_output when rtm_flags contains RTF_LLDATA.
 *
 * Handles RTM_ADD (install a static LLE for the RTAX_DST address with
 * the link-layer address from RTAX_GATEWAY) and RTM_DELETE. Returns 0
 * on success or an errno.
 */
int
lla_rt_output(struct rt_msghdr *rtm, struct rt_addrinfo *info)
{
	struct sockaddr_dl *dl =
	    (struct sockaddr_dl *)(void *)info->rti_info[RTAX_GATEWAY];
	struct sockaddr *dst = (struct sockaddr *)info->rti_info[RTAX_DST];
	struct ifnet *ifp;
	struct lltable *llt;
	struct llentry *lle, *lle_tmp;
	u_int laflags = 0;
	int error;

	KASSERT(dl != NULL && dl->sdl_family == AF_LINK,
	    ("%s: invalid dl\n", __func__));

	ifp = ifnet_byindex(dl->sdl_index);
	if (ifp == NULL) {
		log(LOG_INFO, "%s: invalid ifp (sdl_index %d)\n",
		    __func__, dl->sdl_index);
		return EINVAL;
	}

	/* XXX linked list may be too expensive */
	LLTABLE_RLOCK();
	SLIST_FOREACH(llt, &lltables, llt_link) {
		if (llt->llt_af == dst->sa_family &&
		    llt->llt_ifp == ifp) {
			break;
		}
	}
	LLTABLE_RUNLOCK();
	/*
	 * NOTE(review): if no table matched, llt is NULL here and only the
	 * KASSERT guards the dereferences below — confirm KASSERT is fatal
	 * in all relevant build configurations.
	 */
	KASSERT(llt != NULL, ("Yep, ugly hacks are bad\n"));

	error = 0;

	switch (rtm->rtm_type) {
	case RTM_ADD:
		/* Add static LLE */
		laflags = 0;
		if (rtm->rtm_rmx.rmx_expire == 0) {
			/* No expiration requested => permanent (static) entry. */
			laflags = LLE_STATIC;
		}
		lle = lltable_alloc_entry(llt, laflags, dst);
		if (lle == NULL) {
			return ENOMEM;
		}
#if 0
		linkhdrsize = sizeof(linkhdr);
		if (lltable_calc_llheader(ifp, dst->sa_family, LLADDR(dl),
		    (void *)linkhdr, &linkhdrsize, &lladdr_off) != 0) {
			return EINVAL;
		}
#endif
		lltable_set_entry_addr(ifp, lle, LLADDR(dl));

		if (rtm->rtm_flags & RTF_ANNOUNCE) {
			lle->la_flags |= LLE_PUB;
		}
		lle->la_expire = rtm->rtm_rmx.rmx_expire;

		/* Snapshot flags before the entry becomes reachable. */
		laflags = lle->la_flags;

		/* Try to link new entry */
		lle_tmp = NULL;
		IF_AFDATA_WLOCK(ifp, llt->llt_af);
		LLE_WLOCK(lle);
		lle_tmp = lla_lookup(llt, LLE_EXCLUSIVE, dst);
		if (lle_tmp != NULL) {
			/* Check if we are trying to replace immutable entry */
			if ((lle_tmp->la_flags & LLE_IFADDR) != 0) {
				IF_AFDATA_WUNLOCK(ifp, llt->llt_af);
				LLE_WUNLOCK(lle_tmp);
				/* lle is still wlocked; free callback disposes of it. */
				lltable_free_entry(llt, lle);
				return EPERM;
			}
			/* Unlink existing entry from table */
			lltable_unlink_entry(llt, lle_tmp);
		}
		lltable_link_entry(llt, lle);
		IF_AFDATA_WUNLOCK(ifp, llt->llt_af);

		if (lle_tmp != NULL) {
			/* Replaced entry: announce its expiry, then free it. */
			EVENTHANDLER_INVOKE(NULL, lle_event, lle_tmp, LLENTRY_EXPIRED);
			lltable_free_entry(llt, lle_tmp);
		}

		/*
		 * By invoking LLE handler here we might get
		 * two events on static LLE entry insertion
		 * in routing socket. However, since we might have
		 * other subscribers we need to generate this event.
		 */
		EVENTHANDLER_INVOKE(NULL, lle_event, lle, LLENTRY_RESOLVED);
		LLE_WUNLOCK(lle);
#ifdef INET
		/* gratuitous ARP */
		if ((laflags & LLE_PUB) && dst->sa_family == AF_INET) {
			dlil_send_arp(ifp, ARPOP_REQUEST, NULL, dst, NULL, dst, 0);
		}
#endif

		break;

	case RTM_DELETE:
		return lltable_delete_addr(llt, 0, dst);

	default:
		error = EINVAL;
	}

	return error;
}