/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 2004 Luigi Rizzo, Alessandro Cerri. All rights reserved.
 * Copyright (c) 2004-2008 Qing Li. All rights reserved.
 * Copyright (c) 2008 Kip Macy. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <kern/queue.h>
#include <kern/locks.h>

#include <netinet/in.h>
#include <net/if_llatbl.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_var.h>
#include <net/dlil.h>
#include <net/route.h>
#include <netinet/if_ether.h>
#include <netinet6/in6_var.h>
#include <netinet6/nd6.h>

MALLOC_DEFINE(M_LLTABLE, "lltable", "link level address tables");

static SLIST_HEAD(, lltable) lltables = SLIST_HEAD_INITIALIZER(lltables);

static lck_grp_attr_t *lltable_rwlock_grp_attr;
static lck_grp_t *lltable_rwlock_grp;
static lck_attr_t *lltable_rwlock_attr;

static lck_grp_attr_t *lle_lock_grp_attr = NULL;
lck_grp_t *lle_lock_grp = NULL;
lck_attr_t *lle_lock_attr = NULL;

decl_lck_rw_data(, lltable_rwlock_data);
lck_rw_t *lltable_rwlock = &lltable_rwlock_data;

#if 0
static void lltable_unlink(struct lltable *llt);
#endif
static void llentries_unlink(struct lltable *llt, struct llentries *head);

static void htable_unlink_entry(struct llentry *lle);
static void htable_link_entry(struct lltable *llt, struct llentry *lle);
static int htable_foreach_lle(struct lltable *llt, llt_foreach_cb_t *f,
    void *farg);
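
/*
 * One-time initialization of the lock group, lock attributes and
 * rwlock used by the link-layer address tables.
 */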
void
lltable_glbl_init()
{
	lltable_rwlock_grp_attr = lck_grp_attr_alloc_init();
	lltable_rwlock_grp = lck_grp_alloc_init("lltable_rwlock",
	    lltable_rwlock_grp_attr);
	lltable_rwlock_attr = lck_attr_alloc_init();
	lck_rw_init(lltable_rwlock, lltable_rwlock_grp,
	    lltable_rwlock_attr);

	lle_lock_grp_attr = lck_grp_attr_alloc_init();
	lle_lock_grp = lck_grp_alloc_init("lle locks", lle_lock_grp_attr);
	lle_lock_attr = lck_attr_alloc_init();
}

/*
 * Dump lle state for a specific address family.
 */
static int
lltable_dump_af(struct lltable *llt, struct sysctl_req *wr)
{
	int error;

	LLTABLE_LOCK_ASSERT();

	if (llt->llt_ifp->if_flags & IFF_LOOPBACK)
		return (0);
	error = 0;

	IF_AFDATA_RLOCK(llt->llt_ifp, llt->llt_af);
	error = lltable_foreach_lle(llt,
	    (llt_foreach_cb_t *)llt->llt_dump_entry, wr);
	IF_AFDATA_RUNLOCK(llt->llt_ifp, llt->llt_af);

	return (error);
}

/*
 * Dump arp state for a specific address family.
 */
int
lltable_sysctl_dumparp(int af, struct sysctl_req *wr)
{
	struct lltable *llt = NULL;
	int error = 0;

	LLTABLE_RLOCK();
	SLIST_FOREACH(llt, &lltables, llt_link) {
		if (llt->llt_af == af) {
			error = lltable_dump_af(llt, wr);
			if (error != 0)
				goto done;
		}
	}
done:
	LLTABLE_RUNLOCK();
	return (error);
}

/*
 * Common function helpers for chained hash table.
 */

/*
 * Runs specified callback for each entry in @llt.
 * Caller does the locking.
 */
static int
htable_foreach_lle(struct lltable *llt, llt_foreach_cb_t *f, void *farg)
{
	struct llentry *lle, *next;
	int i, error;

	error = 0;

	for (i = 0; i < llt->llt_hsize; i++) {
		LIST_FOREACH_SAFE(lle, &llt->lle_head[i], lle_next, next) {
			error = f(llt, lle, farg);
			if (error != 0)
				break;
		}
	}

	return (error);
}
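
/*
 * Links @lle into its hash bucket in @llt.
 * Requires the af-data write lock on the owning interface.
 */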
static void
htable_link_entry(struct lltable *llt, struct llentry *lle)
{
	struct llentries *lleh;
	uint32_t hashidx;

	if ((lle->la_flags & LLE_LINKED) != 0)
		return;

	IF_AFDATA_WLOCK_ASSERT(llt->llt_ifp, llt->llt_af);

	hashidx = llt->llt_hash(lle, llt->llt_hsize);
	lleh = &llt->lle_head[hashidx];

	lle->lle_tbl = llt;
	lle->lle_head = lleh;
	lle->la_flags |= LLE_LINKED;
	LIST_INSERT_HEAD(lleh, lle, lle_next);
}
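
/*
 * Unlinks @lle from its hash bucket and clears LLE_VALID/LLE_LINKED.
 * Requires the af-data write lock on the owning interface.
 */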
static void
htable_unlink_entry(struct llentry *lle)
{
	if ((lle->la_flags & LLE_LINKED) != 0) {
		IF_AFDATA_WLOCK_ASSERT(lle->lle_tbl->llt_ifp, lle->lle_tbl->llt_af);
		LIST_REMOVE(lle, lle_next);
		lle->la_flags &= ~(LLE_VALID | LLE_LINKED);
#if 0
		lle->lle_tbl = NULL;
		lle->lle_head = NULL;
#endif
	}
}
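
/*
 * State shared between htable_prefix_free() and its per-entry callback.
 */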
struct prefix_match_data {
	const struct sockaddr *addr;
	const struct sockaddr *mask;
	struct llentries dchain;
	u_int flags;
};
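
/*
 * Callback: if @lle matches the prefix described by @farg, lock it and
 * push it onto the delete chain.
 */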
static int
htable_prefix_free_cb(struct lltable *llt, struct llentry *lle, void *farg)
{
	struct prefix_match_data *pmd;

	pmd = (struct prefix_match_data *)farg;

	if (llt->llt_match_prefix(pmd->addr, pmd->mask, pmd->flags, lle)) {
		LLE_WLOCK(lle);
		LIST_INSERT_HEAD(&pmd->dchain, lle, lle_chain);
	}

	return (0);
}
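
/*
 * Unlinks and frees all entries in @llt matching the @addr/@mask prefix.
 */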
static void
htable_prefix_free(struct lltable *llt, const struct sockaddr *addr,
    const struct sockaddr *mask, u_int flags)
{
	struct llentry *lle, *next;
	struct prefix_match_data pmd;

	bzero(&pmd, sizeof(pmd));
	pmd.addr = addr;
	pmd.mask = mask;
	pmd.flags = flags;
	LIST_INIT(&pmd.dchain);

	IF_AFDATA_WLOCK(llt->llt_ifp, llt->llt_af);
	/* Push matching lles to chain */
	lltable_foreach_lle(llt, htable_prefix_free_cb, &pmd);

	llentries_unlink(llt, &pmd.dchain);
	IF_AFDATA_WUNLOCK(llt->llt_ifp, llt->llt_af);

	LIST_FOREACH_SAFE(lle, &pmd.dchain, lle_chain, next)
		lltable_free_entry(llt, lle);
}
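
/*
 * Releases the hash bucket array and the table itself.
 */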
static void
htable_free_tbl(struct lltable *llt)
{
	FREE(llt->lle_head, M_LLTABLE);
	FREE(llt, M_LLTABLE);
}
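
/*
 * Unlinks every entry on @head from its table.
 */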
static void
llentries_unlink(struct lltable *llt, struct llentries *head)
{
	struct llentry *lle, *next;

	LIST_FOREACH_SAFE(lle, head, lle_chain, next)
		llt->llt_unlink_entry(lle);
}

/*
 * Helper function used to drop all mbufs in hold queue.
 *
 * Returns the number of held packets, if any, that were dropped.
 */
size_t
lltable_drop_entry_queue(struct llentry *lle)
{
	size_t pkts_dropped;
	struct mbuf *next;

	LLE_WLOCK_ASSERT(lle);

	pkts_dropped = 0;
	while ((lle->la_numheld > 0) && (lle->la_hold != NULL)) {
		next = lle->la_hold->m_nextpkt;
		m_freem(lle->la_hold);
		lle->la_hold = next;
		lle->la_numheld--;
		pkts_dropped++;
	}

	KASSERT(lle->la_numheld == 0,
	    ("%s: la_numheld %d > 0, pkts_dropped %zd", __func__,
	    lle->la_numheld, pkts_dropped));

	return (pkts_dropped);
}
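
/*
 * Copies the link-layer address @ll_addr into @lle and marks the
 * entry valid.
 */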
void
lltable_set_entry_addr(struct ifnet *ifp, struct llentry *lle,
    const char *ll_addr)
{
	bcopy(ll_addr, &lle->ll_addr, ifp->if_addrlen);
	lle->la_flags |= LLE_VALID;
	lle->r_flags |= RLLE_VALID;
}

#if 0
/*
 * XXX The following is related to a change to cache the destination's
 * layer 2 header in the entry instead of just the destination mac address.
 * Right now leaving this code out and just storing the destination's mac
 * information.
 */
/*
 * Tries to update @lle link-level address.
 * Since update requires AFDATA WLOCK, function
 * drops @lle lock, acquires AFDATA lock and then acquires
 * @lle lock to maintain lock order.
 *
 * Returns 1 on success.
 */
int
lltable_try_set_entry_addr(struct ifnet *ifp, struct llentry *lle,
    const char *linkhdr, size_t linkhdrsize, int lladdr_off)
{
	/* Perform real LLE update */
	/* use afdata WLOCK to update fields */
	LLE_WLOCK_ASSERT(lle);
	LLE_ADDREF(lle);
	LLE_WUNLOCK(lle);
	IF_AFDATA_WLOCK(ifp, lle->lle_tbl->llt_af);
	LLE_WLOCK(lle);

	/*
	 * Since we dropped the LLE lock, another thread might have deleted
	 * this lle. Check and return.
	 */
	if ((lle->la_flags & LLE_DELETED) != 0) {
		IF_AFDATA_WUNLOCK(ifp, lle->lle_tbl->llt_af);
		LLE_FREE_LOCKED(lle);
		return (0);
	}

	/* Update data */
	lltable_set_entry_addr(ifp, lle, linkhdr, linkhdrsize, lladdr_off);

	IF_AFDATA_WUNLOCK(ifp, lle->lle_tbl->llt_af);

	LLE_REMREF(lle);

	return (1);
}

/*
 * Helper function used to pre-compute full/partial link-layer
 * header data suitable for feeding into if_output().
 */
int
lltable_calc_llheader(struct ifnet *ifp, int family, char *lladdr,
    char *buf, size_t *bufsize, int *lladdr_off)
{
	struct if_encap_req ereq;
	int error;

	bzero(buf, *bufsize);
	bzero(&ereq, sizeof(ereq));
	ereq.buf = buf;
	ereq.bufsize = *bufsize;
	ereq.rtype = IFENCAP_LL;
	ereq.family = family;
	ereq.lladdr = lladdr;
	ereq.lladdr_len = ifp->if_addrlen;
	error = ifp->if_requestencap(ifp, &ereq);
	if (error == 0) {
		*bufsize = ereq.bufsize;
		*lladdr_off = ereq.lladdr_off;
	}

	return (error);
}

/*
 * Update link-layer header for given @lle after
 * interface lladdr was changed.
 */
static int
llentry_update_ifaddr(struct lltable *llt, struct llentry *lle, void *farg)
{
	struct ifnet *ifp;
	u_char linkhdr[LLE_MAX_LINKHDR];
	size_t linkhdrsize;
	u_char *lladdr;
	int lladdr_off;

	ifp = (struct ifnet *)farg;

	lladdr = (void *)lle->ll_addr;

	LLE_WLOCK(lle);
	if ((lle->la_flags & LLE_VALID) == 0) {
		LLE_WUNLOCK(lle);
		return (0);
	}

	if ((lle->la_flags & LLE_IFADDR) != 0)
		lladdr = (void *)IF_LLADDR(ifp);

	linkhdrsize = sizeof(linkhdr);
	lltable_calc_llheader(ifp, llt->llt_af, (void *)lladdr,
	    (void *)linkhdr, &linkhdrsize, &lladdr_off);
	memcpy(lle->r_linkdata, linkhdr, linkhdrsize);
	LLE_WUNLOCK(lle);

	return (0);
}

/*
 * Update all calculated headers for given @llt
 */
void
lltable_update_ifaddr(struct lltable *llt)
{
	if (llt->llt_ifp->if_flags & IFF_LOOPBACK)
		return;

	IF_AFDATA_WLOCK(llt->llt_ifp, llt->llt_af);
	lltable_foreach_lle(llt, llentry_update_ifaddr, llt->llt_ifp);
	IF_AFDATA_WUNLOCK(llt->llt_ifp, llt->llt_af);
}
#endif
/*
 * Performs generic cleanup routines and frees lle.
 *
 * Called for non-linked entries, with callouts and
 * other AF-specific cleanups performed.
 *
 * @lle must be passed WLOCK'ed
 *
 * Returns the number of held packets, if any, that were dropped.
 */
size_t
llentry_free(struct llentry *lle)
{
	size_t pkts_dropped;

	LLE_WLOCK_ASSERT(lle);

	KASSERT((lle->la_flags & LLE_LINKED) == 0, ("freeing linked lle"));

	pkts_dropped = lltable_drop_entry_queue(lle);

	LLE_FREE_LOCKED(lle);

	return (pkts_dropped);
}

/*
 * (al)locate an llentry for address dst (equivalent to rtalloc for new-arp).
 *
 * If found the llentry * is returned referenced and unlocked.
 */
struct llentry *
llentry_alloc(struct ifnet *ifp, struct lltable *lt,
    struct sockaddr_storage *dst)
{
	struct llentry *la, *la_tmp;

	IF_AFDATA_RLOCK(ifp, lt->llt_af);
	la = lla_lookup(lt, LLE_EXCLUSIVE, (struct sockaddr *)dst);
	IF_AFDATA_RUNLOCK(ifp, lt->llt_af);

	if (la != NULL) {
		LLE_ADDREF(la);
		LLE_WUNLOCK(la);
		return (la);
	}

	if ((ifp->if_flags & IFF_NOARP) == 0) {
		la = lltable_alloc_entry(lt, 0, (struct sockaddr *)dst);
		if (la == NULL)
			return (NULL);
		IF_AFDATA_WLOCK(ifp, lt->llt_af);
		LLE_WLOCK(la);
		/* Prefer any existing LLE over newly-created one */
		la_tmp = lla_lookup(lt, LLE_EXCLUSIVE, (struct sockaddr *)dst);
		if (la_tmp == NULL)
			lltable_link_entry(lt, la);
		IF_AFDATA_WUNLOCK(ifp, lt->llt_af);
		if (la_tmp != NULL) {
			lltable_free_entry(lt, la);
			la = la_tmp;
		}
		LLE_ADDREF(la);
		LLE_WUNLOCK(la);
	}

	return (la);
}

/*
 * Callback used by lltable_free(): lock each entry and push it onto the
 * delete chain.
 */
static int
lltable_free_cb(struct lltable *llt, struct llentry *lle, void *farg)
{
#pragma unused(llt)
	struct llentries *dchain;

	dchain = (struct llentries *)farg;

	LLE_WLOCK(lle);
	LIST_INSERT_HEAD(dchain, lle, lle_chain);

	return (0);
}

/*
 * Free all entries from given table and free itself.
 */
void
lltable_free(struct lltable *llt)
{
	struct llentry *lle, *next;
	struct llentries dchain;

	KASSERT(llt != NULL, ("%s: llt is NULL", __func__));

	//lltable_unlink(llt);

	LIST_INIT(&dchain);
	IF_AFDATA_WLOCK(llt->llt_ifp, llt->llt_af);
	/* Push all lles to @dchain */
	lltable_foreach_lle(llt, lltable_free_cb, &dchain);
	llentries_unlink(llt, &dchain);
	IF_AFDATA_WUNLOCK(llt->llt_ifp, llt->llt_af);

	LIST_FOREACH_SAFE(lle, &dchain, lle_chain, next) {
#if 0
		if (thread_call_cancel(lle->lle_timer) == TRUE)
			LLE_REMREF(lle);
#endif
		llentry_free(lle);
	}

	/* XXX We recycle network interfaces so we only purge */
	/* llt->llt_free_tbl(llt); */
}

#if 0
void
lltable_drain(int af)
{
	struct lltable *llt;
	struct llentry *lle;
	register int i;

	LLTABLE_RLOCK();
	SLIST_FOREACH(llt, &lltables, llt_link) {
		if (llt->llt_af != af)
			continue;

		for (i = 0; i < llt->llt_hsize; i++) {
			LIST_FOREACH(lle, &llt->lle_head[i], lle_next) {
				LLE_WLOCK(lle);
				if (lle->la_hold) {
					m_freem(lle->la_hold);
					lle->la_hold = NULL;
				}
				LLE_WUNLOCK(lle);
			}
		}
	}
	LLTABLE_RUNLOCK();
}
#endif

/*
 * Deletes an address from given lltable.
 * Used for userland interaction to remove
 * individual entries. Skips entries added by OS.
 */
int
lltable_delete_addr(struct lltable *llt, u_int flags,
    const struct sockaddr *l3addr)
{
	struct llentry *lle;
	struct ifnet *ifp;

	ifp = llt->llt_ifp;
	IF_AFDATA_WLOCK(ifp, llt->llt_af);
	lle = lla_lookup(llt, LLE_EXCLUSIVE, l3addr);

	if (lle == NULL) {
		IF_AFDATA_WUNLOCK(ifp, llt->llt_af);
		return (ENOENT);
	}
	if ((lle->la_flags & LLE_IFADDR) != 0 && (flags & LLE_IFADDR) == 0) {
		IF_AFDATA_WUNLOCK(ifp, llt->llt_af);
		LLE_WUNLOCK(lle);
		return (EPERM);
	}

	lltable_unlink_entry(llt, lle);
	IF_AFDATA_WUNLOCK(ifp, llt->llt_af);

	llt->llt_delete_entry(llt, lle);

	return (0);
}
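
/*
 * Invokes the per-table prefix-free handler on every table registered
 * for address family @af.
 */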
void
lltable_prefix_free(int af, struct sockaddr *addr, struct sockaddr *mask,
    u_int flags)
{
	struct lltable *llt;

	LLTABLE_RLOCK();
	SLIST_FOREACH(llt, &lltables, llt_link) {
		if (llt->llt_af != af)
			continue;

		llt->llt_prefix_free(llt, addr, mask, flags);
	}
	LLTABLE_RUNLOCK();
}
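
/*
 * Allocates an lltable with @hsize hash buckets and installs the
 * default chained hash table callbacks.
 */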
struct lltable *
lltable_allocate_htbl(uint32_t hsize)
{
	struct lltable *llt;
	int i;

	MALLOC(llt, struct lltable *, sizeof(struct lltable), M_LLTABLE,
	    M_WAITOK | M_ZERO);
	llt->llt_hsize = hsize;
	MALLOC(llt->lle_head, struct llentries *,
	    sizeof(struct llentries) * hsize, M_LLTABLE, M_WAITOK | M_ZERO);

	for (i = 0; i < llt->llt_hsize; i++)
		LIST_INIT(&llt->lle_head[i]);

	/* Set some default callbacks */
	llt->llt_link_entry = htable_link_entry;
	llt->llt_unlink_entry = htable_unlink_entry;
	llt->llt_prefix_free = htable_prefix_free;
	llt->llt_foreach_entry = htable_foreach_lle;
	llt->llt_free_tbl = htable_free_tbl;

	return (llt);
}

/*
 * Links lltable to global llt list.
 */
void
lltable_link(struct lltable *llt)
{
	LLTABLE_WLOCK();
	SLIST_INSERT_HEAD(&lltables, llt, llt_link);
	LLTABLE_WUNLOCK();
}

#if 0
static void
lltable_unlink(struct lltable *llt)
{
	LLTABLE_WLOCK();
	SLIST_REMOVE(&lltables, llt, lltable, llt_link);
	LLTABLE_WUNLOCK();
}
#endif

/*
 * External methods used by lltable consumers
 */

int
lltable_foreach_lle(struct lltable *llt, llt_foreach_cb_t *f, void *farg)
{
	return (llt->llt_foreach_entry(llt, f, farg));
}

struct llentry *
lltable_alloc_entry(struct lltable *llt, u_int flags,
    const struct sockaddr *l3addr)
{
	return (llt->llt_alloc_entry(llt, flags, l3addr));
}

void
lltable_free_entry(struct lltable *llt, struct llentry *lle)
{
	llt->llt_free_entry(llt, lle);
}

void
lltable_link_entry(struct lltable *llt, struct llentry *lle)
{
	llt->llt_link_entry(llt, lle);
}

void
lltable_unlink_entry(struct lltable *llt, struct llentry *lle)
{
	llt->llt_unlink_entry(lle);
}

void
lltable_fill_sa_entry(const struct llentry *lle, struct sockaddr *sa)
{
	struct lltable *llt;

	llt = lle->lle_tbl;
	llt->llt_fill_sa_entry(lle, sa);
}

struct ifnet *
lltable_get_ifp(const struct lltable *llt)
{
	return (llt->llt_ifp);
}

int
lltable_get_af(const struct lltable *llt)
{
	return (llt->llt_af);
}

#define	ifnet_byindex(index)	ifindex2ifnet[(index)]

/*
 * Called in route_output when rtm_flags contains RTF_LLDATA.
 */
int
lla_rt_output(struct rt_msghdr *rtm, struct rt_addrinfo *info)
{
	struct sockaddr_dl *dl =
	    (struct sockaddr_dl *)(void *)info->rti_info[RTAX_GATEWAY];
	struct sockaddr *dst = (struct sockaddr *)info->rti_info[RTAX_DST];
	struct ifnet *ifp;
	struct lltable *llt;
	struct llentry *lle, *lle_tmp;
	u_int laflags = 0;
	int error;

	KASSERT(dl != NULL && dl->sdl_family == AF_LINK,
	    ("%s: invalid dl\n", __func__));

	ifp = ifnet_byindex(dl->sdl_index);
	if (ifp == NULL) {
		log(LOG_INFO, "%s: invalid ifp (sdl_index %d)\n",
		    __func__, dl->sdl_index);
		return EINVAL;
	}

	/* XXX linked list may be too expensive */
	LLTABLE_RLOCK();
	SLIST_FOREACH(llt, &lltables, llt_link) {
		if (llt->llt_af == dst->sa_family &&
		    llt->llt_ifp == ifp)
			break;
	}
	LLTABLE_RUNLOCK();
	KASSERT(llt != NULL, ("Yep, ugly hacks are bad\n"));

	error = 0;

	switch (rtm->rtm_type) {
	case RTM_ADD:
		/* Add static LLE */
		laflags = 0;
		if (rtm->rtm_rmx.rmx_expire == 0)
			laflags = LLE_STATIC;
		lle = lltable_alloc_entry(llt, laflags, dst);
		if (lle == NULL)
			return (ENOMEM);
#if 0
		linkhdrsize = sizeof(linkhdr);
		if (lltable_calc_llheader(ifp, dst->sa_family, LLADDR(dl),
		    (void *)linkhdr, &linkhdrsize, &lladdr_off) != 0)
			return (EINVAL);
#endif
		lltable_set_entry_addr(ifp, lle, LLADDR(dl));

		if (rtm->rtm_flags & RTF_ANNOUNCE)
			lle->la_flags |= LLE_PUB;
		lle->la_expire = rtm->rtm_rmx.rmx_expire;

		laflags = lle->la_flags;

		/* Try to link new entry */
		lle_tmp = NULL;
		IF_AFDATA_WLOCK(ifp, llt->llt_af);
		LLE_WLOCK(lle);
		lle_tmp = lla_lookup(llt, LLE_EXCLUSIVE, dst);
		if (lle_tmp != NULL) {
			/* Check if we are trying to replace immutable entry */
			if ((lle_tmp->la_flags & LLE_IFADDR) != 0) {
				IF_AFDATA_WUNLOCK(ifp, llt->llt_af);
				LLE_WUNLOCK(lle_tmp);
				lltable_free_entry(llt, lle);
				return (EPERM);
			}
			/* Unlink existing entry from table */
			lltable_unlink_entry(llt, lle_tmp);
		}
		lltable_link_entry(llt, lle);
		IF_AFDATA_WUNLOCK(ifp, llt->llt_af);

		if (lle_tmp != NULL) {
			EVENTHANDLER_INVOKE(NULL, lle_event, lle_tmp,
			    LLENTRY_EXPIRED);
			lltable_free_entry(llt, lle_tmp);
		}

		/*
		 * By invoking LLE handler here we might get
		 * two events on static LLE entry insertion
		 * in routing socket. However, since we might have
		 * other subscribers we need to generate this event.
		 */
		EVENTHANDLER_INVOKE(NULL, lle_event, lle, LLENTRY_RESOLVED);
		LLE_WUNLOCK(lle);
#ifdef INET
		/* gratuitous ARP */
		if ((laflags & LLE_PUB) && dst->sa_family == AF_INET)
			dlil_send_arp(ifp, ARPOP_REQUEST, NULL, dst, NULL,
			    dst, 0);
#endif

		break;

	case RTM_DELETE:
		return (lltable_delete_addr(llt, 0, dst));

	default:
		error = EINVAL;
	}

	return (error);
}