/*
 * bsd/net/if_bridge.c
 * (apple/xnu, commit ca97c63dd6814ba2d2ce746d21c306cb7651c65f)
 */
1 /*
2 * Copyright (c) 2004-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */
30 /*
31 * Copyright 2001 Wasabi Systems, Inc.
32 * All rights reserved.
33 *
34 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed for the NetBSD Project by
47 * Wasabi Systems, Inc.
48 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
49 * or promote products derived from this software without specific prior
50 * written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
54 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
55 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
56 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
57 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
58 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
59 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
60 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
61 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
62 * POSSIBILITY OF SUCH DAMAGE.
63 */
64
65 /*
66 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
67 * All rights reserved.
68 *
69 * Redistribution and use in source and binary forms, with or without
70 * modification, are permitted provided that the following conditions
71 * are met:
72 * 1. Redistributions of source code must retain the above copyright
73 * notice, this list of conditions and the following disclaimer.
74 * 2. Redistributions in binary form must reproduce the above copyright
75 * notice, this list of conditions and the following disclaimer in the
76 * documentation and/or other materials provided with the distribution.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
79 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
80 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
81 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
82 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
83 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
84 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
85 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
86 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
87 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
88 * POSSIBILITY OF SUCH DAMAGE.
89 *
90 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
91 */
92
93 /*
94 * Network interface bridge support.
95 *
96 * TODO:
97 *
98 * - Currently only supports Ethernet-like interfaces (Ethernet,
99 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
100 * to bridge other types of interfaces (FDDI-FDDI, and maybe
101 * consider heterogenous bridges).
102 *
103 * - GIF isn't handled due to the lack of IPPROTO_ETHERIP support.
104 */
105
106 #include <sys/cdefs.h>
107
108 #define BRIDGE_DEBUG 1
109
110 #include <sys/param.h>
111 #include <sys/mbuf.h>
112 #include <sys/malloc.h>
113 #include <sys/protosw.h>
114 #include <sys/systm.h>
115 #include <sys/time.h>
116 #include <sys/socket.h> /* for net/if.h */
117 #include <sys/sockio.h>
118 #include <sys/kernel.h>
119 #include <sys/random.h>
120 #include <sys/syslog.h>
121 #include <sys/sysctl.h>
122 #include <sys/proc.h>
123 #include <sys/lock.h>
124 #include <sys/mcache.h>
125
126 #include <sys/kauth.h>
127
128 #include <kern/thread_call.h>
129
130 #include <libkern/libkern.h>
131
132 #include <kern/zalloc.h>
133
134 #if NBPFILTER > 0
135 #include <net/bpf.h>
136 #endif
137 #include <net/if.h>
138 #include <net/if_dl.h>
139 #include <net/if_types.h>
140 #include <net/if_var.h>
141 #include <net/if_media.h>
142 #include <net/net_api_stats.h>
143
144 #include <netinet/in.h> /* for struct arpcom */
145 #include <netinet/in_systm.h>
146 #include <netinet/in_var.h>
147 #define _IP_VHL
148 #include <netinet/ip.h>
149 #include <netinet/ip_var.h>
150 #if INET6
151 #include <netinet/ip6.h>
152 #include <netinet6/ip6_var.h>
153 #endif
154 #ifdef DEV_CARP
155 #include <netinet/ip_carp.h>
156 #endif
157 #include <netinet/if_ether.h> /* for struct arpcom */
158 #include <net/bridgestp.h>
159 #include <net/if_bridgevar.h>
160 #include <net/if_llc.h>
161 #if NVLAN > 0
162 #include <net/if_vlan_var.h>
163 #endif /* NVLAN > 0 */
164
165 #include <net/if_ether.h>
166 #include <net/dlil.h>
167 #include <net/kpi_interfacefilter.h>
168
169 #include <net/route.h>
170 #ifdef PFIL_HOOKS
171 #include <netinet/ip_fw2.h>
172 #include <netinet/ip_dummynet.h>
173 #endif /* PFIL_HOOKS */
174 #include <dev/random/randomdev.h>
175
176 #include <netinet/bootp.h>
177 #include <netinet/dhcp.h>
178
179
#if BRIDGE_DEBUG
/*
 * Debug trace categories; presumably OR'd into the if_bridge_debug
 * sysctl bitmask to enable per-subsystem logging.
 */
#define BR_DBGF_LIFECYCLE	0x0001	/* bridge/member create & destroy */
#define BR_DBGF_INPUT		0x0002	/* inbound packet path */
#define BR_DBGF_OUTPUT		0x0004	/* outbound packet path */
#define BR_DBGF_RT_TABLE	0x0008	/* forwarding (route) table */
#define BR_DBGF_DELAYED_CALL	0x0010	/* delayed thread_call machinery */
#define BR_DBGF_IOCTL		0x0020	/* ioctl handling */
#define BR_DBGF_MBUF		0x0040	/* mbuf handling */
#define BR_DBGF_MCAST		0x0080	/* multicast handling */
#define BR_DBGF_HOSTFILTER	0x0100	/* host filter */
#endif /* BRIDGE_DEBUG */
191
/*
 * Raw bridge mutex lock/unlock.  The BRIDGE_LOCK/BRIDGE_UNLOCK wrappers
 * below route through debug functions that record the call site when
 * BRIDGE_DEBUG is enabled.
 */
#define _BRIDGE_LOCK(_sc)	lck_mtx_lock(&(_sc)->sc_mtx)
#define _BRIDGE_UNLOCK(_sc)	lck_mtx_unlock(&(_sc)->sc_mtx)
#define BRIDGE_LOCK_ASSERT_HELD(_sc)		\
	LCK_MTX_ASSERT(&(_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED)
#define BRIDGE_LOCK_ASSERT_NOTHELD(_sc)		\
	LCK_MTX_ASSERT(&(_sc)->sc_mtx, LCK_MTX_ASSERT_NOTOWNED)

#if BRIDGE_DEBUG

/* depth of the lock/unlock caller history kept in the softc */
#define BR_LCKDBG_MAX			4

/* Debug builds go through functions that record lock/unlock callers. */
#define BRIDGE_LOCK(_sc)		bridge_lock(_sc)
#define BRIDGE_UNLOCK(_sc)		bridge_unlock(_sc)
#define BRIDGE_LOCK2REF(_sc, _err)	_err = bridge_lock2ref(_sc)
#define BRIDGE_UNREF(_sc)		bridge_unref(_sc)
#define BRIDGE_XLOCK(_sc)		bridge_xlock(_sc)
#define BRIDGE_XDROP(_sc)		bridge_xdrop(_sc)

#else /* !BRIDGE_DEBUG */

#define BRIDGE_LOCK(_sc)	_BRIDGE_LOCK(_sc)
#define BRIDGE_UNLOCK(_sc)	_BRIDGE_UNLOCK(_sc)
/*
 * Convert the held bridge mutex into a reference on the member
 * interface list: fails with EBUSY while an exclusive (xcnt) holder is
 * waiting.  The mutex is dropped in either case.
 */
#define BRIDGE_LOCK2REF(_sc, _err)	do {				\
	BRIDGE_LOCK_ASSERT_HELD(_sc);					\
	if ((_sc)->sc_iflist_xcnt > 0)					\
		(_err) = EBUSY;						\
	else								\
		(_sc)->sc_iflist_ref++;					\
	_BRIDGE_UNLOCK(_sc);						\
} while (0)
/*
 * Release a BRIDGE_LOCK2REF reference; wakes a waiting BRIDGE_XLOCK
 * holder when the last reference is dropped.
 */
#define BRIDGE_UNREF(_sc)		do {				\
	_BRIDGE_LOCK(_sc);						\
	(_sc)->sc_iflist_ref--;						\
	if (((_sc)->sc_iflist_xcnt > 0) && ((_sc)->sc_iflist_ref == 0)) { \
		_BRIDGE_UNLOCK(_sc);					\
		wakeup(&(_sc)->sc_cv);					\
	} else								\
		_BRIDGE_UNLOCK(_sc);					\
} while (0)
/*
 * Acquire exclusive access to the member interface list: bump the
 * exclusive count and sleep until all outstanding BRIDGE_LOCK2REF
 * references drain.  The bridge mutex must be held and remains held.
 */
#define BRIDGE_XLOCK(_sc)		do {				\
	BRIDGE_LOCK_ASSERT_HELD(_sc);					\
	(_sc)->sc_iflist_xcnt++;					\
	while ((_sc)->sc_iflist_ref > 0)				\
		msleep(&(_sc)->sc_cv, &(_sc)->sc_mtx, PZERO,		\
		    "BRIDGE_XLOCK", NULL);				\
} while (0)
/* Drop exclusive access acquired with BRIDGE_XLOCK (mutex still held). */
#define BRIDGE_XDROP(_sc)		do {				\
	BRIDGE_LOCK_ASSERT_HELD(_sc);					\
	(_sc)->sc_iflist_xcnt--;					\
} while (0)

#endif /* BRIDGE_DEBUG */
244
#if NBPFILTER > 0
/*
 * Tap an inbound packet to the bridge interface's BPF listeners, if a
 * BPF input callback has been registered via bridge_set_bpf_tap().
 *
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement: the original unbraced `if` was a dangling-else hazard and
 * broke `if (x) BRIDGE_BPF_MTAP_INPUT(sc, m); else ...` call sites.
 * Arguments are parenthesized against operator-precedence surprises.
 */
#define BRIDGE_BPF_MTAP_INPUT(sc, m)					\
	do {								\
		if ((sc)->sc_bpf_input)					\
			bridge_bpf_input((sc)->sc_ifp, (m));		\
	} while (0)
#else /* NBPFILTER */
/* No BPF compiled in: expands to nothing (still statement-safe). */
#define BRIDGE_BPF_MTAP_INPUT(sc, m)
#endif /* NBPFILTER */
252
/*
 * Initial size of the route hash table.  Must be a power of two.
 */
#ifndef BRIDGE_RTHASH_SIZE
#define BRIDGE_RTHASH_SIZE 16
#endif

/*
 * Maximum size of the routing hash table (also a power of two, since
 * bucket selection masks with BRIDGE_RTHASH_MASK below).
 */
#define BRIDGE_RTHASH_SIZE_MAX 2048

/* Bucket mask; only valid while sc_rthash_size is a power of two. */
#define BRIDGE_RTHASH_MASK(sc) ((sc)->sc_rthash_size - 1)

/*
 * Maximum number of addresses to cache.
 */
#ifndef BRIDGE_RTABLE_MAX
#define BRIDGE_RTABLE_MAX 100
#endif


/*
 * Timeout (in seconds) for entries learned dynamically.
 */
#ifndef BRIDGE_RTABLE_TIMEOUT
#define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
#endif

/*
 * Number of seconds between walks of the route list.
 */
#ifndef BRIDGE_RTABLE_PRUNE_PERIOD
#define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
#endif

/*
 * List of capabilities to possibly mask on the member interface.
 */
#define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM)
/*
 * List of capabilities to disable on the member interface.
 */
#define BRIDGE_IFCAPS_STRIP IFCAP_LRO
297
/*
 * Bridge interface list entry: per-member state, one for each interface
 * attached to (or spanned by) a bridge.
 */
struct bridge_iflist {
	TAILQ_ENTRY(bridge_iflist) bif_next;	/* list linkage */
	struct ifnet *bif_ifp;			/* member if */
	struct bstp_port bif_stp;		/* STP state */
	uint32_t bif_ifflags;			/* member if flags */
	int bif_savedcaps;			/* saved capabilities */
	uint32_t bif_addrmax;			/* max # of addresses */
	uint32_t bif_addrcnt;			/* cur. # of addresses */
	uint32_t bif_addrexceeded;		/* # of address violations */

	interface_filter_t bif_iff_ref;		/* attached interface filter */
	struct bridge_softc *bif_sc;		/* back pointer to our bridge */
	uint32_t bif_flags;			/* BIFF_* flags below */

	/* host filter state; meaningful when BIFF_HOST_FILTER is set */
	struct in_addr bif_hf_ipsrc;		/* allowed source IP */
	uint8_t bif_hf_hwsrc[ETHER_ADDR_LEN];	/* allowed source MAC */
};

#define BIFF_PROMISC		0x01	/* promiscuous mode set */
#define BIFF_PROTO_ATTACHED	0x02	/* protocol attached */
#define BIFF_FILTER_ATTACHED	0x04	/* interface filter attached */
#define BIFF_MEDIA_ACTIVE	0x08	/* interface media active */
#define BIFF_HOST_FILTER	0x10	/* host filter enabled */
#define BIFF_HF_HWSRC		0x20	/* host filter source MAC is set */
#define BIFF_HF_IPSRC		0x40	/* host filter source IP is set */
326
/*
 * Bridge route node: one entry in the learned-address forwarding table,
 * mapping a (MAC, vlan) pair to the member interface it was seen on.
 */
struct bridge_rtnode {
	LIST_ENTRY(bridge_rtnode) brt_hash;	/* hash table linkage */
	LIST_ENTRY(bridge_rtnode) brt_list;	/* list linkage */
	struct bridge_iflist *brt_dst;		/* destination if */
	unsigned long brt_expire;		/* expiration time */
	uint8_t brt_flags;			/* address flags */
	uint8_t brt_addr[ETHER_ADDR_LEN];	/* the MAC address itself */
	uint16_t brt_vlan;			/* vlan id */

};
/* convenience accessor: the destination member's ifnet */
#define brt_ifp brt_dst->bif_ifp
341
/*
 * Bridge delayed function call context: schedules deferred per-bridge
 * work (e.g. route-table aging, hash resize) through thread_call.
 */
typedef void (*bridge_delayed_func_t)(struct bridge_softc *);

struct bridge_delayed_call {
	struct bridge_softc *bdc_sc;	/* bridge the call operates on */
	bridge_delayed_func_t bdc_func;	/* Function to call */
	struct timespec bdc_ts;		/* Time to call */
	u_int32_t bdc_flags;		/* BDCF_* flags below */
	thread_call_t bdc_thread_call;	/* underlying thread_call handle */
};

#define BDCF_OUTSTANDING	0x01	/* Delayed call has been scheduled */
#define BDCF_CANCELLING		0x02	/* May be waiting for call completion */
357
358
/*
 * Software state for each bridge.
 */
LIST_HEAD(_bridge_rtnode_list, bridge_rtnode);

/*
 * State used when the bridge operates in "BSD mode" (SCF_BSD_MODE):
 * the learning forwarding table, span ports, STP state and BPF taps.
 * Lives inside bridge_softc's sc_u union and is reached through the
 * sc_* aliases defined below.
 */
typedef struct {
	struct _bridge_rtnode_list *bb_rthash;	/* our forwarding table */
	struct _bridge_rtnode_list bb_rtlist;	/* list version of above */
	uint32_t bb_rthash_key;			/* key for hash */
	uint32_t bb_rthash_size;		/* size of the hash table */
	struct bridge_delayed_call bb_aging_timer;	/* rt-entry aging */
	struct bridge_delayed_call bb_resize_call;	/* rt-hash resizing */
	TAILQ_HEAD(, bridge_iflist) bb_spanlist;	/* span ports list */
	struct bstp_state bb_stp;		/* STP state */
	bpf_packet_func bb_bpf_input;		/* BPF input tap */
	bpf_packet_func bb_bpf_output;		/* BPF output tap */
} bridge_bsd, *bridge_bsd_t;

/* Shorthand accessors for the BSD-mode members of sc_u. */
#define sc_rthash	sc_u.scu_bsd.bb_rthash
#define sc_rtlist	sc_u.scu_bsd.bb_rtlist
#define sc_rthash_key	sc_u.scu_bsd.bb_rthash_key
#define sc_rthash_size	sc_u.scu_bsd.bb_rthash_size
#define sc_aging_timer	sc_u.scu_bsd.bb_aging_timer
#define sc_resize_call	sc_u.scu_bsd.bb_resize_call
#define sc_spanlist	sc_u.scu_bsd.bb_spanlist
#define sc_stp		sc_u.scu_bsd.bb_stp
#define sc_bpf_input	sc_u.scu_bsd.bb_bpf_input
#define sc_bpf_output	sc_u.scu_bsd.bb_bpf_output

struct bridge_softc {
	struct ifnet *sc_ifp;			/* make this an interface */
	u_int32_t sc_flags;			/* SCF_* flags below */
	union {
		bridge_bsd scu_bsd;		/* valid when SCF_BSD_MODE set */
	} sc_u;
	LIST_ENTRY(bridge_softc) sc_list;	/* bridge_list linkage */
	decl_lck_mtx_data(, sc_mtx);		/* per-bridge mutex */
	void *sc_cv;				/* BRIDGE_XLOCK wait channel */
	uint32_t sc_brtmax;			/* max # of addresses */
	uint32_t sc_brtcnt;			/* cur. # of addresses */
	uint32_t sc_brttimeout;			/* rt timeout in seconds */
	uint32_t sc_iflist_ref;			/* refcount for sc_iflist */
	uint32_t sc_iflist_xcnt;		/* refcount for sc_iflist */
	TAILQ_HEAD(, bridge_iflist) sc_iflist;	/* member interface list */
	uint32_t sc_brtexceeded;		/* # of cache drops */
	uint32_t sc_filter_flags;		/* ipf and flags */
	struct ifnet *sc_ifaddr;		/* member mac copied from */
	u_char sc_defaddr[6];			/* Default MAC address */
	char sc_if_xname[IFNAMSIZ];		/* interface name */

#if BRIDGE_DEBUG
	/*
	 * Locking and unlocking calling history
	 */
	void *lock_lr[BR_LCKDBG_MAX];
	int next_lock_lr;
	void *unlock_lr[BR_LCKDBG_MAX];
	int next_unlock_lr;
#endif /* BRIDGE_DEBUG */
};

#define SCF_DETACHING		0x01	/* presumably set during teardown — confirm in bridge_clone_destroy */
#define SCF_RESIZING		0x02	/* rt hash resize in flight (presumed) */
#define SCF_MEDIA_ACTIVE	0x04	/* at least one member link active (presumed) */
#define SCF_BSD_MODE		0x08	/* BSD forwarding path; see bridge_in_bsd_mode() */
424
425 static inline void
426 bridge_set_bsd_mode(struct bridge_softc * sc)
427 {
428 sc->sc_flags |= SCF_BSD_MODE;
429 }
430
431 static inline boolean_t
432 bridge_in_bsd_mode(const struct bridge_softc * sc)
433 {
434 return (sc->sc_flags & SCF_BSD_MODE) != 0;
435 }
436
/* Global host-filter statistics, exported via the hostfilterstats sysctl. */
struct bridge_hostfilter_stats bridge_hostfilter_stats;

/* Mutex over the global bridge list (presumably guards bridge_list below). */
decl_lck_mtx_data(static, bridge_list_mtx);

/* Seconds between route-list prune walks (sysctl rtable_prune_period). */
static int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;

/* Allocation zone for struct bridge_rtnode entries. */
static zone_t bridge_rtnode_pool = NULL;
444
445 static int bridge_clone_create(struct if_clone *, uint32_t, void *);
446 static int bridge_clone_destroy(struct ifnet *);
447
448 static errno_t bridge_ioctl(struct ifnet *, u_long, void *);
449 #if HAS_IF_CAP
450 static void bridge_mutecaps(struct bridge_softc *);
451 static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
452 int);
453 #endif
454 static errno_t bridge_set_tso(struct bridge_softc *);
455 __private_extern__ void bridge_ifdetach(struct bridge_iflist *, struct ifnet *);
456 static int bridge_init(struct ifnet *);
457 #if HAS_BRIDGE_DUMMYNET
458 static void bridge_dummynet(struct mbuf *, struct ifnet *);
459 #endif
460 static void bridge_ifstop(struct ifnet *, int);
461 static int bridge_output(struct ifnet *, struct mbuf *);
462 static void bridge_finalize_cksum(struct ifnet *, struct mbuf *);
463 static void bridge_start(struct ifnet *);
464 __private_extern__ errno_t bridge_input(struct ifnet *, struct mbuf *, void *);
465 #if BRIDGE_MEMBER_OUT_FILTER
466 static errno_t bridge_iff_output(void *, ifnet_t, protocol_family_t,
467 mbuf_t *);
468 static int bridge_member_output(struct ifnet *, struct mbuf *,
469 struct sockaddr *, struct rtentry *);
470 #endif
471 static int bridge_enqueue(struct bridge_softc *, struct ifnet *,
472 struct mbuf *);
473 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
474
475 static void bridge_forward(struct bridge_softc *, struct bridge_iflist *,
476 struct mbuf *);
477
478 static void bridge_aging_timer(struct bridge_softc *sc);
479
480 static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
481 struct mbuf *, int);
482 static void bridge_span(struct bridge_softc *, struct mbuf *);
483
484 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
485 uint16_t, struct bridge_iflist *, int, uint8_t);
486 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
487 uint16_t);
488 static void bridge_rttrim(struct bridge_softc *);
489 static void bridge_rtage(struct bridge_softc *);
490 static void bridge_rtflush(struct bridge_softc *, int);
491 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
492 uint16_t);
493
494 static int bridge_rtable_init(struct bridge_softc *);
495 static void bridge_rtable_fini(struct bridge_softc *);
496
497 static void bridge_rthash_resize(struct bridge_softc *);
498
499 static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
500 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
501 const uint8_t *, uint16_t);
502 static int bridge_rtnode_hash(struct bridge_softc *,
503 struct bridge_rtnode *);
504 static int bridge_rtnode_insert(struct bridge_softc *,
505 struct bridge_rtnode *);
506 static void bridge_rtnode_destroy(struct bridge_softc *,
507 struct bridge_rtnode *);
508 #if BRIDGESTP
509 static void bridge_rtable_expire(struct ifnet *, int);
510 static void bridge_state_change(struct ifnet *, int);
511 #endif /* BRIDGESTP */
512
513 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
514 const char *name);
515 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
516 struct ifnet *ifp);
517 static void bridge_delete_member(struct bridge_softc *,
518 struct bridge_iflist *, int);
519 static void bridge_delete_span(struct bridge_softc *,
520 struct bridge_iflist *);
521
522 static int bridge_ioctl_add(struct bridge_softc *, void *);
523 static int bridge_ioctl_del(struct bridge_softc *, void *);
524 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
525 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
526 static int bridge_ioctl_scache(struct bridge_softc *, void *);
527 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
528 static int bridge_ioctl_gifs32(struct bridge_softc *, void *);
529 static int bridge_ioctl_gifs64(struct bridge_softc *, void *);
530 static int bridge_ioctl_rts32(struct bridge_softc *, void *);
531 static int bridge_ioctl_rts64(struct bridge_softc *, void *);
532 static int bridge_ioctl_saddr32(struct bridge_softc *, void *);
533 static int bridge_ioctl_saddr64(struct bridge_softc *, void *);
534 static int bridge_ioctl_sto(struct bridge_softc *, void *);
535 static int bridge_ioctl_gto(struct bridge_softc *, void *);
536 static int bridge_ioctl_daddr32(struct bridge_softc *, void *);
537 static int bridge_ioctl_daddr64(struct bridge_softc *, void *);
538 static int bridge_ioctl_flush(struct bridge_softc *, void *);
539 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
540 static int bridge_ioctl_spri(struct bridge_softc *, void *);
541 static int bridge_ioctl_ght(struct bridge_softc *, void *);
542 static int bridge_ioctl_sht(struct bridge_softc *, void *);
543 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
544 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
545 static int bridge_ioctl_gma(struct bridge_softc *, void *);
546 static int bridge_ioctl_sma(struct bridge_softc *, void *);
547 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
548 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
549 static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
550 static int bridge_ioctl_addspan(struct bridge_softc *, void *);
551 static int bridge_ioctl_delspan(struct bridge_softc *, void *);
552 static int bridge_ioctl_gbparam32(struct bridge_softc *, void *);
553 static int bridge_ioctl_gbparam64(struct bridge_softc *, void *);
554 static int bridge_ioctl_grte(struct bridge_softc *, void *);
555 static int bridge_ioctl_gifsstp32(struct bridge_softc *, void *);
556 static int bridge_ioctl_gifsstp64(struct bridge_softc *, void *);
557 static int bridge_ioctl_sproto(struct bridge_softc *, void *);
558 static int bridge_ioctl_stxhc(struct bridge_softc *, void *);
559 static int bridge_ioctl_purge(struct bridge_softc *sc, void *);
560 static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
561 static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
562 static int bridge_ioctl_ghostfilter(struct bridge_softc *, void *);
563 static int bridge_ioctl_shostfilter(struct bridge_softc *, void *);
564 #ifdef PFIL_HOOKS
565 static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
566 int);
567 static int bridge_ip_checkbasic(struct mbuf **);
568 #ifdef INET6
569 static int bridge_ip6_checkbasic(struct mbuf **);
570 #endif /* INET6 */
571 static int bridge_fragment(struct ifnet *, struct mbuf *,
572 struct ether_header *, int, struct llc *);
573 #endif /* PFIL_HOOKS */
574
575 static errno_t bridge_set_bpf_tap(ifnet_t, bpf_tap_mode, bpf_packet_func);
576 __private_extern__ errno_t bridge_bpf_input(ifnet_t, struct mbuf *);
577 __private_extern__ errno_t bridge_bpf_output(ifnet_t, struct mbuf *);
578
579 static void bridge_detach(ifnet_t);
580 static void bridge_link_event(struct ifnet *, u_int32_t);
581 static void bridge_iflinkevent(struct ifnet *);
582 static u_int32_t bridge_updatelinkstatus(struct bridge_softc *);
583 static int interface_media_active(struct ifnet *);
584 static void bridge_schedule_delayed_call(struct bridge_delayed_call *);
585 static void bridge_cancel_delayed_call(struct bridge_delayed_call *);
586 static void bridge_cleanup_delayed_call(struct bridge_delayed_call *);
587 static int bridge_host_filter(struct bridge_iflist *, struct mbuf *);
588
589
/* Copy an entire packet (all of the mbuf chain, from offset 0). */
#define m_copypacket(m, how) m_copym(m, 0, M_COPYALL, how)

/* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
/* NOTE(review): despite the comment above, this always evaluates to 0 —
 * VLAN tag extraction appears unimplemented here; confirm intent. */
#define VLANTAGOF(_m) 0

/* IEEE 802.1D STP multicast destination address (01:80:c2:00:00:00). */
u_int8_t bstp_etheraddr[ETHER_ADDR_LEN] =
    { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };

/* All-zero MAC; presumably compared against unset/invalid addresses. */
static u_int8_t ethernulladdr[ETHER_ADDR_LEN] =
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
600
#if BRIDGESTP
/* Callbacks through which the STP machinery notifies this driver. */
static struct bstp_cb_ops bridge_ops = {
	.bcb_state = bridge_state_change,	/* port state changed */
	.bcb_rtage = bridge_rtable_expire	/* age out learned routes */
};
#endif /* BRIDGESTP */
607
SYSCTL_DECL(_net_link);
/* net.link.bridge sysctl subtree */
SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Bridge");

static int bridge_inherit_mac = 0;	/* share MAC with first bridge member */
SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &bridge_inherit_mac, 0,
    "Inherit MAC address from the first bridge member");

SYSCTL_INT(_net_link_bridge, OID_AUTO, rtable_prune_period,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &bridge_rtable_prune_period, 0,
    "Interval between pruning of routing table");

/* Upper bound applied when the forwarding hash table grows. */
static unsigned int bridge_rtable_hash_size_max = BRIDGE_RTHASH_SIZE_MAX;
SYSCTL_UINT(_net_link_bridge, OID_AUTO, rtable_hash_size_max,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &bridge_rtable_hash_size_max, 0,
    "Maximum size of the routing hash table");

#if BRIDGE_DEBUG_DELAYED_CALLBACK
/* Debug-only artificial delay injected before delayed callbacks run. */
static int bridge_delayed_callback_delay = 0;
SYSCTL_INT(_net_link_bridge, OID_AUTO, delayed_callback_delay,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &bridge_delayed_callback_delay, 0,
    "Delay before calling delayed function");
#endif

/* New bridges default to BSD mode; tunable only on DEV/DEBUG kernels. */
static int bridge_bsd_mode = 1;
#if (DEVELOPMENT || DEBUG)
SYSCTL_INT(_net_link_bridge, OID_AUTO, bsd_mode,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &bridge_bsd_mode, 0,
    "Bridge using bsd mode");
#endif /* (DEVELOPMENT || DEBUG) */

SYSCTL_STRUCT(_net_link_bridge, OID_AUTO,
    hostfilterstats, CTLFLAG_RD | CTLFLAG_LOCKED,
    &bridge_hostfilter_stats, bridge_hostfilter_stats, "");

#if defined(PFIL_HOOKS)
static int pfil_onlyip = 1;	/* only pass IP[46] packets when pfil is enabled */
static int pfil_bridge = 1;	/* run pfil hooks on the bridge interface */
static int pfil_member = 1;	/* run pfil hooks on the member interface */
static int pfil_ipfw = 0;	/* layer2 filter with ipfw */
static int pfil_ipfw_arp = 0;	/* layer2 filter with ipfw */
static int pfil_local_phys = 0;	/* run pfil hooks on the physical interface */
				/* for locally destined packets */
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW | CTLFLAG_LOCKED,
    &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW | CTLFLAG_LOCKED,
    &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2");
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW | CTLFLAG_LOCKED,
    &pfil_bridge, 0, "Packet filter on the bridge interface");
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW | CTLFLAG_LOCKED,
    &pfil_member, 0, "Packet filter on the member interface");
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
    CTLFLAG_RW | CTLFLAG_LOCKED, &pfil_local_phys, 0,
    "Packet filter on the physical interface for locally destined packets");
#endif /* PFIL_HOOKS */

#if BRIDGESTP
static int log_stp = 0;	/* log STP state changes */
SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW,
    &log_stp, 0, "Log STP state changes");
#endif /* BRIDGESTP */
675
/*
 * Descriptor for one bridge ioctl sub-command; the entry's index in
 * bridge_control_table32/64 appears to be the sub-command number (see
 * the /* 0 */, /* 10 */... markers in the tables).
 */
struct bridge_control {
	int (*bc_func)(struct bridge_softc *, void *);	/* handler */
	unsigned int bc_argsize;	/* size of the user argument struct */
	unsigned int bc_flags;		/* BC_F_* flags below */
};

#define BC_F_COPYIN	0x01	/* copy arguments in */
#define BC_F_COPYOUT	0x02	/* copy arguments out */
#define BC_F_SUSER	0x04	/* do super-user check */
685
/*
 * Ioctl dispatch table for 32-bit user processes.  Entry order defines
 * the sub-command number (userland ABI): never reorder or remove
 * entries, and keep in lock-step with bridge_control_table64 below —
 * bridge_control_table_size is computed from this table alone.
 */
static const struct bridge_control bridge_control_table32[] = {
	{ bridge_ioctl_add, sizeof(struct ifbreq),		/* 0 */
	  BC_F_COPYIN | BC_F_SUSER },
	{ bridge_ioctl_del, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_gifflags, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_COPYOUT },
	{ bridge_ioctl_sifflags, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_scache, sizeof(struct ifbrparam),
	  BC_F_COPYIN | BC_F_SUSER },
	{ bridge_ioctl_gcache, sizeof(struct ifbrparam),
	  BC_F_COPYOUT },

	{ bridge_ioctl_gifs32, sizeof(struct ifbifconf32),
	  BC_F_COPYIN | BC_F_COPYOUT },
	{ bridge_ioctl_rts32, sizeof(struct ifbaconf32),
	  BC_F_COPYIN | BC_F_COPYOUT },

	{ bridge_ioctl_saddr32, sizeof(struct ifbareq32),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_sto, sizeof(struct ifbrparam),
	  BC_F_COPYIN | BC_F_SUSER },
	{ bridge_ioctl_gto, sizeof(struct ifbrparam),		/* 10 */
	  BC_F_COPYOUT },

	{ bridge_ioctl_daddr32, sizeof(struct ifbareq32),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_flush, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_gpri, sizeof(struct ifbrparam),
	  BC_F_COPYOUT },
	{ bridge_ioctl_spri, sizeof(struct ifbrparam),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_ght, sizeof(struct ifbrparam),
	  BC_F_COPYOUT },
	{ bridge_ioctl_sht, sizeof(struct ifbrparam),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_gfd, sizeof(struct ifbrparam),
	  BC_F_COPYOUT },
	{ bridge_ioctl_sfd, sizeof(struct ifbrparam),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_gma, sizeof(struct ifbrparam),
	  BC_F_COPYOUT },
	{ bridge_ioctl_sma, sizeof(struct ifbrparam),		/* 20 */
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_sifprio, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_sifcost, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_gfilt, sizeof(struct ifbrparam),
	  BC_F_COPYOUT },
	{ bridge_ioctl_sfilt, sizeof(struct ifbrparam),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_purge, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_addspan, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },
	{ bridge_ioctl_delspan, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_gbparam32, sizeof(struct ifbropreq32),
	  BC_F_COPYOUT },

	{ bridge_ioctl_grte, sizeof(struct ifbrparam),
	  BC_F_COPYOUT },

	{ bridge_ioctl_gifsstp32, sizeof(struct ifbpstpconf32), /* 30 */
	  BC_F_COPYIN | BC_F_COPYOUT },

	{ bridge_ioctl_sproto, sizeof(struct ifbrparam),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_stxhc, sizeof(struct ifbrparam),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_ghostfilter, sizeof(struct ifbrhostfilter),
	  BC_F_COPYIN | BC_F_COPYOUT },
	{ bridge_ioctl_shostfilter, sizeof(struct ifbrhostfilter),
	  BC_F_COPYIN | BC_F_SUSER },
};
783
/*
 * Ioctl dispatch table for 64-bit user processes.  Must mirror
 * bridge_control_table32 entry-for-entry (same length, same order),
 * differing only in the 32/64-bit variants of the argument structures
 * and handlers.
 */
static const struct bridge_control bridge_control_table64[] = {
	{ bridge_ioctl_add, sizeof(struct ifbreq),		/* 0 */
	  BC_F_COPYIN | BC_F_SUSER },
	{ bridge_ioctl_del, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_gifflags, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_COPYOUT },
	{ bridge_ioctl_sifflags, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_scache, sizeof(struct ifbrparam),
	  BC_F_COPYIN | BC_F_SUSER },
	{ bridge_ioctl_gcache, sizeof(struct ifbrparam),
	  BC_F_COPYOUT },

	{ bridge_ioctl_gifs64, sizeof(struct ifbifconf64),
	  BC_F_COPYIN | BC_F_COPYOUT },
	{ bridge_ioctl_rts64, sizeof(struct ifbaconf64),
	  BC_F_COPYIN | BC_F_COPYOUT },

	{ bridge_ioctl_saddr64, sizeof(struct ifbareq64),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_sto, sizeof(struct ifbrparam),
	  BC_F_COPYIN | BC_F_SUSER },
	{ bridge_ioctl_gto, sizeof(struct ifbrparam),		/* 10 */
	  BC_F_COPYOUT },

	{ bridge_ioctl_daddr64, sizeof(struct ifbareq64),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_flush, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_gpri, sizeof(struct ifbrparam),
	  BC_F_COPYOUT },
	{ bridge_ioctl_spri, sizeof(struct ifbrparam),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_ght, sizeof(struct ifbrparam),
	  BC_F_COPYOUT },
	{ bridge_ioctl_sht, sizeof(struct ifbrparam),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_gfd, sizeof(struct ifbrparam),
	  BC_F_COPYOUT },
	{ bridge_ioctl_sfd, sizeof(struct ifbrparam),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_gma, sizeof(struct ifbrparam),
	  BC_F_COPYOUT },
	{ bridge_ioctl_sma, sizeof(struct ifbrparam),		/* 20 */
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_sifprio, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_sifcost, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_gfilt, sizeof(struct ifbrparam),
	  BC_F_COPYOUT },
	{ bridge_ioctl_sfilt, sizeof(struct ifbrparam),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_purge, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_addspan, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },
	{ bridge_ioctl_delspan, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_gbparam64, sizeof(struct ifbropreq64),
	  BC_F_COPYOUT },

	{ bridge_ioctl_grte, sizeof(struct ifbrparam),
	  BC_F_COPYOUT },

	{ bridge_ioctl_gifsstp64, sizeof(struct ifbpstpconf64), /* 30 */
	  BC_F_COPYIN | BC_F_COPYOUT },

	{ bridge_ioctl_sproto, sizeof(struct ifbrparam),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_stxhc, sizeof(struct ifbrparam),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq),
	  BC_F_COPYIN | BC_F_SUSER },

	{ bridge_ioctl_ghostfilter, sizeof(struct ifbrhostfilter),
	  BC_F_COPYIN | BC_F_COPYOUT },
	{ bridge_ioctl_shostfilter, sizeof(struct ifbrhostfilter),
	  BC_F_COPYIN | BC_F_SUSER },
};
881
/*
 * Number of entries in the ioctl jump tables.  Computed from the 32-bit
 * table but used by DRVSPEC to bounds-check ifd_cmd for BOTH tables, so
 * bridge_control_table32 and bridge_control_table64 must have the same
 * number of entries.
 */
static const unsigned int bridge_control_table_size =
    sizeof(bridge_control_table32) / sizeof(bridge_control_table32[0]);
884
/* All bridge instances in the system; protected by bridge_list_mtx */
static LIST_HEAD(, bridge_softc) bridge_list =
    LIST_HEAD_INITIALIZER(bridge_list);

/* Lock group/attribute shared by the bridge mutexes; set up in bridgeattach() */
static lck_grp_t *bridge_lock_grp = NULL;
static lck_attr_t *bridge_lock_attr = NULL;

#define BRIDGENAME "bridge"
#define BRIDGES_MAX IF_MAXUNIT
#define BRIDGE_ZONE_MAX_ELEM MIN(IFNETS_MAX, BRIDGES_MAX)

/* Interface cloner: makes "bridgeN" creatable/destroyable via ifconfig */
static struct if_clone bridge_cloner =
    IF_CLONE_INITIALIZER(BRIDGENAME, bridge_clone_create, bridge_clone_destroy,
    0, BRIDGES_MAX, BRIDGE_ZONE_MAX_ELEM, sizeof(struct bridge_softc));

/* net.link.bridge.txstart: when set, new bridges use the TXSTART (start
 * callback) model instead of the legacy output model; see
 * bridge_clone_create() */
static int if_bridge_txstart = 0;
SYSCTL_INT(_net_link_bridge, OID_AUTO, txstart, CTLFLAG_RW | CTLFLAG_LOCKED,
    &if_bridge_txstart, 0, "Bridge interface uses TXSTART model");
902
#if BRIDGE_DEBUG
/* net.link.bridge.debug: bitmask of BR_DBGF_* flags */
static int if_bridge_debug = 0;
SYSCTL_INT(_net_link_bridge, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &if_bridge_debug, 0, "Bridge debug");

/* Debug-only dump helpers */
static void printf_ether_header(struct ether_header *);
static void printf_mbuf_data(mbuf_t, size_t, size_t);
static void printf_mbuf_pkthdr(mbuf_t, const char *, const char *);
static void printf_mbuf(mbuf_t, const char *, const char *);
static void link_print(struct bridge_softc * sc);

/* Lock wrappers that record caller return addresses in the lock/unlock
 * history rings; presumably the BRIDGE_LOCK()/BRIDGE_UNLOCK() macros map
 * to these in debug builds — confirm against the macro definitions */
static void bridge_lock(struct bridge_softc *);
static void bridge_unlock(struct bridge_softc *);
static int bridge_lock2ref(struct bridge_softc *);
static void bridge_unref(struct bridge_softc *);
static void bridge_xlock(struct bridge_softc *);
static void bridge_xdrop(struct bridge_softc *);
920
921 static void
922 bridge_lock(struct bridge_softc *sc)
923 {
924 void *lr_saved = __builtin_return_address(0);
925
926 BRIDGE_LOCK_ASSERT_NOTHELD(sc);
927
928 _BRIDGE_LOCK(sc);
929
930 sc->lock_lr[sc->next_lock_lr] = lr_saved;
931 sc->next_lock_lr = (sc->next_lock_lr + 1) % SO_LCKDBG_MAX;
932 }
933
934 static void
935 bridge_unlock(struct bridge_softc *sc)
936 {
937 void *lr_saved = __builtin_return_address(0);
938
939 BRIDGE_LOCK_ASSERT_HELD(sc);
940
941 sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
942 sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;
943
944 _BRIDGE_UNLOCK(sc);
945 }
946
947 static int
948 bridge_lock2ref(struct bridge_softc *sc)
949 {
950 int error = 0;
951 void *lr_saved = __builtin_return_address(0);
952
953 BRIDGE_LOCK_ASSERT_HELD(sc);
954
955 if (sc->sc_iflist_xcnt > 0) {
956 error = EBUSY;
957 } else {
958 sc->sc_iflist_ref++;
959 }
960
961 sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
962 sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;
963
964 _BRIDGE_UNLOCK(sc);
965
966 return error;
967 }
968
969 static void
970 bridge_unref(struct bridge_softc *sc)
971 {
972 void *lr_saved = __builtin_return_address(0);
973
974 BRIDGE_LOCK_ASSERT_NOTHELD(sc);
975
976 _BRIDGE_LOCK(sc);
977 sc->lock_lr[sc->next_lock_lr] = lr_saved;
978 sc->next_lock_lr = (sc->next_lock_lr + 1) % SO_LCKDBG_MAX;
979
980 sc->sc_iflist_ref--;
981
982 sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
983 sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;
984 if ((sc->sc_iflist_xcnt > 0) && (sc->sc_iflist_ref == 0)) {
985 _BRIDGE_UNLOCK(sc);
986 wakeup(&sc->sc_cv);
987 } else {
988 _BRIDGE_UNLOCK(sc);
989 }
990 }
991
/*
 * bridge_xlock:
 *	With the bridge lock held, announce an exclusive waiter
 *	(sc_iflist_xcnt) and sleep until every read reference taken via
 *	bridge_lock2ref() has drained.  Returns with the lock held and
 *	sc_iflist_xcnt still raised; pair with bridge_xdrop().
 */
static void
bridge_xlock(struct bridge_softc *sc)
{
	void *lr_saved = __builtin_return_address(0);

	BRIDGE_LOCK_ASSERT_HELD(sc);

	sc->sc_iflist_xcnt++;
	while (sc->sc_iflist_ref > 0) {
		/* msleep drops and re-acquires sc_mtx: log both the
		 * release and the re-acquisition in the history rings */
		sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
		sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;

		msleep(&sc->sc_cv, &sc->sc_mtx, PZERO, "BRIDGE_XLOCK", NULL);

		sc->lock_lr[sc->next_lock_lr] = lr_saved;
		sc->next_lock_lr = (sc->next_lock_lr + 1) % SO_LCKDBG_MAX;
	}
}
1010
/*
 * bridge_xdrop:
 *	Release the exclusive-waiter claim taken by bridge_xlock();
 *	caller still holds the bridge lock.
 */
static void
bridge_xdrop(struct bridge_softc *sc)
{
	BRIDGE_LOCK_ASSERT_HELD(sc);

	sc->sc_iflist_xcnt--;
}
1018
1019 void
1020 printf_mbuf_pkthdr(mbuf_t m, const char *prefix, const char *suffix)
1021 {
1022 if (m) {
1023 printf("%spktlen: %u rcvif: 0x%llx header: 0x%llx "
1024 "nextpkt: 0x%llx%s",
1025 prefix ? prefix : "", (unsigned int)mbuf_pkthdr_len(m),
1026 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m)),
1027 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_header(m)),
1028 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_nextpkt(m)),
1029 suffix ? suffix : "");
1030 } else {
1031 printf("%s<NULL>%s\n", prefix, suffix);
1032 }
1033 }
1034
1035 void
1036 printf_mbuf(mbuf_t m, const char *prefix, const char *suffix)
1037 {
1038 if (m) {
1039 printf("%s0x%llx type: %u flags: 0x%x len: %u data: 0x%llx "
1040 "maxlen: %u datastart: 0x%llx next: 0x%llx%s",
1041 prefix ? prefix : "", (uint64_t)VM_KERNEL_ADDRPERM(m),
1042 mbuf_type(m), mbuf_flags(m), (unsigned int)mbuf_len(m),
1043 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)),
1044 (unsigned int)mbuf_maxlen(m),
1045 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_datastart(m)),
1046 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_next(m)),
1047 !suffix || (mbuf_flags(m) & MBUF_PKTHDR) ? "" : suffix);
1048 if ((mbuf_flags(m) & MBUF_PKTHDR)) {
1049 printf_mbuf_pkthdr(m, " ", suffix);
1050 }
1051 } else {
1052 printf("%s<NULL>%s\n", prefix, suffix);
1053 }
1054 }
1055
1056 void
1057 printf_mbuf_data(mbuf_t m, size_t offset, size_t len)
1058 {
1059 mbuf_t n;
1060 size_t i, j;
1061 size_t pktlen, mlen, maxlen;
1062 unsigned char *ptr;
1063
1064 pktlen = mbuf_pkthdr_len(m);
1065
1066 if (offset > pktlen) {
1067 return;
1068 }
1069
1070 maxlen = (pktlen - offset > len) ? len : pktlen - offset;
1071 n = m;
1072 mlen = mbuf_len(n);
1073 ptr = mbuf_data(n);
1074 for (i = 0, j = 0; i < maxlen; i++, j++) {
1075 if (j >= mlen) {
1076 n = mbuf_next(n);
1077 if (n == 0) {
1078 break;
1079 }
1080 ptr = mbuf_data(n);
1081 mlen = mbuf_len(n);
1082 j = 0;
1083 }
1084 if (i >= offset) {
1085 printf("%02x%s", ptr[j], i % 2 ? " " : "");
1086 }
1087 }
1088 }
1089
/*
 * printf_ether_header:
 *	Debug helper: print "shost > dhost ethertype" for an Ethernet
 *	header, e.g. "aa:bb:cc:dd:ee:ff > 11:22:33:44:55:66 0x0800 ".
 */
static void
printf_ether_header(struct ether_header *eh)
{
	int i;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		printf("%02x%s", eh->ether_shost[i],
		    (i < ETHER_ADDR_LEN - 1) ? ":" : "");
	}
	printf(" > ");
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		printf("%02x%s", eh->ether_dhost[i],
		    (i < ETHER_ADDR_LEN - 1) ? ":" : "");
	}
	printf(" 0x%04x ", ntohs(eh->ether_type));
}
1101
/*
 * link_print:
 *	Debug helper: build a sockaddr_dl for the bridge's name and
 *	default MAC address and print its fields plus the address bytes.
 */
static void
link_print(struct bridge_softc * sc)
{
	int i;
	/*
	 * NOTE(review): the element type is uint32_t but the count is a
	 * byte count, so this buffer is ~4x larger than needed —
	 * presumably uint32_t was chosen for alignment; confirm before
	 * changing.
	 */
	uint32_t sdl_buffer[offsetof(struct sockaddr_dl, sdl_data) +
	    IFNAMSIZ + ETHER_ADDR_LEN];
	struct sockaddr_dl *sdl = (struct sockaddr_dl *)sdl_buffer;

	memset(sdl, 0, sizeof(sdl_buffer));
	sdl->sdl_family = AF_LINK;
	sdl->sdl_nlen = strlen(sc->sc_if_xname);
	sdl->sdl_alen = ETHER_ADDR_LEN;
	/* NOTE(review): sdl_len excludes the nlen+alen payload here —
	 * looks intentional for this local debug use; confirm */
	sdl->sdl_len = offsetof(struct sockaddr_dl, sdl_data);
	memcpy(sdl->sdl_data, sc->sc_if_xname, sdl->sdl_nlen);
	memcpy(LLADDR(sdl), sc->sc_defaddr, ETHER_ADDR_LEN);

#if 1
	printf("sdl len %d index %d family %d type 0x%x nlen %d alen %d"
	    " slen %d addr ", sdl->sdl_len, sdl->sdl_index,
	    sdl->sdl_family, sdl->sdl_type, sdl->sdl_nlen,
	    sdl->sdl_alen, sdl->sdl_slen);
#endif
	/* print the MAC bytes colon-separated */
	for (i = 0; i < sdl->sdl_alen; i++) {
		printf("%s%x", i ? ":" : "", (CONST_LLADDR(sdl))[i]);
	}
	printf("\n");
}
1129
1130 #endif /* BRIDGE_DEBUG */
1131
1132 /*
1133 * bridgeattach:
1134 *
1135 * Pseudo-device attach routine.
1136 */
/*
 * One-time subsystem initialization: allocate the routing-node zone,
 * set up the lock group/attributes and the global bridge-list mutex,
 * initialize STP (if built in), and register the "bridge" cloner so
 * bridge interfaces can be created.  Returns 0 or the cloner error.
 */
__private_extern__ int
bridgeattach(int n)
{
#pragma unused(n)
	int error;
	lck_grp_attr_t *lck_grp_attr = NULL;

	/* zone for bridge routing table (MAC learning) entries */
	bridge_rtnode_pool = zinit(sizeof(struct bridge_rtnode),
	    1024 * sizeof(struct bridge_rtnode), 0, "bridge_rtnode");
	zone_change(bridge_rtnode_pool, Z_CALLERACCT, FALSE);

	lck_grp_attr = lck_grp_attr_alloc_init();

	bridge_lock_grp = lck_grp_alloc_init("if_bridge", lck_grp_attr);

	bridge_lock_attr = lck_attr_alloc_init();

#if BRIDGE_DEBUG
	lck_attr_setdebug(bridge_lock_attr);
#endif

	lck_mtx_init(&bridge_list_mtx, bridge_lock_grp, bridge_lock_attr);

	/* can free the attributes once we've allocated the group lock */
	lck_grp_attr_free(lck_grp_attr);

	LIST_INIT(&bridge_list);

#if BRIDGESTP
	bstp_sys_init();
#endif /* BRIDGESTP */

	error = if_clone_attach(&bridge_cloner);
	if (error != 0) {
		printf("%s: ifnet_clone_attach failed %d\n", __func__, error);
	}

	return error;
}
1176
1177 #if defined(PFIL_HOOKS)
1178 /*
1179 * handler for net.link.bridge.pfil_ipfw
1180 */
1181 static int
1182 sysctl_pfil_ipfw SYSCTL_HANDLER_ARGS
1183 {
1184 #pragma unused(arg1, arg2)
1185 int enable = pfil_ipfw;
1186 int error;
1187
1188 error = sysctl_handle_int(oidp, &enable, 0, req);
1189 enable = (enable) ? 1 : 0;
1190
1191 if (enable != pfil_ipfw) {
1192 pfil_ipfw = enable;
1193
1194 /*
1195 * Disable pfil so that ipfw doesnt run twice, if the user
1196 * really wants both then they can re-enable pfil_bridge and/or
1197 * pfil_member. Also allow non-ip packets as ipfw can filter by
1198 * layer2 type.
1199 */
1200 if (pfil_ipfw) {
1201 pfil_onlyip = 0;
1202 pfil_bridge = 0;
1203 pfil_member = 0;
1204 }
1205 }
1206
1207 return error;
1208 }
1209
/* net.link.bridge.ipfw: exposes pfil_ipfw through sysctl_pfil_ipfw */
SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT | CTLFLAG_RW,
    &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW");
#endif /* PFIL_HOOKS */
1213
1214 static errno_t
1215 bridge_ifnet_set_attrs(struct ifnet * ifp)
1216 {
1217 errno_t error;
1218
1219 error = ifnet_set_mtu(ifp, ETHERMTU);
1220 if (error != 0) {
1221 printf("%s: ifnet_set_mtu failed %d\n", __func__, error);
1222 goto done;
1223 }
1224 error = ifnet_set_addrlen(ifp, ETHER_ADDR_LEN);
1225 if (error != 0) {
1226 printf("%s: ifnet_set_addrlen failed %d\n", __func__, error);
1227 goto done;
1228 }
1229 error = ifnet_set_hdrlen(ifp, ETHER_HDR_LEN);
1230 if (error != 0) {
1231 printf("%s: ifnet_set_hdrlen failed %d\n", __func__, error);
1232 goto done;
1233 }
1234 error = ifnet_set_flags(ifp,
1235 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST,
1236 0xffff);
1237
1238 if (error != 0) {
1239 printf("%s: ifnet_set_flags failed %d\n", __func__, error);
1240 goto done;
1241 }
1242 done:
1243 return error;
1244 }
1245
1246 /*
1247 * bridge_clone_create:
1248 *
1249 * Create a new bridge instance.
1250 */
/*
 * Allocate and initialize a new bridge softc/ifnet for unit `unit`:
 * set up locks and defaults, (in bsd mode) the routing table and ifnet,
 * pick a unique locally-administered MAC address, attach the interface
 * and BPF.  Returns 0 or an errno; on failure partial state is not yet
 * cleaned up (see TBD at `done`).
 */
static int
bridge_clone_create(struct if_clone *ifc, uint32_t unit, void *params)
{
#pragma unused(params)
	struct ifnet *ifp = NULL;
	struct bridge_softc *sc = NULL;
	struct bridge_softc *sc2 = NULL;
	struct ifnet_init_eparams init_params;
	errno_t error = 0;
	uint8_t eth_hostid[ETHER_ADDR_LEN];
	int fb, retry, has_hostid;

	sc = if_clone_softc_allocate(&bridge_cloner);
	if (sc == NULL) {
		error = ENOMEM;
		goto done;
	}

	/* per-bridge lock plus learning-table defaults */
	lck_mtx_init(&sc->sc_mtx, bridge_lock_grp, bridge_lock_attr);
	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
	sc->sc_filter_flags = IFBF_FILT_DEFAULT;
#ifndef BRIDGE_IPF
	/*
	 * For backwards compatibility with previous behaviour...
	 * Switch off filtering on the bridge itself if BRIDGE_IPF is
	 * not defined.
	 */
	sc->sc_filter_flags &= ~IFBF_FILT_USEIPF;
#endif

	if (bridge_bsd_mode != 0) {
		bridge_set_bsd_mode(sc);
	}

	TAILQ_INIT(&sc->sc_iflist);

	/* use the interface name as the unique id for ifp recycle */
	snprintf(sc->sc_if_xname, sizeof(sc->sc_if_xname), "%s%d",
	    ifc->ifc_name, unit);
	bzero(&init_params, sizeof(init_params));
	init_params.ver = IFNET_INIT_CURRENT_VERSION;
	init_params.len = sizeof(init_params);
	if (bridge_in_bsd_mode(sc)) {
		/* Initialize our routing table. */
		error = bridge_rtable_init(sc);
		if (error != 0) {
			printf("%s: bridge_rtable_init failed %d\n",
			    __func__, error);
			goto done;
		}
		TAILQ_INIT(&sc->sc_spanlist);
		/* choose the TXSTART or legacy output model (sysctl) */
		if (if_bridge_txstart) {
			init_params.start = bridge_start;
		} else {
			init_params.flags = IFNET_INIT_LEGACY;
			init_params.output = bridge_output;
		}
		init_params.set_bpf_tap = bridge_set_bpf_tap;
	}
	init_params.uniqueid = sc->sc_if_xname;
	init_params.uniqueid_len = strlen(sc->sc_if_xname);
	init_params.sndq_maxlen = IFQ_MAXLEN;
	init_params.name = ifc->ifc_name;
	init_params.unit = unit;
	init_params.family = IFNET_FAMILY_ETHERNET;
	init_params.type = IFT_BRIDGE;
	init_params.demux = ether_demux;
	init_params.add_proto = ether_add_proto;
	init_params.del_proto = ether_del_proto;
	init_params.check_multi = ether_check_multi;
	init_params.framer_extended = ether_frameout_extended;
	init_params.softc = sc;
	init_params.ioctl = bridge_ioctl;
	init_params.detach = bridge_detach;
	init_params.broadcast_addr = etherbroadcastaddr;
	init_params.broadcast_len = ETHER_ADDR_LEN;

	if (bridge_in_bsd_mode(sc)) {
		error = ifnet_allocate_extended(&init_params, &ifp);
		if (error != 0) {
			printf("%s: ifnet_allocate failed %d\n",
			    __func__, error);
			goto done;
		}
		sc->sc_ifp = ifp;
		error = bridge_ifnet_set_attrs(ifp);
		if (error != 0) {
			printf("%s: bridge_ifnet_set_attrs failed %d\n",
			    __func__, error);
			goto done;
		}
	}

	/*
	 * Generate an ethernet address with a locally administered address.
	 *
	 * Since we are using random ethernet addresses for the bridge, it is
	 * possible that we might have address collisions, so make sure that
	 * this hardware address isn't already in use on another bridge.
	 * The first try uses the "hostid" and falls back to read_frandom();
	 * for "hostid", we use the MAC address of the first-encountered
	 * Ethernet-type interface that is currently configured.
	 */
	fb = 0;
	has_hostid = (uuid_get_ethernet(&eth_hostid[0]) == 0);
	for (retry = 1; retry != 0;) {
		if (fb || has_hostid == 0) {
			/* fallback: fully random LAA unicast address */
			read_frandom(&sc->sc_defaddr, ETHER_ADDR_LEN);
			sc->sc_defaddr[0] &= ~1; /* clear multicast bit */
			sc->sc_defaddr[0] |= 2; /* set the LAA bit */
		} else {
			bcopy(&eth_hostid[0], &sc->sc_defaddr,
			    ETHER_ADDR_LEN);
			sc->sc_defaddr[0] &= ~1; /* clear multicast bit */
			sc->sc_defaddr[0] |= 2; /* set the LAA bit */
			sc->sc_defaddr[3] = /* stir it up a bit */
			    ((sc->sc_defaddr[3] & 0x0f) << 4) |
			    ((sc->sc_defaddr[3] & 0xf0) >> 4);
			/*
			 * Mix in the LSB as it's actually pretty significant,
			 * see rdar://14076061
			 */
			sc->sc_defaddr[4] =
			    (((sc->sc_defaddr[4] & 0x0f) << 4) |
			    ((sc->sc_defaddr[4] & 0xf0) >> 4)) ^
			    sc->sc_defaddr[5];
			/* NOTE(review): ifp is only assigned in bsd mode
			 * above; this dereference assumes bsd mode (or a
			 * recycled sc_ifp) — confirm */
			sc->sc_defaddr[5] = ifp->if_unit & 0xff;
		}

		fb = 1;
		retry = 0;
		/* collision check against every other bridge's MAC */
		lck_mtx_lock(&bridge_list_mtx);
		LIST_FOREACH(sc2, &bridge_list, sc_list) {
			if (memcmp(sc->sc_defaddr,
			    IF_LLADDR(sc2->sc_ifp), ETHER_ADDR_LEN) == 0) {
				retry = 1;
			}
		}
		lck_mtx_unlock(&bridge_list_mtx);
	}

	sc->sc_flags &= ~SCF_MEDIA_ACTIVE;

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
		link_print(sc);
	}
#endif
	if (bridge_in_bsd_mode(sc)) {
		error = ifnet_attach(ifp, NULL);
		if (error != 0) {
			printf("%s: ifnet_attach failed %d\n", __func__, error);
			goto done;
		}
	}

	error = ifnet_set_lladdr_and_type(ifp, sc->sc_defaddr, ETHER_ADDR_LEN,
	    IFT_ETHER);
	if (error != 0) {
		printf("%s: ifnet_set_lladdr_and_type failed %d\n", __func__,
		    error);
		goto done;
	}

	if (bridge_in_bsd_mode(sc)) {
		/* advertise checksum offload; TSO is negotiated per-member */
		ifnet_set_offload(ifp,
		    IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP |
		    IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 | IFNET_MULTIPAGES);
		error = bridge_set_tso(sc);
		if (error != 0) {
			printf("%s: bridge_set_tso failed %d\n",
			    __func__, error);
			goto done;
		}
#if BRIDGESTP
		bstp_attach(&sc->sc_stp, &bridge_ops);
#endif /* BRIDGESTP */
	}

	lck_mtx_lock(&bridge_list_mtx);
	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
	lck_mtx_unlock(&bridge_list_mtx);

	/* attach as ethernet */
	error = bpf_attach(ifp, DLT_EN10MB, sizeof(struct ether_header),
	    NULL, NULL);

done:
	if (error != 0) {
		printf("%s failed error %d\n", __func__, error);
		/* TBD: Clean up: sc, sc_rthash etc */
	}

	return error;
}
1447
1448 /*
1449 * bridge_clone_destroy:
1450 *
1451 * Destroy a bridge instance.
1452 */
/*
 * Tear down a bridge instance: mark it detaching (idempotent), stop it,
 * cancel timers, delete every member and span port, and detach the
 * ifnet (final cleanup happens in the detach callback).
 */
static int
bridge_clone_destroy(struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct bridge_iflist *bif;
	errno_t error;

	BRIDGE_LOCK(sc);
	if ((sc->sc_flags & SCF_DETACHING)) {
		/* already being destroyed */
		BRIDGE_UNLOCK(sc);
		return 0;
	}
	sc->sc_flags |= SCF_DETACHING;

	bridge_ifstop(ifp, 1);

	if (bridge_in_bsd_mode(sc)) {
		/* stop and drain the resize/aging delayed calls */
		bridge_cancel_delayed_call(&sc->sc_resize_call);

		bridge_cleanup_delayed_call(&sc->sc_resize_call);
		bridge_cleanup_delayed_call(&sc->sc_aging_timer);
	}

	error = ifnet_set_flags(ifp, 0, IFF_UP);
	if (error != 0) {
		printf("%s: ifnet_set_flags failed %d\n", __func__, error);
	}

	while ((bif = TAILQ_FIRST(&sc->sc_iflist)) != NULL) {
		bridge_delete_member(sc, bif, 0);
	}

	if (bridge_in_bsd_mode(sc)) {
		while ((bif = TAILQ_FIRST(&sc->sc_spanlist)) != NULL) {
			bridge_delete_span(sc, bif);
		}
		/* NOTE(review): the bridge lock is only released on the
		 * bsd-mode path — in non-bsd mode it appears to remain
		 * held across ifnet_detach(); confirm intended */
		BRIDGE_UNLOCK(sc);
	}

	error = ifnet_detach(ifp);
	if (error != 0) {
		panic("%s: ifnet_detach(%p) failed %d\n",
		    __func__, ifp, error);
	}
	return 0;
}
1499
/*
 * DRVSPEC: shared body for the SIOC[SG]DRVSPEC{32,64} cases in
 * bridge_ioctl().  Expects in scope: `ifd` (the ifdrv request),
 * `bridge_control_table`/`bc` (the width-specific jump table),
 * `args` (a union large enough for every argument struct), `sc`,
 * `cmd` and `error`.  It bounds-checks ifd_cmd, validates the
 * copyin/copyout direction against the command, enforces BC_F_SUSER
 * via kauth, copies the argument in, runs the handler under the
 * bridge lock, and copies the result back out when requested.
 */
#define DRVSPEC do { \
	if (ifd->ifd_cmd >= bridge_control_table_size) { \
		error = EINVAL; \
		break; \
	} \
	bc = &bridge_control_table[ifd->ifd_cmd]; \
 \
	if (cmd == SIOCGDRVSPEC && \
	    (bc->bc_flags & BC_F_COPYOUT) == 0) { \
		error = EINVAL; \
		break; \
	} else if (cmd == SIOCSDRVSPEC && \
	    (bc->bc_flags & BC_F_COPYOUT) != 0) { \
		error = EINVAL; \
		break; \
	} \
 \
	if (bc->bc_flags & BC_F_SUSER) { \
		error = kauth_authorize_generic(kauth_cred_get(), \
		    KAUTH_GENERIC_ISSUSER); \
		if (error) \
			break; \
	} \
 \
	if (ifd->ifd_len != bc->bc_argsize || \
	    ifd->ifd_len > sizeof (args)) { \
		error = EINVAL; \
		break; \
	} \
 \
	bzero(&args, sizeof (args)); \
	if (bc->bc_flags & BC_F_COPYIN) { \
		error = copyin(ifd->ifd_data, &args, ifd->ifd_len); \
		if (error) \
			break; \
	} \
 \
	BRIDGE_LOCK(sc); \
	error = (*bc->bc_func)(sc, &args); \
	BRIDGE_UNLOCK(sc); \
	if (error) \
		break; \
 \
	if (bc->bc_flags & BC_F_COPYOUT) \
		error = copyout(&args, ifd->ifd_data, ifd->ifd_len); \
} while (0)
1546
1547 /*
1548 * bridge_ioctl:
1549 *
1550 * Handle a control request from the operator.
1551 */
/*
 * Interface ioctl handler for the bridge ifnet itself.  Dispatches the
 * bridge-specific SIOC[SG]DRVSPEC commands through the 32/64-bit jump
 * tables (via DRVSPEC) and handles the generic interface ioctls; all
 * other commands fall through to ether_ioctl().  Must be entered and
 * left with the bridge lock not held.
 */
static errno_t
bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct bridge_iflist *bif;
	int error = 0;

	BRIDGE_LOCK_ASSERT_NOTHELD(sc);

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_IOCTL) {
		printf("%s: ifp %s cmd 0x%08lx (%c%c [%lu] %c %lu)\n",
		    __func__, ifp->if_xname, cmd, (cmd & IOC_IN) ? 'I' : ' ',
		    (cmd & IOC_OUT) ? 'O' : ' ', IOCPARM_LEN(cmd),
		    (char)IOCGROUP(cmd), cmd & 0xff);
	}
#endif /* BRIDGE_DEBUG */

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCAIFADDR:
		/* assigning an address marks the interface up */
		ifnet_set_flags(ifp, IFF_UP, IFF_UP);
		break;

	case SIOCGIFMEDIA32:
	case SIOCGIFMEDIA64: {
		struct ifmediareq *ifmr = (struct ifmediareq *)data;
		user_addr_t user_addr;

		/* user media list pointer differs by ABI width */
		user_addr = (cmd == SIOCGIFMEDIA64) ?
		    ((struct ifmediareq64 *)ifmr)->ifmu_ulist :
		    CAST_USER_ADDR_T(((struct ifmediareq32 *)ifmr)->ifmu_ulist);

		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_mask = 0;
		ifmr->ifm_count = 1;

		/* report active autoselect Ethernet when any member link
		 * is up and we are not tearing down */
		BRIDGE_LOCK(sc);
		if (!(sc->sc_flags & SCF_DETACHING) &&
		    (sc->sc_flags & SCF_MEDIA_ACTIVE)) {
			ifmr->ifm_status |= IFM_ACTIVE;
			ifmr->ifm_active = ifmr->ifm_current =
			    IFM_ETHER | IFM_AUTO;
		} else {
			ifmr->ifm_active = ifmr->ifm_current = IFM_NONE;
		}
		BRIDGE_UNLOCK(sc);

		if (user_addr != USER_ADDR_NULL) {
			error = copyout(&ifmr->ifm_current, user_addr,
			    sizeof(int));
		}
		break;
	}

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;

	case SIOCSDRVSPEC32:
	case SIOCGDRVSPEC32: {
		/* union sized for the largest 32-bit argument struct */
		union {
			struct ifbreq ifbreq;
			struct ifbifconf32 ifbifconf;
			struct ifbareq32 ifbareq;
			struct ifbaconf32 ifbaconf;
			struct ifbrparam ifbrparam;
			struct ifbropreq32 ifbropreq;
		} args;
		struct ifdrv32 *ifd = (struct ifdrv32 *)data;
		const struct bridge_control *bridge_control_table =
		    bridge_control_table32, *bc;

		DRVSPEC;

		break;
	}
	case SIOCSDRVSPEC64:
	case SIOCGDRVSPEC64: {
		/* union sized for the largest 64-bit argument struct */
		union {
			struct ifbreq ifbreq;
			struct ifbifconf64 ifbifconf;
			struct ifbareq64 ifbareq;
			struct ifbaconf64 ifbaconf;
			struct ifbrparam ifbrparam;
			struct ifbropreq64 ifbropreq;
		} args;
		struct ifdrv64 *ifd = (struct ifdrv64 *)data;
		const struct bridge_control *bridge_control_table =
		    bridge_control_table64, *bc;

		DRVSPEC;

		break;
	}

	case SIOCSIFFLAGS:
		if (!(ifp->if_flags & IFF_UP) &&
		    (ifp->if_flags & IFF_RUNNING)) {
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			BRIDGE_LOCK(sc);
			bridge_ifstop(ifp, 1);
			BRIDGE_UNLOCK(sc);
		} else if ((ifp->if_flags & IFF_UP) &&
		    !(ifp->if_flags & IFF_RUNNING)) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			BRIDGE_LOCK(sc);
			error = bridge_init(ifp);
			BRIDGE_UNLOCK(sc);
		}
		break;

	case SIOCSIFLLADDR:
		error = ifnet_set_lladdr(ifp, ifr->ifr_addr.sa_data,
		    ifr->ifr_addr.sa_len);
		if (error != 0) {
			printf("%s: SIOCSIFLLADDR error %d\n", ifp->if_xname,
			    error);
		}
		break;

	case SIOCSIFMTU:
		/* refuse MTUs below the IPv4 minimum-reassembly size */
		if (ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		BRIDGE_LOCK(sc);
		if (TAILQ_EMPTY(&sc->sc_iflist)) {
			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
			BRIDGE_UNLOCK(sc);
			break;
		}
		/* with members attached, the new MTU must match every
		 * member's MTU */
		TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
			if (bif->bif_ifp->if_mtu != (unsigned)ifr->ifr_mtu) {
				printf("%s: invalid MTU: %u(%s) != %d\n",
				    sc->sc_ifp->if_xname,
				    bif->bif_ifp->if_mtu,
				    bif->bif_ifp->if_xname, ifr->ifr_mtu);
				error = EINVAL;
				break;
			}
		}
		if (!error) {
			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
		}
		BRIDGE_UNLOCK(sc);
		break;

	default:
		/* anything else is handled like a plain Ethernet ifnet */
		error = ether_ioctl(ifp, cmd, data);
#if BRIDGE_DEBUG
		if (error != 0 && error != EOPNOTSUPP) {
			printf("%s: ifp %s cmd 0x%08lx "
			    "(%c%c [%lu] %c %lu) failed error: %d\n",
			    __func__, ifp->if_xname, cmd,
			    (cmd & IOC_IN) ? 'I' : ' ',
			    (cmd & IOC_OUT) ? 'O' : ' ',
			    IOCPARM_LEN(cmd), (char)IOCGROUP(cmd),
			    cmd & 0xff, error);
		}
#endif /* BRIDGE_DEBUG */
		break;
	}
	BRIDGE_LOCK_ASSERT_NOTHELD(sc);

	return error;
}
1726
1727 #if HAS_IF_CAP
1728 /*
1729 * bridge_mutecaps:
1730 *
1731 * Clear or restore unwanted capabilities on the member interface
1732 */
1733 static void
1734 bridge_mutecaps(struct bridge_softc *sc)
1735 {
1736 struct bridge_iflist *bif;
1737 int enabled, mask;
1738
1739 /* Initial bitmask of capabilities to test */
1740 mask = BRIDGE_IFCAPS_MASK;
1741
1742 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1743 /* Every member must support it or its disabled */
1744 mask &= bif->bif_savedcaps;
1745 }
1746
1747 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1748 enabled = bif->bif_ifp->if_capenable;
1749 enabled &= ~BRIDGE_IFCAPS_STRIP;
1750 /* strip off mask bits and enable them again if allowed */
1751 enabled &= ~BRIDGE_IFCAPS_MASK;
1752 enabled |= mask;
1753
1754 bridge_set_ifcap(sc, bif, enabled);
1755 }
1756 }
1757
1758 static void
1759 bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
1760 {
1761 struct ifnet *ifp = bif->bif_ifp;
1762 struct ifreq ifr;
1763 int error;
1764
1765 bzero(&ifr, sizeof(ifr));
1766 ifr.ifr_reqcap = set;
1767
1768 if (ifp->if_capenable != set) {
1769 IFF_LOCKGIANT(ifp);
1770 error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
1771 IFF_UNLOCKGIANT(ifp);
1772 if (error) {
1773 printf("%s: %s error setting interface capabilities "
1774 "on %s\n", __func__, sc->sc_ifp->if_xname,
1775 ifp->if_xname);
1776 }
1777 }
1778 }
1779 #endif /* HAS_IF_CAP */
1780
/*
 * Recompute the bridge interface's TSO capability as the lowest common
 * denominator of all members: TSO (v4/v6) stays enabled only if every
 * member supports it, and the TSO MTU is the smallest member value
 * (but never below the bridge MTU, as ifnet_set_tso_mtu requires).
 */
static errno_t
bridge_set_tso(struct bridge_softc *sc)
{
	struct bridge_iflist *bif;
	u_int32_t tso_v4_mtu;
	u_int32_t tso_v6_mtu;
	ifnet_offload_t offload;
	errno_t error = 0;

	/* By default, support TSO */
	offload = sc->sc_ifp->if_hwassist | IFNET_TSO_IPV4 | IFNET_TSO_IPV6;
	tso_v4_mtu = IP_MAXPACKET;
	tso_v6_mtu = IP_MAXPACKET;

	/* Use the lowest common denominator of the members */
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
		ifnet_t ifp = bif->bif_ifp;

		if (ifp == NULL) {
			continue;
		}

		if (offload & IFNET_TSO_IPV4) {
			if (ifp->if_hwassist & IFNET_TSO_IPV4) {
				/* shrink to the smallest member TSO MTU */
				if (tso_v4_mtu > ifp->if_tso_v4_mtu) {
					tso_v4_mtu = ifp->if_tso_v4_mtu;
				}
			} else {
				/* one member without v4 TSO disables it */
				offload &= ~IFNET_TSO_IPV4;
				tso_v4_mtu = 0;
			}
		}
		if (offload & IFNET_TSO_IPV6) {
			if (ifp->if_hwassist & IFNET_TSO_IPV6) {
				if (tso_v6_mtu > ifp->if_tso_v6_mtu) {
					tso_v6_mtu = ifp->if_tso_v6_mtu;
				}
			} else {
				offload &= ~IFNET_TSO_IPV6;
				tso_v6_mtu = 0;
			}
		}
	}

	/* apply only when the computed set differs from the current one */
	if (offload != sc->sc_ifp->if_hwassist) {
		error = ifnet_set_offload(sc->sc_ifp, offload);
		if (error != 0) {
#if BRIDGE_DEBUG
			if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
				printf("%s: ifnet_set_offload(%s, 0x%x) "
				    "failed %d\n", __func__,
				    sc->sc_ifp->if_xname, offload, error);
			}
#endif /* BRIDGE_DEBUG */
			goto done;
		}
		/*
		 * For ifnet_set_tso_mtu() sake, the TSO MTU must be at least
		 * as large as the interface MTU
		 */
		if (sc->sc_ifp->if_hwassist & IFNET_TSO_IPV4) {
			if (tso_v4_mtu < sc->sc_ifp->if_mtu) {
				tso_v4_mtu = sc->sc_ifp->if_mtu;
			}
			error = ifnet_set_tso_mtu(sc->sc_ifp, AF_INET,
			    tso_v4_mtu);
			if (error != 0) {
#if BRIDGE_DEBUG
				if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
					printf("%s: ifnet_set_tso_mtu(%s, "
					    "AF_INET, %u) failed %d\n",
					    __func__, sc->sc_ifp->if_xname,
					    tso_v4_mtu, error);
				}
#endif /* BRIDGE_DEBUG */
				goto done;
			}
		}
		if (sc->sc_ifp->if_hwassist & IFNET_TSO_IPV6) {
			if (tso_v6_mtu < sc->sc_ifp->if_mtu) {
				tso_v6_mtu = sc->sc_ifp->if_mtu;
			}
			error = ifnet_set_tso_mtu(sc->sc_ifp, AF_INET6,
			    tso_v6_mtu);
			if (error != 0) {
#if BRIDGE_DEBUG
				if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
					printf("%s: ifnet_set_tso_mtu(%s, "
					    "AF_INET6, %u) failed %d\n",
					    __func__, sc->sc_ifp->if_xname,
					    tso_v6_mtu, error);
				}
#endif /* BRIDGE_DEBUG */
				goto done;
			}
		}
	}
done:
	return error;
}
1881
1882 /*
1883 * bridge_lookup_member:
1884 *
1885 * Lookup a bridge member interface.
1886 */
1887 static struct bridge_iflist *
1888 bridge_lookup_member(struct bridge_softc *sc, const char *name)
1889 {
1890 struct bridge_iflist *bif;
1891 struct ifnet *ifp;
1892
1893 BRIDGE_LOCK_ASSERT_HELD(sc);
1894
1895 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1896 ifp = bif->bif_ifp;
1897 if (strcmp(ifp->if_xname, name) == 0) {
1898 return bif;
1899 }
1900 }
1901
1902 return NULL;
1903 }
1904
1905 /*
1906 * bridge_lookup_member_if:
1907 *
1908 * Lookup a bridge member interface by ifnet*.
1909 */
1910 static struct bridge_iflist *
1911 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
1912 {
1913 struct bridge_iflist *bif;
1914
1915 BRIDGE_LOCK_ASSERT_HELD(sc);
1916
1917 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1918 if (bif->bif_ifp == member_ifp) {
1919 return bif;
1920 }
1921 }
1922
1923 return NULL;
1924 }
1925
/*
 * Interface filter input callback installed on each member: hands
 * inbound frames to bridge_input().  The filter sees the mbuf with the
 * data pointer past the link-layer header (*frame_ptr points at it);
 * when the header is contiguous in this mbuf, the data pointer is
 * temporarily rewound to include it and restored afterwards.  Frames
 * tagged M_PROTO1 are passed through untouched.
 */
static errno_t
bridge_iff_input(void *cookie, ifnet_t ifp, protocol_family_t protocol,
    mbuf_t *data, char **frame_ptr)
{
#pragma unused(protocol)
	errno_t error = 0;
	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
	struct bridge_softc *sc = bif->bif_sc;
	int included = 0;
	size_t frmlen = 0;
	mbuf_t m = *data;

	/* M_PROTO1 marks frames the bridge itself injected; skip them */
	if ((m->m_flags & M_PROTO1)) {
		goto out;
	}

	/* is the frame header inside this mbuf's data area? */
	if (*frame_ptr >= (char *)mbuf_datastart(m) &&
	    *frame_ptr <= (char *)mbuf_data(m)) {
		included = 1;
		frmlen = (char *)mbuf_data(m) - *frame_ptr;
	}
#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_INPUT) {
		printf("%s: %s from %s m 0x%llx data 0x%llx frame 0x%llx %s "
		    "frmlen %lu\n", __func__, sc->sc_ifp->if_xname,
		    ifp->if_xname, (uint64_t)VM_KERNEL_ADDRPERM(m),
		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)),
		    (uint64_t)VM_KERNEL_ADDRPERM(*frame_ptr),
		    included ? "inside" : "outside", frmlen);

		if (if_bridge_debug & BR_DBGF_MBUF) {
			printf_mbuf(m, "bridge_iff_input[", "\n");
			printf_ether_header((struct ether_header *)
			    (void *)*frame_ptr);
			printf_mbuf_data(m, 0, 20);
			printf("\n");
		}
	}
#endif /* BRIDGE_DEBUG */

	/* Move data pointer to start of frame to the link layer header */
	if (included) {
		(void) mbuf_setdata(m, (char *)mbuf_data(m) - frmlen,
		    mbuf_len(m) + frmlen);
		(void) mbuf_pkthdr_adjustlen(m, frmlen);
	} else {
		printf("%s: frame_ptr outside mbuf\n", __func__);
		goto out;
	}

	error = bridge_input(ifp, m, *frame_ptr);

	/* Adjust packet back to original */
	if (error == 0) {
		/* NOTE(review): frmlen is size_t, so -frmlen relies on
		 * unsigned wraparound matching the callee's parameter
		 * width — confirm against mbuf_pkthdr_adjustlen() */
		(void) mbuf_setdata(m, (char *)mbuf_data(m) + frmlen,
		    mbuf_len(m) - frmlen);
		(void) mbuf_pkthdr_adjustlen(m, -frmlen);
	}
#if BRIDGE_DEBUG
	if ((if_bridge_debug & BR_DBGF_INPUT) &&
	    (if_bridge_debug & BR_DBGF_MBUF)) {
		printf("\n");
		printf_mbuf(m, "bridge_iff_input]", "\n");
	}
#endif /* BRIDGE_DEBUG */

out:
	BRIDGE_LOCK_ASSERT_NOTHELD(sc);

	return error;
}
1997
1998 #if BRIDGE_MEMBER_OUT_FILTER
1999 static errno_t
2000 bridge_iff_output(void *cookie, ifnet_t ifp, protocol_family_t protocol,
2001 mbuf_t *data)
2002 {
2003 #pragma unused(protocol)
2004 errno_t error = 0;
2005 struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
2006 struct bridge_softc *sc = bif->bif_sc;
2007 mbuf_t m = *data;
2008
2009 if ((m->m_flags & M_PROTO1)) {
2010 goto out;
2011 }
2012
2013 #if BRIDGE_DEBUG
2014 if (if_bridge_debug & BR_DBGF_OUTPUT) {
2015 printf("%s: %s from %s m 0x%llx data 0x%llx\n", __func__,
2016 sc->sc_ifp->if_xname, ifp->if_xname,
2017 (uint64_t)VM_KERNEL_ADDRPERM(m),
2018 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)));
2019 }
2020 #endif /* BRIDGE_DEBUG */
2021
2022 error = bridge_member_output(sc, ifp, m);
2023 if (error != 0) {
2024 printf("%s: bridge_member_output failed error %d\n", __func__,
2025 error);
2026 }
2027
2028 out:
2029 BRIDGE_LOCK_ASSERT_NOTHELD(sc);
2030
2031 return error;
2032 }
2033 #endif /* BRIDGE_MEMBER_OUT_FILTER */
2034
/*
 * bridge_iff_event:
 *
 * Interface-filter event callback for a bridge member. Handles Apple
 * DLIL (KEV_DL_SUBCLASS) events only: member detach, link state
 * changes, interface-flag changes and capability changes.
 */
static void
bridge_iff_event(void *cookie, ifnet_t ifp, protocol_family_t protocol,
    const struct kev_msg *event_msg)
{
#pragma unused(protocol)
	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
	struct bridge_softc *sc = bif->bif_sc;

	/* Only Apple network/DLIL events are of interest. */
	if (event_msg->vendor_code == KEV_VENDOR_APPLE &&
	    event_msg->kev_class == KEV_NETWORK_CLASS &&
	    event_msg->kev_subclass == KEV_DL_SUBCLASS) {
#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
			printf("%s: %s event_code %u - %s\n", __func__,
			    ifp->if_xname, event_msg->event_code,
			    dlil_kev_dl_code_str(event_msg->event_code));
		}
#endif /* BRIDGE_DEBUG */

		switch (event_msg->event_code) {
		case KEV_DL_IF_DETACHING:
		case KEV_DL_IF_DETACHED: {
			/* Member interface is going away: unhook it. */
			bridge_ifdetach(bif, ifp);
			break;
		}
		case KEV_DL_LINK_OFF:
		case KEV_DL_LINK_ON: {
			/* Propagate member link state to the bridge. */
			bridge_iflinkevent(ifp);
#if BRIDGESTP
			bstp_linkstate(ifp, event_msg->event_code);
#endif /* BRIDGESTP */
			break;
		}
		case KEV_DL_SIFFLAGS: {
			/*
			 * Re-enable promiscuous mode if the member came up
			 * before we had managed to turn it on.
			 */
			if ((bif->bif_flags & BIFF_PROMISC) == 0 &&
			    (ifp->if_flags & IFF_UP)) {
				errno_t error;

				error = ifnet_set_promiscuous(ifp, 1);
				if (error != 0) {
					printf("%s: "
					    "ifnet_set_promiscuous (%s)"
					    " failed %d\n",
					    __func__, ifp->if_xname,
					    error);
				} else {
					bif->bif_flags |= BIFF_PROMISC;
				}
			}
			break;
		}
		case KEV_DL_IFCAP_CHANGED: {
			/* Recompute TSO capability under the bridge lock. */
			BRIDGE_LOCK(sc);
			bridge_set_tso(sc);
			BRIDGE_UNLOCK(sc);
			break;
		}
		default:
			break;
		}
	}
}
2097
2098 /*
2099 * bridge_iff_detached:
2100 *
2101 * Detach an interface from a bridge. Called when a member
2102 * interface is detaching.
2103 */
2104 static void
2105 bridge_iff_detached(void *cookie, ifnet_t ifp)
2106 {
2107 struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
2108
2109 #if BRIDGE_DEBUG
2110 if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
2111 printf("%s: %s\n", __func__, ifp->if_xname);
2112 }
2113 #endif /* BRIDGE_DEBUG */
2114
2115 bridge_ifdetach(bif, ifp);
2116
2117 _FREE(bif, M_DEVBUF);
2118 }
2119
2120 static errno_t
2121 bridge_proto_input(ifnet_t ifp, protocol_family_t protocol, mbuf_t packet,
2122 char *header)
2123 {
2124 #pragma unused(protocol, packet, header)
2125 #if BRIDGE_DEBUG
2126 printf("%s: unexpected packet from %s\n", __func__,
2127 ifp->if_xname);
2128 #endif /* BRIDGE_DEBUG */
2129 return 0;
2130 }
2131
2132 static int
2133 bridge_attach_protocol(struct ifnet *ifp)
2134 {
2135 int error;
2136 struct ifnet_attach_proto_param reg;
2137
2138 #if BRIDGE_DEBUG
2139 if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
2140 printf("%s: %s\n", __func__, ifp->if_xname);
2141 }
2142 #endif /* BRIDGE_DEBUG */
2143
2144 bzero(&reg, sizeof(reg));
2145 reg.input = bridge_proto_input;
2146
2147 error = ifnet_attach_protocol(ifp, PF_BRIDGE, &reg);
2148 if (error) {
2149 printf("%s: ifnet_attach_protocol(%s) failed, %d\n",
2150 __func__, ifp->if_xname, error);
2151 }
2152
2153 return error;
2154 }
2155
2156 static int
2157 bridge_detach_protocol(struct ifnet *ifp)
2158 {
2159 int error;
2160
2161 #if BRIDGE_DEBUG
2162 if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
2163 printf("%s: %s\n", __func__, ifp->if_xname);
2164 }
2165 #endif /* BRIDGE_DEBUG */
2166 error = ifnet_detach_protocol(ifp, PF_BRIDGE);
2167 if (error) {
2168 printf("%s: ifnet_detach_protocol(%s) failed, %d\n",
2169 __func__, ifp->if_xname, error);
2170 }
2171
2172 return error;
2173 }
2174
2175 /*
2176 * bridge_delete_member:
2177 *
2178 * Delete the specified member interface.
2179 */
2180 static void
2181 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
2182 int gone)
2183 {
2184 struct ifnet *ifs = bif->bif_ifp, *bifp = sc->sc_ifp;
2185 int lladdr_changed = 0, error, filt_attached;
2186 uint8_t eaddr[ETHER_ADDR_LEN];
2187 u_int32_t event_code = 0;
2188 boolean_t bsd_mode;
2189
2190 BRIDGE_LOCK_ASSERT_HELD(sc);
2191 VERIFY(ifs != NULL);
2192
2193 bsd_mode = bridge_in_bsd_mode(sc);
2194
2195 /*
2196 * First, remove the member from the list first so it cannot be found anymore
2197 * when we release the bridge lock below
2198 */
2199 BRIDGE_XLOCK(sc);
2200 TAILQ_REMOVE(&sc->sc_iflist, bif, bif_next);
2201 BRIDGE_XDROP(sc);
2202
2203 if (!gone) {
2204 switch (ifs->if_type) {
2205 case IFT_ETHER:
2206 case IFT_L2VLAN:
2207 /*
2208 * Take the interface out of promiscuous mode.
2209 */
2210 if (bif->bif_flags & BIFF_PROMISC) {
2211 /*
2212 * Unlock to prevent deadlock with bridge_iff_event() in
2213 * case the driver generates an interface event
2214 */
2215 BRIDGE_UNLOCK(sc);
2216 (void) ifnet_set_promiscuous(ifs, 0);
2217 BRIDGE_LOCK(sc);
2218 }
2219 break;
2220
2221 case IFT_GIF:
2222 /* currently not supported */
2223 /* FALLTHRU */
2224 default:
2225 VERIFY(0);
2226 /* NOTREACHED */
2227 }
2228
2229 #if HAS_IF_CAP
2230 /* reneable any interface capabilities */
2231 bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
2232 #endif
2233 }
2234
2235 if (bif->bif_flags & BIFF_PROTO_ATTACHED) {
2236 /* Respect lock ordering with DLIL lock */
2237 BRIDGE_UNLOCK(sc);
2238 (void) bridge_detach_protocol(ifs);
2239 BRIDGE_LOCK(sc);
2240 }
2241 #if BRIDGESTP
2242 if (bsd_mode && (bif->bif_ifflags & IFBIF_STP) != 0) {
2243 bstp_disable(&bif->bif_stp);
2244 }
2245 #endif /* BRIDGESTP */
2246
2247 /*
2248 * If removing the interface that gave the bridge its mac address, set
2249 * the mac address of the bridge to the address of the next member, or
2250 * to its default address if no members are left.
2251 */
2252 if (bridge_inherit_mac && sc->sc_ifaddr == ifs) {
2253 ifnet_release(sc->sc_ifaddr);
2254 if (TAILQ_EMPTY(&sc->sc_iflist)) {
2255 bcopy(sc->sc_defaddr, eaddr, ETHER_ADDR_LEN);
2256 sc->sc_ifaddr = NULL;
2257 } else {
2258 struct ifnet *fif =
2259 TAILQ_FIRST(&sc->sc_iflist)->bif_ifp;
2260 bcopy(IF_LLADDR(fif), eaddr, ETHER_ADDR_LEN);
2261 sc->sc_ifaddr = fif;
2262 ifnet_reference(fif); /* for sc_ifaddr */
2263 }
2264 lladdr_changed = 1;
2265 }
2266
2267 #if HAS_IF_CAP
2268 bridge_mutecaps(sc); /* recalculate now this interface is removed */
2269 #endif /* HAS_IF_CAP */
2270
2271 error = bridge_set_tso(sc);
2272 if (error != 0) {
2273 printf("%s: bridge_set_tso failed %d\n", __func__, error);
2274 }
2275
2276 if (bsd_mode) {
2277 bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
2278 }
2279
2280 KASSERT(bif->bif_addrcnt == 0,
2281 ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
2282
2283 filt_attached = bif->bif_flags & BIFF_FILTER_ATTACHED;
2284
2285 /*
2286 * Update link status of the bridge based on its remaining members
2287 */
2288 event_code = bridge_updatelinkstatus(sc);
2289
2290 if (bsd_mode) {
2291 BRIDGE_UNLOCK(sc);
2292 }
2293
2294 if (lladdr_changed &&
2295 (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0) {
2296 printf("%s: ifnet_set_lladdr failed %d\n", __func__, error);
2297 }
2298
2299 if (event_code != 0) {
2300 bridge_link_event(bifp, event_code);
2301 }
2302
2303 #if BRIDGESTP
2304 if (bsd_mode) {
2305 bstp_destroy(&bif->bif_stp); /* prepare to free */
2306 }
2307 #endif /* BRIDGESTP */
2308
2309 if (filt_attached) {
2310 iflt_detach(bif->bif_iff_ref);
2311 } else {
2312 _FREE(bif, M_DEVBUF);
2313 }
2314
2315 ifs->if_bridge = NULL;
2316 ifnet_release(ifs);
2317
2318 BRIDGE_LOCK(sc);
2319 }
2320
2321 /*
2322 * bridge_delete_span:
2323 *
2324 * Delete the specified span interface.
2325 */
2326 static void
2327 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
2328 {
2329 BRIDGE_LOCK_ASSERT_HELD(sc);
2330
2331 KASSERT(bif->bif_ifp->if_bridge == NULL,
2332 ("%s: not a span interface", __func__));
2333
2334 ifnet_release(bif->bif_ifp);
2335
2336 TAILQ_REMOVE(&sc->sc_spanlist, bif, bif_next);
2337 _FREE(bif, M_DEVBUF);
2338 }
2339
2340 static int
2341 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
2342 {
2343 struct ifbreq *req = arg;
2344 struct bridge_iflist *bif = NULL;
2345 struct ifnet *ifs, *bifp = sc->sc_ifp;
2346 int error = 0, lladdr_changed = 0;
2347 uint8_t eaddr[ETHER_ADDR_LEN];
2348 struct iff_filter iff;
2349 u_int32_t event_code = 0;
2350 boolean_t bsd_mode = bridge_in_bsd_mode(sc);
2351
2352 ifs = ifunit(req->ifbr_ifsname);
2353 if (ifs == NULL) {
2354 return ENOENT;
2355 }
2356 if (ifs->if_ioctl == NULL) { /* must be supported */
2357 return EINVAL;
2358 }
2359
2360 if (IFNET_IS_INTCOPROC(ifs)) {
2361 return EINVAL;
2362 }
2363
2364 if (bsd_mode) {
2365 /* If it's in the span list, it can't be a member. */
2366 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
2367 if (ifs == bif->bif_ifp) {
2368 return EBUSY;
2369 }
2370 }
2371
2372 if (ifs->if_bridge == sc) {
2373 return EEXIST;
2374 }
2375
2376 if (ifs->if_bridge != NULL) {
2377 return EBUSY;
2378 }
2379
2380 switch (ifs->if_type) {
2381 case IFT_ETHER:
2382 case IFT_L2VLAN:
2383 /* permitted interface types */
2384 break;
2385 case IFT_GIF:
2386 /* currently not supported */
2387 /* FALLTHRU */
2388 default:
2389 return EINVAL;
2390 }
2391
2392 bif = _MALLOC(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
2393 if (bif == NULL) {
2394 return ENOMEM;
2395 }
2396
2397 bif->bif_ifp = ifs;
2398 ifnet_reference(ifs);
2399 bif->bif_ifflags = IFBIF_LEARNING | IFBIF_DISCOVER;
2400 #if HAS_IF_CAP
2401 bif->bif_savedcaps = ifs->if_capenable;
2402 #endif /* HAS_IF_CAP */
2403 bif->bif_sc = sc;
2404
2405 /* Allow the first Ethernet member to define the MTU */
2406 if (TAILQ_EMPTY(&sc->sc_iflist)) {
2407 sc->sc_ifp->if_mtu = ifs->if_mtu;
2408 } else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
2409 printf("%s: %s: invalid MTU for %s", __func__,
2410 sc->sc_ifp->if_xname,
2411 ifs->if_xname);
2412 return EINVAL;
2413 }
2414
2415 /*
2416 * Assign the interface's MAC address to the bridge if it's the first
2417 * member and the MAC address of the bridge has not been changed from
2418 * the default (randomly) generated one.
2419 */
2420 if (bridge_inherit_mac && TAILQ_EMPTY(&sc->sc_iflist) &&
2421 !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN)) {
2422 bcopy(IF_LLADDR(ifs), eaddr, ETHER_ADDR_LEN);
2423 sc->sc_ifaddr = ifs;
2424 ifnet_reference(ifs); /* for sc_ifaddr */
2425 lladdr_changed = 1;
2426 }
2427
2428 ifs->if_bridge = sc;
2429 #if BRIDGESTP
2430 if (bsd_mode) {
2431 bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
2432 }
2433 #endif /* BRIDGESTP */
2434
2435 /*
2436 * XXX: XLOCK HERE!?!
2437 */
2438 TAILQ_INSERT_TAIL(&sc->sc_iflist, bif, bif_next);
2439
2440 #if HAS_IF_CAP
2441 /* Set interface capabilities to the intersection set of all members */
2442 bridge_mutecaps(sc);
2443 #endif /* HAS_IF_CAP */
2444
2445 bridge_set_tso(sc);
2446
2447
2448 /*
2449 * Place the interface into promiscuous mode.
2450 */
2451 switch (ifs->if_type) {
2452 case IFT_ETHER:
2453 case IFT_L2VLAN:
2454 error = ifnet_set_promiscuous(ifs, 1);
2455 if (error) {
2456 /* Ignore error when device is not up */
2457 if (error != ENETDOWN) {
2458 goto out;
2459 }
2460 error = 0;
2461 } else {
2462 bif->bif_flags |= BIFF_PROMISC;
2463 }
2464 break;
2465
2466 default:
2467 break;
2468 }
2469
2470 /*
2471 * The new member may change the link status of the bridge interface
2472 */
2473 if (interface_media_active(ifs)) {
2474 bif->bif_flags |= BIFF_MEDIA_ACTIVE;
2475 } else {
2476 bif->bif_flags &= ~BIFF_MEDIA_ACTIVE;
2477 }
2478
2479 event_code = bridge_updatelinkstatus(sc);
2480
2481 /*
2482 * Respect lock ordering with DLIL lock for the following operations
2483 */
2484 if (bsd_mode) {
2485 BRIDGE_UNLOCK(sc);
2486 }
2487
2488 /*
2489 * install an interface filter
2490 */
2491 memset(&iff, 0, sizeof(struct iff_filter));
2492 iff.iff_cookie = bif;
2493 iff.iff_name = "com.apple.kernel.bsd.net.if_bridge";
2494 if (bsd_mode) {
2495 iff.iff_input = bridge_iff_input;
2496 #if BRIDGE_MEMBER_OUT_FILTER
2497 iff.iff_output = bridge_iff_output;
2498 #endif /* BRIDGE_MEMBER_OUT_FILTER */
2499 }
2500 iff.iff_event = bridge_iff_event;
2501 iff.iff_detached = bridge_iff_detached;
2502 error = dlil_attach_filter(ifs, &iff, &bif->bif_iff_ref,
2503 DLIL_IFF_TSO | DLIL_IFF_INTERNAL);
2504 if (error != 0) {
2505 printf("%s: iflt_attach failed %d\n", __func__, error);
2506 BRIDGE_LOCK(sc);
2507 goto out;
2508 }
2509 bif->bif_flags |= BIFF_FILTER_ATTACHED;
2510
2511 /*
2512 * install an dummy "bridge" protocol
2513 */
2514 if ((error = bridge_attach_protocol(ifs)) != 0) {
2515 if (error != 0) {
2516 printf("%s: bridge_attach_protocol failed %d\n",
2517 __func__, error);
2518 BRIDGE_LOCK(sc);
2519 goto out;
2520 }
2521 }
2522 bif->bif_flags |= BIFF_PROTO_ATTACHED;
2523
2524 if (lladdr_changed &&
2525 (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0) {
2526 printf("%s: ifnet_set_lladdr failed %d\n", __func__, error);
2527 }
2528
2529 if (event_code != 0) {
2530 bridge_link_event(bifp, event_code);
2531 }
2532
2533 BRIDGE_LOCK(sc);
2534
2535 out:
2536 if (error && bif != NULL) {
2537 bridge_delete_member(sc, bif, 1);
2538 }
2539
2540 return error;
2541 }
2542
2543 static int
2544 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
2545 {
2546 struct ifbreq *req = arg;
2547 struct bridge_iflist *bif;
2548
2549 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2550 if (bif == NULL) {
2551 return ENOENT;
2552 }
2553
2554 bridge_delete_member(sc, bif, 0);
2555
2556 return 0;
2557 }
2558
/*
 * bridge_ioctl_purge:
 *
 * No-op handler; present so the ioctl dispatch table has an entry.
 */
static int
bridge_ioctl_purge(struct bridge_softc *sc, void *arg)
{
#pragma unused(sc, arg)
	return 0;
}
2565
2566 static int
2567 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
2568 {
2569 struct ifbreq *req = arg;
2570 struct bridge_iflist *bif;
2571
2572 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2573 if (bif == NULL) {
2574 return ENOENT;
2575 }
2576
2577 if (bridge_in_bsd_mode(sc)) {
2578 struct bstp_port *bp;
2579
2580 bp = &bif->bif_stp;
2581 req->ifbr_state = bp->bp_state;
2582 req->ifbr_priority = bp->bp_priority;
2583 req->ifbr_path_cost = bp->bp_path_cost;
2584 req->ifbr_proto = bp->bp_protover;
2585 req->ifbr_role = bp->bp_role;
2586 req->ifbr_stpflags = bp->bp_flags;
2587 /* Copy STP state options as flags */
2588 if (bp->bp_operedge) {
2589 req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
2590 }
2591 if (bp->bp_flags & BSTP_PORT_AUTOEDGE) {
2592 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
2593 }
2594 if (bp->bp_ptp_link) {
2595 req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
2596 }
2597 if (bp->bp_flags & BSTP_PORT_AUTOPTP) {
2598 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
2599 }
2600 if (bp->bp_flags & BSTP_PORT_ADMEDGE) {
2601 req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
2602 }
2603 if (bp->bp_flags & BSTP_PORT_ADMCOST) {
2604 req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
2605 }
2606 }
2607 req->ifbr_ifsflags = bif->bif_ifflags;
2608 req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
2609 req->ifbr_addrcnt = bif->bif_addrcnt;
2610 req->ifbr_addrmax = bif->bif_addrmax;
2611 req->ifbr_addrexceeded = bif->bif_addrexceeded;
2612
2613 return 0;
2614 }
2615
/*
 * bridge_ioctl_sifflags:
 *
 * Set a member interface's bridge flags. In BSD mode only; the SPAN
 * bit is read-only and STP enable/disable is honored when BRIDGESTP
 * is compiled in (otherwise requesting STP fails with EOPNOTSUPP).
 */
static int
bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;
#if BRIDGESTP
	struct bstp_port *bp;
	int error;
#endif /* BRIDGESTP */

	if (!bridge_in_bsd_mode(sc)) {
		return EINVAL;
	}

	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
	if (bif == NULL) {
		return ENOENT;
	}

	if (req->ifbr_ifsflags & IFBIF_SPAN) {
		/* SPAN is readonly */
		return EINVAL;
	}


#if BRIDGESTP
	/* Toggle STP participation when the requested state differs. */
	if (req->ifbr_ifsflags & IFBIF_STP) {
		if ((bif->bif_ifflags & IFBIF_STP) == 0) {
			error = bstp_enable(&bif->bif_stp);
			if (error) {
				return error;
			}
		}
	} else {
		if ((bif->bif_ifflags & IFBIF_STP) != 0) {
			bstp_disable(&bif->bif_stp);
		}
	}

	/* Pass on STP flags */
	bp = &bif->bif_stp;
	bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
	bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
	bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
	bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
#else /* !BRIDGESTP */
	if (req->ifbr_ifsflags & IFBIF_STP) {
		return EOPNOTSUPP;
	}
#endif /* !BRIDGESTP */

	/* Save the bits relating to the bridge */
	bif->bif_ifflags = req->ifbr_ifsflags & IFBIFMASK;


	return 0;
}
2673
2674 static int
2675 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
2676 {
2677 struct ifbrparam *param = arg;
2678
2679 sc->sc_brtmax = param->ifbrp_csize;
2680 if (bridge_in_bsd_mode(sc)) {
2681 bridge_rttrim(sc);
2682 }
2683 return 0;
2684 }
2685
2686 static int
2687 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
2688 {
2689 struct ifbrparam *param = arg;
2690
2691 param->ifbrp_csize = sc->sc_brtmax;
2692
2693 return 0;
2694 }
2695
/*
 * BRIDGE_IOCTL_GIFS:
 *
 * Shared body for bridge_ioctl_gifs32()/bridge_ioctl_gifs64().
 * Expects `sc', a `bifc' pointer (32- or 64-bit struct ifbifconf
 * layout) and `error' to be in scope. Copies one struct ifbreq per
 * member (and, in BSD mode, per span interface) out to user space;
 * when ifbic_len is 0 it only reports the required buffer size.
 * Drops the bridge lock around allocation and copyout.
 */
#define BRIDGE_IOCTL_GIFS do { \
	struct bridge_iflist *bif; \
	struct ifbreq breq; \
	char *buf, *outbuf; \
	unsigned int count, buflen, len; \
 \
	count = 0; \
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) \
	count++; \
	if (bridge_in_bsd_mode(sc)) { \
		TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) \
		count++; \
	} \
 \
	buflen = sizeof (breq) * count; \
	if (bifc->ifbic_len == 0) { \
		bifc->ifbic_len = buflen; \
		return (0); \
	} \
	BRIDGE_UNLOCK(sc); \
	outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
	BRIDGE_LOCK(sc); \
 \
	count = 0; \
	buf = outbuf; \
	len = min(bifc->ifbic_len, buflen); \
	bzero(&breq, sizeof (breq)); \
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
		if (len < sizeof (breq)) \
			break; \
 \
		snprintf(breq.ifbr_ifsname, sizeof (breq.ifbr_ifsname), \
		    "%s", bif->bif_ifp->if_xname); \
		/* Fill in the ifbreq structure */ \
		error = bridge_ioctl_gifflags(sc, &breq); \
		if (error) \
			break; \
		memcpy(buf, &breq, sizeof (breq)); \
		count++; \
		buf += sizeof (breq); \
		len -= sizeof (breq); \
	} \
	if (bridge_in_bsd_mode(sc)) { \
		TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) { \
			if (len < sizeof (breq)) \
				break; \
 \
			snprintf(breq.ifbr_ifsname, \
			    sizeof (breq.ifbr_ifsname), \
			    "%s", bif->bif_ifp->if_xname); \
			breq.ifbr_ifsflags = bif->bif_ifflags; \
			breq.ifbr_portno \
			    = bif->bif_ifp->if_index & 0xfff; \
			memcpy(buf, &breq, sizeof (breq)); \
			count++; \
			buf += sizeof (breq); \
			len -= sizeof (breq); \
		} \
	} \
 \
	BRIDGE_UNLOCK(sc); \
	bifc->ifbic_len = sizeof (breq) * count; \
	error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); \
	BRIDGE_LOCK(sc); \
	_FREE(outbuf, M_TEMP); \
} while (0)
2762
2763 static int
2764 bridge_ioctl_gifs64(struct bridge_softc *sc, void *arg)
2765 {
2766 struct ifbifconf64 *bifc = arg;
2767 int error = 0;
2768
2769 BRIDGE_IOCTL_GIFS;
2770
2771 return error;
2772 }
2773
2774 static int
2775 bridge_ioctl_gifs32(struct bridge_softc *sc, void *arg)
2776 {
2777 struct ifbifconf32 *bifc = arg;
2778 int error = 0;
2779
2780 BRIDGE_IOCTL_GIFS;
2781
2782 return error;
2783 }
2784
/*
 * BRIDGE_IOCTL_RTS:
 *
 * Shared body for bridge_ioctl_rts32()/bridge_ioctl_rts64().
 * Expects `sc', a `bac' pointer (32- or 64-bit struct ifbaconf
 * layout), a matching local `bareq' and `error' in scope. Copies one
 * forwarding-table entry per address out to user space; dynamic
 * entries report their remaining lifetime. Returns from the
 * enclosing function itself via the trailing return.
 */
#define BRIDGE_IOCTL_RTS do { \
	struct bridge_rtnode *brt; \
	char *buf; \
	char *outbuf = NULL; \
	unsigned int count, buflen, len; \
	unsigned long now; \
 \
	if (bac->ifbac_len == 0) \
		return (0); \
 \
	bzero(&bareq, sizeof (bareq)); \
	count = 0; \
	if (!bridge_in_bsd_mode(sc)) { \
		goto out; \
	} \
	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) \
	count++; \
	buflen = sizeof (bareq) * count; \
 \
	BRIDGE_UNLOCK(sc); \
	outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
	BRIDGE_LOCK(sc); \
 \
	count = 0; \
	buf = outbuf; \
	len = min(bac->ifbac_len, buflen); \
	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { \
		if (len < sizeof (bareq)) \
			goto out; \
		snprintf(bareq.ifba_ifsname, sizeof (bareq.ifba_ifsname), \
		    "%s", brt->brt_ifp->if_xname); \
		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof (brt->brt_addr)); \
		bareq.ifba_vlan = brt->brt_vlan; \
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { \
			now = (unsigned long) net_uptime(); \
			if (now < brt->brt_expire) \
				bareq.ifba_expire = \
				    brt->brt_expire - now; \
		} else \
			bareq.ifba_expire = 0; \
		bareq.ifba_flags = brt->brt_flags; \
 \
		memcpy(buf, &bareq, sizeof (bareq)); \
		count++; \
		buf += sizeof (bareq); \
		len -= sizeof (bareq); \
	} \
out: \
	bac->ifbac_len = sizeof (bareq) * count; \
	if (outbuf != NULL) { \
		BRIDGE_UNLOCK(sc); \
		error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); \
		_FREE(outbuf, M_TEMP); \
		BRIDGE_LOCK(sc); \
	} \
	return (error); \
} while (0)
2842
2843 static int
2844 bridge_ioctl_rts64(struct bridge_softc *sc, void *arg)
2845 {
2846 struct ifbaconf64 *bac = arg;
2847 struct ifbareq64 bareq;
2848 int error = 0;
2849
2850 BRIDGE_IOCTL_RTS;
2851 return error;
2852 }
2853
2854 static int
2855 bridge_ioctl_rts32(struct bridge_softc *sc, void *arg)
2856 {
2857 struct ifbaconf32 *bac = arg;
2858 struct ifbareq32 bareq;
2859 int error = 0;
2860
2861 BRIDGE_IOCTL_RTS;
2862 return error;
2863 }
2864
2865 static int
2866 bridge_ioctl_saddr32(struct bridge_softc *sc, void *arg)
2867 {
2868 struct ifbareq32 *req = arg;
2869 struct bridge_iflist *bif;
2870 int error;
2871
2872 if (!bridge_in_bsd_mode(sc)) {
2873 return 0;
2874 }
2875
2876 bif = bridge_lookup_member(sc, req->ifba_ifsname);
2877 if (bif == NULL) {
2878 return ENOENT;
2879 }
2880
2881 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
2882 req->ifba_flags);
2883
2884 return error;
2885 }
2886
2887 static int
2888 bridge_ioctl_saddr64(struct bridge_softc *sc, void *arg)
2889 {
2890 struct ifbareq64 *req = arg;
2891 struct bridge_iflist *bif;
2892 int error;
2893
2894 if (!bridge_in_bsd_mode(sc)) {
2895 return 0;
2896 }
2897
2898 bif = bridge_lookup_member(sc, req->ifba_ifsname);
2899 if (bif == NULL) {
2900 return ENOENT;
2901 }
2902
2903 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
2904 req->ifba_flags);
2905
2906 return error;
2907 }
2908
2909 static int
2910 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
2911 {
2912 struct ifbrparam *param = arg;
2913
2914 sc->sc_brttimeout = param->ifbrp_ctime;
2915 return 0;
2916 }
2917
2918 static int
2919 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
2920 {
2921 struct ifbrparam *param = arg;
2922
2923 param->ifbrp_ctime = sc->sc_brttimeout;
2924 return 0;
2925 }
2926
2927 static int
2928 bridge_ioctl_daddr32(struct bridge_softc *sc, void *arg)
2929 {
2930 struct ifbareq32 *req = arg;
2931
2932 if (!bridge_in_bsd_mode(sc)) {
2933 return 0;
2934 }
2935 return bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan);
2936 }
2937
2938 static int
2939 bridge_ioctl_daddr64(struct bridge_softc *sc, void *arg)
2940 {
2941 struct ifbareq64 *req = arg;
2942
2943 if (!bridge_in_bsd_mode(sc)) {
2944 return 0;
2945 }
2946 return bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan);
2947 }
2948
2949 static int
2950 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
2951 {
2952 struct ifbreq *req = arg;
2953
2954 if (!bridge_in_bsd_mode(sc)) {
2955 return 0;
2956 }
2957 bridge_rtflush(sc, req->ifbr_ifsflags);
2958 return 0;
2959 }
2960
2961 static int
2962 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
2963 {
2964 struct ifbrparam *param = arg;
2965 struct bstp_state *bs = &sc->sc_stp;
2966
2967 if (!bridge_in_bsd_mode(sc)) {
2968 return 0;
2969 }
2970 param->ifbrp_prio = bs->bs_bridge_priority;
2971 return 0;
2972 }
2973
2974 static int
2975 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
2976 {
2977 #if BRIDGESTP
2978 struct ifbrparam *param = arg;
2979
2980 if (!bridge_in_bsd_mode(sc)) {
2981 return EOPNOTSUPP;
2982 }
2983 return bstp_set_priority(&sc->sc_stp, param->ifbrp_prio);
2984 #else /* !BRIDGESTP */
2985 #pragma unused(sc, arg)
2986 return EOPNOTSUPP;
2987 #endif /* !BRIDGESTP */
2988 }
2989
2990 static int
2991 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
2992 {
2993 struct ifbrparam *param = arg;
2994 struct bstp_state *bs = &sc->sc_stp;
2995
2996 if (!bridge_in_bsd_mode(sc)) {
2997 return 0;
2998 }
2999 param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
3000 return 0;
3001 }
3002
3003 static int
3004 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
3005 {
3006 #if BRIDGESTP
3007 struct ifbrparam *param = arg;
3008
3009 if (!bridge_in_bsd_mode(sc)) {
3010 return EOPNOTSUPP;
3011 }
3012 return bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime);
3013 #else /* !BRIDGESTP */
3014 #pragma unused(sc, arg)
3015 return EOPNOTSUPP;
3016 #endif /* !BRIDGESTP */
3017 }
3018
3019 static int
3020 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
3021 {
3022 struct ifbrparam *param;
3023 struct bstp_state *bs;
3024
3025 if (!bridge_in_bsd_mode(sc)) {
3026 return 0;
3027 }
3028 param = arg;
3029 bs = &sc->sc_stp;
3030 param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
3031 return 0;
3032 }
3033
3034 static int
3035 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
3036 {
3037 #if BRIDGESTP
3038 struct ifbrparam *param = arg;
3039
3040 if (!bridge_in_bsd_mode(sc)) {
3041 return EOPNOTSUPP;
3042 }
3043 return bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay);
3044 #else /* !BRIDGESTP */
3045 #pragma unused(sc, arg)
3046 return EOPNOTSUPP;
3047 #endif /* !BRIDGESTP */
3048 }
3049
3050 static int
3051 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
3052 {
3053 struct ifbrparam *param;
3054 struct bstp_state *bs;
3055
3056 if (!bridge_in_bsd_mode(sc)) {
3057 return EOPNOTSUPP;
3058 }
3059 param = arg;
3060 bs = &sc->sc_stp;
3061 param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
3062 return 0;
3063 }
3064
3065 static int
3066 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
3067 {
3068 #if BRIDGESTP
3069 struct ifbrparam *param = arg;
3070
3071 if (!bridge_in_bsd_mode(sc)) {
3072 return EOPNOTSUPP;
3073 }
3074 return bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage);
3075 #else /* !BRIDGESTP */
3076 #pragma unused(sc, arg)
3077 return EOPNOTSUPP;
3078 #endif /* !BRIDGESTP */
3079 }
3080
3081 static int
3082 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
3083 {
3084 #if BRIDGESTP
3085 struct ifbreq *req = arg;
3086 struct bridge_iflist *bif;
3087
3088 if (!bridge_in_bsd_mode(sc)) {
3089 return EOPNOTSUPP;
3090 }
3091 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
3092 if (bif == NULL) {
3093 return ENOENT;
3094 }
3095
3096 return bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority);
3097 #else /* !BRIDGESTP */
3098 #pragma unused(sc, arg)
3099 return EOPNOTSUPP;
3100 #endif /* !BRIDGESTP */
3101 }
3102
3103 static int
3104 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
3105 {
3106 #if BRIDGESTP
3107 struct ifbreq *req = arg;
3108 struct bridge_iflist *bif;
3109
3110 if (!bridge_in_bsd_mode(sc)) {
3111 return EOPNOTSUPP;
3112 }
3113 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
3114 if (bif == NULL) {
3115 return ENOENT;
3116 }
3117
3118 return bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost);
3119 #else /* !BRIDGESTP */
3120 #pragma unused(sc, arg)
3121 return EOPNOTSUPP;
3122 #endif /* !BRIDGESTP */
3123 }
3124
3125 static int
3126 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
3127 {
3128 struct ifbrparam *param = arg;
3129
3130 param->ifbrp_filter = sc->sc_filter_flags;
3131
3132 return 0;
3133 }
3134
3135 static int
3136 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
3137 {
3138 struct ifbrparam *param = arg;
3139
3140 if (param->ifbrp_filter & ~IFBF_FILT_MASK) {
3141 return EINVAL;
3142 }
3143
3144 #ifndef BRIDGE_IPF
3145 if (param->ifbrp_filter & IFBF_FILT_USEIPF) {
3146 return EINVAL;
3147 }
3148 #endif
3149
3150 sc->sc_filter_flags = param->ifbrp_filter;
3151
3152 return 0;
3153 }
3154
3155 static int
3156 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
3157 {
3158 struct ifbreq *req = arg;
3159 struct bridge_iflist *bif;
3160
3161 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
3162 if (bif == NULL) {
3163 return ENOENT;
3164 }
3165
3166 bif->bif_addrmax = req->ifbr_addrmax;
3167 return 0;
3168 }
3169
3170 static int
3171 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
3172 {
3173 struct ifbreq *req = arg;
3174 struct bridge_iflist *bif = NULL;
3175 struct ifnet *ifs;
3176
3177 if (!bridge_in_bsd_mode(sc)) {
3178 return EOPNOTSUPP;
3179 }
3180 ifs = ifunit(req->ifbr_ifsname);
3181 if (ifs == NULL) {
3182 return ENOENT;
3183 }
3184
3185 if (IFNET_IS_INTCOPROC(ifs)) {
3186 return EINVAL;
3187 }
3188
3189 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
3190 if (ifs == bif->bif_ifp) {
3191 return EBUSY;
3192 }
3193
3194 if (ifs->if_bridge != NULL) {
3195 return EBUSY;
3196 }
3197
3198 switch (ifs->if_type) {
3199 case IFT_ETHER:
3200 case IFT_L2VLAN:
3201 break;
3202 case IFT_GIF:
3203 /* currently not supported */
3204 /* FALLTHRU */
3205 default:
3206 return EINVAL;
3207 }
3208
3209 bif = _MALLOC(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
3210 if (bif == NULL) {
3211 return ENOMEM;
3212 }
3213
3214 bif->bif_ifp = ifs;
3215 bif->bif_ifflags = IFBIF_SPAN;
3216
3217 ifnet_reference(bif->bif_ifp);
3218
3219 TAILQ_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
3220
3221 return 0;
3222 }
3223
3224 static int
3225 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
3226 {
3227 struct ifbreq *req = arg;
3228 struct bridge_iflist *bif;
3229 struct ifnet *ifs;
3230
3231 if (!bridge_in_bsd_mode(sc)) {
3232 return EOPNOTSUPP;
3233 }
3234 ifs = ifunit(req->ifbr_ifsname);
3235 if (ifs == NULL) {
3236 return ENOENT;
3237 }
3238
3239 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
3240 if (ifs == bif->bif_ifp) {
3241 break;
3242 }
3243
3244 if (bif == NULL) {
3245 return ENOENT;
3246 }
3247
3248 bridge_delete_span(sc, bif);
3249
3250 return 0;
3251 }
3252
/*
 * BRIDGE_IOCTL_GBPARAM:
 *
 * Shared body for bridge_ioctl_gbparam32()/bridge_ioctl_gbparam64().
 * Expects `sc' and a `req' pointer (32- or 64-bit struct ifbropreq
 * layout) in scope; copies the STP bridge-wide parameters out.
 * Times stored <<8 internally are scaled back before export.
 */
#define BRIDGE_IOCTL_GBPARAM do { \
	struct bstp_state *bs = &sc->sc_stp; \
	struct bstp_port *root_port; \
 \
	req->ifbop_maxage = bs->bs_bridge_max_age >> 8; \
	req->ifbop_hellotime = bs->bs_bridge_htime >> 8; \
	req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8; \
 \
	root_port = bs->bs_root_port; \
	if (root_port == NULL) \
		req->ifbop_root_port = 0; \
	else \
		req->ifbop_root_port = root_port->bp_ifp->if_index; \
 \
	req->ifbop_holdcount = bs->bs_txholdcount; \
	req->ifbop_priority = bs->bs_bridge_priority; \
	req->ifbop_protocol = bs->bs_protover; \
	req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; \
	req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; \
	req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; \
	req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; \
	req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; \
	req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; \
} while (0)
3277
3278 static int
3279 bridge_ioctl_gbparam32(struct bridge_softc *sc, void *arg)
3280 {
3281 struct ifbropreq32 *req = arg;
3282
3283 if (bridge_in_bsd_mode(sc)) {
3284 BRIDGE_IOCTL_GBPARAM;
3285 }
3286 return 0;
3287 }
3288
3289 static int
3290 bridge_ioctl_gbparam64(struct bridge_softc *sc, void *arg)
3291 {
3292 struct ifbropreq64 *req = arg;
3293
3294 if (bridge_in_bsd_mode(sc)) {
3295 BRIDGE_IOCTL_GBPARAM;
3296 }
3297 return 0;
3298 }
3299
3300 static int
3301 bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
3302 {
3303 struct ifbrparam *param = arg;
3304
3305 param->ifbrp_cexceeded = sc->sc_brtexceeded;
3306 return 0;
3307 }
3308
/*
 * BRIDGE_IOCTL_GIFSSTP:
 *
 *	Shared implementation for the 32-bit and 64-bit "get STP member
 *	status" ioctls.  Expects "sc", "error" and "bifstp" (an
 *	ifbpstpconf32 or ifbpstpconf64) to be in scope, and returns from
 *	the expanding function.  A zero ifbpstp_len is a size probe:
 *	only the required buffer length is reported.  The bridge lock is
 *	dropped around the allocation and the copyout(), so the member
 *	list can change in between; the list is re-walked under the lock
 *	after the allocation and the final length reflects what was
 *	actually gathered.
 */
#define BRIDGE_IOCTL_GIFSSTP do { \
	struct bridge_iflist *bif; \
	struct bstp_port *bp; \
	struct ifbpstpreq bpreq; \
	char *buf, *outbuf; \
	unsigned int count, buflen, len; \
	\
	/* Count the members participating in STP */ \
	count = 0; \
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
		if ((bif->bif_ifflags & IFBIF_STP) != 0) \
			count++; \
	} \
	\
	buflen = sizeof (bpreq) * count; \
	if (bifstp->ifbpstp_len == 0) { \
		bifstp->ifbpstp_len = buflen; \
		return (0); \
	} \
	\
	BRIDGE_UNLOCK(sc); \
	outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
	BRIDGE_LOCK(sc); \
	\
	count = 0; \
	buf = outbuf; \
	len = min(bifstp->ifbpstp_len, buflen); \
	bzero(&bpreq, sizeof (bpreq)); \
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
		if (len < sizeof (bpreq)) \
			break; \
		\
		if ((bif->bif_ifflags & IFBIF_STP) == 0) \
			continue; \
		\
		bp = &bif->bif_stp; \
		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff; \
		bpreq.ifbp_fwd_trans = bp->bp_forward_transitions; \
		bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost; \
		bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id; \
		bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; \
		bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id; \
		\
		memcpy(buf, &bpreq, sizeof (bpreq)); \
		count++; \
		buf += sizeof (bpreq); \
		len -= sizeof (bpreq); \
	} \
	\
	/* Report how much was gathered, then copy it out unlocked */ \
	BRIDGE_UNLOCK(sc); \
	bifstp->ifbpstp_len = sizeof (bpreq) * count; \
	error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); \
	BRIDGE_LOCK(sc); \
	_FREE(outbuf, M_TEMP); \
	return (error); \
} while (0)
3364
3365 static int
3366 bridge_ioctl_gifsstp32(struct bridge_softc *sc, void *arg)
3367 {
3368 struct ifbpstpconf32 *bifstp = arg;
3369 int error = 0;
3370
3371 if (bridge_in_bsd_mode(sc)) {
3372 BRIDGE_IOCTL_GIFSSTP;
3373 }
3374 return error;
3375 }
3376
3377 static int
3378 bridge_ioctl_gifsstp64(struct bridge_softc *sc, void *arg)
3379 {
3380 struct ifbpstpconf64 *bifstp = arg;
3381 int error = 0;
3382
3383 if (bridge_in_bsd_mode(sc)) {
3384 BRIDGE_IOCTL_GIFSSTP;
3385 }
3386 return error;
3387 }
3388
3389 static int
3390 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
3391 {
3392 #if BRIDGESTP
3393 struct ifbrparam *param = arg;
3394
3395 if (!bridge_in_bsd_mode(sc)) {
3396 return EOPNOTSUPP;
3397 }
3398 return bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto);
3399 #else /* !BRIDGESTP */
3400 #pragma unused(sc, arg)
3401 return EOPNOTSUPP;
3402 #endif /* !BRIDGESTP */
3403 }
3404
3405 static int
3406 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
3407 {
3408 #if BRIDGESTP
3409 struct ifbrparam *param = arg;
3410
3411 if (!bridge_in_bsd_mode(sc)) {
3412 return EOPNOTSUPP;
3413 }
3414 return bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc);
3415 #else /* !BRIDGESTP */
3416 #pragma unused(sc, arg)
3417 return EOPNOTSUPP;
3418 #endif /* !BRIDGESTP */
3419 }
3420
3421
3422 static int
3423 bridge_ioctl_ghostfilter(struct bridge_softc *sc, void *arg)
3424 {
3425 struct ifbrhostfilter *req = arg;
3426 struct bridge_iflist *bif;
3427
3428 bif = bridge_lookup_member(sc, req->ifbrhf_ifsname);
3429 if (bif == NULL) {
3430 return ENOENT;
3431 }
3432
3433 bzero(req, sizeof(struct ifbrhostfilter));
3434 if (bif->bif_flags & BIFF_HOST_FILTER) {
3435 req->ifbrhf_flags |= IFBRHF_ENABLED;
3436 bcopy(bif->bif_hf_hwsrc, req->ifbrhf_hwsrca,
3437 ETHER_ADDR_LEN);
3438 req->ifbrhf_ipsrc = bif->bif_hf_ipsrc.s_addr;
3439 }
3440 return 0;
3441 }
3442
3443 static int
3444 bridge_ioctl_shostfilter(struct bridge_softc *sc, void *arg)
3445 {
3446 struct ifbrhostfilter *req = arg;
3447 struct bridge_iflist *bif;
3448
3449 bif = bridge_lookup_member(sc, req->ifbrhf_ifsname);
3450 if (bif == NULL) {
3451 return ENOENT;
3452 }
3453
3454 INC_ATOMIC_INT64_LIM(net_api_stats.nas_vmnet_total);
3455
3456 if (req->ifbrhf_flags & IFBRHF_ENABLED) {
3457 bif->bif_flags |= BIFF_HOST_FILTER;
3458
3459 if (req->ifbrhf_flags & IFBRHF_HWSRC) {
3460 bcopy(req->ifbrhf_hwsrca, bif->bif_hf_hwsrc,
3461 ETHER_ADDR_LEN);
3462 if (bcmp(req->ifbrhf_hwsrca, ethernulladdr,
3463 ETHER_ADDR_LEN) != 0) {
3464 bif->bif_flags |= BIFF_HF_HWSRC;
3465 } else {
3466 bif->bif_flags &= ~BIFF_HF_HWSRC;
3467 }
3468 }
3469 if (req->ifbrhf_flags & IFBRHF_IPSRC) {
3470 bif->bif_hf_ipsrc.s_addr = req->ifbrhf_ipsrc;
3471 if (bif->bif_hf_ipsrc.s_addr != INADDR_ANY) {
3472 bif->bif_flags |= BIFF_HF_IPSRC;
3473 } else {
3474 bif->bif_flags &= ~BIFF_HF_IPSRC;
3475 }
3476 }
3477 } else {
3478 bif->bif_flags &= ~(BIFF_HOST_FILTER | BIFF_HF_HWSRC |
3479 BIFF_HF_IPSRC);
3480 bzero(bif->bif_hf_hwsrc, ETHER_ADDR_LEN);
3481 bif->bif_hf_ipsrc.s_addr = INADDR_ANY;
3482 }
3483
3484 return 0;
3485 }
3486
3487
/*
 * bridge_ifdetach:
 *
 *	Detach an interface from a bridge.  Called when a member
 *	interface is detaching.
 *
 *	NOTE(review): the "bif" argument is never read; it is
 *	overwritten by the lookups below.  The member (or span) entry is
 *	re-looked-up under the appropriate lock instead -- confirm the
 *	parameter is kept only for the caller's convenience.
 */
__private_extern__ void
bridge_ifdetach(struct bridge_iflist *bif, struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_bridge;

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
		printf("%s: %s\n", __func__, ifp->if_xname);
	}
#endif /* BRIDGE_DEBUG */

	/* Check if the interface is a bridge member */
	if (sc != NULL) {
		BRIDGE_LOCK(sc);
		bif = bridge_lookup_member_if(sc, ifp);
		if (bif != NULL) {
			bridge_delete_member(sc, bif, 1);
		}
		BRIDGE_UNLOCK(sc);
		return;
	}
	/*
	 * Not a member: walk every bridge (under the global list lock)
	 * to see if the interface is in use as a span port and remove it
	 * from any span list that references it.
	 */
	lck_mtx_lock(&bridge_list_mtx);
	LIST_FOREACH(sc, &bridge_list, sc_list) {
		if (bridge_in_bsd_mode(sc)) {
			BRIDGE_LOCK(sc);
			TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
			if (ifp == bif->bif_ifp) {
				bridge_delete_span(sc, bif);
				break;
			}
			BRIDGE_UNLOCK(sc);
		}
	}
	lck_mtx_unlock(&bridge_list_mtx);
}
3530
3531 /*
3532 * interface_media_active:
3533 *
3534 * Tells if an interface media is active.
3535 */
3536 static int
3537 interface_media_active(struct ifnet *ifp)
3538 {
3539 struct ifmediareq ifmr;
3540 int status = 0;
3541
3542 bzero(&ifmr, sizeof(ifmr));
3543 if (ifnet_ioctl(ifp, 0, SIOCGIFMEDIA, &ifmr) == 0) {
3544 if ((ifmr.ifm_status & IFM_AVALID) && ifmr.ifm_count > 0) {
3545 status = ifmr.ifm_status & IFM_ACTIVE ? 1 : 0;
3546 }
3547 }
3548
3549 return status;
3550 }
3551
3552 /*
3553 * bridge_updatelinkstatus:
3554 *
3555 * Update the media active status of the bridge based on the
3556 * media active status of its member.
3557 * If changed, return the corresponding onf/off link event.
3558 */
3559 static u_int32_t
3560 bridge_updatelinkstatus(struct bridge_softc *sc)
3561 {
3562 struct bridge_iflist *bif;
3563 int active_member = 0;
3564 u_int32_t event_code = 0;
3565
3566 BRIDGE_LOCK_ASSERT_HELD(sc);
3567
3568 /*
3569 * Find out if we have an active interface
3570 */
3571 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
3572 if (bif->bif_flags & BIFF_MEDIA_ACTIVE) {
3573 active_member = 1;
3574 break;
3575 }
3576 }
3577
3578 if (active_member && !(sc->sc_flags & SCF_MEDIA_ACTIVE)) {
3579 sc->sc_flags |= SCF_MEDIA_ACTIVE;
3580 event_code = KEV_DL_LINK_ON;
3581 } else if (!active_member && (sc->sc_flags & SCF_MEDIA_ACTIVE)) {
3582 sc->sc_flags &= ~SCF_MEDIA_ACTIVE;
3583 event_code = KEV_DL_LINK_OFF;
3584 }
3585
3586 return event_code;
3587 }
3588
3589 /*
3590 * bridge_iflinkevent:
3591 */
3592 static void
3593 bridge_iflinkevent(struct ifnet *ifp)
3594 {
3595 struct bridge_softc *sc = ifp->if_bridge;
3596 struct bridge_iflist *bif;
3597 u_int32_t event_code = 0;
3598
3599 #if BRIDGE_DEBUG
3600 if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
3601 printf("%s: %s\n", __func__, ifp->if_xname);
3602 }
3603 #endif /* BRIDGE_DEBUG */
3604
3605 /* Check if the interface is a bridge member */
3606 if (sc == NULL) {
3607 return;
3608 }
3609
3610 BRIDGE_LOCK(sc);
3611 bif = bridge_lookup_member_if(sc, ifp);
3612 if (bif != NULL) {
3613 if (interface_media_active(ifp)) {
3614 bif->bif_flags |= BIFF_MEDIA_ACTIVE;
3615 } else {
3616 bif->bif_flags &= ~BIFF_MEDIA_ACTIVE;
3617 }
3618
3619 event_code = bridge_updatelinkstatus(sc);
3620 }
3621 BRIDGE_UNLOCK(sc);
3622
3623 if (event_code != 0) {
3624 bridge_link_event(sc->sc_ifp, event_code);
3625 }
3626 }
3627
/*
 * bridge_delayed_callback:
 *
 *	Thread-call trampoline for a bridge_delayed_call.  Runs the
 *	stored callback under the bridge lock unless the call is being
 *	cancelled or the bridge is detaching, then clears
 *	BDCF_OUTSTANDING.
 */
static void
bridge_delayed_callback(void *param)
{
	struct bridge_delayed_call *call = (struct bridge_delayed_call *)param;
	struct bridge_softc *sc = call->bdc_sc;

#if BRIDGE_DEBUG_DELAYED_CALLBACK
	/* Debug aid: optionally sleep here to widen race windows */
	if (bridge_delayed_callback_delay > 0) {
		struct timespec ts;

		ts.tv_sec = bridge_delayed_callback_delay;
		ts.tv_nsec = 0;

		printf("%s: sleeping for %d seconds\n",
		    __func__, bridge_delayed_callback_delay);

		msleep(&bridge_delayed_callback_delay, NULL, PZERO,
		    __func__, &ts);

		printf("%s: awoken\n", __func__);
	}
#endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */

	BRIDGE_LOCK(sc);

#if BRIDGE_DEBUG_DELAYED_CALLBACK
	if (if_bridge_debug & BR_DBGF_DELAYED_CALL) {
		printf("%s: %s call 0x%llx flags 0x%x\n", __func__,
		    sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call),
		    call->bdc_flags);
	}
#endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */

	if (call->bdc_flags & BDCF_CANCELLING) {
		/* A cancellation is in progress: wake the cancelling thread */
		wakeup(call);
	} else {
		/* Skip the callback when the bridge is being torn down */
		if ((sc->sc_flags & SCF_DETACHING) == 0) {
			(*call->bdc_func)(sc);
		}
	}
	call->bdc_flags &= ~BDCF_OUTSTANDING;
	BRIDGE_UNLOCK(sc);
}
3676
/*
 * bridge_schedule_delayed_call:
 *
 *	Schedule a function to be called on a separate thread
 *	The actual call may be scheduled to run at a given time or ASAP.
 *
 *	A call with a non-zero bdc_ts is dispatched through the shared
 *	thread_call_func_delayed() facility; an ASAP call uses a
 *	dedicated thread_call allocated on first use.  No-op when the
 *	bridge is detaching or the call is already pending or being
 *	cancelled.
 */
static void
bridge_schedule_delayed_call(struct bridge_delayed_call *call)
{
	uint64_t deadline = 0;
	struct bridge_softc *sc = call->bdc_sc;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	if ((sc->sc_flags & SCF_DETACHING) ||
	    (call->bdc_flags & (BDCF_OUTSTANDING | BDCF_CANCELLING))) {
		return;
	}

	/* Convert the relative timespec into an absolute deadline */
	if (call->bdc_ts.tv_sec || call->bdc_ts.tv_nsec) {
		nanoseconds_to_absolutetime(
			(uint64_t)call->bdc_ts.tv_sec * NSEC_PER_SEC +
			call->bdc_ts.tv_nsec, &deadline);
		clock_absolutetime_interval_to_deadline(deadline, &deadline);
	}

	/* Note: overwrites (not ORs) the flags, marking the call pending */
	call->bdc_flags = BDCF_OUTSTANDING;

#if BRIDGE_DEBUG_DELAYED_CALLBACK
	if (if_bridge_debug & BR_DBGF_DELAYED_CALL) {
		printf("%s: %s call 0x%llx flags 0x%x\n", __func__,
		    sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call),
		    call->bdc_flags);
	}
#endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */

	if (call->bdc_ts.tv_sec || call->bdc_ts.tv_nsec) {
		thread_call_func_delayed(
			(thread_call_func_t)bridge_delayed_callback,
			call, deadline);
	} else {
		/* Allocate the dedicated ASAP thread call on first use */
		if (call->bdc_thread_call == NULL) {
			call->bdc_thread_call = thread_call_allocate(
				(thread_call_func_t)bridge_delayed_callback,
				call);
		}
		thread_call_enter(call->bdc_thread_call);
	}
}
3726
/*
 * bridge_cancel_delayed_call:
 *
 *	Cancel a queued or running delayed call.
 *	If call is running, does not return until the call is done to
 *	prevent race condition with the bridge interface getting destroyed.
 *
 *	Sets BDCF_CANCELLING so the callback wakes us instead of running
 *	its function, then either dequeues the pending thread call or
 *	sleeps until the in-flight callback clears BDCF_OUTSTANDING.
 */
static void
bridge_cancel_delayed_call(struct bridge_delayed_call *call)
{
	boolean_t result;
	struct bridge_softc *sc = call->bdc_sc;

	/*
	 * The call was never scheduled
	 */
	if (sc == NULL) {
		return;
	}

	BRIDGE_LOCK_ASSERT_HELD(sc);

	call->bdc_flags |= BDCF_CANCELLING;

	while (call->bdc_flags & BDCF_OUTSTANDING) {
#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_DELAYED_CALL) {
			printf("%s: %s call 0x%llx flags 0x%x\n", __func__,
			    sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call),
			    call->bdc_flags);
		}
#endif /* BRIDGE_DEBUG */
		result = thread_call_func_cancel(
			(thread_call_func_t)bridge_delayed_callback, call, FALSE);

		if (result) {
			/*
			 * We managed to dequeue the delayed call
			 */
			call->bdc_flags &= ~BDCF_OUTSTANDING;
		} else {
			/*
			 * Wait for delayed call do be done running;
			 * bridge_delayed_callback() wakes us via wakeup(call).
			 */
			msleep(call, &sc->sc_mtx, PZERO, __func__, NULL);
		}
	}
	call->bdc_flags &= ~BDCF_CANCELLING;
}
3776
3777 /*
3778 * bridge_cleanup_delayed_call:
3779 *
3780 * Dispose resource allocated for a delayed call
3781 * Assume the delayed call is not queued or running .
3782 */
3783 static void
3784 bridge_cleanup_delayed_call(struct bridge_delayed_call *call)
3785 {
3786 boolean_t result;
3787 struct bridge_softc *sc = call->bdc_sc;
3788
3789 /*
3790 * The call was never scheduled
3791 */
3792 if (sc == NULL) {
3793 return;
3794 }
3795
3796 BRIDGE_LOCK_ASSERT_HELD(sc);
3797
3798 VERIFY((call->bdc_flags & BDCF_OUTSTANDING) == 0);
3799 VERIFY((call->bdc_flags & BDCF_CANCELLING) == 0);
3800
3801 if (call->bdc_thread_call != NULL) {
3802 result = thread_call_free(call->bdc_thread_call);
3803 if (result == FALSE) {
3804 panic("%s thread_call_free() failed for call %p",
3805 __func__, call);
3806 }
3807 call->bdc_thread_call = NULL;
3808 }
3809 }
3810
/*
 * bridge_init:
 *
 *	Initialize a bridge interface: mark it IFF_RUNNING, arm the
 *	address aging timer and (when BRIDGESTP is compiled in) start
 *	spanning tree.  Returns the error from ifnet_set_flags(), if
 *	any.
 */
static int
bridge_init(struct ifnet *ifp)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifp->if_softc;
	errno_t error;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	/* Nothing to do if the interface is already running */
	if ((ifnet_flags(ifp) & IFF_RUNNING)) {
		return 0;
	}

	error = ifnet_set_flags(ifp, IFF_RUNNING, IFF_RUNNING);

	if (bridge_in_bsd_mode(sc)) {
		/*
		 * Calling bridge_aging_timer() is OK as there are no entries to
		 * age so we're just going to arm the timer
		 */
		bridge_aging_timer(sc);
#if BRIDGESTP
		if (error == 0) {
			bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */
		}
#endif /* BRIDGESTP */
	}
	return error;
}
3844
/*
 * bridge_ifstop:
 *
 *	Stop the bridge interface: cancel the address aging timer, stop
 *	spanning tree (when BRIDGESTP is compiled in), flush the dynamic
 *	forwarding entries and clear IFF_RUNNING.
 */
static void
bridge_ifstop(struct ifnet *ifp, int disable)
{
#pragma unused(disable)
	struct bridge_softc *sc = ifp->if_softc;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	/* Nothing to do if the interface is not running */
	if ((ifnet_flags(ifp) & IFF_RUNNING) == 0) {
		return;
	}

	if (bridge_in_bsd_mode(sc)) {
		bridge_cancel_delayed_call(&sc->sc_aging_timer);

#if BRIDGESTP
		bstp_stop(&sc->sc_stp);
#endif /* BRIDGESTP */

		/* Forget dynamically learned addresses */
		bridge_rtflush(sc, IFBF_FLUSHDYN);
	}
	(void) ifnet_set_flags(ifp, 0, IFF_RUNNING);
}
3873
/*
 * bridge_enqueue:
 *
 *	Enqueue a packet on a bridge member interface.
 *
 *	Transmits every packet of the chain linked through m_nextpkt on
 *	"dst_ifp" via dlil_output(), accounting each against the bridge
 *	interface's output statistics.  Returns the first error (or
 *	flow-control condition, EQFULL/EQSUSPENDED) encountered, 0
 *	otherwise.
 */
static int
bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
{
	int len, error = 0;
	short mflags;
	struct mbuf *m0;

	VERIFY(dst_ifp != NULL);

	/*
	 * We may be sending a fragment so traverse the mbuf
	 *
	 * NOTE: bridge_fragment() is called only when PFIL_HOOKS is enabled.
	 */
	for (; m; m = m0) {
		errno_t _error;
		struct flowadv adv = { FADV_SUCCESS };

		/* Detach the head packet from the chain before sending it */
		m0 = m->m_nextpkt;
		m->m_nextpkt = NULL;

		/*
		 * NOTE(review): len is sampled before the (compiled-out)
		 * VLAN encapsulation below; mflags is captured but never
		 * read afterwards -- confirm both are intentional.
		 */
		len = m->m_pkthdr.len;
		mflags = m->m_flags;
		m->m_flags |= M_PROTO1; /* set to avoid loops */

		/* Complete any checksums the destination cannot offload */
		bridge_finalize_cksum(dst_ifp, m);

#if HAS_IF_CAP
		/*
		 * If underlying interface can not do VLAN tag insertion itself
		 * then attach a packet tag that holds it.
		 */
		if ((m->m_flags & M_VLANTAG) &&
		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
			if (m == NULL) {
				printf("%s: %s: unable to prepend VLAN "
				    "header\n", __func__, dst_ifp->if_xname);
				(void) ifnet_stat_increment_out(dst_ifp,
				    0, 0, 1);
				continue;
			}
			m->m_flags &= ~M_VLANTAG;
		}
#endif /* HAS_IF_CAP */

		_error = dlil_output(dst_ifp, 0, m, NULL, NULL, 1, &adv);

		/* Preserve existing error value */
		if (error == 0) {
			if (_error != 0) {
				error = _error;
			} else if (adv.code == FADV_FLOW_CONTROLLED) {
				error = EQFULL;
			} else if (adv.code == FADV_SUSPENDED) {
				error = EQSUSPENDED;
			}
		}

		/* Account the packet against the bridge interface itself */
		if (_error == 0) {
			(void) ifnet_stat_increment_out(sc->sc_ifp, 1, len, 0);
		} else {
			(void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
		}
	}

	return error;
}
3948
#if HAS_BRIDGE_DUMMYNET
/*
 * bridge_dummynet:
 *
 *	Receive a queued packet from dummynet and pass it on to the output
 *	interface.
 *
 *	The mbuf has the Ethernet header already attached.
 */
static void
bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
{
	struct bridge_softc *sc;

	sc = ifp->if_bridge;

	/*
	 * The packet didn't originate from a member interface. This should only
	 * ever happen if a member interface is removed while packets are
	 * queued for it.
	 */
	if (sc == NULL) {
		m_freem(m);
		return;
	}

	/* Run the outbound packet filters before transmitting */
	if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0) {
			return;
		}
		if (m == NULL) {
			return;
		}
	}

	(void) bridge_enqueue(sc, ifp, m);
}
#endif /* HAS_BRIDGE_DUMMYNET */
3987
#if BRIDGE_MEMBER_OUT_FILTER
/*
 * bridge_member_output:
 *
 *	Send output from a bridge member interface. This
 *	performs the bridging function for locally originated
 *	packets.
 *
 *	The mbuf has the Ethernet header already attached. We must
 *	enqueue or free the mbuf before returning.
 *
 *	Always returns 0; failure to transmit or copy a packet is
 *	reflected only in the interface's output error statistics.
 */
static int
bridge_member_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
    struct rtentry *rt)
{
#pragma unused(sa, rt)
	struct ether_header *eh;
	struct ifnet *dst_if;
	struct bridge_softc *sc;
	uint16_t vlan;

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_OUTPUT) {
		printf("%s: ifp %s\n", __func__, ifp->if_xname);
	}
#endif /* BRIDGE_DEBUG */

	/* Make sure the full Ethernet header is in the first mbuf */
	if (m->m_len < ETHER_HDR_LEN) {
		m = m_pullup(m, ETHER_HDR_LEN);
		if (m == NULL) {
			return 0;
		}
	}

	eh = mtod(m, struct ether_header *);
	sc = ifp->if_bridge;
	vlan = VLANTAGOF(m);

	BRIDGE_LOCK(sc);

	/*
	 * APPLE MODIFICATION
	 * If the packet is an 802.1X ethertype, then only send on the
	 * original output interface.
	 */
	if (eh->ether_type == htons(ETHERTYPE_PAE)) {
		dst_if = ifp;
		goto sendunicast;
	}

	/*
	 * If bridge is down, but the original output interface is up,
	 * go ahead and send out that interface. Otherwise, the packet
	 * is dropped below.
	 */
	if ((sc->sc_ifp->if_flags & IFF_RUNNING) == 0) {
		dst_if = ifp;
		goto sendunicast;
	}

	/*
	 * If the packet is a multicast, or we don't know a better way to
	 * get there, send to all interfaces.
	 */
	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
		dst_if = NULL;
	} else {
		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
	}
	if (dst_if == NULL) {
		struct bridge_iflist *bif;
		struct mbuf *mc;
		int error = 0, used = 0;

		bridge_span(sc, m);

		/* Convert the lock to a reference so members can't go away */
		BRIDGE_LOCK2REF(sc, error);
		if (error) {
			m_freem(m);
			return 0;
		}

		TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
			dst_if = bif->bif_ifp;

			if (dst_if->if_type == IFT_GIF) {
				continue;
			}
			if ((dst_if->if_flags & IFF_RUNNING) == 0) {
				continue;
			}

			/*
			 * If this is not the original output interface,
			 * and the interface is participating in spanning
			 * tree, make sure the port is in a state that
			 * allows forwarding.
			 */
			if (dst_if != ifp && (bif->bif_ifflags & IFBIF_STP) &&
			    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
				continue;
			}

			/*
			 * Send the original mbuf (not a copy) on the last
			 * member.
			 * NOTE(review): sc_iflist is iterated with
			 * TAILQ_FOREACH, yet LIST_NEXT is used here to
			 * detect the last member -- verify this mix of
			 * queue(3) macro families is intentional.
			 */
			if (LIST_NEXT(bif, bif_next) == NULL) {
				used = 1;
				mc = m;
			} else {
				mc = m_copypacket(m, M_DONTWAIT);
				if (mc == NULL) {
					(void) ifnet_stat_increment_out(
						sc->sc_ifp, 0, 0, 1);
					continue;
				}
			}

			(void) bridge_enqueue(sc, dst_if, mc);
		}
		if (used == 0) {
			m_freem(m);
		}
		BRIDGE_UNREF(sc);
		return 0;
	}

sendunicast:
	/*
	 * XXX Spanning tree consideration here?
	 */

	bridge_span(sc, m);
	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
		m_freem(m);
		BRIDGE_UNLOCK(sc);
		return 0;
	}

	BRIDGE_UNLOCK(sc);
	(void) bridge_enqueue(sc, dst_if, m);
	return 0;
}
#endif /* BRIDGE_MEMBER_OUT_FILTER */
4129
/*
 * Output callback.
 *
 * This routine is called externally from above only when if_bridge_txstart
 * is disabled; otherwise it is called internally by bridge_start().
 *
 * A unicast frame with a known destination is enqueued on that single
 * member; everything else is handed to bridge_broadcast().
 */
static int
bridge_output(struct ifnet *ifp, struct mbuf *m)
{
	struct bridge_softc *sc = ifnet_softc(ifp);
	struct ether_header *eh;
	struct ifnet *dst_if;
	int error = 0;

	eh = mtod(m, struct ether_header *);
	dst_if = NULL;

	BRIDGE_LOCK(sc);
	ASSERT(bridge_in_bsd_mode(sc));

	/* Only unicast frames get a destination lookup */
	if (!(m->m_flags & (M_BCAST | M_MCAST))) {
		dst_if = bridge_rtlookup(sc, eh->ether_dhost, 0);
	}

	(void) ifnet_stat_increment_out(ifp, 1, m->m_pkthdr.len, 0);

#if NBPFILTER > 0
	/* Feed the frame to any attached bpf(4) tap */
	if (sc->sc_bpf_output) {
		bridge_bpf_output(ifp, m);
	}
#endif

	if (dst_if == NULL) {
		/* callee will unlock */
		bridge_broadcast(sc, ifp, m, 0);
	} else {
		BRIDGE_UNLOCK(sc);
		error = bridge_enqueue(sc, dst_if, m);
	}

	return error;
}
4172
/*
 * bridge_finalize_cksum:
 *
 *	Finalize outbound checksums before a frame is handed to "ifp".
 *	Checksums the destination interface cannot offload are computed
 *	in software; when the interface supports partial checksum
 *	offload (CSUM_PARTIAL), a pending TCP checksum is converted to
 *	start/stuff offsets instead.  "ifp" may be NULL, in which case
 *	everything is finalized in software.
 */
static void
bridge_finalize_cksum(struct ifnet *ifp, struct mbuf *m)
{
	struct ether_header *eh = mtod(m, struct ether_header *);
	uint32_t sw_csum, hwcap;

	if (ifp != NULL) {
		hwcap = (ifp->if_hwassist | CSUM_DATA_VALID);
	} else {
		hwcap = 0;
	}

	/* do in software what the hardware cannot */
	sw_csum = m->m_pkthdr.csum_flags & ~IF_HWASSIST_CSUM_FLAGS(hwcap);
	sw_csum &= IF_HWASSIST_CSUM_MASK;

	switch (ntohs(eh->ether_type)) {
	case ETHERTYPE_IP:
		if ((hwcap & CSUM_PARTIAL) && !(sw_csum & CSUM_DELAY_DATA) &&
		    (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
			if (m->m_pkthdr.csum_flags & CSUM_TCP) {
				/*
				 * Hand the TCP checksum to the interface's
				 * partial-checksum engine via start/stuff
				 * offsets (assumes no IP options; the IP
				 * header is taken as sizeof(struct ip)).
				 */
				uint16_t start =
				    sizeof(*eh) + sizeof(struct ip);
				uint16_t ulpoff =
				    m->m_pkthdr.csum_data & 0xffff;
				m->m_pkthdr.csum_flags |=
				    (CSUM_DATA_VALID | CSUM_PARTIAL);
				m->m_pkthdr.csum_tx_stuff = (ulpoff + start);
				m->m_pkthdr.csum_tx_start = start;
			} else {
				sw_csum |= (CSUM_DELAY_DATA &
				    m->m_pkthdr.csum_flags);
			}
		}
		(void) in_finalize_cksum(m, sizeof(*eh), sw_csum);
		break;

#if INET6
	case ETHERTYPE_IPV6:
		if ((hwcap & CSUM_PARTIAL) &&
		    !(sw_csum & CSUM_DELAY_IPV6_DATA) &&
		    (m->m_pkthdr.csum_flags & CSUM_DELAY_IPV6_DATA)) {
			if (m->m_pkthdr.csum_flags & CSUM_TCPIPV6) {
				/* Same partial-offload conversion for v6 */
				uint16_t start =
				    sizeof(*eh) + sizeof(struct ip6_hdr);
				uint16_t ulpoff =
				    m->m_pkthdr.csum_data & 0xffff;
				m->m_pkthdr.csum_flags |=
				    (CSUM_DATA_VALID | CSUM_PARTIAL);
				m->m_pkthdr.csum_tx_stuff = (ulpoff + start);
				m->m_pkthdr.csum_tx_start = start;
			} else {
				sw_csum |= (CSUM_DELAY_IPV6_DATA &
				    m->m_pkthdr.csum_flags);
			}
		}
		(void) in6_finalize_cksum(m, sizeof(*eh), -1, -1, sw_csum);
		break;
#endif /* INET6 */
	}
}
4234
/*
 * bridge_start:
 *
 * Start output on a bridge.
 *
 * This routine is invoked by the start worker thread; because we never call
 * it directly, there is no need do deploy any serialization mechanism other
 * than what's already used by the worker thread, i.e. this is already single
 * threaded.
 *
 * This routine is called only when if_bridge_txstart is enabled.
 */
static void
bridge_start(struct ifnet *ifp)
{
	struct mbuf *m;

	/* Drain the interface output queue through bridge_output() */
	while (ifnet_dequeue(ifp, &m) == 0) {
		(void) bridge_output(ifp, m);
	}
}
4260
/*
 * bridge_forward:
 *
 *	The forwarding function of the bridge.
 *
 *	Validates the frame against the source member's STP state and
 *	address limits, learns the source address, then either forwards
 *	a known unicast destination to a single member or broadcasts to
 *	all members.
 *
 *	NOTE: Releases the lock on return.
 */
static void
bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
    struct mbuf *m)
{
	struct bridge_iflist *dbif;
	struct ifnet *src_if, *dst_if, *ifp;
	struct ether_header *eh;
	uint16_t vlan;
	uint8_t *dst;
	int error;

	BRIDGE_LOCK_ASSERT_HELD(sc);
	ASSERT(bridge_in_bsd_mode(sc));

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_OUTPUT) {
		printf("%s: %s m 0x%llx\n", __func__, sc->sc_ifp->if_xname,
		    (uint64_t)VM_KERNEL_ADDRPERM(m));
	}
#endif /* BRIDGE_DEBUG */

	src_if = m->m_pkthdr.rcvif;
	ifp = sc->sc_ifp;

	(void) ifnet_stat_increment_in(ifp, 1, m->m_pkthdr.len, 0);
	vlan = VLANTAGOF(m);


	/* Drop frames arriving on a port in STP discarding state */
	if ((sbif->bif_ifflags & IFBIF_STP) &&
	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
		goto drop;
	}

	eh = mtod(m, struct ether_header *);
	dst = eh->ether_dhost;

	/* If the interface is learning, record the address. */
	if (sbif->bif_ifflags & IFBIF_LEARNING) {
		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
		    sbif, 0, IFBAF_DYNAMIC);
		/*
		 * If the interface has addresses limits then deny any source
		 * that is not in the cache.
		 */
		if (error && sbif->bif_addrmax) {
			goto drop;
		}
	}

	/* Ports in STP learning state learn addresses but do not forward */
	if ((sbif->bif_ifflags & IFBIF_STP) != 0 &&
	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING) {
		goto drop;
	}

	/*
	 * At this point, the port either doesn't participate
	 * in spanning tree or it is in the forwarding state.
	 */

	/*
	 * If the packet is unicast, destined for someone on
	 * "this" side of the bridge, drop it.
	 */
	if ((m->m_flags & (M_BCAST | M_MCAST)) == 0) {
		dst_if = bridge_rtlookup(sc, dst, vlan);
		if (src_if == dst_if) {
			goto drop;
		}
	} else {
		/*
		 * Check if its a reserved multicast address, any address
		 * listed in 802.1D section 7.12.6 may not be forwarded by the
		 * bridge.
		 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
		 */
		if (dst[0] == 0x01 && dst[1] == 0x80 &&
		    dst[2] == 0xc2 && dst[3] == 0x00 &&
		    dst[4] == 0x00 && dst[5] <= 0x0f) {
			goto drop;
		}


		/* ...forward it to all interfaces. */
		atomic_add_64(&ifp->if_imcasts, 1);
		dst_if = NULL;
	}

	/*
	 * If we have a destination interface which is a member of our bridge,
	 * OR this is a unicast packet, push it through the bpf(4) machinery.
	 * For broadcast or multicast packets, don't bother because it will
	 * be reinjected into ether_input. We do this before we pass the packets
	 * through the pfil(9) framework, as it is possible that pfil(9) will
	 * drop the packet, or possibly modify it, making it difficult to debug
	 * firewall issues on the bridge.
	 */
#if NBPFILTER > 0
	if (eh->ether_type == htons(ETHERTYPE_RSN_PREAUTH) ||
	    dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0) {
		m->m_pkthdr.rcvif = ifp;
		if (sc->sc_bpf_input) {
			bridge_bpf_input(ifp, m);
		}
	}
#endif /* NBPFILTER */

#if defined(PFIL_HOOKS)
	/* run the packet filter */
	if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
		BRIDGE_UNLOCK(sc);
		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0) {
			return;
		}
		if (m == NULL) {
			return;
		}
		BRIDGE_LOCK(sc);
	}
#endif /* PFIL_HOOKS */

	/* Unknown or multicast destination: flood (callee unlocks) */
	if (dst_if == NULL) {
		bridge_broadcast(sc, src_if, m, 1);
		return;
	}

	/*
	 * At this point, we're dealing with a unicast frame
	 * going to a different interface.
	 */
	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
		goto drop;
	}

	dbif = bridge_lookup_member_if(sc, dst_if);
	if (dbif == NULL) {
		/* Not a member of the bridge (anymore?) */
		goto drop;
	}

	/* Private segments can not talk to each other */
	if (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE) {
		goto drop;
	}

	/* Never forward onto a port in STP discarding state */
	if ((dbif->bif_ifflags & IFBIF_STP) &&
	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
		goto drop;
	}

#if HAS_DHCPRA_MASK
	/* APPLE MODIFICATION <rdar:6985737> */
	if ((dst_if->if_extflags & IFEXTF_DHCPRA_MASK) != 0) {
		m = ip_xdhcpra_output(dst_if, m);
		if (!m) {
			++sc->sc_sc.sc_ifp.if_xdhcpra;
			return;
		}
	}
#endif /* HAS_DHCPRA_MASK */

	BRIDGE_UNLOCK(sc);

#if defined(PFIL_HOOKS)
	/* Outbound packet filters run unlocked */
	if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0) {
			return;
		}
		if (m == NULL) {
			return;
		}
	}
#endif /* PFIL_HOOKS */

	(void) bridge_enqueue(sc, dst_if, m);
	return;

drop:
	BRIDGE_UNLOCK(sc);
	m_freem(m);
}
4448
4449 #if BRIDGE_DEBUG
4450
4451 char *ether_ntop(char *, size_t, const u_char *);
4452
/*
 * ether_ntop:
 *
 *	Format the 6-byte Ethernet address "ap" into "buf" as
 *	colon-separated lowercase hex ("xx:xx:xx:xx:xx:xx"), truncated
 *	to "len" bytes (NUL terminator included), and return "buf".
 */
__private_extern__ char *
ether_ntop(char *buf, size_t len, const u_char *ap)
{
	snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x",
	    ap[0], ap[1], ap[2], ap[3], ap[4], ap[5]);

	return buf;
}
4461
4462 #endif /* BRIDGE_DEBUG */
4463
4464 /*
4465 * bridge_input:
4466 *
4467 * Filter input from a member interface. Queue the packet for
4468 * bridging if it is not for us.
4469 */
__private_extern__ errno_t
bridge_input(struct ifnet *ifp, struct mbuf *m, void *frame_header)
{
	struct bridge_softc *sc = ifp->if_bridge;
	struct bridge_iflist *bif, *bif2;
	struct ifnet *bifp;
	struct ether_header *eh;
	struct mbuf *mc, *mc2;
	uint16_t vlan;
	int error;

	/*
	 * Return-value convention (DLIL input filter):
	 *   0           - caller continues normal input processing on 'm'
	 *   EJUSTRETURN - the bridge consumed (forwarded or freed) 'm'
	 */
	ASSERT(bridge_in_bsd_mode(sc));
#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_INPUT) {
		printf("%s: %s from %s m 0x%llx data 0x%llx\n", __func__,
		    sc->sc_ifp->if_xname, ifp->if_xname,
		    (uint64_t)VM_KERNEL_ADDRPERM(m),
		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)));
	}
#endif /* BRIDGE_DEBUG */

	/* Bridge not up: hand the packet back for normal input. */
	if ((sc->sc_ifp->if_flags & IFF_RUNNING) == 0) {
#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_INPUT) {
			printf("%s: %s not running passing along\n",
			    __func__, sc->sc_ifp->if_xname);
		}
#endif /* BRIDGE_DEBUG */
		return 0;
	}

	bifp = sc->sc_ifp;
	vlan = VLANTAGOF(m);

#ifdef IFF_MONITOR
	/*
	 * Implement support for bridge monitoring. If this flag has been
	 * set on this interface, discard the packet once we push it through
	 * the bpf(4) machinery, but before we do, increment the byte and
	 * packet counters associated with this interface.
	 */
	if ((bifp->if_flags & IFF_MONITOR) != 0) {
		m->m_pkthdr.rcvif = bifp;
		BRIDGE_BPF_MTAP_INPUT(sc, m);
		(void) ifnet_stat_increment_in(bifp, 1, m->m_pkthdr.len, 0);
		m_freem(m);
		return EJUSTRETURN;
	}
#endif /* IFF_MONITOR */

	/*
	 * Need to clear the promiscous flags otherwise it will be
	 * dropped by DLIL after processing filters
	 */
	if ((mbuf_flags(m) & MBUF_PROMISC)) {
		mbuf_setflags_mask(m, 0, MBUF_PROMISC);
	}

	BRIDGE_LOCK(sc);
	bif = bridge_lookup_member_if(sc, ifp);
	if (bif == NULL) {
		/* 'ifp' is not (or no longer) a member port. */
		BRIDGE_UNLOCK(sc);
#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_INPUT) {
			printf("%s: %s bridge_lookup_member_if failed\n",
			    __func__, sc->sc_ifp->if_xname);
		}
#endif /* BRIDGE_DEBUG */
		return 0;
	}

	/* Optional per-port host filter; failure consumes the packet. */
	if (bif->bif_flags & BIFF_HOST_FILTER) {
		error = bridge_host_filter(bif, m);
		if (error != 0) {
			if (if_bridge_debug & BR_DBGF_INPUT) {
				printf("%s: %s bridge_host_filter failed\n",
				    __func__, bif->bif_ifp->if_xname);
			}
			BRIDGE_UNLOCK(sc);
			return EJUSTRETURN;
		}
	}

	eh = mtod(m, struct ether_header *);

	/* Mirror the frame to any span ports (copies; 'm' untouched). */
	bridge_span(sc, m);

	if (m->m_flags & (M_BCAST | M_MCAST)) {
#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_MCAST) {
			if ((m->m_flags & M_MCAST)) {
				printf("%s: multicast: "
				    "%02x:%02x:%02x:%02x:%02x:%02x\n",
				    __func__,
				    eh->ether_dhost[0], eh->ether_dhost[1],
				    eh->ether_dhost[2], eh->ether_dhost[3],
				    eh->ether_dhost[4], eh->ether_dhost[5]);
			}
		}
#endif /* BRIDGE_DEBUG */

		/* Tap off 802.1D packets; they do not get forwarded. */
		if (memcmp(eh->ether_dhost, bstp_etheraddr,
		    ETHER_ADDR_LEN) == 0) {
#if BRIDGESTP
			m = bstp_input(&bif->bif_stp, ifp, m);
#else /* !BRIDGESTP */
			m_freem(m);
			m = NULL;
#endif /* !BRIDGESTP */
			if (m == NULL) {
				BRIDGE_UNLOCK(sc);
				return EJUSTRETURN;
			}
		}

		/* Discarding STP ports neither forward nor deliver. */
		if ((bif->bif_ifflags & IFBIF_STP) &&
		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
			BRIDGE_UNLOCK(sc);
			return 0;
		}

		/*
		 * Make a deep copy of the packet and enqueue the copy
		 * for bridge processing; return the original packet for
		 * local processing.
		 */
		mc = m_dup(m, M_DONTWAIT);
		if (mc == NULL) {
			BRIDGE_UNLOCK(sc);
			return 0;
		}

		/*
		 * Perform the bridge forwarding function with the copy.
		 *
		 * Note that bridge_forward calls BRIDGE_UNLOCK
		 */
		bridge_forward(sc, bif, mc);

		/*
		 * Reinject the mbuf as arriving on the bridge so we have a
		 * chance at claiming multicast packets. We can not loop back
		 * here from ether_input as a bridge is never a member of a
		 * bridge.
		 */
		VERIFY(bifp->if_bridge == NULL);
		mc2 = m_dup(m, M_DONTWAIT);
		if (mc2 != NULL) {
			/* Keep the layer3 header aligned */
			int i = min(mc2->m_pkthdr.len, max_protohdr);
			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
		}
		if (mc2 != NULL) {
			/* mark packet as arriving on the bridge */
			mc2->m_pkthdr.rcvif = bifp;
			mc2->m_pkthdr.pkt_hdr = mbuf_data(mc2);

#if NBPFILTER > 0
			if (sc->sc_bpf_input) {
				bridge_bpf_input(bifp, mc2);
			}
#endif /* NBPFILTER */
			/* Strip the Ethernet header before reinjection. */
			(void) mbuf_setdata(mc2,
			    (char *)mbuf_data(mc2) + ETHER_HDR_LEN,
			    mbuf_len(mc2) - ETHER_HDR_LEN);
			(void) mbuf_pkthdr_adjustlen(mc2, -ETHER_HDR_LEN);

			(void) ifnet_stat_increment_in(bifp, 1,
			    mbuf_pkthdr_len(mc2), 0);

#if BRIDGE_DEBUG
			if (if_bridge_debug & BR_DBGF_MCAST) {
				printf("%s: %s mcast for us\n", __func__,
				    sc->sc_ifp->if_xname);
			}
#endif /* BRIDGE_DEBUG */

			dlil_input_packet_list(bifp, mc2);
		}

		/* Return the original packet for local processing. */
		return 0;
	}

	/* Unicast path: discarding STP ports drop here too. */
	if ((bif->bif_ifflags & IFBIF_STP) &&
	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
		BRIDGE_UNLOCK(sc);
		return 0;
	}

#ifdef DEV_CARP
#define CARP_CHECK_WE_ARE_DST(iface) \
	((iface)->if_carp &&\
	    carp_forus((iface)->if_carp, eh->ether_dhost))
#define CARP_CHECK_WE_ARE_SRC(iface) \
	((iface)->if_carp &&\
	    carp_forus((iface)->if_carp, eh->ether_shost))
#else
#define CARP_CHECK_WE_ARE_DST(iface) 0
#define CARP_CHECK_WE_ARE_SRC(iface) 0
#endif

#ifdef INET6
#define PFIL_HOOKED_INET6 PFIL_HOOKED(&inet6_pfil_hook)
#else
#define PFIL_HOOKED_INET6 0
#endif

#if defined(PFIL_HOOKS)
#define PFIL_PHYS(sc, ifp, m) do { \
	if (pfil_local_phys && \
	    (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) { \
		if (bridge_pfil(&m, NULL, ifp, \
		    PFIL_IN) != 0 || m == NULL) { \
			BRIDGE_UNLOCK(sc); \
			return (NULL); \
		} \
	} \
} while (0)
#else /* PFIL_HOOKS */
#define PFIL_PHYS(sc, ifp, m)
#endif /* PFIL_HOOKS */

	/*
	 * NOTE: GRAB_OUR_PACKETS expands inside the TAILQ_FOREACH below;
	 * its 'continue' and 'return' statements act on that loop and on
	 * bridge_input itself.  It claims frames destined to a member's
	 * own MAC and drops frames we ourselves transmitted.
	 */
#define GRAB_OUR_PACKETS(iface) \
	if ((iface)->if_type == IFT_GIF) \
	        continue; \
	/* It is destined for us. */ \
	if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, \
	    ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST((iface))) { \
	        if ((iface)->if_type == IFT_BRIDGE) { \
	                BRIDGE_BPF_MTAP_INPUT(sc, m); \
	                /* Filter on the physical interface. */ \
	                PFIL_PHYS(sc, iface, m); \
	        } \
	        if (bif->bif_ifflags & IFBIF_LEARNING) { \
	                error = bridge_rtupdate(sc, eh->ether_shost, \
	                    vlan, bif, 0, IFBAF_DYNAMIC); \
	                if (error && bif->bif_addrmax) { \
	                        BRIDGE_UNLOCK(sc); \
	                        return (EJUSTRETURN); \
	                } \
	        } \
	        m->m_pkthdr.rcvif = iface; \
	        BRIDGE_UNLOCK(sc); \
	        return (0); \
	} \
	\
	/* We just received a packet that we sent out. */ \
	if (memcmp(IF_LLADDR((iface)), eh->ether_shost, \
	    ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_SRC((iface))) { \
	        BRIDGE_UNLOCK(sc); \
	        return (EJUSTRETURN); \
	}

	/*
	 * Unicast.
	 */
	/*
	 * If the packet is for us, set the packets source as the
	 * bridge, and return the packet back to ether_input for
	 * local processing.
	 */
	if (memcmp(eh->ether_dhost, IF_LLADDR(bifp),
	    ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST(bifp)) {
		/* Mark the packet as arriving on the bridge interface */
		(void) mbuf_pkthdr_setrcvif(m, bifp);
		mbuf_pkthdr_setheader(m, frame_header);

		/*
		 * If the interface is learning, and the source
		 * address is valid and not multicast, record
		 * the address.
		 */
		if (bif->bif_ifflags & IFBIF_LEARNING) {
			(void) bridge_rtupdate(sc, eh->ether_shost,
			    vlan, bif, 0, IFBAF_DYNAMIC);
		}

		BRIDGE_BPF_MTAP_INPUT(sc, m);

		/* Strip the Ethernet header before reinjection. */
		(void) mbuf_setdata(m, (char *)mbuf_data(m) + ETHER_HDR_LEN,
		    mbuf_len(m) - ETHER_HDR_LEN);
		(void) mbuf_pkthdr_adjustlen(m, -ETHER_HDR_LEN);

		(void) ifnet_stat_increment_in(bifp, 1, mbuf_pkthdr_len(m), 0);

		BRIDGE_UNLOCK(sc);

#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_INPUT) {
			printf("%s: %s packet for bridge\n", __func__,
			    sc->sc_ifp->if_xname);
		}
#endif /* BRIDGE_DEBUG */

		dlil_input_packet_list(bifp, m);

		return EJUSTRETURN;
	}

	/*
	 * if the destination of the packet is for the MAC address of
	 * the member interface itself, then we don't need to forward
	 * it -- just pass it back.  Note that it'll likely just be
	 * dropped by the stack, but if something else is bound to
	 * the interface directly (for example, the wireless stats
	 * protocol -- although that actually uses BPF right now),
	 * then it will consume the packet
	 *
	 * ALSO, note that we do this check AFTER checking for the
	 * bridge's own MAC address, because the bridge may be
	 * using the SAME MAC address as one of its interfaces
	 */
	if (memcmp(eh->ether_dhost, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0) {
#ifdef VERY_VERY_VERY_DIAGNOSTIC
		printf("%s: not forwarding packet bound for member "
		    "interface\n", __func__);
#endif
		BRIDGE_UNLOCK(sc);
		return 0;
	}

	/* Now check the all bridge members. */
	TAILQ_FOREACH(bif2, &sc->sc_iflist, bif_next) {
		GRAB_OUR_PACKETS(bif2->bif_ifp)
	}

#undef CARP_CHECK_WE_ARE_DST
#undef CARP_CHECK_WE_ARE_SRC
#undef GRAB_OUR_PACKETS

	/*
	 * Perform the bridge forwarding function.
	 *
	 * Note that bridge_forward calls BRIDGE_UNLOCK
	 */
	bridge_forward(sc, bif, m);

	return EJUSTRETURN;
}
4812
4813 /*
4814 * bridge_broadcast:
4815 *
4816 * Send a frame to all interfaces that are members of
4817 * the bridge, except for the one on which the packet
4818 * arrived.
4819 *
4820 * NOTE: Releases the lock on return.
4821 */
static void
bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
    struct mbuf *m, int runfilt)
{
#ifndef PFIL_HOOKS
#pragma unused(runfilt)
#endif
	struct bridge_iflist *dbif, *sbif;
	struct mbuf *mc;
	struct ifnet *dst_if;
	int error = 0, used = 0;

	/*
	 * Flood 'm' out every eligible member port except 'src_if'.
	 * Ownership of 'm' passes to this function: it is either enqueued
	 * (possibly as the last member's copy) or freed before returning.
	 * Called with the bridge lock held; releases it on return.
	 */
	sbif = bridge_lookup_member_if(sc, src_if);

	/*
	 * Convert the bridge lock to a reference so member interfaces
	 * cannot detach while we transmit without holding the lock.
	 */
	BRIDGE_LOCK2REF(sc, error);
	if (error) {
		m_freem(m);
		return;
	}

#ifdef PFIL_HOOKS
	/* Filter on the bridge interface before broadcasting */
	if (runfilt && (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) {
		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0) {
			goto out;
		}
		if (m == NULL) {
			goto out;
		}
	}
#endif /* PFIL_HOOKS */

	TAILQ_FOREACH(dbif, &sc->sc_iflist, bif_next) {
		dst_if = dbif->bif_ifp;
		if (dst_if == src_if) {
			continue;
		}

		/* Private segments can not talk to each other */
		if (sbif &&
		    (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE)) {
			continue;
		}

		/* Skip ports in the STP discarding state. */
		if ((dbif->bif_ifflags & IFBIF_STP) &&
		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
			continue;
		}

		/* Unknown unicast only floods to DISCOVER ports. */
		if ((dbif->bif_ifflags & IFBIF_DISCOVER) == 0 &&
		    (m->m_flags & (M_BCAST | M_MCAST)) == 0) {
			continue;
		}

		if ((dst_if->if_flags & IFF_RUNNING) == 0) {
			continue;
		}

		if (!(dbif->bif_flags & BIFF_MEDIA_ACTIVE)) {
			continue;
		}

		/*
		 * Send the original mbuf to the last member instead of a
		 * copy; 'used' records that 'm' must not be freed below.
		 */
		if (TAILQ_NEXT(dbif, bif_next) == NULL) {
			mc = m;
			used = 1;
		} else {
			mc = m_dup(m, M_DONTWAIT);
			if (mc == NULL) {
				(void) ifnet_stat_increment_out(sc->sc_ifp,
				    0, 0, 1);
				continue;
			}
		}

#ifdef PFIL_HOOKS
		/*
		 * Filter on the output interface. Pass a NULL bridge interface
		 * pointer so we do not redundantly filter on the bridge for
		 * each interface we broadcast on.
		 */
		if (runfilt &&
		    (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) {
			if (used == 0) {
				/* Keep the layer3 header aligned */
				int i = min(mc->m_pkthdr.len, max_protohdr);
				mc = m_copyup(mc, i, ETHER_ALIGN);
				if (mc == NULL) {
					(void) ifnet_stat_increment_out(
					    sc->sc_ifp, 0, 0, 1);
					continue;
				}
			}
			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0) {
				continue;
			}
			if (mc == NULL) {
				continue;
			}
		}
#endif /* PFIL_HOOKS */

		(void) bridge_enqueue(sc, dst_if, mc);
	}
	/* No member consumed the original: free it. */
	if (used == 0) {
		m_freem(m);
	}

#ifdef PFIL_HOOKS
out:
#endif /* PFIL_HOOKS */

	BRIDGE_UNREF(sc);
}
4935
4936 /*
4937 * bridge_span:
4938 *
4939 * Duplicate a packet out one or more interfaces that are in span mode,
4940 * the original mbuf is unmodified.
4941 */
4942 static void
4943 bridge_span(struct bridge_softc *sc, struct mbuf *m)
4944 {
4945 struct bridge_iflist *bif;
4946 struct ifnet *dst_if;
4947 struct mbuf *mc;
4948
4949 if (TAILQ_EMPTY(&sc->sc_spanlist)) {
4950 return;
4951 }
4952
4953 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) {
4954 dst_if = bif->bif_ifp;
4955
4956 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
4957 continue;
4958 }
4959
4960 mc = m_copypacket(m, M_DONTWAIT);
4961 if (mc == NULL) {
4962 (void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
4963 continue;
4964 }
4965
4966 (void) bridge_enqueue(sc, dst_if, mc);
4967 }
4968 }
4969
4970
4971 /*
4972 * bridge_rtupdate:
4973 *
4974 * Add a bridge routing entry.
4975 */
static int
bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
    struct bridge_iflist *bif, int setflags, uint8_t flags)
{
	struct bridge_rtnode *brt;
	int error;

	/*
	 * Learn (or refresh) the forwarding entry mapping 'dst'/'vlan'
	 * to member port 'bif'.  Returns 0 on success, EINVAL for an
	 * unusable address, ENOSPC when a table or per-port limit is
	 * hit, ENOMEM when allocation fails.
	 */
	BRIDGE_LOCK_ASSERT_HELD(sc);
	ASSERT(bridge_in_bsd_mode(sc));

	/* Check the source address is valid and not multicast. */
	if (ETHER_IS_MULTICAST(dst) ||
	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
	    dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0) {
		return EINVAL;
	}


	/* 802.1p frames map to vlan 1 */
	if (vlan == 0) {
		vlan = 1;
	}

	/*
	 * A route for this destination might already exist. If so,
	 * update it, otherwise create a new one.
	 */
	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
		/* Global table cap. */
		if (sc->sc_brtcnt >= sc->sc_brtmax) {
			sc->sc_brtexceeded++;
			return ENOSPC;
		}
		/* Check per interface address limits (if enabled) */
		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
			bif->bif_addrexceeded++;
			return ENOSPC;
		}

		/*
		 * Allocate a new bridge forwarding node, and
		 * initialize the expiration time and Ethernet
		 * address.
		 * zalloc_noblock is used because the bridge lock is held.
		 */
		brt = zalloc_noblock(bridge_rtnode_pool);
		if (brt == NULL) {
			return ENOMEM;
		}
		bzero(brt, sizeof(struct bridge_rtnode));

		if (bif->bif_ifflags & IFBIF_STICKY) {
			brt->brt_flags = IFBAF_STICKY;
		} else {
			brt->brt_flags = IFBAF_DYNAMIC;
		}

		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
		brt->brt_vlan = vlan;


		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
			zfree(bridge_rtnode_pool, brt);
			return error;
		}
		/* Only bind to the port after a successful insert. */
		brt->brt_dst = bif;
		bif->bif_addrcnt++;
#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_RT_TABLE) {
			printf("%s: added %02x:%02x:%02x:%02x:%02x:%02x "
			    "on %s count %u hashsize %u\n", __func__,
			    dst[0], dst[1], dst[2], dst[3], dst[4], dst[5],
			    sc->sc_ifp->if_xname, sc->sc_brtcnt,
			    sc->sc_rthash_size);
		}
#endif
	}

	/*
	 * Dynamic entry seen on a different port: the station moved,
	 * so migrate the entry (and the per-port accounting) to 'bif'.
	 */
	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
	    brt->brt_dst != bif) {
		brt->brt_dst->bif_addrcnt--;
		brt->brt_dst = bif;
		brt->brt_dst->bif_addrcnt++;
	}

	/* Refresh the expiry every time a dynamic address is seen. */
	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
		unsigned long now;

		now = (unsigned long) net_uptime();
		brt->brt_expire = now + sc->sc_brttimeout;
	}
	if (setflags) {
		brt->brt_flags = flags;
	}


	return 0;
}
5072
5073 /*
5074 * bridge_rtlookup:
5075 *
5076 * Lookup the destination interface for an address.
5077 */
5078 static struct ifnet *
5079 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
5080 {
5081 struct bridge_rtnode *brt;
5082
5083 BRIDGE_LOCK_ASSERT_HELD(sc);
5084
5085 if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL) {
5086 return NULL;
5087 }
5088
5089 return brt->brt_ifp;
5090 }
5091
5092 /*
5093 * bridge_rttrim:
5094 *
5095 * Trim the routine table so that we have a number
5096 * of routing entries less than or equal to the
5097 * maximum number.
5098 */
5099 static void
5100 bridge_rttrim(struct bridge_softc *sc)
5101 {
5102 struct bridge_rtnode *brt, *nbrt;
5103
5104 BRIDGE_LOCK_ASSERT_HELD(sc);
5105
5106 /* Make sure we actually need to do this. */
5107 if (sc->sc_brtcnt <= sc->sc_brtmax) {
5108 return;
5109 }
5110
5111 /* Force an aging cycle; this might trim enough addresses. */
5112 bridge_rtage(sc);
5113 if (sc->sc_brtcnt <= sc->sc_brtmax) {
5114 return;
5115 }
5116
5117 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
5118 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
5119 bridge_rtnode_destroy(sc, brt);
5120 if (sc->sc_brtcnt <= sc->sc_brtmax) {
5121 return;
5122 }
5123 }
5124 }
5125 }
5126
5127 /*
5128 * bridge_aging_timer:
5129 *
5130 * Aging periodic timer for the bridge routing table.
5131 */
5132 static void
5133 bridge_aging_timer(struct bridge_softc *sc)
5134 {
5135 BRIDGE_LOCK_ASSERT_HELD(sc);
5136
5137 bridge_rtage(sc);
5138
5139 if ((sc->sc_ifp->if_flags & IFF_RUNNING) &&
5140 (sc->sc_flags & SCF_DETACHING) == 0) {
5141 sc->sc_aging_timer.bdc_sc = sc;
5142 sc->sc_aging_timer.bdc_func = bridge_aging_timer;
5143 sc->sc_aging_timer.bdc_ts.tv_sec = bridge_rtable_prune_period;
5144 bridge_schedule_delayed_call(&sc->sc_aging_timer);
5145 }
5146 }
5147
5148 /*
5149 * bridge_rtage:
5150 *
5151 * Perform an aging cycle.
5152 */
5153 static void
5154 bridge_rtage(struct bridge_softc *sc)
5155 {
5156 struct bridge_rtnode *brt, *nbrt;
5157 unsigned long now;
5158
5159 BRIDGE_LOCK_ASSERT_HELD(sc);
5160
5161 now = (unsigned long) net_uptime();
5162
5163 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
5164 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
5165 if (now >= brt->brt_expire) {
5166 bridge_rtnode_destroy(sc, brt);
5167 }
5168 }
5169 }
5170 }
5171
5172 /*
5173 * bridge_rtflush:
5174 *
5175 * Remove all dynamic addresses from the bridge.
5176 */
5177 static void
5178 bridge_rtflush(struct bridge_softc *sc, int full)
5179 {
5180 struct bridge_rtnode *brt, *nbrt;
5181
5182 BRIDGE_LOCK_ASSERT_HELD(sc);
5183
5184 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
5185 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
5186 bridge_rtnode_destroy(sc, brt);
5187 }
5188 }
5189 }
5190
5191 /*
5192 * bridge_rtdaddr:
5193 *
5194 * Remove an address from the table.
5195 */
5196 static int
5197 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
5198 {
5199 struct bridge_rtnode *brt;
5200 int found = 0;
5201
5202 BRIDGE_LOCK_ASSERT_HELD(sc);
5203
5204 /*
5205 * If vlan is zero then we want to delete for all vlans so the lookup
5206 * may return more than one.
5207 */
5208 while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
5209 bridge_rtnode_destroy(sc, brt);
5210 found = 1;
5211 }
5212
5213 return found ? 0 : ENOENT;
5214 }
5215
5216 /*
5217 * bridge_rtdelete:
5218 *
5219 * Delete routes to a speicifc member interface.
5220 */
5221 static void
5222 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
5223 {
5224 struct bridge_rtnode *brt, *nbrt;
5225
5226 BRIDGE_LOCK_ASSERT_HELD(sc);
5227
5228 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
5229 if (brt->brt_ifp == ifp && (full ||
5230 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) {
5231 bridge_rtnode_destroy(sc, brt);
5232 }
5233 }
5234 }
5235
5236 /*
5237 * bridge_rtable_init:
5238 *
5239 * Initialize the route table for this bridge.
5240 */
5241 static int
5242 bridge_rtable_init(struct bridge_softc *sc)
5243 {
5244 u_int32_t i;
5245
5246 ASSERT(bridge_in_bsd_mode(sc));
5247
5248 sc->sc_rthash = _MALLOC(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
5249 M_DEVBUF, M_WAITOK | M_ZERO);
5250 if (sc->sc_rthash == NULL) {
5251 printf("%s: no memory\n", __func__);
5252 return ENOMEM;
5253 }
5254 sc->sc_rthash_size = BRIDGE_RTHASH_SIZE;
5255
5256 for (i = 0; i < sc->sc_rthash_size; i++) {
5257 LIST_INIT(&sc->sc_rthash[i]);
5258 }
5259
5260 sc->sc_rthash_key = RandomULong();
5261
5262 LIST_INIT(&sc->sc_rtlist);
5263
5264 return 0;
5265 }
5266
5267 /*
5268 * bridge_rthash_delayed_resize:
5269 *
5270 * Resize the routing table hash on a delayed thread call.
5271 */
static void
bridge_rthash_delayed_resize(struct bridge_softc *sc)
{
	u_int32_t new_rthash_size;
	struct _bridge_rtnode_list *new_rthash = NULL;
	struct _bridge_rtnode_list *old_rthash = NULL;
	u_int32_t i;
	struct bridge_rtnode *brt;
	int error = 0;

	/*
	 * Grow the hash bucket array and rehash all entries.  Runs on a
	 * delayed thread call so the (blocking) allocation can be done
	 * with the bridge lock dropped; SCF_RESIZING marks the window
	 * where the lock is released so a second resize is not queued.
	 */
	BRIDGE_LOCK_ASSERT_HELD(sc);

	/*
	 * Four entries per hash bucket is our ideal load factor
	 */
	if (sc->sc_brtcnt < sc->sc_rthash_size * 4) {
		goto out;
	}

	/*
	 * Doubling the number of hash buckets may be too simplistic
	 * especially when facing a spike of new entries
	 */
	new_rthash_size = sc->sc_rthash_size * 2;

	sc->sc_flags |= SCF_RESIZING;
	BRIDGE_UNLOCK(sc);

	/* Blocking allocation: the bridge lock must not be held here. */
	new_rthash = _MALLOC(sizeof(*sc->sc_rthash) * new_rthash_size,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	BRIDGE_LOCK(sc);
	sc->sc_flags &= ~SCF_RESIZING;

	if (new_rthash == NULL) {
		error = ENOMEM;
		goto out;
	}
	/* The bridge may have started detaching while the lock was dropped. */
	if ((sc->sc_flags & SCF_DETACHING)) {
		error = ENODEV;
		goto out;
	}
	/*
	 * Fail safe from here on
	 */
	old_rthash = sc->sc_rthash;
	sc->sc_rthash = new_rthash;
	sc->sc_rthash_size = new_rthash_size;

	/*
	 * Get a new key to force entries to be shuffled around to reduce
	 * the likelihood they will land in the same buckets
	 */
	sc->sc_rthash_key = RandomULong();

	for (i = 0; i < sc->sc_rthash_size; i++) {
		LIST_INIT(&sc->sc_rthash[i]);
	}

	/* Rehash every node from the global list into the new buckets. */
	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
		LIST_REMOVE(brt, brt_hash);
		(void) bridge_rtnode_hash(sc, brt);
	}
out:
	if (error == 0) {
#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_RT_TABLE) {
			printf("%s: %s new size %u\n", __func__,
			    sc->sc_ifp->if_xname, sc->sc_rthash_size);
		}
#endif /* BRIDGE_DEBUG */
		if (old_rthash) {
			_FREE(old_rthash, M_DEVBUF);
		}
	} else {
#if BRIDGE_DEBUG
		printf("%s: %s failed %d\n", __func__,
		    sc->sc_ifp->if_xname, error);
#endif /* BRIDGE_DEBUG */
		/* On failure the old table stays in place; drop the new one. */
		if (new_rthash != NULL) {
			_FREE(new_rthash, M_DEVBUF);
		}
	}
}
5356
5357 /*
5358 * Resize the number of hash buckets based on the load factor
5359 * Currently only grow
5360 * Failing to resize the hash table is not fatal
5361 */
5362 static void
5363 bridge_rthash_resize(struct bridge_softc *sc)
5364 {
5365 BRIDGE_LOCK_ASSERT_HELD(sc);
5366
5367 if ((sc->sc_flags & SCF_DETACHING) || (sc->sc_flags & SCF_RESIZING)) {
5368 return;
5369 }
5370
5371 /*
5372 * Four entries per hash bucket is our ideal load factor
5373 */
5374 if (sc->sc_brtcnt < sc->sc_rthash_size * 4) {
5375 return;
5376 }
5377 /*
5378 * Hard limit on the size of the routing hash table
5379 */
5380 if (sc->sc_rthash_size >= bridge_rtable_hash_size_max) {
5381 return;
5382 }
5383
5384 sc->sc_resize_call.bdc_sc = sc;
5385 sc->sc_resize_call.bdc_func = bridge_rthash_delayed_resize;
5386 bridge_schedule_delayed_call(&sc->sc_resize_call);
5387 }
5388
5389 /*
5390 * bridge_rtable_fini:
5391 *
5392 * Deconstruct the route table for this bridge.
5393 */
static void
bridge_rtable_fini(struct bridge_softc *sc)
{
	/*
	 * Release the hash bucket array.  All route entries must have
	 * been destroyed already (e.g. via bridge_rtflush).
	 */
	KASSERT(sc->sc_brtcnt == 0,
	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
	if (sc->sc_rthash) {
		_FREE(sc->sc_rthash, M_DEVBUF);
		/* Prevent a dangling pointer on repeated teardown paths. */
		sc->sc_rthash = NULL;
	}
}
5404
5405 /*
5406 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
5407 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
5408 */
#define mix(a, b, c) \
	do { \
	        a -= b; a -= c; a ^= (c >> 13); \
	        b -= c; b -= a; b ^= (a << 8); \
	        c -= a; c -= b; c ^= (b >> 13); \
	        a -= b; a -= c; a ^= (c >> 12); \
	        b -= c; b -= a; b ^= (a << 16); \
	        c -= a; c -= b; c ^= (b >> 5); \
	        a -= b; a -= c; a ^= (c >> 3); \
	        b -= c; b -= a; b ^= (a << 10); \
	        c -= a; c -= b; c ^= (b >> 15); \
	} while ( /*CONSTCOND*/ 0)

static __inline uint32_t
bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
{
	/* 0x9e3779b9 is the golden-ratio constant from Jenkins' hash. */
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;

	/* Pack the six address octets into the 'a' and 'b' words. */
	b += addr[5] << 8;
	b += addr[4];
	a += addr[3] << 24;
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];

	mix(a, b, c);

	/* Fold down to the current number of hash buckets. */
	return c & BRIDGE_RTHASH_MASK(sc);
}

#undef mix
5440
static int
bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
{
	int idx;

	/*
	 * Byte-wise lexicographic comparison of two Ethernet addresses;
	 * returns the (signed) difference of the first differing octet,
	 * or 0 when the addresses are equal.
	 */
	for (idx = 0; idx < ETHER_ADDR_LEN; idx++) {
		int diff = ((int)a[idx]) - ((int)b[idx]);
		if (diff != 0) {
			return diff;
		}
	}

	return 0;
}
5452
5453 /*
5454 * bridge_rtnode_lookup:
5455 *
5456 * Look up a bridge route node for the specified destination. Compare the
5457 * vlan id or if zero then just return the first match.
5458 */
5459 static struct bridge_rtnode *
5460 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr,
5461 uint16_t vlan)
5462 {
5463 struct bridge_rtnode *brt;
5464 uint32_t hash;
5465 int dir;
5466
5467 BRIDGE_LOCK_ASSERT_HELD(sc);
5468 ASSERT(bridge_in_bsd_mode(sc));
5469
5470 hash = bridge_rthash(sc, addr);
5471 LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
5472 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
5473 if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0)) {
5474 return brt;
5475 }
5476 if (dir > 0) {
5477 return NULL;
5478 }
5479 }
5480
5481 return NULL;
5482 }
5483
5484 /*
5485 * bridge_rtnode_hash:
5486 *
5487 * Insert the specified bridge node into the route hash table.
5488 * This is used when adding a new node or to rehash when resizing
5489 * the hash table
5490 */
static int
bridge_rtnode_hash(struct bridge_softc *sc, struct bridge_rtnode *brt)
{
	struct bridge_rtnode *lbrt;
	uint32_t hash;
	int dir;

	/*
	 * Insert 'brt' into its hash chain, keeping the chain ordered by
	 * bridge_rtnode_addr_cmp so lookups can terminate early.  Returns
	 * EEXIST when an entry with the same address and vlan is already
	 * present, 0 otherwise.
	 */
	BRIDGE_LOCK_ASSERT_HELD(sc);

	hash = bridge_rthash(sc, brt->brt_addr);

	/* Empty chain: trivially becomes the head. */
	lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
	if (lbrt == NULL) {
		LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
		goto out;
	}

	/* Walk the ordered chain to find the insertion point. */
	do {
		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan) {
#if BRIDGE_DEBUG
			if (if_bridge_debug & BR_DBGF_RT_TABLE) {
				printf("%s: %s EEXIST "
				    "%02x:%02x:%02x:%02x:%02x:%02x\n",
				    __func__, sc->sc_ifp->if_xname,
				    brt->brt_addr[0], brt->brt_addr[1],
				    brt->brt_addr[2], brt->brt_addr[3],
				    brt->brt_addr[4], brt->brt_addr[5]);
			}
#endif
			return EEXIST;
		}
		if (dir > 0) {
			LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
			goto out;
		}
		if (LIST_NEXT(lbrt, brt_hash) == NULL) {
			LIST_INSERT_AFTER(lbrt, brt, brt_hash);
			goto out;
		}
		lbrt = LIST_NEXT(lbrt, brt_hash);
	} while (lbrt != NULL);

	/* Not reachable: the loop always inserts or returns above. */
#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_RT_TABLE) {
		printf("%s: %s impossible %02x:%02x:%02x:%02x:%02x:%02x\n",
		    __func__, sc->sc_ifp->if_xname,
		    brt->brt_addr[0], brt->brt_addr[1], brt->brt_addr[2],
		    brt->brt_addr[3], brt->brt_addr[4], brt->brt_addr[5]);
	}
#endif

out:
	return 0;
}
5546
5547 /*
5548 * bridge_rtnode_insert:
5549 *
5550 * Insert the specified bridge node into the route table. We
5551 * assume the entry is not already in the table.
5552 */
5553 static int
5554 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
5555 {
5556 int error;
5557
5558 error = bridge_rtnode_hash(sc, brt);
5559 if (error != 0) {
5560 return error;
5561 }
5562
5563 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
5564 sc->sc_brtcnt++;
5565
5566 bridge_rthash_resize(sc);
5567
5568 return 0;
5569 }
5570
5571 /*
5572 * bridge_rtnode_destroy:
5573 *
5574 * Destroy a bridge rtnode.
5575 */
5576 static void
5577 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
5578 {
5579 BRIDGE_LOCK_ASSERT_HELD(sc);
5580
5581 LIST_REMOVE(brt, brt_hash);
5582
5583 LIST_REMOVE(brt, brt_list);
5584 sc->sc_brtcnt--;
5585 brt->brt_dst->bif_addrcnt--;
5586 zfree(bridge_rtnode_pool, brt);
5587 }
5588
5589 #if BRIDGESTP
5590 /*
5591 * bridge_rtable_expire:
5592 *
5593 * Set the expiry time for all routes on an interface.
5594 */
5595 static void
5596 bridge_rtable_expire(struct ifnet *ifp, int age)
5597 {
5598 struct bridge_softc *sc = ifp->if_bridge;
5599 struct bridge_rtnode *brt;
5600
5601 BRIDGE_LOCK(sc);
5602
5603 /*
5604 * If the age is zero then flush, otherwise set all the expiry times to
5605 * age for the interface
5606 */
5607 if (age == 0) {
5608 bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
5609 } else {
5610 unsigned long now;
5611
5612 now = (unsigned long) net_uptime();
5613
5614 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
5615 /* Cap the expiry time to 'age' */
5616 if (brt->brt_ifp == ifp &&
5617 brt->brt_expire > now + age &&
5618 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
5619 brt->brt_expire = now + age;
5620 }
5621 }
5622 }
5623 BRIDGE_UNLOCK(sc);
5624 }
5625
5626 /*
5627 * bridge_state_change:
5628 *
5629 * Callback from the bridgestp code when a port changes states.
5630 */
5631 static void
5632 bridge_state_change(struct ifnet *ifp, int state)
5633 {
5634 struct bridge_softc *sc = ifp->if_bridge;
5635 static const char *stpstates[] = {
5636 "disabled",
5637 "listening",
5638 "learning",
5639 "forwarding",
5640 "blocking",
5641 "discarding"
5642 };
5643
5644 if (log_stp) {
5645 log(LOG_NOTICE, "%s: state changed to %s on %s\n",
5646 sc->sc_ifp->if_xname,
5647 stpstates[state], ifp->if_xname);
5648 }
5649 }
5650 #endif /* BRIDGESTP */
5651
5652 #ifdef PFIL_HOOKS
5653 /*
5654 * Send bridge packets through pfil if they are one of the types pfil can deal
5655 * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without
5656 * question.) If *bifp or *ifp are NULL then packet filtering is skipped for
5657 * that interface.
5658 */
5659 static int
5660 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
5661 {
5662 int snap, error, i, hlen;
5663 struct ether_header *eh1, eh2;
5664 struct ip_fw_args args;
5665 struct ip *ip;
5666 struct llc llc1;
5667 u_int16_t ether_type;
5668
5669 snap = 0;
5670 error = -1; /* Default error if not error == 0 */
5671
5672 #if 0
5673 /* we may return with the IP fields swapped, ensure its not shared */
5674 KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
5675 #endif
5676
5677 if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0) {
5678 return 0; /* filtering is disabled */
5679 }
5680 i = min((*mp)->m_pkthdr.len, max_protohdr);
5681 if ((*mp)->m_len < i) {
5682 *mp = m_pullup(*mp, i);
5683 if (*mp == NULL) {
5684 printf("%s: m_pullup failed\n", __func__);
5685 return -1;
5686 }
5687 }
5688
5689 eh1 = mtod(*mp, struct ether_header *);
5690 ether_type = ntohs(eh1->ether_type);
5691
5692 /*
5693 * Check for SNAP/LLC.
5694 */
5695 if (ether_type < ETHERMTU) {
5696 struct llc *llc2 = (struct llc *)(eh1 + 1);
5697
5698 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
5699 llc2->llc_dsap == LLC_SNAP_LSAP &&
5700 llc2->llc_ssap == LLC_SNAP_LSAP &&
5701 llc2->llc_control == LLC_UI) {
5702 ether_type = htons(llc2->llc_un.type_snap.ether_type);
5703 snap = 1;
5704 }
5705 }
5706
5707 /*
5708 * If we're trying to filter bridge traffic, don't look at anything
5709 * other than IP and ARP traffic. If the filter doesn't understand
5710 * IPv6, don't allow IPv6 through the bridge either. This is lame
5711 * since if we really wanted, say, an AppleTalk filter, we are hosed,
5712 * but of course we don't have an AppleTalk filter to begin with.
5713 * (Note that since pfil doesn't understand ARP it will pass *ALL*
5714 * ARP traffic.)
5715 */
5716 switch (ether_type) {
5717 case ETHERTYPE_ARP:
5718 case ETHERTYPE_REVARP:
5719 if (pfil_ipfw_arp == 0) {
5720 return 0; /* Automatically pass */
5721 }
5722 break;
5723
5724 case ETHERTYPE_IP:
5725 #if INET6
5726 case ETHERTYPE_IPV6:
5727 #endif /* INET6 */
5728 break;
5729 default:
5730 /*
5731 * Check to see if the user wants to pass non-ip
5732 * packets, these will not be checked by pfil(9) and
5733 * passed unconditionally so the default is to drop.
5734 */
5735 if (pfil_onlyip) {
5736 goto bad;
5737 }
5738 }
5739
5740 /* Strip off the Ethernet header and keep a copy. */
5741 m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t)&eh2);
5742 m_adj(*mp, ETHER_HDR_LEN);
5743
5744 /* Strip off snap header, if present */
5745 if (snap) {
5746 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t)&llc1);
5747 m_adj(*mp, sizeof(struct llc));
5748 }
5749
5750 /*
5751 * Check the IP header for alignment and errors
5752 */
5753 if (dir == PFIL_IN) {
5754 switch (ether_type) {
5755 case ETHERTYPE_IP:
5756 error = bridge_ip_checkbasic(mp);
5757 break;
5758 #if INET6
5759 case ETHERTYPE_IPV6:
5760 error = bridge_ip6_checkbasic(mp);
5761 break;
5762 #endif /* INET6 */
5763 default:
5764 error = 0;
5765 }
5766 if (error) {
5767 goto bad;
5768 }
5769 }
5770
5771 if (IPFW_LOADED && pfil_ipfw != 0 && dir == PFIL_OUT && ifp != NULL) {
5772 error = -1;
5773 args.rule = ip_dn_claim_rule(*mp);
5774 if (args.rule != NULL && fw_one_pass) {
5775 goto ipfwpass; /* packet already partially processed */
5776 }
5777 args.m = *mp;
5778 args.oif = ifp;
5779 args.next_hop = NULL;
5780 args.eh = &eh2;
5781 args.inp = NULL; /* used by ipfw uid/gid/jail rules */
5782 i = ip_fw_chk_ptr(&args);
5783 *mp = args.m;
5784
5785 if (*mp == NULL) {
5786 return error;
5787 }
5788
5789 if (DUMMYNET_LOADED && (i == IP_FW_DUMMYNET)) {
5790 /* put the Ethernet header back on */
5791 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT, 0);
5792 if (*mp == NULL) {
5793 return error;
5794 }
5795 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
5796
5797 /*
5798 * Pass the pkt to dummynet, which consumes it. The
5799 * packet will return to us via bridge_dummynet().
5800 */
5801 args.oif = ifp;
5802 ip_dn_io_ptr(mp, DN_TO_IFB_FWD, &args, DN_CLIENT_IPFW);
5803 return error;
5804 }
5805
5806 if (i != IP_FW_PASS) { /* drop */
5807 goto bad;
5808 }
5809 }
5810
5811 ipfwpass:
5812 error = 0;
5813
5814 /*
5815 * Run the packet through pfil
5816 */
5817 switch (ether_type) {
5818 case ETHERTYPE_IP:
5819 /*
5820 * before calling the firewall, swap fields the same as
5821 * IP does. here we assume the header is contiguous
5822 */
5823 ip = mtod(*mp, struct ip *);
5824
5825 ip->ip_len = ntohs(ip->ip_len);
5826 ip->ip_off = ntohs(ip->ip_off);
5827
5828 /*
5829 * Run pfil on the member interface and the bridge, both can
5830 * be skipped by clearing pfil_member or pfil_bridge.
5831 *
5832 * Keep the order:
5833 * in_if -> bridge_if -> out_if
5834 */
5835 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) {
5836 error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
5837 dir, NULL);
5838 }
5839
5840 if (*mp == NULL || error != 0) { /* filter may consume */
5841 break;
5842 }
5843
5844 if (pfil_member && ifp != NULL) {
5845 error = pfil_run_hooks(&inet_pfil_hook, mp, ifp,
5846 dir, NULL);
5847 }
5848
5849 if (*mp == NULL || error != 0) { /* filter may consume */
5850 break;
5851 }
5852
5853 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) {
5854 error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
5855 dir, NULL);
5856 }
5857
5858 if (*mp == NULL || error != 0) { /* filter may consume */
5859 break;
5860 }
5861
5862 /* check if we need to fragment the packet */
5863 if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
5864 i = (*mp)->m_pkthdr.len;
5865 if (i > ifp->if_mtu) {
5866 error = bridge_fragment(ifp, *mp, &eh2, snap,
5867 &llc1);
5868 return error;
5869 }
5870 }
5871
5872 /* Recalculate the ip checksum and restore byte ordering */
5873 ip = mtod(*mp, struct ip *);
5874 hlen = ip->ip_hl << 2;
5875 if (hlen < sizeof(struct ip)) {
5876 goto bad;
5877 }
5878 if (hlen > (*mp)->m_len) {
5879 if ((*mp = m_pullup(*mp, hlen)) == 0) {
5880 goto bad;
5881 }
5882 ip = mtod(*mp, struct ip *);
5883 if (ip == NULL) {
5884 goto bad;
5885 }
5886 }
5887 ip->ip_len = htons(ip->ip_len);
5888 ip->ip_off = htons(ip->ip_off);
5889 ip->ip_sum = 0;
5890 if (hlen == sizeof(struct ip)) {
5891 ip->ip_sum = in_cksum_hdr(ip);
5892 } else {
5893 ip->ip_sum = in_cksum(*mp, hlen);
5894 }
5895
5896 break;
5897 #if INET6
5898 case ETHERTYPE_IPV6:
5899 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) {
5900 error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
5901 dir, NULL);
5902 }
5903
5904 if (*mp == NULL || error != 0) { /* filter may consume */
5905 break;
5906 }
5907
5908 if (pfil_member && ifp != NULL) {
5909 error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
5910 dir, NULL);
5911 }
5912
5913 if (*mp == NULL || error != 0) { /* filter may consume */
5914 break;
5915 }
5916
5917 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) {
5918 error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
5919 dir, NULL);
5920 }
5921 break;
5922 #endif
5923 default:
5924 error = 0;
5925 break;
5926 }
5927
5928 if (*mp == NULL) {
5929 return error;
5930 }
5931 if (error != 0) {
5932 goto bad;
5933 }
5934
5935 error = -1;
5936
5937 /*
5938 * Finally, put everything back the way it was and return
5939 */
5940 if (snap) {
5941 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT, 0);
5942 if (*mp == NULL) {
5943 return error;
5944 }
5945 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
5946 }
5947
5948 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT, 0);
5949 if (*mp == NULL) {
5950 return error;
5951 }
5952 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
5953
5954 return 0;
5955
5956 bad:
5957 m_freem(*mp);
5958 *mp = NULL;
5959 return error;
5960 }
5961
/*
 * bridge_ip_checkbasic:
 *
 * Perform basic checks on header size since
 * pfil assumes ip_input has already processed
 * it for it. Cut-and-pasted from ip_input.c.
 * Given how simple the IPv6 version is,
 * does the IPv4 version really need to be
 * this complicated?
 *
 * Returns 0 when the packet passes and *mp points to an mbuf whose
 * first segment holds a contiguous, aligned IPv4 header (the chain may
 * have been reallocated by m_copyup/m_pullup).  Returns -1 on a
 * malformed packet; in that case *mp is updated but NOT freed here
 * (and may be NULL if m_copyup/m_pullup consumed the chain) -- the
 * caller is expected to drop it.
 *
 * XXX Should we update ipstat here, or not?
 * XXX Right now we update ipstat but not
 * XXX csum_counter.
 */
static int
bridge_ip_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip *ip;
	int len, hlen;
	u_short sum;

	if (*mp == NULL) {
		return -1;
	}

	/*
	 * If the IP header is misaligned, copy it up into a fresh,
	 * aligned mbuf; otherwise just make sure the fixed-size header
	 * is contiguous in the first segment.
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		/* max_linkhdr is already rounded up to nearest 4-byte */
		if ((m = m_copyup(m, sizeof(struct ip),
		    max_linkhdr)) == NULL) {
			/* XXXJRT new stat, please */
			ipstat.ips_toosmall++;
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof(struct ip))) {
		if ((m = m_pullup(m, sizeof(struct ip))) == NULL) {
			ipstat.ips_toosmall++;
			goto bad;
		}
	}
	ip = mtod(m, struct ip *);
	if (ip == NULL) {
		goto bad;
	}

	/* Only IPv4 is acceptable here */
	if (ip->ip_v != IPVERSION) {
		ipstat.ips_badvers++;
		goto bad;
	}
	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) { /* minimum header length */
		ipstat.ips_badhlen++;
		goto bad;
	}
	/* Pull up IP options too, if any, so the full header is contiguous */
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == 0) {
			ipstat.ips_badhlen++;
			goto bad;
		}
		ip = mtod(m, struct ip *);
		if (ip == NULL) {
			goto bad;
		}
	}

	/*
	 * Verify the header checksum; trust the driver's verdict when it
	 * already checked it in hardware (CSUM_IP_CHECKED/CSUM_IP_VALID).
	 */
	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		ipstat.ips_badsum++;
		goto bad;
	}

	/* Retrieve the packet length. */
	len = ntohs(ip->ip_len);

	/*
	 * Check for additional length bogosity
	 */
	if (len < hlen) {
		ipstat.ips_badlen++;
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is as at least much as the IP header would have us expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < len) {
		ipstat.ips_tooshort++;
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return 0;

bad:
	/* Hand the (possibly reallocated, possibly NULL) mbuf back */
	*mp = m;
	return -1;
}
6068
#if INET6
/*
 * bridge_ip6_checkbasic:
 *
 * IPv6 counterpart of bridge_ip_checkbasic(): guarantee that the base
 * IPv6 header is contiguous and aligned in the first mbuf, and that the
 * version field is sane.  Cut-and-pasted from ip6_input.c.
 * XXX Should we update ip6stat, or not?
 */
static int
bridge_ip6_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip6_hdr *ip6;

	/*
	 * If the IPv6 header is not aligned, slurp it up into a new
	 * mbuf with space for link headers, in the event we forward
	 * it. Otherwise, if it is aligned, make sure the entire base
	 * IPv6 header is in the first mbuf of the chain.
	 */
	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;

		/* max_linkhdr is already rounded up to nearest 4-byte */
		m = m_copyup(m, sizeof(struct ip6_hdr), max_linkhdr);
		if (m == NULL) {
			/* XXXJRT new stat, please */
			ip6stat.ip6s_toosmall++;
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;

		m = m_pullup(m, sizeof(struct ip6_hdr));
		if (m == NULL) {
			ip6stat.ip6s_toosmall++;
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	}

	ip6 = mtod(m, struct ip6_hdr *);
	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
		ip6stat.ip6s_badvers++;
		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return 0;

bad:
	/* Hand the (possibly reallocated, possibly NULL) mbuf back */
	*mp = m;
	return -1;
}
#endif /* INET6 */
6123
6124 /*
6125 * bridge_fragment:
6126 *
6127 * Return a fragmented mbuf chain.
6128 */
6129 static int
6130 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
6131 int snap, struct llc *llc)
6132 {
6133 struct mbuf *m0;
6134 struct ip *ip;
6135 int error = -1;
6136
6137 if (m->m_len < sizeof(struct ip) &&
6138 (m = m_pullup(m, sizeof(struct ip))) == NULL) {
6139 goto out;
6140 }
6141 ip = mtod(m, struct ip *);
6142
6143 error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
6144 CSUM_DELAY_IP);
6145 if (error) {
6146 goto out;
6147 }
6148
6149 /* walk the chain and re-add the Ethernet header */
6150 for (m0 = m; m0; m0 = m0->m_nextpkt) {
6151 if (error == 0) {
6152 if (snap) {
6153 M_PREPEND(m0, sizeof(struct llc), M_DONTWAIT, 0);
6154 if (m0 == NULL) {
6155 error = ENOBUFS;
6156 continue;
6157 }
6158 bcopy(llc, mtod(m0, caddr_t),
6159 sizeof(struct llc));
6160 }
6161 M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT, 0);
6162 if (m0 == NULL) {
6163 error = ENOBUFS;
6164 continue;
6165 }
6166 bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
6167 } else {
6168 m_freem(m);
6169 }
6170 }
6171
6172 if (error == 0) {
6173 ipstat.ips_fragmented++;
6174 }
6175
6176 return error;
6177
6178 out:
6179 if (m != NULL) {
6180 m_freem(m);
6181 }
6182 return error;
6183 }
6184 #endif /* PFIL_HOOKS */
6185
6186 /*
6187 * bridge_set_bpf_tap:
6188 *
6189 * Sets ups the BPF callbacks.
6190 */
6191 static errno_t
6192 bridge_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, bpf_packet_func bpf_callback)
6193 {
6194 struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
6195
6196 /* TBD locking */
6197 if (sc == NULL || (sc->sc_flags & SCF_DETACHING)) {
6198 return ENODEV;
6199 }
6200 ASSERT(bridge_in_bsd_mode(sc));
6201 switch (mode) {
6202 case BPF_TAP_DISABLE:
6203 sc->sc_bpf_input = sc->sc_bpf_output = NULL;
6204 break;
6205
6206 case BPF_TAP_INPUT:
6207 sc->sc_bpf_input = bpf_callback;
6208 break;
6209
6210 case BPF_TAP_OUTPUT:
6211 sc->sc_bpf_output = bpf_callback;
6212 break;
6213
6214 case BPF_TAP_INPUT_OUTPUT:
6215 sc->sc_bpf_input = sc->sc_bpf_output = bpf_callback;
6216 break;
6217
6218 default:
6219 break;
6220 }
6221
6222 return 0;
6223 }
6224
/*
 * bridge_detach:
 *
 * Callback when interface has been detached.  Tears down the remaining
 * per-bridge state in order: spanning tree, routing table, global
 * bridge list entry, the ifnet reference, and finally the lock and
 * softc itself.
 */
static void
bridge_detach(ifnet_t ifp)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);

#if BRIDGESTP
	/* Tear down the spanning tree state for this bridge */
	bstp_detach(&sc->sc_stp);
#endif /* BRIDGESTP */

	if (bridge_in_bsd_mode(sc)) {
		/* Tear down the routing table. */
		bridge_rtable_fini(sc);
	}

	/* Unlink this bridge from the global bridge list */
	lck_mtx_lock(&bridge_list_mtx);
	LIST_REMOVE(sc, sc_list);
	lck_mtx_unlock(&bridge_list_mtx);

	/* Release our reference on the interface */
	ifnet_release(ifp);

	/* Free the per-bridge mutex, then the softc itself */
	lck_mtx_destroy(&sc->sc_mtx, bridge_lock_grp);
	if_clone_softc_deallocate(&bridge_cloner, sc);
}
6253
6254 /*
6255 * bridge_bpf_input:
6256 *
6257 * Invoke the input BPF callback if enabled
6258 */
6259 __private_extern__ errno_t
6260 bridge_bpf_input(ifnet_t ifp, struct mbuf *m)
6261 {
6262 struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
6263
6264 ASSERT(bridge_in_bsd_mode(sc));
6265 if (sc->sc_bpf_input) {
6266 if (mbuf_pkthdr_rcvif(m) != ifp) {
6267 printf("%s: rcvif: 0x%llx != ifp 0x%llx\n", __func__,
6268 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m)),
6269 (uint64_t)VM_KERNEL_ADDRPERM(ifp));
6270 }
6271 (*sc->sc_bpf_input)(ifp, m);
6272 }
6273 return 0;
6274 }
6275
6276 /*
6277 * bridge_bpf_output:
6278 *
6279 * Invoke the output BPF callback if enabled
6280 */
6281 __private_extern__ errno_t
6282 bridge_bpf_output(ifnet_t ifp, struct mbuf *m)
6283 {
6284 struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
6285
6286 ASSERT(bridge_in_bsd_mode(sc));
6287 if (sc->sc_bpf_output) {
6288 (*sc->sc_bpf_output)(ifp, m);
6289 }
6290 return 0;
6291 }
6292
6293 /*
6294 * bridge_link_event:
6295 *
6296 * Report a data link event on an interface
6297 */
6298 static void
6299 bridge_link_event(struct ifnet *ifp, u_int32_t event_code)
6300 {
6301 struct {
6302 struct kern_event_msg header;
6303 u_int32_t unit;
6304 char if_name[IFNAMSIZ];
6305 } event;
6306
6307 #if BRIDGE_DEBUG
6308 if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
6309 printf("%s: %s event_code %u - %s\n", __func__, ifp->if_xname,
6310 event_code, dlil_kev_dl_code_str(event_code));
6311 }
6312 #endif /* BRIDGE_DEBUG */
6313
6314 bzero(&event, sizeof(event));
6315 event.header.total_size = sizeof(event);
6316 event.header.vendor_code = KEV_VENDOR_APPLE;
6317 event.header.kev_class = KEV_NETWORK_CLASS;
6318 event.header.kev_subclass = KEV_DL_SUBCLASS;
6319 event.header.event_code = event_code;
6320 event.header.event_data[0] = ifnet_family(ifp);
6321 event.unit = (u_int32_t)ifnet_unit(ifp);
6322 strlcpy(event.if_name, ifnet_name(ifp), IFNAMSIZ);
6323 ifnet_event(ifp, &event.header);
6324 }
6325
/*
 * BRIDGE_HF_DROP:
 *
 * Account a host-filter drop: bump the named statistics counter, emit
 * a debug line when BR_DBGF_HOSTFILTER tracing is on, and set the
 * local variable "error" (which must be in scope at the call site) to
 * EINVAL.  Wrapped in do/while(0) so it expands safely as a single
 * statement, including inside unbraced if/else.
 */
#define BRIDGE_HF_DROP(reason, func, line) do { \
	bridge_hostfilter_stats.reason++; \
	if (if_bridge_debug & BR_DBGF_HOSTFILTER) { \
		printf("%s.%d: " #reason, func, line); \
	} \
	error = EINVAL; \
} while (0)
6332
6333 /*
6334 * Make sure this is a DHCP or Bootp request that match the host filter
6335 */
6336 static int
6337 bridge_dhcp_filter(struct bridge_iflist *bif, struct mbuf *m, size_t offset)
6338 {
6339 int error = EINVAL;
6340 struct dhcp dhcp;
6341
6342 /*
6343 * Note: We use the dhcp structure because bootp structure definition
6344 * is larger and some vendors do not pad the request
6345 */
6346 error = mbuf_copydata(m, offset, sizeof(struct dhcp), &dhcp);
6347 if (error != 0) {
6348 BRIDGE_HF_DROP(brhf_dhcp_too_small, __func__, __LINE__);
6349 goto done;
6350 }
6351 if (dhcp.dp_op != BOOTREQUEST) {
6352 BRIDGE_HF_DROP(brhf_dhcp_bad_op, __func__, __LINE__);
6353 goto done;
6354 }
6355 /*
6356 * The hardware address must be an exact match
6357 */
6358 if (dhcp.dp_htype != ARPHRD_ETHER) {
6359 BRIDGE_HF_DROP(brhf_dhcp_bad_htype, __func__, __LINE__);
6360 goto done;
6361 }
6362 if (dhcp.dp_hlen != ETHER_ADDR_LEN) {
6363 BRIDGE_HF_DROP(brhf_dhcp_bad_hlen, __func__, __LINE__);
6364 goto done;
6365 }
6366 if (bcmp(dhcp.dp_chaddr, bif->bif_hf_hwsrc,
6367 ETHER_ADDR_LEN) != 0) {
6368 BRIDGE_HF_DROP(brhf_dhcp_bad_chaddr, __func__, __LINE__);
6369 goto done;
6370 }
6371 /*
6372 * Client address must match the host address or be not specified
6373 */
6374 if (dhcp.dp_ciaddr.s_addr != bif->bif_hf_ipsrc.s_addr &&
6375 dhcp.dp_ciaddr.s_addr != INADDR_ANY) {
6376 BRIDGE_HF_DROP(brhf_dhcp_bad_ciaddr, __func__, __LINE__);
6377 goto done;
6378 }
6379 error = 0;
6380 done:
6381 return error;
6382 }
6383
/*
 * bridge_host_filter:
 *
 * Restrict traffic from a member interface to a single configured
 * host: the source MAC must match bif_hf_hwsrc and the payload must be
 * ARP or IPv4 from bif_hf_ipsrc (with carve-outs for ARP probes and
 * DHCP/BOOTP requests from 0.0.0.0).
 *
 * Returns 0 when the packet is acceptable.  Returns EINVAL (via
 * BRIDGE_HF_DROP, which also bumps the per-reason statistics counter)
 * when it is not -- and in that case the mbuf is FREED here, so the
 * caller must not touch it after a non-zero return.
 *
 * NOTE(review): mbuf_pullup(&m, ...) may replace the local 'm'; the
 * caller's copy of the pointer is presumably refreshed elsewhere or
 * unused after this call -- confirm against the caller.
 */
static int
bridge_host_filter(struct bridge_iflist *bif, struct mbuf *m)
{
	int error = EINVAL;
	struct ether_header *eh;
	static struct in_addr inaddr_any = { .s_addr = INADDR_ANY };

	/*
	 * Check the Ethernet header is large enough
	 */
	if (mbuf_pkthdr_len(m) < sizeof(struct ether_header)) {
		BRIDGE_HF_DROP(brhf_ether_too_small, __func__, __LINE__);
		goto done;
	}
	if (mbuf_len(m) < sizeof(struct ether_header) &&
	    mbuf_pullup(&m, sizeof(struct ether_header)) != 0) {
		BRIDGE_HF_DROP(brhf_ether_pullup_failed, __func__, __LINE__);
		goto done;
	}
	eh = mtod(m, struct ether_header *);

	/*
	 * Restrict the source hardware address
	 */
	if ((bif->bif_flags & BIFF_HF_HWSRC) == 0 ||
	    bcmp(eh->ether_shost, bif->bif_hf_hwsrc,
	    ETHER_ADDR_LEN) != 0) {
		BRIDGE_HF_DROP(brhf_bad_ether_srchw_addr, __func__, __LINE__);
		goto done;
	}

	/*
	 * Restrict Ethernet protocols to ARP and IP
	 */
	if (eh->ether_type == htons(ETHERTYPE_ARP)) {
		struct ether_arp *ea;
		size_t minlen = sizeof(struct ether_header) +
		    sizeof(struct ether_arp);

		/*
		 * Make the Ethernet and ARP headers contiguous
		 */
		if (mbuf_pkthdr_len(m) < minlen) {
			BRIDGE_HF_DROP(brhf_arp_too_small, __func__, __LINE__);
			goto done;
		}
		if (mbuf_len(m) < minlen && mbuf_pullup(&m, minlen) != 0) {
			BRIDGE_HF_DROP(brhf_arp_pullup_failed,
			    __func__, __LINE__);
			goto done;
		}
		/*
		 * Verify this is an ethernet/ip arp
		 * (re-fetch eh: the pullup above may have moved the data)
		 */
		eh = mtod(m, struct ether_header *);
		ea = (struct ether_arp *)(eh + 1);
		if (ea->arp_hrd != htons(ARPHRD_ETHER)) {
			BRIDGE_HF_DROP(brhf_arp_bad_hw_type,
			    __func__, __LINE__);
			goto done;
		}
		if (ea->arp_pro != htons(ETHERTYPE_IP)) {
			BRIDGE_HF_DROP(brhf_arp_bad_pro_type,
			    __func__, __LINE__);
			goto done;
		}
		/*
		 * Verify the address lengths are correct
		 */
		if (ea->arp_hln != ETHER_ADDR_LEN) {
			BRIDGE_HF_DROP(brhf_arp_bad_hw_len, __func__, __LINE__);
			goto done;
		}
		if (ea->arp_pln != sizeof(struct in_addr)) {
			BRIDGE_HF_DROP(brhf_arp_bad_pro_len,
			    __func__, __LINE__);
			goto done;
		}

		/*
		 * Allow only ARP request or ARP reply
		 */
		if (ea->arp_op != htons(ARPOP_REQUEST) &&
		    ea->arp_op != htons(ARPOP_REPLY)) {
			BRIDGE_HF_DROP(brhf_arp_bad_op, __func__, __LINE__);
			goto done;
		}
		/*
		 * Verify source hardware address matches
		 */
		if (bcmp(ea->arp_sha, bif->bif_hf_hwsrc,
		    ETHER_ADDR_LEN) != 0) {
			BRIDGE_HF_DROP(brhf_arp_bad_sha, __func__, __LINE__);
			goto done;
		}
		/*
		 * Verify source protocol address:
		 * May be null for an ARP probe
		 */
		if (bcmp(ea->arp_spa, &bif->bif_hf_ipsrc.s_addr,
		    sizeof(struct in_addr)) != 0 &&
		    bcmp(ea->arp_spa, &inaddr_any,
		    sizeof(struct in_addr)) != 0) {
			BRIDGE_HF_DROP(brhf_arp_bad_spa, __func__, __LINE__);
			goto done;
		}
		/*
		 * ARP packet passed every check: count it and accept
		 */
		bridge_hostfilter_stats.brhf_arp_ok += 1;
		error = 0;
	} else if (eh->ether_type == htons(ETHERTYPE_IP)) {
		size_t minlen = sizeof(struct ether_header) + sizeof(struct ip);
		struct ip iphdr;
		size_t offset;

		/*
		 * Make the Ethernet and IP headers contiguous
		 */
		if (mbuf_pkthdr_len(m) < minlen) {
			BRIDGE_HF_DROP(brhf_ip_too_small, __func__, __LINE__);
			goto done;
		}
		offset = sizeof(struct ether_header);
		error = mbuf_copydata(m, offset, sizeof(struct ip), &iphdr);
		if (error != 0) {
			BRIDGE_HF_DROP(brhf_ip_too_small, __func__, __LINE__);
			goto done;
		}
		/*
		 * Verify the source IP address
		 */
		if (iphdr.ip_p == IPPROTO_UDP) {
			struct udphdr udp;

			minlen += sizeof(struct udphdr);
			if (mbuf_pkthdr_len(m) < minlen) {
				BRIDGE_HF_DROP(brhf_ip_too_small,
				    __func__, __LINE__);
				goto done;
			}

			/*
			 * Allow all zero addresses for DHCP requests
			 */
			if (iphdr.ip_src.s_addr != bif->bif_hf_ipsrc.s_addr &&
			    iphdr.ip_src.s_addr != INADDR_ANY) {
				BRIDGE_HF_DROP(brhf_ip_bad_srcaddr,
				    __func__, __LINE__);
				goto done;
			}
			/* skip the variable-length IP header (options) */
			offset = sizeof(struct ether_header) +
			    (IP_VHL_HL(iphdr.ip_vhl) << 2);
			error = mbuf_copydata(m, offset,
			    sizeof(struct udphdr), &udp);
			if (error != 0) {
				BRIDGE_HF_DROP(brhf_ip_too_small,
				    __func__, __LINE__);
				goto done;
			}
			/*
			 * Either it's a Bootp/DHCP packet that we like or
			 * it's a UDP packet from the host IP as source address
			 */
			if (udp.uh_sport == htons(IPPORT_BOOTPC) &&
			    udp.uh_dport == htons(IPPORT_BOOTPS)) {
				minlen += sizeof(struct dhcp);
				if (mbuf_pkthdr_len(m) < minlen) {
					BRIDGE_HF_DROP(brhf_ip_too_small,
					    __func__, __LINE__);
					goto done;
				}
				offset += sizeof(struct udphdr);
				/* delegate BOOTP/DHCP payload validation */
				error = bridge_dhcp_filter(bif, m, offset);
				if (error != 0) {
					goto done;
				}
			} else if (iphdr.ip_src.s_addr == INADDR_ANY) {
				BRIDGE_HF_DROP(brhf_ip_bad_srcaddr,
				    __func__, __LINE__);
				goto done;
			}
		} else if (iphdr.ip_src.s_addr != bif->bif_hf_ipsrc.s_addr ||
		    bif->bif_hf_ipsrc.s_addr == INADDR_ANY) {
			/* non-UDP must come from the configured host IP */
			BRIDGE_HF_DROP(brhf_ip_bad_srcaddr, __func__, __LINE__);
			goto done;
		}
		/*
		 * Allow only boring IP protocols
		 */
		if (iphdr.ip_p != IPPROTO_TCP &&
		    iphdr.ip_p != IPPROTO_UDP &&
		    iphdr.ip_p != IPPROTO_ICMP &&
		    iphdr.ip_p != IPPROTO_ESP &&
		    iphdr.ip_p != IPPROTO_AH &&
		    iphdr.ip_p != IPPROTO_GRE) {
			BRIDGE_HF_DROP(brhf_ip_bad_proto, __func__, __LINE__);
			goto done;
		}
		bridge_hostfilter_stats.brhf_ip_ok += 1;
		error = 0;
	} else {
		BRIDGE_HF_DROP(brhf_bad_ether_type, __func__, __LINE__);
		goto done;
	}
done:
	if (error != 0) {
		/* dump the offending packet when tracing, then drop it */
		if (if_bridge_debug & BR_DBGF_HOSTFILTER) {
			if (m) {
				printf_mbuf_data(m, 0,
				    sizeof(struct ether_header) +
				    sizeof(struct ip));
			}
			printf("\n");
		}

		if (m != NULL) {
			m_freem(m);
		}
	}
	return error;
}
6606
6607