/*
 * Copyright (c) 2004-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
 */
/*
 * Network interface bridge support.
 *
 * TODO:
 *
 *	- Currently only supports Ethernet-like interfaces (Ethernet,
 *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
 *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
 *	  consider heterogeneous bridges).
 *
 *	- GIF isn't handled due to the lack of IPPROTO_ETHERIP support.
 */
#include <sys/cdefs.h>

#define BRIDGE_DEBUG 1
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/socket.h> /* for net/if.h */
#include <sys/sockio.h>
#include <sys/kernel.h>
#include <sys/random.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mcache.h>

#include <sys/kauth.h>

#include <kern/thread_call.h>

#include <libkern/libkern.h>

#include <kern/zalloc.h>

#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/net_api_stats.h>

#include <netinet/in.h> /* for struct arpcom */
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_carp.h>
#include <netinet/if_ether.h> /* for struct arpcom */
#include <net/bridgestp.h>
#include <net/if_bridgevar.h>
#include <net/if_llc.h>
#if NVLAN > 0
#include <net/if_vlan_var.h>
#endif /* NVLAN > 0 */

#include <net/if_ether.h>
#include <net/dlil.h>
#include <net/kpi_interfacefilter.h>

#include <net/route.h>
#ifdef PFIL_HOOKS
#include <netinet/ip_fw2.h>
#include <netinet/ip_dummynet.h>
#endif /* PFIL_HOOKS */
#include <dev/random/randomdev.h>

#include <netinet/bootp.h>
#include <netinet/dhcp.h>
#if BRIDGE_DEBUG

#define BR_DBGF_LIFECYCLE	0x0001
#define BR_DBGF_INPUT		0x0002
#define BR_DBGF_OUTPUT		0x0004
#define BR_DBGF_RT_TABLE	0x0008
#define BR_DBGF_DELAYED_CALL	0x0010
#define BR_DBGF_IOCTL		0x0020
#define BR_DBGF_MBUF		0x0040
#define BR_DBGF_MCAST		0x0080
#define BR_DBGF_HOSTFILTER	0x0100
#endif /* BRIDGE_DEBUG */
#define _BRIDGE_LOCK(_sc)	lck_mtx_lock(&(_sc)->sc_mtx)
#define _BRIDGE_UNLOCK(_sc)	lck_mtx_unlock(&(_sc)->sc_mtx)
#define BRIDGE_LOCK_ASSERT_HELD(_sc) \
    LCK_MTX_ASSERT(&(_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED)
#define BRIDGE_LOCK_ASSERT_NOTHELD(_sc) \
    LCK_MTX_ASSERT(&(_sc)->sc_mtx, LCK_MTX_ASSERT_NOTOWNED)

#if BRIDGE_DEBUG

#define BR_LCKDBG_MAX		4

#define BRIDGE_LOCK(_sc)		bridge_lock(_sc)
#define BRIDGE_UNLOCK(_sc)		bridge_unlock(_sc)
#define BRIDGE_LOCK2REF(_sc, _err)	_err = bridge_lock2ref(_sc)
#define BRIDGE_UNREF(_sc)		bridge_unref(_sc)
#define BRIDGE_XLOCK(_sc)		bridge_xlock(_sc)
#define BRIDGE_XDROP(_sc)		bridge_xdrop(_sc)
#else /* !BRIDGE_DEBUG */

#define BRIDGE_LOCK(_sc)	_BRIDGE_LOCK(_sc)
#define BRIDGE_UNLOCK(_sc)	_BRIDGE_UNLOCK(_sc)
#define BRIDGE_LOCK2REF(_sc, _err)	do {				\
	BRIDGE_LOCK_ASSERT_HELD(_sc);					\
	if ((_sc)->sc_iflist_xcnt > 0)					\
		(_err) = EBUSY;						\
	else								\
		(_sc)->sc_iflist_ref++;					\
	_BRIDGE_UNLOCK(_sc);						\
} while (0)
#define BRIDGE_UNREF(_sc)		do {				\
	_BRIDGE_LOCK(_sc);						\
	(_sc)->sc_iflist_ref--;						\
	if (((_sc)->sc_iflist_xcnt > 0) && ((_sc)->sc_iflist_ref == 0)) { \
		_BRIDGE_UNLOCK(_sc);					\
		wakeup(&(_sc)->sc_cv);					\
	} else								\
		_BRIDGE_UNLOCK(_sc);					\
} while (0)
#define BRIDGE_XLOCK(_sc)		do {				\
	BRIDGE_LOCK_ASSERT_HELD(_sc);					\
	(_sc)->sc_iflist_xcnt++;					\
	while ((_sc)->sc_iflist_ref > 0)				\
		msleep(&(_sc)->sc_cv, &(_sc)->sc_mtx, PZERO,		\
		    "BRIDGE_XLOCK", NULL);				\
} while (0)
#define BRIDGE_XDROP(_sc)		do {				\
	BRIDGE_LOCK_ASSERT_HELD(_sc);					\
	(_sc)->sc_iflist_xcnt--;					\
} while (0)

#endif /* BRIDGE_DEBUG */
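
/*
 * Usage sketch (illustrative only, not invoked anywhere): the list-reference
 * macros let a thread walk sc_iflist without holding sc_mtx, while
 * BRIDGE_XLOCK waits for such walkers to drain before the list is modified.
 *
 *	int err = 0;
 *	BRIDGE_LOCK(sc);
 *	BRIDGE_LOCK2REF(sc, err);	// drops the mutex, takes a list ref
 *	if (err == 0) {
 *		// ... traverse sc_iflist without the mutex held ...
 *		BRIDGE_UNREF(sc);	// may wake a pending BRIDGE_XLOCK
 *	}
 */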
#if NBPFILTER > 0

#define BRIDGE_BPF_MTAP_INPUT(sc, m)					\
	if (sc->sc_bpf_input)						\
		bridge_bpf_input(sc->sc_ifp, m)

#else /* NBPFILTER */
#define BRIDGE_BPF_MTAP_INPUT(ifp, m)
#endif /* NBPFILTER */
/*
 * Initial size of the route hash table.  Must be a power of two.
 */
#ifndef BRIDGE_RTHASH_SIZE
#define BRIDGE_RTHASH_SIZE	16
#endif

/*
 * Maximum size of the routing hash table
 */
#define BRIDGE_RTHASH_SIZE_MAX	2048

#define BRIDGE_RTHASH_MASK(sc)	((sc)->sc_rthash_size - 1)

/*
 * Maximum number of addresses to cache.
 */
#ifndef BRIDGE_RTABLE_MAX
#define BRIDGE_RTABLE_MAX	100
#endif

/*
 * Timeout (in seconds) for entries learned dynamically.
 */
#ifndef BRIDGE_RTABLE_TIMEOUT
#define BRIDGE_RTABLE_TIMEOUT	(20 * 60)	/* same as ARP */
#endif

/*
 * Number of seconds between walks of the route list.
 */
#ifndef BRIDGE_RTABLE_PRUNE_PERIOD
#define BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
#endif

/*
 * List of capabilities to possibly mask on the member interface.
 */
#define BRIDGE_IFCAPS_MASK	(IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM)
/*
 * List of capabilities to disable on the member interface.
 */
#define BRIDGE_IFCAPS_STRIP	IFCAP_LRO
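
/*
 * Because sc_rthash_size is kept a power of two, a hash value can be reduced
 * to a bucket index with a mask rather than a modulo.  Illustrative sketch
 * only (the real lookup code lives in bridge_rtnode_lookup()):
 *
 *	uint32_t bucket = hash_value & BRIDGE_RTHASH_MASK(sc);
 *	LIST_FOREACH(brt, &sc->sc_rthash[bucket], brt_hash)
 *		// compare brt->brt_addr and brt->brt_vlan against the key
 */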
/*
 * Bridge interface list entry.
 */
struct bridge_iflist {
	TAILQ_ENTRY(bridge_iflist) bif_next;
	struct ifnet *bif_ifp;		/* member if */
	struct bstp_port bif_stp;	/* STP state */
	uint32_t bif_ifflags;		/* member if flags */
	int bif_savedcaps;		/* saved capabilities */
	uint32_t bif_addrmax;		/* max # of addresses */
	uint32_t bif_addrcnt;		/* cur. # of addresses */
	uint32_t bif_addrexceeded;	/* # of address violations */

	interface_filter_t bif_iff_ref;
	struct bridge_softc *bif_sc;
	uint32_t bif_flags;

	struct in_addr bif_hf_ipsrc;
	uint8_t bif_hf_hwsrc[ETHER_ADDR_LEN];
};

#define BIFF_PROMISC		0x01	/* promiscuous mode set */
#define BIFF_PROTO_ATTACHED	0x02	/* protocol attached */
#define BIFF_FILTER_ATTACHED	0x04	/* interface filter attached */
#define BIFF_MEDIA_ACTIVE	0x08	/* interface media active */
#define BIFF_HOST_FILTER	0x10	/* host filter enabled */
#define BIFF_HF_HWSRC		0x20	/* host filter source MAC is set */
#define BIFF_HF_IPSRC		0x40	/* host filter source IP is set */
/*
 * Bridge route node.
 */
struct bridge_rtnode {
	LIST_ENTRY(bridge_rtnode) brt_hash;	/* hash table linkage */
	LIST_ENTRY(bridge_rtnode) brt_list;	/* list linkage */
	struct bridge_iflist *brt_dst;		/* destination if */
	unsigned long brt_expire;		/* expiration time */
	uint8_t brt_flags;			/* address flags */
	uint8_t brt_addr[ETHER_ADDR_LEN];
	uint16_t brt_vlan;			/* vlan id */
};

#define brt_ifp		brt_dst->bif_ifp
/*
 * Bridge delayed function call context
 */
typedef void (*bridge_delayed_func_t)(struct bridge_softc *);

struct bridge_delayed_call {
	struct bridge_softc *bdc_sc;
	bridge_delayed_func_t bdc_func;	/* Function to call */
	struct timespec bdc_ts;		/* Time to call */
	u_int32_t bdc_flags;
	thread_call_t bdc_thread_call;
};

#define BDCF_OUTSTANDING	0x01	/* Delayed call has been scheduled */
#define BDCF_CANCELLING		0x02	/* May be waiting for call completion */
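
/*
 * Typical use of the delayed-call context (illustrative sketch only): the
 * caller fills in the target softc, the callback and a relative timeout,
 * then hands the context to bridge_schedule_delayed_call().
 * BDCF_OUTSTANDING marks a pending thread call and BDCF_CANCELLING a
 * cancellation in progress.
 *
 *	sc->sc_aging_timer.bdc_sc = sc;
 *	sc->sc_aging_timer.bdc_func = bridge_aging_timer;
 *	sc->sc_aging_timer.bdc_ts.tv_sec = bridge_rtable_prune_period;
 *	bridge_schedule_delayed_call(&sc->sc_aging_timer);
 */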
/*
 * Software state for each bridge.
 */
LIST_HEAD(_bridge_rtnode_list, bridge_rtnode);

typedef struct {
	struct _bridge_rtnode_list *bb_rthash;	/* our forwarding table */
	struct _bridge_rtnode_list bb_rtlist;	/* list version of above */
	uint32_t bb_rthash_key;			/* key for hash */
	uint32_t bb_rthash_size;		/* size of the hash table */
	struct bridge_delayed_call bb_aging_timer;
	struct bridge_delayed_call bb_resize_call;
	TAILQ_HEAD(, bridge_iflist) bb_spanlist;	/* span ports list */
	struct bstp_state bb_stp;		/* STP state */
	bpf_packet_func bb_bpf_input;
	bpf_packet_func bb_bpf_output;
} bridge_bsd, *bridge_bsd_t;

#define sc_rthash	sc_u.scu_bsd.bb_rthash
#define sc_rtlist	sc_u.scu_bsd.bb_rtlist
#define sc_rthash_key	sc_u.scu_bsd.bb_rthash_key
#define sc_rthash_size	sc_u.scu_bsd.bb_rthash_size
#define sc_aging_timer	sc_u.scu_bsd.bb_aging_timer
#define sc_resize_call	sc_u.scu_bsd.bb_resize_call
#define sc_spanlist	sc_u.scu_bsd.bb_spanlist
#define sc_stp		sc_u.scu_bsd.bb_stp
#define sc_bpf_input	sc_u.scu_bsd.bb_bpf_input
#define sc_bpf_output	sc_u.scu_bsd.bb_bpf_output
struct bridge_softc {
	struct ifnet *sc_ifp;		/* make this an interface */
	u_int32_t sc_flags;
	union {
		bridge_bsd scu_bsd;
	} sc_u;
	LIST_ENTRY(bridge_softc) sc_list;
	decl_lck_mtx_data(, sc_mtx);
	void *sc_cv;
	uint32_t sc_brtmax;		/* max # of addresses */
	uint32_t sc_brtcnt;		/* cur. # of addresses */
	uint32_t sc_brttimeout;		/* rt timeout in seconds */
	uint32_t sc_iflist_ref;		/* refcount for sc_iflist */
	uint32_t sc_iflist_xcnt;	/* refcount for sc_iflist */
	TAILQ_HEAD(, bridge_iflist) sc_iflist;	/* member interface list */
	uint32_t sc_brtexceeded;	/* # of cache drops */
	uint32_t sc_filter_flags;	/* ipf and flags */
	struct ifnet *sc_ifaddr;	/* member mac copied from */
	u_char sc_defaddr[6];		/* Default MAC address */
	char sc_if_xname[IFNAMSIZ];

#if BRIDGE_DEBUG
	/*
	 * Locking and unlocking calling history
	 */
	void *lock_lr[BR_LCKDBG_MAX];
	int next_lock_lr;
	void *unlock_lr[BR_LCKDBG_MAX];
	int next_unlock_lr;
#endif /* BRIDGE_DEBUG */
};

#define SCF_DETACHING		0x01
#define SCF_RESIZING		0x02
#define SCF_MEDIA_ACTIVE	0x04
#define SCF_BSD_MODE		0x08
static inline void
bridge_set_bsd_mode(struct bridge_softc * sc)
{
	sc->sc_flags |= SCF_BSD_MODE;
}

static inline boolean_t
bridge_in_bsd_mode(const struct bridge_softc * sc)
{
	return ((sc->sc_flags & SCF_BSD_MODE) != 0);
}

struct bridge_hostfilter_stats bridge_hostfilter_stats;
decl_lck_mtx_data(static, bridge_list_mtx);

static int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;

static zone_t bridge_rtnode_pool = NULL;

static int bridge_clone_create(struct if_clone *, uint32_t, void *);
static int bridge_clone_destroy(struct ifnet *);
static errno_t bridge_ioctl(struct ifnet *, u_long, void *);
#if HAS_IF_CAP
static void bridge_mutecaps(struct bridge_softc *);
static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
    int);
#endif /* HAS_IF_CAP */
static errno_t bridge_set_tso(struct bridge_softc *);
__private_extern__ void bridge_ifdetach(struct bridge_iflist *, struct ifnet *);
static int bridge_init(struct ifnet *);
#if HAS_BRIDGE_DUMMYNET
static void bridge_dummynet(struct mbuf *, struct ifnet *);
#endif /* HAS_BRIDGE_DUMMYNET */
static void bridge_ifstop(struct ifnet *, int);
static int bridge_output(struct ifnet *, struct mbuf *);
static void bridge_finalize_cksum(struct ifnet *, struct mbuf *);
static void bridge_start(struct ifnet *);
__private_extern__ errno_t bridge_input(struct ifnet *, struct mbuf *, void *);
#if BRIDGE_MEMBER_OUT_FILTER
static errno_t bridge_iff_output(void *, ifnet_t, protocol_family_t,
    mbuf_t *);
static int bridge_member_output(struct ifnet *, struct mbuf *,
    struct sockaddr *, struct rtentry *);
#endif /* BRIDGE_MEMBER_OUT_FILTER */
static int bridge_enqueue(struct bridge_softc *, struct ifnet *,
    struct mbuf *);
static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);

static void bridge_forward(struct bridge_softc *, struct bridge_iflist *,
    struct mbuf *m);

static void bridge_aging_timer(struct bridge_softc *sc);

static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
    struct mbuf *, int);
static void bridge_span(struct bridge_softc *, struct mbuf *);

static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
    uint16_t, struct bridge_iflist *, int, uint8_t);
static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
    uint16_t);
static void bridge_rttrim(struct bridge_softc *);
static void bridge_rtage(struct bridge_softc *);
static void bridge_rtflush(struct bridge_softc *, int);
static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
    uint16_t);

static int bridge_rtable_init(struct bridge_softc *);
static void bridge_rtable_fini(struct bridge_softc *);

static void bridge_rthash_resize(struct bridge_softc *);

static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
    const uint8_t *, uint16_t);
static int bridge_rtnode_hash(struct bridge_softc *,
    struct bridge_rtnode *);
static int bridge_rtnode_insert(struct bridge_softc *,
    struct bridge_rtnode *);
static void bridge_rtnode_destroy(struct bridge_softc *,
    struct bridge_rtnode *);
#if BRIDGESTP
static void bridge_rtable_expire(struct ifnet *, int);
static void bridge_state_change(struct ifnet *, int);
#endif /* BRIDGESTP */
static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
    const char *name);
static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
    struct ifnet *ifp);
static void bridge_delete_member(struct bridge_softc *,
    struct bridge_iflist *, int);
static void bridge_delete_span(struct bridge_softc *,
    struct bridge_iflist *);
static int bridge_ioctl_add(struct bridge_softc *, void *);
static int bridge_ioctl_del(struct bridge_softc *, void *);
static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
static int bridge_ioctl_scache(struct bridge_softc *, void *);
static int bridge_ioctl_gcache(struct bridge_softc *, void *);
static int bridge_ioctl_gifs32(struct bridge_softc *, void *);
static int bridge_ioctl_gifs64(struct bridge_softc *, void *);
static int bridge_ioctl_rts32(struct bridge_softc *, void *);
static int bridge_ioctl_rts64(struct bridge_softc *, void *);
static int bridge_ioctl_saddr32(struct bridge_softc *, void *);
static int bridge_ioctl_saddr64(struct bridge_softc *, void *);
static int bridge_ioctl_sto(struct bridge_softc *, void *);
static int bridge_ioctl_gto(struct bridge_softc *, void *);
static int bridge_ioctl_daddr32(struct bridge_softc *, void *);
static int bridge_ioctl_daddr64(struct bridge_softc *, void *);
static int bridge_ioctl_flush(struct bridge_softc *, void *);
static int bridge_ioctl_gpri(struct bridge_softc *, void *);
static int bridge_ioctl_spri(struct bridge_softc *, void *);
static int bridge_ioctl_ght(struct bridge_softc *, void *);
static int bridge_ioctl_sht(struct bridge_softc *, void *);
static int bridge_ioctl_gfd(struct bridge_softc *, void *);
static int bridge_ioctl_sfd(struct bridge_softc *, void *);
static int bridge_ioctl_gma(struct bridge_softc *, void *);
static int bridge_ioctl_sma(struct bridge_softc *, void *);
static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
static int bridge_ioctl_addspan(struct bridge_softc *, void *);
static int bridge_ioctl_delspan(struct bridge_softc *, void *);
static int bridge_ioctl_gbparam32(struct bridge_softc *, void *);
static int bridge_ioctl_gbparam64(struct bridge_softc *, void *);
static int bridge_ioctl_grte(struct bridge_softc *, void *);
static int bridge_ioctl_gifsstp32(struct bridge_softc *, void *);
static int bridge_ioctl_gifsstp64(struct bridge_softc *, void *);
static int bridge_ioctl_sproto(struct bridge_softc *, void *);
static int bridge_ioctl_stxhc(struct bridge_softc *, void *);
static int bridge_ioctl_purge(struct bridge_softc *sc, void *);
static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
static int bridge_ioctl_ghostfilter(struct bridge_softc *, void *);
static int bridge_ioctl_shostfilter(struct bridge_softc *, void *);
#ifdef PFIL_HOOKS
static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
    int);
static int bridge_ip_checkbasic(struct mbuf **);
static int bridge_ip6_checkbasic(struct mbuf **);
static int bridge_fragment(struct ifnet *, struct mbuf *,
    struct ether_header *, int, struct llc *);
#endif /* PFIL_HOOKS */
static errno_t bridge_set_bpf_tap(ifnet_t, bpf_tap_mode, bpf_packet_func);
__private_extern__ errno_t bridge_bpf_input(ifnet_t, struct mbuf *);
__private_extern__ errno_t bridge_bpf_output(ifnet_t, struct mbuf *);

static void bridge_detach(ifnet_t);
static void bridge_link_event(struct ifnet *, u_int32_t);
static void bridge_iflinkevent(struct ifnet *);
static u_int32_t bridge_updatelinkstatus(struct bridge_softc *);
static int interface_media_active(struct ifnet *);
static void bridge_schedule_delayed_call(struct bridge_delayed_call *);
static void bridge_cancel_delayed_call(struct bridge_delayed_call *);
static void bridge_cleanup_delayed_call(struct bridge_delayed_call *);
static int bridge_host_filter(struct bridge_iflist *, struct mbuf *);
#define m_copypacket(m, how)	m_copym(m, 0, M_COPYALL, how)

/* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
#define VLANTAGOF(_m)	0

u_int8_t bstp_etheraddr[ETHER_ADDR_LEN] =
    { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };

static u_int8_t ethernulladdr[ETHER_ADDR_LEN] =
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
#if BRIDGESTP
static struct bstp_cb_ops bridge_ops = {
	.bcb_state = bridge_state_change,
	.bcb_rtage = bridge_rtable_expire
};
#endif /* BRIDGESTP */
SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
    "Bridge");

static int bridge_inherit_mac = 0;	/* share MAC with first bridge member */
SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
    CTLFLAG_RW|CTLFLAG_LOCKED,
    &bridge_inherit_mac, 0,
    "Inherit MAC address from the first bridge member");

SYSCTL_INT(_net_link_bridge, OID_AUTO, rtable_prune_period,
    CTLFLAG_RW|CTLFLAG_LOCKED,
    &bridge_rtable_prune_period, 0,
    "Interval between pruning of routing table");

static unsigned int bridge_rtable_hash_size_max = BRIDGE_RTHASH_SIZE_MAX;
SYSCTL_UINT(_net_link_bridge, OID_AUTO, rtable_hash_size_max,
    CTLFLAG_RW|CTLFLAG_LOCKED,
    &bridge_rtable_hash_size_max, 0,
    "Maximum size of the routing hash table");

#if BRIDGE_DEBUG_DELAYED_CALLBACK
static int bridge_delayed_callback_delay = 0;
SYSCTL_INT(_net_link_bridge, OID_AUTO, delayed_callback_delay,
    CTLFLAG_RW|CTLFLAG_LOCKED,
    &bridge_delayed_callback_delay, 0,
    "Delay before calling delayed function");
#endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */

static int bridge_bsd_mode = 1;
#if (DEVELOPMENT || DEBUG)
SYSCTL_INT(_net_link_bridge, OID_AUTO, bsd_mode,
    CTLFLAG_RW|CTLFLAG_LOCKED,
    &bridge_bsd_mode, 0,
    "Bridge using bsd mode");
#endif /* (DEVELOPMENT || DEBUG) */

SYSCTL_STRUCT(_net_link_bridge, OID_AUTO,
    hostfilterstats, CTLFLAG_RD | CTLFLAG_LOCKED,
    &bridge_hostfilter_stats, bridge_hostfilter_stats, "");
#if defined(PFIL_HOOKS)
static int pfil_onlyip = 1;	/* only pass IP[46] packets when pfil is enabled */
static int pfil_bridge = 1;	/* run pfil hooks on the bridge interface */
static int pfil_member = 1;	/* run pfil hooks on the member interface */
static int pfil_ipfw = 0;	/* layer2 filter with ipfw */
static int pfil_ipfw_arp = 0;	/* layer2 filter with ipfw */
static int pfil_local_phys = 0;	/* run pfil hooks on the physical interface */
				/* for locally destined packets */
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW|CTLFLAG_LOCKED,
    &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW|CTLFLAG_LOCKED,
    &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2");
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW|CTLFLAG_LOCKED,
    &pfil_bridge, 0, "Packet filter on the bridge interface");
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW|CTLFLAG_LOCKED,
    &pfil_member, 0, "Packet filter on the member interface");
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
    CTLFLAG_RW|CTLFLAG_LOCKED, &pfil_local_phys, 0,
    "Packet filter on the physical interface for locally destined packets");
#endif /* PFIL_HOOKS */
#if BRIDGESTP
static int log_stp = 0;	/* log STP state changes */
SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW,
    &log_stp, 0, "Log STP state changes");
#endif /* BRIDGESTP */
struct bridge_control {
	int (*bc_func)(struct bridge_softc *, void *);
	unsigned int bc_argsize;
	unsigned int bc_flags;
};

#define BC_F_COPYIN	0x01	/* copy arguments in */
#define BC_F_COPYOUT	0x02	/* copy arguments out */
#define BC_F_SUSER	0x04	/* do super-user check */
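
/*
 * The ioctl dispatch (see the DRVSPEC block used by bridge_ioctl) interprets
 * these flags roughly as follows.  Illustrative sketch only:
 *
 *	bc = &bridge_control_table[ifd->ifd_cmd];
 *	if (bc->bc_flags & BC_F_SUSER)
 *		// fail unless the caller passes the super-user check
 *	if (bc->bc_flags & BC_F_COPYIN)
 *		// copyin(ifd->ifd_data, &args, ifd->ifd_len)
 *	error = (*bc->bc_func)(sc, &args);
 *	if (bc->bc_flags & BC_F_COPYOUT)
 *		// copyout(&args, ifd->ifd_data, ifd->ifd_len)
 */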
static const struct bridge_control bridge_control_table32[] = {
	{ bridge_ioctl_add, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },	/* 0 */
	{ bridge_ioctl_del, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gifflags, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT },
	{ bridge_ioctl_sifflags, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_scache, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gcache, sizeof (struct ifbrparam), BC_F_COPYOUT },
	{ bridge_ioctl_gifs32, sizeof (struct ifbifconf32), BC_F_COPYIN|BC_F_COPYOUT },
	{ bridge_ioctl_rts32, sizeof (struct ifbaconf32), BC_F_COPYIN|BC_F_COPYOUT },
	{ bridge_ioctl_saddr32, sizeof (struct ifbareq32), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_sto, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gto, sizeof (struct ifbrparam), BC_F_COPYOUT },		/* 10 */
	{ bridge_ioctl_daddr32, sizeof (struct ifbareq32), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_flush, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gpri, sizeof (struct ifbrparam), BC_F_COPYOUT },
	{ bridge_ioctl_spri, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_ght, sizeof (struct ifbrparam), BC_F_COPYOUT },
	{ bridge_ioctl_sht, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gfd, sizeof (struct ifbrparam), BC_F_COPYOUT },
	{ bridge_ioctl_sfd, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gma, sizeof (struct ifbrparam), BC_F_COPYOUT },
	{ bridge_ioctl_sma, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },	/* 20 */
	{ bridge_ioctl_sifprio, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_sifcost, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gfilt, sizeof (struct ifbrparam), BC_F_COPYOUT },
	{ bridge_ioctl_sfilt, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_purge, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_addspan, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_delspan, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gbparam32, sizeof (struct ifbropreq32), BC_F_COPYOUT },
	{ bridge_ioctl_grte, sizeof (struct ifbrparam), BC_F_COPYOUT },
	{ bridge_ioctl_gifsstp32, sizeof (struct ifbpstpconf32), BC_F_COPYIN|BC_F_COPYOUT },	/* 30 */
	{ bridge_ioctl_sproto, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_stxhc, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_sifmaxaddr, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_ghostfilter, sizeof (struct ifbrhostfilter), BC_F_COPYIN|BC_F_COPYOUT },
	{ bridge_ioctl_shostfilter, sizeof (struct ifbrhostfilter), BC_F_COPYIN|BC_F_SUSER },
};
static const struct bridge_control bridge_control_table64[] = {
	{ bridge_ioctl_add, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },	/* 0 */
	{ bridge_ioctl_del, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gifflags, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT },
	{ bridge_ioctl_sifflags, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_scache, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gcache, sizeof (struct ifbrparam), BC_F_COPYOUT },
	{ bridge_ioctl_gifs64, sizeof (struct ifbifconf64), BC_F_COPYIN|BC_F_COPYOUT },
	{ bridge_ioctl_rts64, sizeof (struct ifbaconf64), BC_F_COPYIN|BC_F_COPYOUT },
	{ bridge_ioctl_saddr64, sizeof (struct ifbareq64), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_sto, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gto, sizeof (struct ifbrparam), BC_F_COPYOUT },		/* 10 */
	{ bridge_ioctl_daddr64, sizeof (struct ifbareq64), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_flush, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gpri, sizeof (struct ifbrparam), BC_F_COPYOUT },
	{ bridge_ioctl_spri, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_ght, sizeof (struct ifbrparam), BC_F_COPYOUT },
	{ bridge_ioctl_sht, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gfd, sizeof (struct ifbrparam), BC_F_COPYOUT },
	{ bridge_ioctl_sfd, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gma, sizeof (struct ifbrparam), BC_F_COPYOUT },
	{ bridge_ioctl_sma, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },	/* 20 */
	{ bridge_ioctl_sifprio, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_sifcost, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gfilt, sizeof (struct ifbrparam), BC_F_COPYOUT },
	{ bridge_ioctl_sfilt, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_purge, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_addspan, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_delspan, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gbparam64, sizeof (struct ifbropreq64), BC_F_COPYOUT },
	{ bridge_ioctl_grte, sizeof (struct ifbrparam), BC_F_COPYOUT },
	{ bridge_ioctl_gifsstp64, sizeof (struct ifbpstpconf64), BC_F_COPYIN|BC_F_COPYOUT },	/* 30 */
	{ bridge_ioctl_sproto, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_stxhc, sizeof (struct ifbrparam), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_sifmaxaddr, sizeof (struct ifbreq), BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_ghostfilter, sizeof (struct ifbrhostfilter), BC_F_COPYIN|BC_F_COPYOUT },
	{ bridge_ioctl_shostfilter, sizeof (struct ifbrhostfilter), BC_F_COPYIN|BC_F_SUSER },
};
static const unsigned int bridge_control_table_size =
    sizeof (bridge_control_table32) / sizeof (bridge_control_table32[0]);

static LIST_HEAD(, bridge_softc) bridge_list =
    LIST_HEAD_INITIALIZER(bridge_list);

static lck_grp_t *bridge_lock_grp = NULL;
static lck_attr_t *bridge_lock_attr = NULL;

static if_clone_t bridge_cloner = NULL;
static int if_bridge_txstart = 0;
SYSCTL_INT(_net_link_bridge, OID_AUTO, txstart, CTLFLAG_RW | CTLFLAG_LOCKED,
    &if_bridge_txstart, 0, "Bridge interface uses TXSTART model");

#if BRIDGE_DEBUG
static int if_bridge_debug = 0;
SYSCTL_INT(_net_link_bridge, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &if_bridge_debug, 0, "Bridge debug");
static void printf_ether_header(struct ether_header *);
static void printf_mbuf_data(mbuf_t, size_t, size_t);
static void printf_mbuf_pkthdr(mbuf_t, const char *, const char *);
static void printf_mbuf(mbuf_t, const char *, const char *);
static void link_print(struct bridge_softc * sc);

static void bridge_lock(struct bridge_softc *);
static void bridge_unlock(struct bridge_softc *);
static int bridge_lock2ref(struct bridge_softc *);
static void bridge_unref(struct bridge_softc *);
static void bridge_xlock(struct bridge_softc *);
static void bridge_xdrop(struct bridge_softc *);
static void
bridge_lock(struct bridge_softc *sc)
{
	void *lr_saved = __builtin_return_address(0);

	BRIDGE_LOCK_ASSERT_NOTHELD(sc);

	_BRIDGE_LOCK(sc);

	sc->lock_lr[sc->next_lock_lr] = lr_saved;
	sc->next_lock_lr = (sc->next_lock_lr + 1) % SO_LCKDBG_MAX;
}

static void
bridge_unlock(struct bridge_softc *sc)
{
	void *lr_saved = __builtin_return_address(0);

	BRIDGE_LOCK_ASSERT_HELD(sc);

	sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
	sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;

	_BRIDGE_UNLOCK(sc);
}

static int
bridge_lock2ref(struct bridge_softc *sc)
{
	int error = 0;
	void *lr_saved = __builtin_return_address(0);

	BRIDGE_LOCK_ASSERT_HELD(sc);

	if (sc->sc_iflist_xcnt > 0)
		error = EBUSY;
	else
		sc->sc_iflist_ref++;

	sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
	sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;

	_BRIDGE_UNLOCK(sc);

	return (error);
}

static void
bridge_unref(struct bridge_softc *sc)
{
	void *lr_saved = __builtin_return_address(0);

	BRIDGE_LOCK_ASSERT_NOTHELD(sc);

	_BRIDGE_LOCK(sc);
	sc->lock_lr[sc->next_lock_lr] = lr_saved;
	sc->next_lock_lr = (sc->next_lock_lr + 1) % SO_LCKDBG_MAX;

	sc->sc_iflist_ref--;

	sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
	sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;
	if ((sc->sc_iflist_xcnt > 0) && (sc->sc_iflist_ref == 0)) {
		_BRIDGE_UNLOCK(sc);
		wakeup(&sc->sc_cv);
	} else
		_BRIDGE_UNLOCK(sc);
}

static void
bridge_xlock(struct bridge_softc *sc)
{
	void *lr_saved = __builtin_return_address(0);

	BRIDGE_LOCK_ASSERT_HELD(sc);

	sc->sc_iflist_xcnt++;
	while (sc->sc_iflist_ref > 0) {
		sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
		sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;

		msleep(&sc->sc_cv, &sc->sc_mtx, PZERO, "BRIDGE_XLOCK", NULL);

		sc->lock_lr[sc->next_lock_lr] = lr_saved;
		sc->next_lock_lr = (sc->next_lock_lr + 1) % SO_LCKDBG_MAX;
	}
}

static void
bridge_xdrop(struct bridge_softc *sc)
{
	BRIDGE_LOCK_ASSERT_HELD(sc);

	sc->sc_iflist_xcnt--;
}
static void
printf_mbuf_pkthdr(mbuf_t m, const char *prefix, const char *suffix)
{
	if (m)
		printf("%spktlen: %u rcvif: 0x%llx header: 0x%llx "
		    "nextpkt: 0x%llx%s",
		    prefix ? prefix : "", (unsigned int)mbuf_pkthdr_len(m),
		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m)),
		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_header(m)),
		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_nextpkt(m)),
		    suffix ? suffix : "");
	else
		printf("%s<NULL>%s\n", prefix, suffix);
}

static void
printf_mbuf(mbuf_t m, const char *prefix, const char *suffix)
{
	if (m) {
		printf("%s0x%llx type: %u flags: 0x%x len: %u data: 0x%llx "
		    "maxlen: %u datastart: 0x%llx next: 0x%llx%s",
		    prefix ? prefix : "", (uint64_t)VM_KERNEL_ADDRPERM(m),
		    mbuf_type(m), mbuf_flags(m), (unsigned int)mbuf_len(m),
		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)),
		    (unsigned int)mbuf_maxlen(m),
		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_datastart(m)),
		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_next(m)),
		    !suffix || (mbuf_flags(m) & MBUF_PKTHDR) ? "" : suffix);
		if ((mbuf_flags(m) & MBUF_PKTHDR))
			printf_mbuf_pkthdr(m, " ", suffix);
	} else
		printf("%s<NULL>%s\n", prefix, suffix);
}

static void
printf_mbuf_data(mbuf_t m, size_t offset, size_t len)
{
	mbuf_t n;
	size_t i, j;
	size_t pktlen, mlen, maxlen;
	unsigned char *ptr;

	pktlen = mbuf_pkthdr_len(m);

	if (offset > pktlen)
		return;

	maxlen = (pktlen - offset > len) ? len : pktlen - offset;
	n = m;
	mlen = mbuf_len(n);
	ptr = mbuf_data(n);
	for (i = 0, j = 0; i < maxlen; i++, j++) {
		if (j >= mlen) {
			n = mbuf_next(n);
			if (n == 0)
				break;
			ptr = mbuf_data(n);
			mlen = mbuf_len(n);
			j = 0;
		}
		if (i % 16 == 0)
			printf("\n");
		printf("%02x%s", ptr[j], i % 2 ? " " : "");
	}
}

static void
printf_ether_header(struct ether_header *eh)
{
	printf("%02x:%02x:%02x:%02x:%02x:%02x > "
	    "%02x:%02x:%02x:%02x:%02x:%02x 0x%04x ",
	    eh->ether_shost[0], eh->ether_shost[1], eh->ether_shost[2],
	    eh->ether_shost[3], eh->ether_shost[4], eh->ether_shost[5],
	    eh->ether_dhost[0], eh->ether_dhost[1], eh->ether_dhost[2],
	    eh->ether_dhost[3], eh->ether_dhost[4], eh->ether_dhost[5],
	    ntohs(eh->ether_type));
}

static void
link_print(struct bridge_softc * sc)
{
	int i;
	uint32_t sdl_buffer[offsetof(struct sockaddr_dl, sdl_data) +
	    IFNAMSIZ + ETHER_ADDR_LEN];
	struct sockaddr_dl *sdl = (struct sockaddr_dl *)sdl_buffer;

	memset(sdl, 0, sizeof (sdl_buffer));
	sdl->sdl_family = AF_LINK;
	sdl->sdl_nlen = strlen(sc->sc_if_xname);
	sdl->sdl_alen = ETHER_ADDR_LEN;
	sdl->sdl_len = offsetof(struct sockaddr_dl, sdl_data);
	memcpy(sdl->sdl_data, sc->sc_if_xname, sdl->sdl_nlen);
	memcpy(LLADDR(sdl), sc->sc_defaddr, ETHER_ADDR_LEN);

	printf("sdl len %d index %d family %d type 0x%x nlen %d alen %d"
	    " slen %d addr ", sdl->sdl_len, sdl->sdl_index,
	    sdl->sdl_family, sdl->sdl_type, sdl->sdl_nlen,
	    sdl->sdl_alen, sdl->sdl_slen);

	for (i = 0; i < sdl->sdl_alen; i++)
		printf("%s%x", i ? ":" : "", (CONST_LLADDR(sdl))[i]);
	printf("\n");
}

#endif /* BRIDGE_DEBUG */
/*
 * bridgeattach:
 *
 *	Pseudo-device attach routine.
 */
__private_extern__ int
bridgeattach(int n)
{
#pragma unused(n)
	int error;
	lck_grp_attr_t *lck_grp_attr = NULL;
	struct ifnet_clone_params ifnet_clone_params;

	bridge_rtnode_pool = zinit(sizeof (struct bridge_rtnode),
	    1024 * sizeof (struct bridge_rtnode), 0, "bridge_rtnode");
	zone_change(bridge_rtnode_pool, Z_CALLERACCT, FALSE);

	lck_grp_attr = lck_grp_attr_alloc_init();

	bridge_lock_grp = lck_grp_alloc_init("if_bridge", lck_grp_attr);

	bridge_lock_attr = lck_attr_alloc_init();

#if BRIDGE_DEBUG
	lck_attr_setdebug(bridge_lock_attr);
#endif

	lck_mtx_init(&bridge_list_mtx, bridge_lock_grp, bridge_lock_attr);

	/* can free the attributes once we've allocated the group lock */
	lck_grp_attr_free(lck_grp_attr);

	LIST_INIT(&bridge_list);

#if BRIDGESTP
	bstp_sys_init();
#endif /* BRIDGESTP */

	ifnet_clone_params.ifc_name = "bridge";
	ifnet_clone_params.ifc_create = bridge_clone_create;
	ifnet_clone_params.ifc_destroy = bridge_clone_destroy;

	error = ifnet_clone_attach(&ifnet_clone_params, &bridge_cloner);
	if (error != 0)
		printf("%s: ifnet_clone_attach failed %d\n", __func__, error);

	return (error);
}
#if defined(PFIL_HOOKS)
/*
 * handler for net.link.bridge.pfil_ipfw
 */
static int
sysctl_pfil_ipfw SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int enable = pfil_ipfw;
	int error;

	error = sysctl_handle_int(oidp, &enable, 0, req);
	enable = (enable) ? 1 : 0;

	if (enable != pfil_ipfw) {
		pfil_ipfw = enable;

		/*
		 * Disable pfil so that ipfw doesn't run twice, if the user
		 * really wants both then they can re-enable pfil_bridge and/or
		 * pfil_member. Also allow non-ip packets as ipfw can filter by
		 * layer2 type.
		 */
		if (pfil_ipfw) {
			pfil_onlyip = 0;
			pfil_bridge = 0;
			pfil_member = 0;
		}
	}

	return (error);
}

SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW,
    &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW");
#endif /* PFIL_HOOKS */
static errno_t
bridge_ifnet_set_attrs(struct ifnet * ifp)
{
	errno_t error;

	error = ifnet_set_mtu(ifp, ETHERMTU);
	if (error != 0) {
		printf("%s: ifnet_set_mtu failed %d\n", __func__, error);
		goto done;
	}
	error = ifnet_set_addrlen(ifp, ETHER_ADDR_LEN);
	if (error != 0) {
		printf("%s: ifnet_set_addrlen failed %d\n", __func__, error);
		goto done;
	}
	error = ifnet_set_hdrlen(ifp, ETHER_HDR_LEN);
	if (error != 0) {
		printf("%s: ifnet_set_hdrlen failed %d\n", __func__, error);
		goto done;
	}
	error = ifnet_set_flags(ifp,
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST,
	    0xffff);
	if (error != 0) {
		printf("%s: ifnet_set_flags failed %d\n", __func__, error);
		goto done;
	}
done:
	return (error);
}
/*
 * bridge_clone_create:
 *
 *	Create a new bridge instance.
 */
static int
bridge_clone_create(struct if_clone *ifc, uint32_t unit, void *params)
{
#pragma unused(params)
	struct ifnet *ifp = NULL;
	struct bridge_softc *sc, *sc2;
	struct ifnet_init_eparams init_params;
	errno_t error = 0;
	uint8_t eth_hostid[ETHER_ADDR_LEN];
	int fb, retry, has_hostid;

	sc = _MALLOC(sizeof (*sc), M_DEVBUF, M_WAITOK | M_ZERO);

	lck_mtx_init(&sc->sc_mtx, bridge_lock_grp, bridge_lock_attr);
	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
	sc->sc_filter_flags = IFBF_FILT_DEFAULT;
#ifndef BRIDGE_IPF
	/*
	 * For backwards compatibility with previous behaviour...
	 * Switch off filtering on the bridge itself if BRIDGE_IPF is
	 * not defined.
	 */
	sc->sc_filter_flags &= ~IFBF_FILT_USEIPF;
#endif
	if (bridge_bsd_mode != 0) {
		bridge_set_bsd_mode(sc);
	}

	TAILQ_INIT(&sc->sc_iflist);

	/* use the interface name as the unique id for ifp recycle */
	snprintf(sc->sc_if_xname, sizeof (sc->sc_if_xname), "%s%d",
	    ifc->ifc_name, unit);
	bzero(&init_params, sizeof (init_params));
	init_params.ver = IFNET_INIT_CURRENT_VERSION;
	init_params.len = sizeof (init_params);
	if (bridge_in_bsd_mode(sc)) {
		/* Initialize our routing table. */
		error = bridge_rtable_init(sc);
		if (error != 0) {
			printf("%s: bridge_rtable_init failed %d\n",
			    __func__, error);
			goto done;
		}
		TAILQ_INIT(&sc->sc_spanlist);
		if (if_bridge_txstart) {
			init_params.start = bridge_start;
		} else {
			init_params.flags = IFNET_INIT_LEGACY;
			init_params.output = bridge_output;
		}
		init_params.set_bpf_tap = bridge_set_bpf_tap;
	}
	init_params.uniqueid = sc->sc_if_xname;
	init_params.uniqueid_len = strlen(sc->sc_if_xname);
	init_params.sndq_maxlen = IFQ_MAXLEN;
	init_params.name = ifc->ifc_name;
	init_params.unit = unit;
	init_params.family = IFNET_FAMILY_ETHERNET;
	init_params.type = IFT_BRIDGE;
	init_params.demux = ether_demux;
	init_params.add_proto = ether_add_proto;
	init_params.del_proto = ether_del_proto;
	init_params.check_multi = ether_check_multi;
	init_params.framer_extended = ether_frameout_extended;
	init_params.softc = sc;
	init_params.ioctl = bridge_ioctl;
	init_params.detach = bridge_detach;
	init_params.broadcast_addr = etherbroadcastaddr;
	init_params.broadcast_len = ETHER_ADDR_LEN;

	if (bridge_in_bsd_mode(sc)) {
		error = ifnet_allocate_extended(&init_params, &ifp);
		if (error != 0) {
			printf("%s: ifnet_allocate failed %d\n",
			    __func__, error);
			goto done;
		}
		sc->sc_ifp = ifp;
		error = bridge_ifnet_set_attrs(ifp);
		if (error != 0) {
			printf("%s: bridge_ifnet_set_attrs failed %d\n",
			    __func__, error);
			goto done;
		}
	}

	/*
	 * Generate an ethernet address with a locally administered address.
	 *
	 * Since we are using random ethernet addresses for the bridge, it is
	 * possible that we might have address collisions, so make sure that
	 * this hardware address isn't already in use on another bridge.
	 * The first try uses the "hostid" and falls back to read_frandom();
	 * for "hostid", we use the MAC address of the first-encountered
	 * Ethernet-type interface that is currently configured.
	 */
	fb = 0;
	has_hostid = (uuid_get_ethernet(&eth_hostid[0]) == 0);
	for (retry = 1; retry != 0; ) {
		if (fb || has_hostid == 0) {
			read_frandom(&sc->sc_defaddr, ETHER_ADDR_LEN);
			sc->sc_defaddr[0] &= ~1; /* clear multicast bit */
			sc->sc_defaddr[0] |= 2;  /* set the LAA bit */
		} else {
			bcopy(&eth_hostid[0], &sc->sc_defaddr,
			    ETHER_ADDR_LEN);
			sc->sc_defaddr[0] &= ~1; /* clear multicast bit */
			sc->sc_defaddr[0] |= 2;  /* set the LAA bit */
			sc->sc_defaddr[3] =      /* stir it up a bit */
			    ((sc->sc_defaddr[3] & 0x0f) << 4) |
			    ((sc->sc_defaddr[3] & 0xf0) >> 4);
			/*
			 * Mix in the LSB as it's actually pretty significant,
			 * see rdar://14076061
			 */
			sc->sc_defaddr[4] =
			    (((sc->sc_defaddr[4] & 0x0f) << 4) |
			    ((sc->sc_defaddr[4] & 0xf0) >> 4)) ^
			    sc->sc_defaddr[5];
			sc->sc_defaddr[5] = ifp->if_unit & 0xff;
		}

		fb = 1;
		retry = 0;
		lck_mtx_lock(&bridge_list_mtx);
		LIST_FOREACH(sc2, &bridge_list, sc_list) {
			if (memcmp(sc->sc_defaddr,
			    IF_LLADDR(sc2->sc_ifp), ETHER_ADDR_LEN) == 0)
				retry = 1;
		}
		lck_mtx_unlock(&bridge_list_mtx);
	}

	sc->sc_flags &= ~SCF_MEDIA_ACTIVE;

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
		link_print(sc);
#endif
	if (bridge_in_bsd_mode(sc)) {
		error = ifnet_attach(ifp, NULL);
		if (error != 0) {
			printf("%s: ifnet_attach failed %d\n", __func__, error);
			goto done;
		}
	}

	error = ifnet_set_lladdr_and_type(ifp, sc->sc_defaddr, ETHER_ADDR_LEN,
	    IFT_ETHER);
	if (error != 0) {
		printf("%s: ifnet_set_lladdr_and_type failed %d\n", __func__,
		    error);
		goto done;
	}

	if (bridge_in_bsd_mode(sc)) {
		ifnet_set_offload(ifp,
		    IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP |
		    IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 | IFNET_MULTIPAGES);
		error = bridge_set_tso(sc);
		if (error != 0) {
			printf("%s: bridge_set_tso failed %d\n",
			    __func__, error);
			goto done;
		}
#if BRIDGESTP
		bstp_attach(&sc->sc_stp, &bridge_ops);
#endif /* BRIDGESTP */
	}

	lck_mtx_lock(&bridge_list_mtx);
	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
	lck_mtx_unlock(&bridge_list_mtx);

	/* attach as ethernet */
	error = bpf_attach(ifp, DLT_EN10MB, sizeof (struct ether_header),
	    NULL, NULL);

done:
	if (error != 0) {
		printf("%s failed error %d\n", __func__, error);
	}

	return (error);
}
/*
 * bridge_clone_destroy:
 *
 *	Destroy a bridge instance.
 */
static int
bridge_clone_destroy(struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct bridge_iflist *bif;
	errno_t error;

	BRIDGE_LOCK(sc);
	if ((sc->sc_flags & SCF_DETACHING)) {
		BRIDGE_UNLOCK(sc);
		return (0);
	}
	sc->sc_flags |= SCF_DETACHING;

	bridge_ifstop(ifp, 1);

	if (bridge_in_bsd_mode(sc)) {
		bridge_cancel_delayed_call(&sc->sc_resize_call);

		bridge_cleanup_delayed_call(&sc->sc_resize_call);
		bridge_cleanup_delayed_call(&sc->sc_aging_timer);
	}

	error = ifnet_set_flags(ifp, 0, IFF_UP);
	if (error != 0) {
		printf("%s: ifnet_set_flags failed %d\n", __func__, error);
	}

	while ((bif = TAILQ_FIRST(&sc->sc_iflist)) != NULL)
		bridge_delete_member(sc, bif, 0);

	if (bridge_in_bsd_mode(sc)) {
		while ((bif = TAILQ_FIRST(&sc->sc_spanlist)) != NULL) {
			bridge_delete_span(sc, bif);
		}
	}
	BRIDGE_UNLOCK(sc);

	error = ifnet_detach(ifp);
	if (error != 0) {
		panic("%s: ifnet_detach(%p) failed %d\n",
		    __func__, ifp, error);
		/* NOTREACHED */
	}

	return (0);
}
#define DRVSPEC do {							\
	if (ifd->ifd_cmd >= bridge_control_table_size) {		\
		error = EINVAL;						\
		break;							\
	}								\
	bc = &bridge_control_table[ifd->ifd_cmd];			\
									\
	if (cmd == SIOCGDRVSPEC &&					\
	    (bc->bc_flags & BC_F_COPYOUT) == 0) {			\
		error = EINVAL;						\
		break;							\
	} else if (cmd == SIOCSDRVSPEC &&				\
	    (bc->bc_flags & BC_F_COPYOUT) != 0) {			\
		error = EINVAL;						\
		break;							\
	}								\
									\
	if (bc->bc_flags & BC_F_SUSER) {				\
		error = kauth_authorize_generic(kauth_cred_get(),	\
		    KAUTH_GENERIC_ISSUSER);				\
		if (error)						\
			break;						\
	}								\
									\
	if (ifd->ifd_len != bc->bc_argsize ||				\
	    ifd->ifd_len > sizeof (args)) {				\
		error = EINVAL;						\
		break;							\
	}								\
									\
	bzero(&args, sizeof (args));					\
	if (bc->bc_flags & BC_F_COPYIN) {				\
		error = copyin(ifd->ifd_data, &args, ifd->ifd_len);	\
		if (error)						\
			break;						\
	}								\
									\
	BRIDGE_LOCK(sc);						\
	error = (*bc->bc_func)(sc, &args);				\
	BRIDGE_UNLOCK(sc);						\
	if (error)							\
		break;							\
									\
	if (bc->bc_flags & BC_F_COPYOUT)				\
		error = copyout(&args, ifd->ifd_data, ifd->ifd_len);	\
} while (0)
/*
 * bridge_ioctl:
 *
 *	Handle a control request from the operator.
 */
static errno_t
bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct bridge_iflist *bif;
	int error = 0;

	BRIDGE_LOCK_ASSERT_NOTHELD(sc);

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_IOCTL)
		printf("%s: ifp %s cmd 0x%08lx (%c%c [%lu] %c %lu)\n",
		    __func__, ifp->if_xname, cmd, (cmd & IOC_IN) ? 'I' : ' ',
		    (cmd & IOC_OUT) ? 'O' : ' ', IOCPARM_LEN(cmd),
		    (char)IOCGROUP(cmd), cmd & 0xff);
#endif /* BRIDGE_DEBUG */

	switch (cmd) {

	case SIOCSIFADDR:
	case SIOCAIFADDR:
		ifnet_set_flags(ifp, IFF_UP, IFF_UP);
		break;

	case SIOCGIFMEDIA32:
	case SIOCGIFMEDIA64: {
		struct ifmediareq *ifmr = (struct ifmediareq *)data;
		user_addr_t user_addr;

		user_addr = (cmd == SIOCGIFMEDIA64) ?
		    ((struct ifmediareq64 *)ifmr)->ifmu_ulist :
		    CAST_USER_ADDR_T(((struct ifmediareq32 *)ifmr)->ifmu_ulist);

		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_mask = 0;
		ifmr->ifm_count = 1;

		BRIDGE_LOCK(sc);
		if (!(sc->sc_flags & SCF_DETACHING) &&
		    (sc->sc_flags & SCF_MEDIA_ACTIVE)) {
			ifmr->ifm_status |= IFM_ACTIVE;
			ifmr->ifm_active = ifmr->ifm_current =
			    IFM_ETHER | IFM_AUTO;
		} else {
			ifmr->ifm_active = ifmr->ifm_current = IFM_NONE;
		}
		BRIDGE_UNLOCK(sc);

		if (user_addr != USER_ADDR_NULL) {
			error = copyout(&ifmr->ifm_current, user_addr,
			    sizeof (int));
		}
		break;
	}

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;

	case SIOCSDRVSPEC32:
	case SIOCGDRVSPEC32: {
		union {
			struct ifbreq ifbreq;
			struct ifbifconf32 ifbifconf;
			struct ifbareq32 ifbareq;
			struct ifbaconf32 ifbaconf;
			struct ifbrparam ifbrparam;
			struct ifbropreq32 ifbropreq;
		} args;
		struct ifdrv32 *ifd = (struct ifdrv32 *)data;
		const struct bridge_control *bridge_control_table =
		    bridge_control_table32, *bc;

		DRVSPEC;

		break;
	}
	case SIOCSDRVSPEC64:
	case SIOCGDRVSPEC64: {
		union {
			struct ifbreq ifbreq;
			struct ifbifconf64 ifbifconf;
			struct ifbareq64 ifbareq;
			struct ifbaconf64 ifbaconf;
			struct ifbrparam ifbrparam;
			struct ifbropreq64 ifbropreq;
		} args;
		struct ifdrv64 *ifd = (struct ifdrv64 *)data;
		const struct bridge_control *bridge_control_table =
		    bridge_control_table64, *bc;

		DRVSPEC;

		break;
	}

	case SIOCSIFFLAGS:
		if (!(ifp->if_flags & IFF_UP) &&
		    (ifp->if_flags & IFF_RUNNING)) {
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			BRIDGE_LOCK(sc);
			bridge_ifstop(ifp, 1);
			BRIDGE_UNLOCK(sc);
		} else if ((ifp->if_flags & IFF_UP) &&
		    !(ifp->if_flags & IFF_RUNNING)) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			BRIDGE_LOCK(sc);
			error = bridge_init(ifp);
			BRIDGE_UNLOCK(sc);
		}
		break;

	case SIOCSIFLLADDR:
		error = ifnet_set_lladdr(ifp, ifr->ifr_addr.sa_data,
		    ifr->ifr_addr.sa_len);
		if (error != 0)
			printf("%s: SIOCSIFLLADDR error %d\n", ifp->if_xname,
			    error);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		BRIDGE_LOCK(sc);
		if (TAILQ_EMPTY(&sc->sc_iflist)) {
			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
			BRIDGE_UNLOCK(sc);
			break;
		}
		TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
			if (bif->bif_ifp->if_mtu != (unsigned)ifr->ifr_mtu) {
				printf("%s: invalid MTU: %u(%s) != %d\n",
				    sc->sc_ifp->if_xname,
				    bif->bif_ifp->if_mtu,
				    bif->bif_ifp->if_xname, ifr->ifr_mtu);
				error = EINVAL;
				break;
			}
		}
		if (!error)
			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
		BRIDGE_UNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
#if BRIDGE_DEBUG
		if (error != 0 && error != EOPNOTSUPP)
			printf("%s: ifp %s cmd 0x%08lx "
			    "(%c%c [%lu] %c %lu) failed error: %d\n",
			    __func__, ifp->if_xname, cmd,
			    (cmd & IOC_IN) ? 'I' : ' ',
			    (cmd & IOC_OUT) ? 'O' : ' ',
			    IOCPARM_LEN(cmd), (char)IOCGROUP(cmd),
			    cmd & 0xff, error);
#endif /* BRIDGE_DEBUG */
		break;
	}
	BRIDGE_LOCK_ASSERT_NOTHELD(sc);

	return (error);
}
#if HAS_IF_CAP
/*
 * bridge_mutecaps:
 *
 *	Clear or restore unwanted capabilities on the member interface
 */
static void
bridge_mutecaps(struct bridge_softc *sc)
{
	struct bridge_iflist *bif;
	int enabled, mask;

	/* Initial bitmask of capabilities to test */
	mask = BRIDGE_IFCAPS_MASK;

	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
		/* Every member must support it or it's disabled */
		mask &= bif->bif_savedcaps;
	}

	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
		enabled = bif->bif_ifp->if_capenable;
		enabled &= ~BRIDGE_IFCAPS_STRIP;
		/* strip off mask bits and enable them again if allowed */
		enabled &= ~BRIDGE_IFCAPS_MASK;
		enabled |= mask;

		bridge_set_ifcap(sc, bif, enabled);
	}
}

static void
bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
{
	struct ifnet *ifp = bif->bif_ifp;
	struct ifreq ifr;
	int error;

	bzero(&ifr, sizeof (ifr));
	ifr.ifr_reqcap = set;

	if (ifp->if_capenable != set) {
		IFF_LOCKGIANT(ifp);
		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
		IFF_UNLOCKGIANT(ifp);
		if (error)
			printf("%s: %s error setting interface capabilities "
			    "on %s\n", __func__, sc->sc_ifp->if_xname,
			    ifp->if_xname);
	}
}
#endif /* HAS_IF_CAP */
static errno_t
bridge_set_tso(struct bridge_softc *sc)
{
	struct bridge_iflist *bif;
	u_int32_t tso_v4_mtu;
	u_int32_t tso_v6_mtu;
	ifnet_offload_t offload;
	errno_t error = 0;

	/* By default, support TSO */
	offload = sc->sc_ifp->if_hwassist | IFNET_TSO_IPV4 | IFNET_TSO_IPV6;
	tso_v4_mtu = IP_MAXPACKET;
	tso_v6_mtu = IP_MAXPACKET;

	/* Use the lowest common denominator of the members */
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
		ifnet_t ifp = bif->bif_ifp;

		if (ifp == NULL)
			continue;

		if (offload & IFNET_TSO_IPV4) {
			if (ifp->if_hwassist & IFNET_TSO_IPV4) {
				if (tso_v4_mtu > ifp->if_tso_v4_mtu)
					tso_v4_mtu = ifp->if_tso_v4_mtu;
			} else {
				offload &= ~IFNET_TSO_IPV4;
				tso_v4_mtu = 0;
			}
		}
		if (offload & IFNET_TSO_IPV6) {
			if (ifp->if_hwassist & IFNET_TSO_IPV6) {
				if (tso_v6_mtu > ifp->if_tso_v6_mtu)
					tso_v6_mtu = ifp->if_tso_v6_mtu;
			} else {
				offload &= ~IFNET_TSO_IPV6;
				tso_v6_mtu = 0;
			}
		}
	}

	if (offload != sc->sc_ifp->if_hwassist) {
		error = ifnet_set_offload(sc->sc_ifp, offload);
		if (error != 0) {
#if BRIDGE_DEBUG
			if (if_bridge_debug & BR_DBGF_LIFECYCLE)
				printf("%s: ifnet_set_offload(%s, 0x%x) "
				    "failed %d\n", __func__,
				    sc->sc_ifp->if_xname, offload, error);
#endif /* BRIDGE_DEBUG */
			goto done;
		}
		/*
		 * For ifnet_set_tso_mtu() sake, the TSO MTU must be at least
		 * as large as the interface MTU
		 */
		if (sc->sc_ifp->if_hwassist & IFNET_TSO_IPV4) {
			if (tso_v4_mtu < sc->sc_ifp->if_mtu)
				tso_v4_mtu = sc->sc_ifp->if_mtu;
			error = ifnet_set_tso_mtu(sc->sc_ifp, AF_INET,
			    tso_v4_mtu);
			if (error != 0) {
#if BRIDGE_DEBUG
				if (if_bridge_debug & BR_DBGF_LIFECYCLE)
					printf("%s: ifnet_set_tso_mtu(%s, "
					    "AF_INET, %u) failed %d\n",
					    __func__, sc->sc_ifp->if_xname,
					    tso_v4_mtu, error);
#endif /* BRIDGE_DEBUG */
				goto done;
			}
		}
		if (sc->sc_ifp->if_hwassist & IFNET_TSO_IPV6) {
			if (tso_v6_mtu < sc->sc_ifp->if_mtu)
				tso_v6_mtu = sc->sc_ifp->if_mtu;
			error = ifnet_set_tso_mtu(sc->sc_ifp, AF_INET6,
			    tso_v6_mtu);
			if (error != 0) {
#if BRIDGE_DEBUG
				if (if_bridge_debug & BR_DBGF_LIFECYCLE)
					printf("%s: ifnet_set_tso_mtu(%s, "
					    "AF_INET6, %u) failed %d\n",
					    __func__, sc->sc_ifp->if_xname,
					    tso_v6_mtu, error);
#endif /* BRIDGE_DEBUG */
				goto done;
			}
		}
	}
done:
	return (error);
}
/*
 * bridge_lookup_member:
 *
 *	Lookup a bridge member interface.
 */
static struct bridge_iflist *
bridge_lookup_member(struct bridge_softc *sc, const char *name)
{
	struct bridge_iflist *bif;
	struct ifnet *ifp;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
		ifp = bif->bif_ifp;
		if (strcmp(ifp->if_xname, name) == 0)
			return (bif);
	}

	return (NULL);
}

/*
 * bridge_lookup_member_if:
 *
 *	Lookup a bridge member interface by ifnet*.
 */
static struct bridge_iflist *
bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
{
	struct bridge_iflist *bif;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
		if (bif->bif_ifp == member_ifp)
			return (bif);
	}

	return (NULL);
}
static errno_t
bridge_iff_input(void *cookie, ifnet_t ifp, protocol_family_t protocol,
    mbuf_t *data, char **frame_ptr)
{
#pragma unused(protocol)
	errno_t error = 0;
	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
	struct bridge_softc *sc = bif->bif_sc;
	int included = 0;
	size_t frmlen = 0;
	mbuf_t m = *data;

	if ((m->m_flags & M_PROTO1))
		goto out;

	if (*frame_ptr >= (char *)mbuf_datastart(m) &&
	    *frame_ptr <= (char *)mbuf_data(m)) {
		included = 1;
		frmlen = (char *)mbuf_data(m) - *frame_ptr;
	}
#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_INPUT) {
		printf("%s: %s from %s m 0x%llx data 0x%llx frame 0x%llx %s "
		    "frmlen %lu\n", __func__, sc->sc_ifp->if_xname,
		    ifp->if_xname, (uint64_t)VM_KERNEL_ADDRPERM(m),
		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)),
		    (uint64_t)VM_KERNEL_ADDRPERM(*frame_ptr),
		    included ? "inside" : "outside", frmlen);

		if (if_bridge_debug & BR_DBGF_MBUF) {
			printf_mbuf(m, "bridge_iff_input[", "\n");
			printf_ether_header((struct ether_header *)
			    (void *)*frame_ptr);
			printf_mbuf_data(m, 0, 20);
			printf("\n");
		}
	}
#endif /* BRIDGE_DEBUG */

	/* Move data pointer to start of frame to the link layer header */
	if (included) {
		(void) mbuf_setdata(m, (char *)mbuf_data(m) - frmlen,
		    mbuf_len(m) + frmlen);
		(void) mbuf_pkthdr_adjustlen(m, frmlen);
	} else {
		printf("%s: frame_ptr outside mbuf\n", __func__);
		goto out;
	}

	error = bridge_input(ifp, m, *frame_ptr);

	/* Adjust packet back to original */
	if (error == 0 && !(m->m_flags & M_PROTO1)) {
		(void) mbuf_setdata(m, (char *)mbuf_data(m) + frmlen,
		    mbuf_len(m) - frmlen);
		(void) mbuf_pkthdr_adjustlen(m, -frmlen);
	}
#if BRIDGE_DEBUG
	if ((if_bridge_debug & BR_DBGF_INPUT) &&
	    (if_bridge_debug & BR_DBGF_MBUF)) {
		printf("\n");
		printf_mbuf(m, "bridge_iff_input]", "\n");
	}
#endif /* BRIDGE_DEBUG */

out:
	BRIDGE_LOCK_ASSERT_NOTHELD(sc);

	return (error);
}

#if BRIDGE_MEMBER_OUT_FILTER
static errno_t
bridge_iff_output(void *cookie, ifnet_t ifp, protocol_family_t protocol,
    mbuf_t *data)
{
#pragma unused(protocol)
	errno_t error = 0;
	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
	struct bridge_softc *sc = bif->bif_sc;
	mbuf_t m = *data;

	if ((m->m_flags & M_PROTO1))
		goto out;

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_OUTPUT) {
		printf("%s: %s from %s m 0x%llx data 0x%llx\n", __func__,
		    sc->sc_ifp->if_xname, ifp->if_xname,
		    (uint64_t)VM_KERNEL_ADDRPERM(m),
		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)));
	}
#endif /* BRIDGE_DEBUG */

	error = bridge_member_output(sc, ifp, m);
	if (error != 0) {
		printf("%s: bridge_member_output failed error %d\n", __func__,
		    error);
	}

out:
	BRIDGE_LOCK_ASSERT_NOTHELD(sc);

	return (error);
}
#endif /* BRIDGE_MEMBER_OUT_FILTER */
bridge_iff_event(void *cookie, ifnet_t ifp, protocol_family_t protocol,
    const struct kev_msg *event_msg)
{
#pragma unused(protocol)
	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
	struct bridge_softc *sc = bif->bif_sc;

	if (event_msg->vendor_code == KEV_VENDOR_APPLE &&
	    event_msg->kev_class == KEV_NETWORK_CLASS &&
	    event_msg->kev_subclass == KEV_DL_SUBCLASS) {

		if (if_bridge_debug & BR_DBGF_LIFECYCLE)
			printf("%s: %s event_code %u - %s\n", __func__,
			    ifp->if_xname, event_msg->event_code,
			    dlil_kev_dl_code_str(event_msg->event_code));
#endif /* BRIDGE_DEBUG */

		switch (event_msg->event_code) {
		case KEV_DL_IF_DETACHING:
		case KEV_DL_IF_DETACHED: {
			bridge_ifdetach(bif, ifp);

		case KEV_DL_LINK_OFF:
		case KEV_DL_LINK_ON: {
			bridge_iflinkevent(ifp);
			bstp_linkstate(ifp, event_msg->event_code);
#endif /* BRIDGESTP */

		case KEV_DL_SIFFLAGS: {
			if ((bif->bif_flags & BIFF_PROMISC) == 0 &&
			    (ifp->if_flags & IFF_UP)) {
				error = ifnet_set_promiscuous(ifp, 1);
					"ifnet_set_promiscuous (%s)"
					__func__, ifp->if_xname,
				bif->bif_flags |= BIFF_PROMISC;

		case KEV_DL_IFCAP_CHANGED: {
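/*
 * Summary of the event dispatch above (descriptive note): only Apple
 * KEV_DL_SUBCLASS events are handled. Detach events tear the member out of
 * the bridge via bridge_ifdetach(), link on/off events are folded into the
 * bridge's aggregate link state via bridge_iflinkevent() (and passed to
 * bstp_linkstate() when BRIDGESTP is built in), and KEV_DL_SIFFLAGS appears
 * to be used to enable promiscuous mode on a member that came up after it
 * was added to the bridge while down.
 */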
/*
 * bridge_iff_detached:
 *
 *	Detach an interface from a bridge. Called when a member
 *	interface is detaching.
 */
bridge_iff_detached(void *cookie, ifnet_t ifp)
{
	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;

	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
		printf("%s: %s\n", __func__, ifp->if_xname);
#endif /* BRIDGE_DEBUG */

	bridge_ifdetach(bif, ifp);

	_FREE(bif, M_DEVBUF);
bridge_proto_input(ifnet_t ifp, protocol_family_t protocol, mbuf_t packet,
#pragma unused(protocol, packet, header)
	printf("%s: unexpected packet from %s\n", __func__,
#endif /* BRIDGE_DEBUG */

bridge_attach_protocol(struct ifnet *ifp)
{
	struct ifnet_attach_proto_param reg;

	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
		printf("%s: %s\n", __func__, ifp->if_xname);
#endif /* BRIDGE_DEBUG */

	bzero(&reg, sizeof (reg));
	reg.input = bridge_proto_input;

	error = ifnet_attach_protocol(ifp, PF_BRIDGE, &reg);
		printf("%s: ifnet_attach_protocol(%s) failed, %d\n",
		    __func__, ifp->if_xname, error);

bridge_detach_protocol(struct ifnet *ifp)
{
	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
		printf("%s: %s\n", __func__, ifp->if_xname);
#endif /* BRIDGE_DEBUG */
	error = ifnet_detach_protocol(ifp, PF_BRIDGE);
		printf("%s: ifnet_detach_protocol(%s) failed, %d\n",
		    __func__, ifp->if_xname, error);
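/*
 * Descriptive note (an interpretation, not a statement from the original
 * comments): PF_BRIDGE is attached to each member as a placeholder protocol;
 * bridge_proto_input() only logs, because member traffic is intercepted
 * earlier by the interface filter (bridge_iff_input) and is never expected
 * to reach the protocol hook. The registration is minimal: a zeroed
 * ifnet_attach_proto_param with only reg.input set, passed to
 * ifnet_attach_protocol(ifp, PF_BRIDGE, &reg).
 */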
/*
 * bridge_delete_member:
 *
 *	Delete the specified member interface.
 */
bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
{
	struct ifnet *ifs = bif->bif_ifp, *bifp = sc->sc_ifp;
	int lladdr_changed = 0, error, filt_attached;
	uint8_t eaddr[ETHER_ADDR_LEN];
	u_int32_t event_code = 0;

	BRIDGE_LOCK_ASSERT_HELD(sc);
	VERIFY(ifs != NULL);

	bsd_mode = bridge_in_bsd_mode(sc);

	/*
	 * First, remove the member from the list so it cannot be found anymore
	 * when we release the bridge lock below.
	 */
	TAILQ_REMOVE(&sc->sc_iflist, bif, bif_next);

	switch (ifs->if_type) {
		/*
		 * Take the interface out of promiscuous mode.
		 */
		if (bif->bif_flags & BIFF_PROMISC) {
			/*
			 * Unlock to prevent deadlock with bridge_iff_event() in
			 * case the driver generates an interface event.
			 */
			(void) ifnet_set_promiscuous(ifs, 0);

		/* currently not supported */

	/* re-enable any interface capabilities */
	bridge_set_ifcap(sc, bif, bif->bif_savedcaps);

	if (bif->bif_flags & BIFF_PROTO_ATTACHED) {
		/* Respect lock ordering with DLIL lock */
		(void) bridge_detach_protocol(ifs);

	if (bsd_mode && (bif->bif_ifflags & IFBIF_STP) != 0) {
		bstp_disable(&bif->bif_stp);
#endif /* BRIDGESTP */

	/*
	 * If removing the interface that gave the bridge its mac address, set
	 * the mac address of the bridge to the address of the next member, or
	 * to its default address if no members are left.
	 */
	if (bridge_inherit_mac && sc->sc_ifaddr == ifs) {
		ifnet_release(sc->sc_ifaddr);
		if (TAILQ_EMPTY(&sc->sc_iflist)) {
			bcopy(sc->sc_defaddr, eaddr, ETHER_ADDR_LEN);
			sc->sc_ifaddr = NULL;
				TAILQ_FIRST(&sc->sc_iflist)->bif_ifp;
			bcopy(IF_LLADDR(fif), eaddr, ETHER_ADDR_LEN);
			sc->sc_ifaddr = fif;
			ifnet_reference(fif);	/* for sc_ifaddr */

	bridge_mutecaps(sc);	/* recalculate now this interface is removed */
#endif /* HAS_IF_CAP */

	error = bridge_set_tso(sc);
		printf("%s: bridge_set_tso failed %d\n", __func__, error);

	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);

	KASSERT(bif->bif_addrcnt == 0,
	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));

	filt_attached = bif->bif_flags & BIFF_FILTER_ATTACHED;

	/*
	 * Update link status of the bridge based on its remaining members.
	 */
	event_code = bridge_updatelinkstatus(sc);

	if (lladdr_changed &&
	    (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0)
		printf("%s: ifnet_set_lladdr failed %d\n", __func__, error);

	if (event_code != 0)
		bridge_link_event(bifp, event_code);

	bstp_destroy(&bif->bif_stp);	/* prepare to free */
#endif /* BRIDGESTP */

	iflt_detach(bif->bif_iff_ref);

	_FREE(bif, M_DEVBUF);

	ifs->if_bridge = NULL;
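/*
 * Teardown ordering note (summary of the code above): the member is first
 * unlinked from sc_iflist under the bridge lock so it can no longer be
 * found; the bridge lock is then dropped around ifnet_set_promiscuous() and
 * bridge_detach_protocol() to avoid deadlocks with bridge_iff_event() and to
 * respect DLIL lock ordering; finally the bridge MAC address and link state
 * are recomputed from whatever members remain before the bridge_iflist
 * entry is freed.
 */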
/*
 * bridge_delete_span:
 *
 *	Delete the specified span interface.
 */
bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
{
	BRIDGE_LOCK_ASSERT_HELD(sc);

	KASSERT(bif->bif_ifp->if_bridge == NULL,
	    ("%s: not a span interface", __func__));

	ifnet_release(bif->bif_ifp);

	TAILQ_REMOVE(&sc->sc_spanlist, bif, bif_next);
	_FREE(bif, M_DEVBUF);
bridge_ioctl_add(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif = NULL;
	struct ifnet *ifs, *bifp = sc->sc_ifp;
	int error = 0, lladdr_changed = 0;
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct iff_filter iff;
	u_int32_t event_code = 0;
	boolean_t bsd_mode = bridge_in_bsd_mode(sc);

	ifs = ifunit(req->ifbr_ifsname);

	if (ifs->if_ioctl == NULL)	/* must be supported */

	if (IFNET_IS_INTCOPROC(ifs)) {

	/* If it's in the span list, it can't be a member. */
	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
		if (ifs == bif->bif_ifp)

	if (ifs->if_bridge == sc)

	if (ifs->if_bridge != NULL)

	switch (ifs->if_type) {
		/* permitted interface types */
		/* currently not supported */

	bif = _MALLOC(sizeof (*bif), M_DEVBUF, M_WAITOK | M_ZERO);

	ifnet_reference(ifs);
	bif->bif_ifflags = IFBIF_LEARNING | IFBIF_DISCOVER;
	bif->bif_savedcaps = ifs->if_capenable;
#endif /* HAS_IF_CAP */

	/* Allow the first Ethernet member to define the MTU */
	if (TAILQ_EMPTY(&sc->sc_iflist))
		sc->sc_ifp->if_mtu = ifs->if_mtu;
	else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
		printf("%s: %s: invalid MTU for %s", __func__,
		    sc->sc_ifp->if_xname,

	/*
	 * Assign the interface's MAC address to the bridge if it's the first
	 * member and the MAC address of the bridge has not been changed from
	 * the default (randomly) generated one.
	 */
	if (bridge_inherit_mac && TAILQ_EMPTY(&sc->sc_iflist) &&
	    !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN)) {
		bcopy(IF_LLADDR(ifs), eaddr, ETHER_ADDR_LEN);
		sc->sc_ifaddr = ifs;
		ifnet_reference(ifs);	/* for sc_ifaddr */

	ifs->if_bridge = sc;
	bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
#endif /* BRIDGESTP */

	/*
	 * XXX: XLOCK HERE!?!
	 */
	TAILQ_INSERT_TAIL(&sc->sc_iflist, bif, bif_next);

	/* Set interface capabilities to the intersection set of all members */
	bridge_mutecaps(sc);
#endif /* HAS_IF_CAP */

	/*
	 * Place the interface into promiscuous mode.
	 */
	switch (ifs->if_type) {
		error = ifnet_set_promiscuous(ifs, 1);
		/* Ignore error when device is not up */
		if (error != ENETDOWN)
		bif->bif_flags |= BIFF_PROMISC;

	/*
	 * The new member may change the link status of the bridge interface.
	 */
	if (interface_media_active(ifs))
		bif->bif_flags |= BIFF_MEDIA_ACTIVE;
	else
		bif->bif_flags &= ~BIFF_MEDIA_ACTIVE;

	event_code = bridge_updatelinkstatus(sc);

	/*
	 * Respect lock ordering with DLIL lock for the following operations.
	 */

	/*
	 * Install an interface filter.
	 */
	memset(&iff, 0, sizeof (struct iff_filter));
	iff.iff_cookie = bif;
	iff.iff_name = "com.apple.kernel.bsd.net.if_bridge";
	iff.iff_input = bridge_iff_input;
#if BRIDGE_MEMBER_OUT_FILTER
	iff.iff_output = bridge_iff_output;
#endif /* BRIDGE_MEMBER_OUT_FILTER */
	iff.iff_event = bridge_iff_event;
	iff.iff_detached = bridge_iff_detached;
	error = dlil_attach_filter(ifs, &iff, &bif->bif_iff_ref,
	    DLIL_IFF_TSO | DLIL_IFF_INTERNAL);
		printf("%s: iflt_attach failed %d\n", __func__, error);
	bif->bif_flags |= BIFF_FILTER_ATTACHED;

	/*
	 * Install a dummy "bridge" protocol.
	 */
	if ((error = bridge_attach_protocol(ifs)) != 0) {
		printf("%s: bridge_attach_protocol failed %d\n",
	bif->bif_flags |= BIFF_PROTO_ATTACHED;

	if (lladdr_changed &&
	    (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0)
		printf("%s: ifnet_set_lladdr failed %d\n", __func__, error);

	if (event_code != 0)
		bridge_link_event(bifp, event_code);

	if (error && bif != NULL)
		bridge_delete_member(sc, bif, 1);
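/*
 * Member-add sequence, as implemented above (descriptive recap): validate
 * the candidate interface (ioctl support, not a co-processor interface, not
 * a span port, not already bridged, permitted interface type), allocate and
 * link the bridge_iflist entry, optionally inherit the MTU and MAC address
 * from the first member, put the member into promiscuous mode, fold its
 * media state into the bridge link status, and finally attach the DLIL
 * interface filter and the placeholder PF_BRIDGE protocol. A failure after
 * the entry is linked is unwound through bridge_delete_member().
 */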
bridge_ioctl_del(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;

	bif = bridge_lookup_member(sc, req->ifbr_ifsname);

	bridge_delete_member(sc, bif, 0);

bridge_ioctl_purge(struct bridge_softc *sc, void *arg)
{
#pragma unused(sc, arg)
bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;

	bif = bridge_lookup_member(sc, req->ifbr_ifsname);

	if (bridge_in_bsd_mode(sc)) {
		struct bstp_port *bp;

		req->ifbr_state = bp->bp_state;
		req->ifbr_priority = bp->bp_priority;
		req->ifbr_path_cost = bp->bp_path_cost;
		req->ifbr_proto = bp->bp_protover;
		req->ifbr_role = bp->bp_role;
		req->ifbr_stpflags = bp->bp_flags;
		/* Copy STP state options as flags */
		if (bp->bp_operedge)
			req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
		if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
			req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
		if (bp->bp_ptp_link)
			req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
		if (bp->bp_flags & BSTP_PORT_AUTOPTP)
			req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
		if (bp->bp_flags & BSTP_PORT_ADMEDGE)
			req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
		if (bp->bp_flags & BSTP_PORT_ADMCOST)
			req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;

	req->ifbr_ifsflags = bif->bif_ifflags;
	req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
	req->ifbr_addrcnt = bif->bif_addrcnt;
	req->ifbr_addrmax = bif->bif_addrmax;
	req->ifbr_addrexceeded = bif->bif_addrexceeded;
bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;
	struct bstp_port *bp;
#endif /* BRIDGESTP */

	if (!bridge_in_bsd_mode(sc)) {

	bif = bridge_lookup_member(sc, req->ifbr_ifsname);

	if (req->ifbr_ifsflags & IFBIF_SPAN)
		/* SPAN is readonly */

	if (req->ifbr_ifsflags & IFBIF_STP) {
		if ((bif->bif_ifflags & IFBIF_STP) == 0) {
			error = bstp_enable(&bif->bif_stp);
		if ((bif->bif_ifflags & IFBIF_STP) != 0)
			bstp_disable(&bif->bif_stp);

	/* Pass on STP flags */
	bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
	bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
	bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
	bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
#else /* !BRIDGESTP */
	if (req->ifbr_ifsflags & IFBIF_STP)
		return (EOPNOTSUPP);
#endif /* !BRIDGESTP */

	/* Save the bits relating to the bridge */
	bif->bif_ifflags = req->ifbr_ifsflags & IFBIFMASK;
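/*
 * Note on the flag handling above: of the flags supplied by the caller, only
 * the bits covered by IFBIFMASK are stored in bif_ifflags; the IFBIF_BSTP_*
 * bits are translated into bstp_set_*() calls (or rejected with EOPNOTSUPP
 * when the kernel is built without BRIDGESTP), and IFBIF_SPAN is read-only
 * and cannot be set through this ioctl.
 */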
bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	sc->sc_brtmax = param->ifbrp_csize;
	if (bridge_in_bsd_mode(sc)) {

bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	param->ifbrp_csize = sc->sc_brtmax;
2630 #define BRIDGE_IOCTL_GIFS do { \
2631 struct bridge_iflist *bif; \
2632 struct ifbreq breq; \
2633 char *buf, *outbuf; \
2634 unsigned int count, buflen, len; \
2637 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) \
2639 if (bridge_in_bsd_mode(sc)) { \
2640 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) \
2644 buflen = sizeof (breq) * count; \
2645 if (bifc->ifbic_len == 0) { \
2646 bifc->ifbic_len = buflen; \
2649 BRIDGE_UNLOCK(sc); \
2650 outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
2655 len = min(bifc->ifbic_len, buflen); \
2656 bzero(&breq, sizeof (breq)); \
2657 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
2658 if (len < sizeof (breq)) \
2661 snprintf(breq.ifbr_ifsname, sizeof (breq.ifbr_ifsname), \
2662 "%s", bif->bif_ifp->if_xname); \
2663 /* Fill in the ifbreq structure */ \
2664 error = bridge_ioctl_gifflags(sc, &breq); \
2667 memcpy(buf, &breq, sizeof (breq)); \
2669 buf += sizeof (breq); \
2670 len -= sizeof (breq); \
2672 if (bridge_in_bsd_mode(sc)) { \
2673 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) { \
2674 if (len < sizeof (breq)) \
2677 snprintf(breq.ifbr_ifsname, \
2678 sizeof (breq.ifbr_ifsname), \
2679 "%s", bif->bif_ifp->if_xname); \
2680 breq.ifbr_ifsflags = bif->bif_ifflags; \
2682 = bif->bif_ifp->if_index & 0xfff; \
2683 memcpy(buf, &breq, sizeof (breq)); \
2685 buf += sizeof (breq); \
2686 len -= sizeof (breq); \
2690 BRIDGE_UNLOCK(sc); \
2691 bifc->ifbic_len = sizeof (breq) * count; \
2692 error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); \
2694 _FREE(outbuf, M_TEMP); \
2698 bridge_ioctl_gifs64(struct bridge_softc
*sc
, void *arg
)
2700 struct ifbifconf64
*bifc
= arg
;
2709 bridge_ioctl_gifs32(struct bridge_softc
*sc
, void *arg
)
2711 struct ifbifconf32
*bifc
= arg
;
2719 #define BRIDGE_IOCTL_RTS do { \
2720 struct bridge_rtnode *brt; \
2722 char *outbuf = NULL; \
2723 unsigned int count, buflen, len; \
2724 unsigned long now; \
2726 if (bac->ifbac_len == 0) \
2729 bzero(&bareq, sizeof (bareq)); \
2731 if (!bridge_in_bsd_mode(sc)) { \
2734 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) \
2736 buflen = sizeof (bareq) * count; \
2738 BRIDGE_UNLOCK(sc); \
2739 outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
2744 len = min(bac->ifbac_len, buflen); \
2745 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { \
2746 if (len < sizeof (bareq)) \
2748 snprintf(bareq.ifba_ifsname, sizeof (bareq.ifba_ifsname), \
2749 "%s", brt->brt_ifp->if_xname); \
2750 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof (brt->brt_addr)); \
2751 bareq.ifba_vlan = brt->brt_vlan; \
2752 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { \
2753 now = (unsigned long) net_uptime(); \
2754 if (now < brt->brt_expire) \
2755 bareq.ifba_expire = \
2756 brt->brt_expire - now; \
2758 bareq.ifba_expire = 0; \
2759 bareq.ifba_flags = brt->brt_flags; \
2761 memcpy(buf, &bareq, sizeof (bareq)); \
2763 buf += sizeof (bareq); \
2764 len -= sizeof (bareq); \
2767 bac->ifbac_len = sizeof (bareq) * count; \
2768 if (outbuf != NULL) { \
2769 BRIDGE_UNLOCK(sc); \
2770 error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); \
2771 _FREE(outbuf, M_TEMP); \
2778 bridge_ioctl_rts64(struct bridge_softc
*sc
, void *arg
)
2780 struct ifbaconf64
*bac
= arg
;
2781 struct ifbareq64 bareq
;
2789 bridge_ioctl_rts32(struct bridge_softc
*sc
, void *arg
)
2791 struct ifbaconf32
*bac
= arg
;
2792 struct ifbareq32 bareq
;
2800 bridge_ioctl_saddr32(struct bridge_softc
*sc
, void *arg
)
2802 struct ifbareq32
*req
= arg
;
2803 struct bridge_iflist
*bif
;
2806 if (!bridge_in_bsd_mode(sc
)) {
2810 bif
= bridge_lookup_member(sc
, req
->ifba_ifsname
);
2814 error
= bridge_rtupdate(sc
, req
->ifba_dst
, req
->ifba_vlan
, bif
, 1,
2821 bridge_ioctl_saddr64(struct bridge_softc
*sc
, void *arg
)
2823 struct ifbareq64
*req
= arg
;
2824 struct bridge_iflist
*bif
;
2827 if (!bridge_in_bsd_mode(sc
)) {
2831 bif
= bridge_lookup_member(sc
, req
->ifba_ifsname
);
2835 error
= bridge_rtupdate(sc
, req
->ifba_dst
, req
->ifba_vlan
, bif
, 1,
2842 bridge_ioctl_sto(struct bridge_softc
*sc
, void *arg
)
2844 struct ifbrparam
*param
= arg
;
2846 sc
->sc_brttimeout
= param
->ifbrp_ctime
;
2851 bridge_ioctl_gto(struct bridge_softc
*sc
, void *arg
)
2853 struct ifbrparam
*param
= arg
;
2855 param
->ifbrp_ctime
= sc
->sc_brttimeout
;
2860 bridge_ioctl_daddr32(struct bridge_softc
*sc
, void *arg
)
2862 struct ifbareq32
*req
= arg
;
2864 if (!bridge_in_bsd_mode(sc
)) {
2867 return (bridge_rtdaddr(sc
, req
->ifba_dst
, req
->ifba_vlan
));
2871 bridge_ioctl_daddr64(struct bridge_softc
*sc
, void *arg
)
2873 struct ifbareq64
*req
= arg
;
2875 if (!bridge_in_bsd_mode(sc
)) {
2878 return (bridge_rtdaddr(sc
, req
->ifba_dst
, req
->ifba_vlan
));
2882 bridge_ioctl_flush(struct bridge_softc
*sc
, void *arg
)
2884 struct ifbreq
*req
= arg
;
2886 if (!bridge_in_bsd_mode(sc
)) {
2889 bridge_rtflush(sc
, req
->ifbr_ifsflags
);
2894 bridge_ioctl_gpri(struct bridge_softc
*sc
, void *arg
)
2896 struct ifbrparam
*param
= arg
;
2897 struct bstp_state
*bs
= &sc
->sc_stp
;
2899 if (!bridge_in_bsd_mode(sc
)) {
2902 param
->ifbrp_prio
= bs
->bs_bridge_priority
;
2907 bridge_ioctl_spri(struct bridge_softc
*sc
, void *arg
)
2910 struct ifbrparam
*param
= arg
;
2912 if (!bridge_in_bsd_mode(sc
)) {
2913 return (EOPNOTSUPP
);
2915 return (bstp_set_priority(&sc
->sc_stp
, param
->ifbrp_prio
));
2916 #else /* !BRIDGESTP */
2917 #pragma unused(sc, arg)
2918 return (EOPNOTSUPP
);
2919 #endif /* !BRIDGESTP */
2923 bridge_ioctl_ght(struct bridge_softc
*sc
, void *arg
)
2925 struct ifbrparam
*param
= arg
;
2926 struct bstp_state
*bs
= &sc
->sc_stp
;
2928 if (!bridge_in_bsd_mode(sc
)) {
2931 param
->ifbrp_hellotime
= bs
->bs_bridge_htime
>> 8;
2936 bridge_ioctl_sht(struct bridge_softc
*sc
, void *arg
)
2939 struct ifbrparam
*param
= arg
;
2941 if (!bridge_in_bsd_mode(sc
)) {
2942 return (EOPNOTSUPP
);
2944 return (bstp_set_htime(&sc
->sc_stp
, param
->ifbrp_hellotime
));
2945 #else /* !BRIDGESTP */
2946 #pragma unused(sc, arg)
2947 return (EOPNOTSUPP
);
2948 #endif /* !BRIDGESTP */
2952 bridge_ioctl_gfd(struct bridge_softc
*sc
, void *arg
)
2954 struct ifbrparam
*param
;
2955 struct bstp_state
*bs
;
2957 if (!bridge_in_bsd_mode(sc
)) {
2962 param
->ifbrp_fwddelay
= bs
->bs_bridge_fdelay
>> 8;
2967 bridge_ioctl_sfd(struct bridge_softc
*sc
, void *arg
)
2970 struct ifbrparam
*param
= arg
;
2972 if (!bridge_in_bsd_mode(sc
)) {
2973 return (EOPNOTSUPP
);
2975 return (bstp_set_fdelay(&sc
->sc_stp
, param
->ifbrp_fwddelay
));
2976 #else /* !BRIDGESTP */
2977 #pragma unused(sc, arg)
2978 return (EOPNOTSUPP
);
2979 #endif /* !BRIDGESTP */
2983 bridge_ioctl_gma(struct bridge_softc
*sc
, void *arg
)
2985 struct ifbrparam
*param
;
2986 struct bstp_state
*bs
;
2988 if (!bridge_in_bsd_mode(sc
)) {
2989 return (EOPNOTSUPP
);
2993 param
->ifbrp_maxage
= bs
->bs_bridge_max_age
>> 8;
2998 bridge_ioctl_sma(struct bridge_softc
*sc
, void *arg
)
3001 struct ifbrparam
*param
= arg
;
3003 if (!bridge_in_bsd_mode(sc
)) {
3004 return (EOPNOTSUPP
);
3006 return (bstp_set_maxage(&sc
->sc_stp
, param
->ifbrp_maxage
));
3007 #else /* !BRIDGESTP */
3008 #pragma unused(sc, arg)
3009 return (EOPNOTSUPP
);
3010 #endif /* !BRIDGESTP */
3014 bridge_ioctl_sifprio(struct bridge_softc
*sc
, void *arg
)
3017 struct ifbreq
*req
= arg
;
3018 struct bridge_iflist
*bif
;
3020 if (!bridge_in_bsd_mode(sc
)) {
3021 return (EOPNOTSUPP
);
3023 bif
= bridge_lookup_member(sc
, req
->ifbr_ifsname
);
3027 return (bstp_set_port_priority(&bif
->bif_stp
, req
->ifbr_priority
));
3028 #else /* !BRIDGESTP */
3029 #pragma unused(sc, arg)
3030 return (EOPNOTSUPP
);
3031 #endif /* !BRIDGESTP */
3035 bridge_ioctl_sifcost(struct bridge_softc
*sc
, void *arg
)
3038 struct ifbreq
*req
= arg
;
3039 struct bridge_iflist
*bif
;
3041 if (!bridge_in_bsd_mode(sc
)) {
3042 return (EOPNOTSUPP
);
3044 bif
= bridge_lookup_member(sc
, req
->ifbr_ifsname
);
3048 return (bstp_set_path_cost(&bif
->bif_stp
, req
->ifbr_path_cost
));
3049 #else /* !BRIDGESTP */
3050 #pragma unused(sc, arg)
3051 return (EOPNOTSUPP
);
3052 #endif /* !BRIDGESTP */
3056 bridge_ioctl_gfilt(struct bridge_softc
*sc
, void *arg
)
3058 struct ifbrparam
*param
= arg
;
3060 param
->ifbrp_filter
= sc
->sc_filter_flags
;
3066 bridge_ioctl_sfilt(struct bridge_softc
*sc
, void *arg
)
3068 struct ifbrparam
*param
= arg
;
3070 if (param
->ifbrp_filter
& ~IFBF_FILT_MASK
)
3074 if (param
->ifbrp_filter
& IFBF_FILT_USEIPF
)
3078 sc
->sc_filter_flags
= param
->ifbrp_filter
;
3084 bridge_ioctl_sifmaxaddr(struct bridge_softc
*sc
, void *arg
)
3086 struct ifbreq
*req
= arg
;
3087 struct bridge_iflist
*bif
;
3089 bif
= bridge_lookup_member(sc
, req
->ifbr_ifsname
);
3093 bif
->bif_addrmax
= req
->ifbr_addrmax
;
3098 bridge_ioctl_addspan(struct bridge_softc
*sc
, void *arg
)
3100 struct ifbreq
*req
= arg
;
3101 struct bridge_iflist
*bif
= NULL
;
3104 if (!bridge_in_bsd_mode(sc
)) {
3105 return (EOPNOTSUPP
);
3107 ifs
= ifunit(req
->ifbr_ifsname
);
3111 if (IFNET_IS_INTCOPROC(ifs
)) {
3115 TAILQ_FOREACH(bif
, &sc
->sc_spanlist
, bif_next
)
3116 if (ifs
== bif
->bif_ifp
)
3119 if (ifs
->if_bridge
!= NULL
)
3122 switch (ifs
->if_type
) {
3127 /* currently not supported */
3133 bif
= _MALLOC(sizeof (*bif
), M_DEVBUF
, M_WAITOK
| M_ZERO
);
3138 bif
->bif_ifflags
= IFBIF_SPAN
;
3140 ifnet_reference(bif
->bif_ifp
);
3142 TAILQ_INSERT_HEAD(&sc
->sc_spanlist
, bif
, bif_next
);
3148 bridge_ioctl_delspan(struct bridge_softc
*sc
, void *arg
)
3150 struct ifbreq
*req
= arg
;
3151 struct bridge_iflist
*bif
;
3154 if (!bridge_in_bsd_mode(sc
)) {
3155 return (EOPNOTSUPP
);
3157 ifs
= ifunit(req
->ifbr_ifsname
);
3161 TAILQ_FOREACH(bif
, &sc
->sc_spanlist
, bif_next
)
3162 if (ifs
== bif
->bif_ifp
)
3168 bridge_delete_span(sc
, bif
);
3173 #define BRIDGE_IOCTL_GBPARAM do { \
3174 struct bstp_state *bs = &sc->sc_stp; \
3175 struct bstp_port *root_port; \
3177 req->ifbop_maxage = bs->bs_bridge_max_age >> 8; \
3178 req->ifbop_hellotime = bs->bs_bridge_htime >> 8; \
3179 req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8; \
3181 root_port = bs->bs_root_port; \
3182 if (root_port == NULL) \
3183 req->ifbop_root_port = 0; \
3185 req->ifbop_root_port = root_port->bp_ifp->if_index; \
3187 req->ifbop_holdcount = bs->bs_txholdcount; \
3188 req->ifbop_priority = bs->bs_bridge_priority; \
3189 req->ifbop_protocol = bs->bs_protover; \
3190 req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; \
3191 req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; \
3192 req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; \
3193 req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; \
3194 req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; \
3195 req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; \
3199 bridge_ioctl_gbparam32(struct bridge_softc
*sc
, void *arg
)
3201 struct ifbropreq32
*req
= arg
;
3203 if (bridge_in_bsd_mode(sc
)) {
3204 BRIDGE_IOCTL_GBPARAM
;
3210 bridge_ioctl_gbparam64(struct bridge_softc
*sc
, void *arg
)
3212 struct ifbropreq64
*req
= arg
;
3214 if (bridge_in_bsd_mode(sc
)) {
3215 BRIDGE_IOCTL_GBPARAM
;
3221 bridge_ioctl_grte(struct bridge_softc
*sc
, void *arg
)
3223 struct ifbrparam
*param
= arg
;
3225 param
->ifbrp_cexceeded
= sc
->sc_brtexceeded
;
3229 #define BRIDGE_IOCTL_GIFSSTP do { \
3230 struct bridge_iflist *bif; \
3231 struct bstp_port *bp; \
3232 struct ifbpstpreq bpreq; \
3233 char *buf, *outbuf; \
3234 unsigned int count, buflen, len; \
3237 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
3238 if ((bif->bif_ifflags & IFBIF_STP) != 0) \
3242 buflen = sizeof (bpreq) * count; \
3243 if (bifstp->ifbpstp_len == 0) { \
3244 bifstp->ifbpstp_len = buflen; \
3248 BRIDGE_UNLOCK(sc); \
3249 outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
3254 len = min(bifstp->ifbpstp_len, buflen); \
3255 bzero(&bpreq, sizeof (bpreq)); \
3256 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
3257 if (len < sizeof (bpreq)) \
3260 if ((bif->bif_ifflags & IFBIF_STP) == 0) \
3263 bp = &bif->bif_stp; \
3264 bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff; \
3265 bpreq.ifbp_fwd_trans = bp->bp_forward_transitions; \
3266 bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost; \
3267 bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id; \
3268 bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; \
3269 bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id; \
3271 memcpy(buf, &bpreq, sizeof (bpreq)); \
3273 buf += sizeof (bpreq); \
3274 len -= sizeof (bpreq); \
3277 BRIDGE_UNLOCK(sc); \
3278 bifstp->ifbpstp_len = sizeof (bpreq) * count; \
3279 error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); \
3281 _FREE(outbuf, M_TEMP); \
3286 bridge_ioctl_gifsstp32(struct bridge_softc
*sc
, void *arg
)
3288 struct ifbpstpconf32
*bifstp
= arg
;
3291 if (bridge_in_bsd_mode(sc
)) {
3292 BRIDGE_IOCTL_GIFSSTP
;
3298 bridge_ioctl_gifsstp64(struct bridge_softc
*sc
, void *arg
)
3300 struct ifbpstpconf64
*bifstp
= arg
;
3303 if (bridge_in_bsd_mode(sc
)) {
3304 BRIDGE_IOCTL_GIFSSTP
;
3310 bridge_ioctl_sproto(struct bridge_softc
*sc
, void *arg
)
3313 struct ifbrparam
*param
= arg
;
3315 if (!bridge_in_bsd_mode(sc
)) {
3316 return (EOPNOTSUPP
);
3318 return (bstp_set_protocol(&sc
->sc_stp
, param
->ifbrp_proto
));
3319 #else /* !BRIDGESTP */
3320 #pragma unused(sc, arg)
3321 return (EOPNOTSUPP
);
3322 #endif /* !BRIDGESTP */
3326 bridge_ioctl_stxhc(struct bridge_softc
*sc
, void *arg
)
3329 struct ifbrparam
*param
= arg
;
3331 if (!bridge_in_bsd_mode(sc
)) {
3332 return (EOPNOTSUPP
);
3334 return (bstp_set_holdcount(&sc
->sc_stp
, param
->ifbrp_txhc
));
3335 #else /* !BRIDGESTP */
3336 #pragma unused(sc, arg)
3337 return (EOPNOTSUPP
);
3338 #endif /* !BRIDGESTP */
3343 bridge_ioctl_ghostfilter(struct bridge_softc
*sc
, void *arg
)
3345 struct ifbrhostfilter
*req
= arg
;
3346 struct bridge_iflist
*bif
;
3348 bif
= bridge_lookup_member(sc
, req
->ifbrhf_ifsname
);
3352 bzero(req
, sizeof(struct ifbrhostfilter
));
3353 if (bif
->bif_flags
& BIFF_HOST_FILTER
) {
3354 req
->ifbrhf_flags
|= IFBRHF_ENABLED
;
3355 bcopy(bif
->bif_hf_hwsrc
, req
->ifbrhf_hwsrca
,
3357 req
->ifbrhf_ipsrc
= bif
->bif_hf_ipsrc
.s_addr
;
3363 bridge_ioctl_shostfilter(struct bridge_softc
*sc
, void *arg
)
3365 struct ifbrhostfilter
*req
= arg
;
3366 struct bridge_iflist
*bif
;
3368 bif
= bridge_lookup_member(sc
, req
->ifbrhf_ifsname
);
3372 INC_ATOMIC_INT64_LIM(net_api_stats
.nas_vmnet_total
);
3374 if (req
->ifbrhf_flags
& IFBRHF_ENABLED
) {
3375 bif
->bif_flags
|= BIFF_HOST_FILTER
;
3377 if (req
->ifbrhf_flags
& IFBRHF_HWSRC
) {
3378 bcopy(req
->ifbrhf_hwsrca
, bif
->bif_hf_hwsrc
,
3380 if (bcmp(req
->ifbrhf_hwsrca
, ethernulladdr
,
3381 ETHER_ADDR_LEN
) != 0)
3382 bif
->bif_flags
|= BIFF_HF_HWSRC
;
3384 bif
->bif_flags
&= ~BIFF_HF_HWSRC
;
3386 if (req
->ifbrhf_flags
& IFBRHF_IPSRC
) {
3387 bif
->bif_hf_ipsrc
.s_addr
= req
->ifbrhf_ipsrc
;
3388 if (bif
->bif_hf_ipsrc
.s_addr
!= INADDR_ANY
)
3389 bif
->bif_flags
|= BIFF_HF_IPSRC
;
3391 bif
->bif_flags
&= ~BIFF_HF_IPSRC
;
3394 bif
->bif_flags
&= ~(BIFF_HOST_FILTER
| BIFF_HF_HWSRC
|
3396 bzero(bif
->bif_hf_hwsrc
, ETHER_ADDR_LEN
);
3397 bif
->bif_hf_ipsrc
.s_addr
= INADDR_ANY
;
/*
 *	Detach an interface from a bridge. Called when a member
 *	interface is detaching.
 */
__private_extern__ void
bridge_ifdetach(struct bridge_iflist *bif, struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_bridge;

	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
		printf("%s: %s\n", __func__, ifp->if_xname);
#endif /* BRIDGE_DEBUG */

	/* Check if the interface is a bridge member */
	bif = bridge_lookup_member_if(sc, ifp);
	bridge_delete_member(sc, bif, 1);

	/* Check if the interface is a span port */
	lck_mtx_lock(&bridge_list_mtx);
	LIST_FOREACH(sc, &bridge_list, sc_list) {
		if (bridge_in_bsd_mode(sc)) {
			TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
				if (ifp == bif->bif_ifp) {
					bridge_delete_span(sc, bif);

	lck_mtx_unlock(&bridge_list_mtx);
/*
 * interface_media_active:
 *
 *	Returns whether an interface's media is currently active.
 */
interface_media_active(struct ifnet *ifp)
{
	struct ifmediareq ifmr;

	bzero(&ifmr, sizeof(ifmr));
	if (ifnet_ioctl(ifp, 0, SIOCGIFMEDIA, &ifmr) == 0) {
		if ((ifmr.ifm_status & IFM_AVALID) && ifmr.ifm_count > 0)
			status = ifmr.ifm_status & IFM_ACTIVE ? 1 : 0;
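/*
 * Descriptive note: SIOCGIFMEDIA is issued through ifnet_ioctl() and the
 * result is trusted only when the driver reports a valid status word
 * (IFM_AVALID) and at least one media type (ifm_count > 0); the function
 * then reduces the status to a boolean IFM_ACTIVE indication for the caller.
 */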
/*
 * bridge_updatelinkstatus:
 *
 *	Update the media active status of the bridge based on the
 *	media active status of its members.
 *	If changed, return the corresponding on/off link event.
 */
bridge_updatelinkstatus(struct bridge_softc *sc)
{
	struct bridge_iflist *bif;
	int active_member = 0;
	u_int32_t event_code = 0;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	/*
	 * Find out if we have an active interface.
	 */
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
		if (bif->bif_flags & BIFF_MEDIA_ACTIVE) {

	if (active_member && !(sc->sc_flags & SCF_MEDIA_ACTIVE)) {
		sc->sc_flags |= SCF_MEDIA_ACTIVE;
		event_code = KEV_DL_LINK_ON;
	} else if (!active_member && (sc->sc_flags & SCF_MEDIA_ACTIVE)) {
		sc->sc_flags &= ~SCF_MEDIA_ACTIVE;
		event_code = KEV_DL_LINK_OFF;

	return (event_code);
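/*
 * State transition summary for the code above: the bridge is marked
 * SCF_MEDIA_ACTIVE and KEV_DL_LINK_ON is returned when at least one member
 * has BIFF_MEDIA_ACTIVE set and the bridge was previously down; the flag is
 * cleared and KEV_DL_LINK_OFF is returned on the opposite transition; in all
 * other cases 0 is returned and no event is posted.
 */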
/*
 * bridge_iflinkevent:
 */
bridge_iflinkevent(struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_bridge;
	struct bridge_iflist *bif;
	u_int32_t event_code = 0;

	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
		printf("%s: %s\n", __func__, ifp->if_xname);
#endif /* BRIDGE_DEBUG */

	/* Check if the interface is a bridge member */
	bif = bridge_lookup_member_if(sc, ifp);
	if (interface_media_active(ifp))
		bif->bif_flags |= BIFF_MEDIA_ACTIVE;
	else
		bif->bif_flags &= ~BIFF_MEDIA_ACTIVE;

	event_code = bridge_updatelinkstatus(sc);

	if (event_code != 0)
		bridge_link_event(sc->sc_ifp, event_code);
3538 * bridge_delayed_callback:
3540 * Makes a delayed call
3543 bridge_delayed_callback(void *param
)
3545 struct bridge_delayed_call
*call
= (struct bridge_delayed_call
*)param
;
3546 struct bridge_softc
*sc
= call
->bdc_sc
;
3548 #if BRIDGE_DEBUG_DELAYED_CALLBACK
3549 if (bridge_delayed_callback_delay
> 0) {
3552 ts
.tv_sec
= bridge_delayed_callback_delay
;
3555 printf("%s: sleeping for %d seconds\n",
3556 __func__
, bridge_delayed_callback_delay
);
3558 msleep(&bridge_delayed_callback_delay
, NULL
, PZERO
,
3561 printf("%s: awoken\n", __func__
);
3563 #endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */
3567 #if BRIDGE_DEBUG_DELAYED_CALLBACK
3568 if (if_bridge_debug
& BR_DBGF_DELAYED_CALL
)
3569 printf("%s: %s call 0x%llx flags 0x%x\n", __func__
,
3570 sc
->sc_if_xname
, (uint64_t)VM_KERNEL_ADDRPERM(call
),
3572 #endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */
3574 if (call
->bdc_flags
& BDCF_CANCELLING
) {
3577 if ((sc
->sc_flags
& SCF_DETACHING
) == 0)
3578 (*call
->bdc_func
)(sc
);
3580 call
->bdc_flags
&= ~BDCF_OUTSTANDING
;
3585 * bridge_schedule_delayed_call:
3587 * Schedule a function to be called on a separate thread
3588 * The actual call may be scheduled to run at a given time or ASAP.
3591 bridge_schedule_delayed_call(struct bridge_delayed_call
*call
)
3593 uint64_t deadline
= 0;
3594 struct bridge_softc
*sc
= call
->bdc_sc
;
3596 BRIDGE_LOCK_ASSERT_HELD(sc
);
3598 if ((sc
->sc_flags
& SCF_DETACHING
) ||
3599 (call
->bdc_flags
& (BDCF_OUTSTANDING
| BDCF_CANCELLING
)))
3602 if (call
->bdc_ts
.tv_sec
|| call
->bdc_ts
.tv_nsec
) {
3603 nanoseconds_to_absolutetime(
3604 (uint64_t)call
->bdc_ts
.tv_sec
* NSEC_PER_SEC
+
3605 call
->bdc_ts
.tv_nsec
, &deadline
);
3606 clock_absolutetime_interval_to_deadline(deadline
, &deadline
);
3609 call
->bdc_flags
= BDCF_OUTSTANDING
;
3611 #if BRIDGE_DEBUG_DELAYED_CALLBACK
3612 if (if_bridge_debug
& BR_DBGF_DELAYED_CALL
)
3613 printf("%s: %s call 0x%llx flags 0x%x\n", __func__
,
3614 sc
->sc_if_xname
, (uint64_t)VM_KERNEL_ADDRPERM(call
),
3616 #endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */
3618 if (call
->bdc_ts
.tv_sec
|| call
->bdc_ts
.tv_nsec
)
3619 thread_call_func_delayed(
3620 (thread_call_func_t
)bridge_delayed_callback
,
3623 if (call
->bdc_thread_call
== NULL
)
3624 call
->bdc_thread_call
= thread_call_allocate(
3625 (thread_call_func_t
)bridge_delayed_callback
,
3627 thread_call_enter(call
->bdc_thread_call
);
3632 * bridge_cancel_delayed_call:
3634 * Cancel a queued or running delayed call.
3635 * If call is running, does not return until the call is done to
3636 * prevent race condition with the brigde interface getting destroyed
3639 bridge_cancel_delayed_call(struct bridge_delayed_call
*call
)
3642 struct bridge_softc
*sc
= call
->bdc_sc
;
3645 * The call was never scheduled
3650 BRIDGE_LOCK_ASSERT_HELD(sc
);
3652 call
->bdc_flags
|= BDCF_CANCELLING
;
3654 while (call
->bdc_flags
& BDCF_OUTSTANDING
) {
3656 if (if_bridge_debug
& BR_DBGF_DELAYED_CALL
)
3657 printf("%s: %s call 0x%llx flags 0x%x\n", __func__
,
3658 sc
->sc_if_xname
, (uint64_t)VM_KERNEL_ADDRPERM(call
),
3660 #endif /* BRIDGE_DEBUG */
3661 result
= thread_call_func_cancel(
3662 (thread_call_func_t
)bridge_delayed_callback
, call
, FALSE
);
3666 * We managed to dequeue the delayed call
3668 call
->bdc_flags
&= ~BDCF_OUTSTANDING
;
3671 * Wait for delayed call do be done running
3673 msleep(call
, &sc
->sc_mtx
, PZERO
, __func__
, NULL
);
3676 call
->bdc_flags
&= ~BDCF_CANCELLING
;
3680 * bridge_cleanup_delayed_call:
3682 * Dispose resource allocated for a delayed call
3683 * Assume the delayed call is not queued or running .
3686 bridge_cleanup_delayed_call(struct bridge_delayed_call
*call
)
3689 struct bridge_softc
*sc
= call
->bdc_sc
;
3692 * The call was never scheduled
3697 BRIDGE_LOCK_ASSERT_HELD(sc
);
3699 VERIFY((call
->bdc_flags
& BDCF_OUTSTANDING
) == 0);
3700 VERIFY((call
->bdc_flags
& BDCF_CANCELLING
) == 0);
3702 if (call
->bdc_thread_call
!= NULL
) {
3703 result
= thread_call_free(call
->bdc_thread_call
);
3704 if (result
== FALSE
)
3705 panic("%s thread_call_free() failed for call %p",
3707 call
->bdc_thread_call
= NULL
;
/*
 *	Initialize a bridge interface.
 */
bridge_init(struct ifnet *ifp)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifp->if_softc;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	if ((ifnet_flags(ifp) & IFF_RUNNING))

	error = ifnet_set_flags(ifp, IFF_RUNNING, IFF_RUNNING);

	if (bridge_in_bsd_mode(sc)) {
		/*
		 * Calling bridge_aging_timer() is OK as there are no entries to
		 * age so we're just going to arm the timer.
		 */
		bridge_aging_timer(sc);

	bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */
#endif /* BRIDGESTP */
/*
 *	Stop the bridge interface.
 */
bridge_ifstop(struct ifnet *ifp, int disable)
{
#pragma unused(disable)
	struct bridge_softc *sc = ifp->if_softc;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	if ((ifnet_flags(ifp) & IFF_RUNNING) == 0)

	if (bridge_in_bsd_mode(sc)) {
		bridge_cancel_delayed_call(&sc->sc_aging_timer);

	bstp_stop(&sc->sc_stp);
#endif /* BRIDGESTP */

	bridge_rtflush(sc, IFBF_FLUSHDYN);
	(void) ifnet_set_flags(ifp, 0, IFF_RUNNING);
/*
 *	Enqueue a packet on a bridge member interface.
 */
bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
{
	VERIFY(dst_ifp != NULL);

	/*
	 * We may be sending a fragment so traverse the mbuf
	 *
	 * NOTE: bridge_fragment() is called only when PFIL_HOOKS is enabled.
	 */
	struct flowadv adv = { FADV_SUCCESS };

	m->m_nextpkt = NULL;

	len = m->m_pkthdr.len;
	mflags = m->m_flags;
	m->m_flags |= M_PROTO1;		/* set to avoid loops */

	bridge_finalize_cksum(dst_ifp, m);

	/*
	 * If underlying interface can not do VLAN tag insertion itself
	 * then attach a packet tag that holds it.
	 */
	if ((m->m_flags & M_VLANTAG) &&
	    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
			printf("%s: %s: unable to prepend VLAN "
			    "header\n", __func__, dst_ifp->if_xname);
			(void) ifnet_stat_increment_out(dst_ifp,
		m->m_flags &= ~M_VLANTAG;
#endif /* HAS_IF_CAP */

	_error = dlil_output(dst_ifp, 0, m, NULL, NULL, 1, &adv);

	/* Preserve existing error value */
	else if (adv.code == FADV_FLOW_CONTROLLED)
	else if (adv.code == FADV_SUSPENDED)
		error = EQSUSPENDED;

	(void) ifnet_stat_increment_out(sc->sc_ifp, 1, len, 0);
	(void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
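/*
 * Flow-advisory note (partly an interpretation): dlil_output() reports
 * back-pressure through the flowadv structure; the code above maps
 * FADV_SUSPENDED to EQSUSPENDED and FADV_FLOW_CONTROLLED to a similar
 * "queue is flow controlled" error (the exact errno is not visible in this
 * excerpt), so callers can distinguish transient queueing pressure from hard
 * transmit failures. Packet and byte counters on the bridge interface are
 * bumped on success, and the error counter on failure.
 */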
3845 #if HAS_BRIDGE_DUMMYNET
3849 * Receive a queued packet from dummynet and pass it on to the output
3852 * The mbuf has the Ethernet header already attached.
3855 bridge_dummynet(struct mbuf
*m
, struct ifnet
*ifp
)
3857 struct bridge_softc
*sc
;
3859 sc
= ifp
->if_bridge
;
3862 * The packet didnt originate from a member interface. This should only
3863 * ever happen if a member interface is removed while packets are
3871 if (PFIL_HOOKED(&inet_pfil_hook
) || PFIL_HOOKED_INET6
) {
3872 if (bridge_pfil(&m
, sc
->sc_ifp
, ifp
, PFIL_OUT
) != 0)
3878 (void) bridge_enqueue(sc
, ifp
, m
);
3880 #endif /* HAS_BRIDGE_DUMMYNET */
3882 #if BRIDGE_MEMBER_OUT_FILTER
3884 * bridge_member_output:
3886 * Send output from a bridge member interface. This
3887 * performs the bridging function for locally originated
3890 * The mbuf has the Ethernet header already attached. We must
3891 * enqueue or free the mbuf before returning.
3894 bridge_member_output(struct ifnet
*ifp
, struct mbuf
*m
, struct sockaddr
*sa
,
3897 #pragma unused(sa, rt)
3898 struct ether_header
*eh
;
3899 struct ifnet
*dst_if
;
3900 struct bridge_softc
*sc
;
3904 if (if_bridge_debug
& BR_DBGF_OUTPUT
)
3905 printf("%s: ifp %s\n", __func__
, ifp
->if_xname
);
3906 #endif /* BRIDGE_DEBUG */
3908 if (m
->m_len
< ETHER_HDR_LEN
) {
3909 m
= m_pullup(m
, ETHER_HDR_LEN
);
3914 eh
= mtod(m
, struct ether_header
*);
3915 sc
= ifp
->if_bridge
;
3916 vlan
= VLANTAGOF(m
);
3921 * APPLE MODIFICATION
3922 * If the packet is an 802.1X ethertype, then only send on the
3923 * original output interface.
3925 if (eh
->ether_type
== htons(ETHERTYPE_PAE
)) {
3931 * If bridge is down, but the original output interface is up,
3932 * go ahead and send out that interface. Otherwise, the packet
3935 if ((sc
->sc_ifp
->if_flags
& IFF_RUNNING
) == 0) {
3941 * If the packet is a multicast, or we don't know a better way to
3942 * get there, send to all interfaces.
3944 if (ETHER_IS_MULTICAST(eh
->ether_dhost
))
3947 dst_if
= bridge_rtlookup(sc
, eh
->ether_dhost
, vlan
);
3948 if (dst_if
== NULL
) {
3949 struct bridge_iflist
*bif
;
3951 int error
= 0, used
= 0;
3955 BRIDGE_LOCK2REF(sc
, error
);
3961 TAILQ_FOREACH(bif
, &sc
->sc_iflist
, bif_next
) {
3962 dst_if
= bif
->bif_ifp
;
3964 if (dst_if
->if_type
== IFT_GIF
)
3966 if ((dst_if
->if_flags
& IFF_RUNNING
) == 0)
3970 * If this is not the original output interface,
3971 * and the interface is participating in spanning
3972 * tree, make sure the port is in a state that
3973 * allows forwarding.
3975 if (dst_if
!= ifp
&& (bif
->bif_ifflags
& IFBIF_STP
) &&
3976 bif
->bif_stp
.bp_state
== BSTP_IFSTATE_DISCARDING
)
3979 if (LIST_NEXT(bif
, bif_next
) == NULL
) {
3983 mc
= m_copypacket(m
, M_DONTWAIT
);
3985 (void) ifnet_stat_increment_out(
3986 sc
->sc_ifp
, 0, 0, 1);
3991 (void) bridge_enqueue(sc
, dst_if
, mc
);
4001 * XXX Spanning tree consideration here?
4005 if ((dst_if
->if_flags
& IFF_RUNNING
) == 0) {
4012 (void) bridge_enqueue(sc
, dst_if
, m
);
4015 #endif /* BRIDGE_MEMBER_OUT_FILTER */
4020 * This routine is called externally from above only when if_bridge_txstart
4021 * is disabled; otherwise it is called internally by bridge_start().
4024 bridge_output(struct ifnet
*ifp
, struct mbuf
*m
)
4026 struct bridge_softc
*sc
= ifnet_softc(ifp
);
4027 struct ether_header
*eh
;
4028 struct ifnet
*dst_if
;
4031 eh
= mtod(m
, struct ether_header
*);
4035 ASSERT(bridge_in_bsd_mode(sc
));
4037 if (!(m
->m_flags
& (M_BCAST
|M_MCAST
)))
4038 dst_if
= bridge_rtlookup(sc
, eh
->ether_dhost
, 0);
4040 (void) ifnet_stat_increment_out(ifp
, 1, m
->m_pkthdr
.len
, 0);
4043 if (sc
->sc_bpf_output
)
4044 bridge_bpf_output(ifp
, m
);
4047 if (dst_if
== NULL
) {
4048 /* callee will unlock */
4049 bridge_broadcast(sc
, ifp
, m
, 0);
4052 error
= bridge_enqueue(sc
, dst_if
, m
);
4059 bridge_finalize_cksum(struct ifnet
*ifp
, struct mbuf
*m
)
4061 struct ether_header
*eh
= mtod(m
, struct ether_header
*);
4062 uint32_t sw_csum
, hwcap
;
4065 hwcap
= (ifp
->if_hwassist
| CSUM_DATA_VALID
);
4069 /* do in software what the hardware cannot */
4070 sw_csum
= m
->m_pkthdr
.csum_flags
& ~IF_HWASSIST_CSUM_FLAGS(hwcap
);
4071 sw_csum
&= IF_HWASSIST_CSUM_MASK
;
4073 switch (ntohs(eh
->ether_type
)) {
4075 if ((hwcap
& CSUM_PARTIAL
) && !(sw_csum
& CSUM_DELAY_DATA
) &&
4076 (m
->m_pkthdr
.csum_flags
& CSUM_DELAY_DATA
)) {
4077 if (m
->m_pkthdr
.csum_flags
& CSUM_TCP
) {
4079 sizeof (*eh
) + sizeof (struct ip
);
4081 m
->m_pkthdr
.csum_data
& 0xffff;
4082 m
->m_pkthdr
.csum_flags
|=
4083 (CSUM_DATA_VALID
| CSUM_PARTIAL
);
4084 m
->m_pkthdr
.csum_tx_stuff
= (ulpoff
+ start
);
4085 m
->m_pkthdr
.csum_tx_start
= start
;
4087 sw_csum
|= (CSUM_DELAY_DATA
&
4088 m
->m_pkthdr
.csum_flags
);
4091 (void) in_finalize_cksum(m
, sizeof (*eh
), sw_csum
);
4095 case ETHERTYPE_IPV6
:
4096 if ((hwcap
& CSUM_PARTIAL
) &&
4097 !(sw_csum
& CSUM_DELAY_IPV6_DATA
) &&
4098 (m
->m_pkthdr
.csum_flags
& CSUM_DELAY_IPV6_DATA
)) {
4099 if (m
->m_pkthdr
.csum_flags
& CSUM_TCPIPV6
) {
4101 sizeof (*eh
) + sizeof (struct ip6_hdr
);
4103 m
->m_pkthdr
.csum_data
& 0xffff;
4104 m
->m_pkthdr
.csum_flags
|=
4105 (CSUM_DATA_VALID
| CSUM_PARTIAL
);
4106 m
->m_pkthdr
.csum_tx_stuff
= (ulpoff
+ start
);
4107 m
->m_pkthdr
.csum_tx_start
= start
;
4109 sw_csum
|= (CSUM_DELAY_IPV6_DATA
&
4110 m
->m_pkthdr
.csum_flags
);
4113 (void) in6_finalize_cksum(m
, sizeof (*eh
), -1, -1, sw_csum
);
4122 * Start output on a bridge.
4124 * This routine is invoked by the start worker thread; because we never call
4125 * it directly, there is no need do deploy any serialization mechanism other
4126 * than what's already used by the worker thread, i.e. this is already single
4129 * This routine is called only when if_bridge_txstart is enabled.
4132 bridge_start(struct ifnet
*ifp
)
4137 if (ifnet_dequeue(ifp
, &m
) != 0)
4140 (void) bridge_output(ifp
, m
);
4147 * The forwarding function of the bridge.
4149 * NOTE: Releases the lock on return.
4152 bridge_forward(struct bridge_softc
*sc
, struct bridge_iflist
*sbif
,
4155 struct bridge_iflist
*dbif
;
4156 struct ifnet
*src_if
, *dst_if
, *ifp
;
4157 struct ether_header
*eh
;
4162 BRIDGE_LOCK_ASSERT_HELD(sc
);
4163 ASSERT(bridge_in_bsd_mode(sc
));
4166 if (if_bridge_debug
& BR_DBGF_OUTPUT
)
4167 printf("%s: %s m 0x%llx\n", __func__
, sc
->sc_ifp
->if_xname
,
4168 (uint64_t)VM_KERNEL_ADDRPERM(m
));
4169 #endif /* BRIDGE_DEBUG */
4171 src_if
= m
->m_pkthdr
.rcvif
;
4174 (void) ifnet_stat_increment_in(ifp
, 1, m
->m_pkthdr
.len
, 0);
4175 vlan
= VLANTAGOF(m
);
4178 if ((sbif
->bif_ifflags
& IFBIF_STP
) &&
4179 sbif
->bif_stp
.bp_state
== BSTP_IFSTATE_DISCARDING
)
4182 eh
= mtod(m
, struct ether_header
*);
4183 dst
= eh
->ether_dhost
;
4185 /* If the interface is learning, record the address. */
4186 if (sbif
->bif_ifflags
& IFBIF_LEARNING
) {
4187 error
= bridge_rtupdate(sc
, eh
->ether_shost
, vlan
,
4188 sbif
, 0, IFBAF_DYNAMIC
);
4190 * If the interface has addresses limits then deny any source
4191 * that is not in the cache.
4193 if (error
&& sbif
->bif_addrmax
)
4197 if ((sbif
->bif_ifflags
& IFBIF_STP
) != 0 &&
4198 sbif
->bif_stp
.bp_state
== BSTP_IFSTATE_LEARNING
)
4202 * At this point, the port either doesn't participate
4203 * in spanning tree or it is in the forwarding state.
4207 * If the packet is unicast, destined for someone on
4208 * "this" side of the bridge, drop it.
4210 if ((m
->m_flags
& (M_BCAST
|M_MCAST
)) == 0) {
4211 dst_if
= bridge_rtlookup(sc
, dst
, vlan
);
4212 if (src_if
== dst_if
)
4216 * Check if its a reserved multicast address, any address
4217 * listed in 802.1D section 7.12.6 may not be forwarded by the
4219 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
4221 if (dst
[0] == 0x01 && dst
[1] == 0x80 &&
4222 dst
[2] == 0xc2 && dst
[3] == 0x00 &&
4223 dst
[4] == 0x00 && dst
[5] <= 0x0f)
4227 /* ...forward it to all interfaces. */
4228 atomic_add_64(&ifp
->if_imcasts
, 1);
4233 * If we have a destination interface which is a member of our bridge,
4234 * OR this is a unicast packet, push it through the bpf(4) machinery.
4235 * For broadcast or multicast packets, don't bother because it will
4236 * be reinjected into ether_input. We do this before we pass the packets
4237 * through the pfil(9) framework, as it is possible that pfil(9) will
4238 * drop the packet, or possibly modify it, making it difficult to debug
4239 * firewall issues on the bridge.
4242 if (eh
->ether_type
== htons(ETHERTYPE_RSN_PREAUTH
) ||
4243 dst_if
!= NULL
|| (m
->m_flags
& (M_BCAST
| M_MCAST
)) == 0) {
4244 m
->m_pkthdr
.rcvif
= ifp
;
4245 if (sc
->sc_bpf_input
)
4246 bridge_bpf_input(ifp
, m
);
4248 #endif /* NBPFILTER */
4250 #if defined(PFIL_HOOKS)
4251 /* run the packet filter */
4252 if (PFIL_HOOKED(&inet_pfil_hook
) || PFIL_HOOKED_INET6
) {
4254 if (bridge_pfil(&m
, ifp
, src_if
, PFIL_IN
) != 0)
4260 #endif /* PFIL_HOOKS */
4262 if (dst_if
== NULL
) {
4263 bridge_broadcast(sc
, src_if
, m
, 1);
4268 * At this point, we're dealing with a unicast frame
4269 * going to a different interface.
4271 if ((dst_if
->if_flags
& IFF_RUNNING
) == 0)
4274 dbif
= bridge_lookup_member_if(sc
, dst_if
);
4276 /* Not a member of the bridge (anymore?) */
4279 /* Private segments can not talk to each other */
4280 if (sbif
->bif_ifflags
& dbif
->bif_ifflags
& IFBIF_PRIVATE
)
4283 if ((dbif
->bif_ifflags
& IFBIF_STP
) &&
4284 dbif
->bif_stp
.bp_state
== BSTP_IFSTATE_DISCARDING
)
4288 /* APPLE MODIFICATION <rdar:6985737> */
4289 if ((dst_if
->if_extflags
& IFEXTF_DHCPRA_MASK
) != 0) {
4290 m
= ip_xdhcpra_output(dst_if
, m
);
4292 ++sc
->sc_sc
.sc_ifp
.if_xdhcpra
;
4296 #endif /* HAS_DHCPRA_MASK */
4300 #if defined(PFIL_HOOKS)
4301 if (PFIL_HOOKED(&inet_pfil_hook
) || PFIL_HOOKED_INET6
) {
4302 if (bridge_pfil(&m
, ifp
, dst_if
, PFIL_OUT
) != 0)
4307 #endif /* PFIL_HOOKS */
4309 (void) bridge_enqueue(sc
, dst_if
, m
);
4319 char *ether_ntop(char *, size_t, const u_char
*);
4321 __private_extern__
char *
4322 ether_ntop(char *buf
, size_t len
, const u_char
*ap
)
4324 snprintf(buf
, len
, "%02x:%02x:%02x:%02x:%02x:%02x",
4325 ap
[0], ap
[1], ap
[2], ap
[3], ap
[4], ap
[5]);
4330 #endif /* BRIDGE_DEBUG */
4335 * Filter input from a member interface. Queue the packet for
4336 * bridging if it is not for us.
4338 __private_extern__ errno_t
4339 bridge_input(struct ifnet
*ifp
, struct mbuf
*m
, void *frame_header
)
4341 struct bridge_softc
*sc
= ifp
->if_bridge
;
4342 struct bridge_iflist
*bif
, *bif2
;
4344 struct ether_header
*eh
;
4345 struct mbuf
*mc
, *mc2
;
4349 ASSERT(bridge_in_bsd_mode(sc
));
4351 if (if_bridge_debug
& BR_DBGF_INPUT
)
4352 printf("%s: %s from %s m 0x%llx data 0x%llx\n", __func__
,
4353 sc
->sc_ifp
->if_xname
, ifp
->if_xname
,
4354 (uint64_t)VM_KERNEL_ADDRPERM(m
),
4355 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m
)));
4356 #endif /* BRIDGE_DEBUG */
4358 if ((sc
->sc_ifp
->if_flags
& IFF_RUNNING
) == 0) {
4360 if (if_bridge_debug
& BR_DBGF_INPUT
)
4361 printf("%s: %s not running passing along\n",
4362 __func__
, sc
->sc_ifp
->if_xname
);
4363 #endif /* BRIDGE_DEBUG */
4368 vlan
= VLANTAGOF(m
);
4372 * Implement support for bridge monitoring. If this flag has been
4373 * set on this interface, discard the packet once we push it through
4374 * the bpf(4) machinery, but before we do, increment the byte and
4375 * packet counters associated with this interface.
4377 if ((bifp
->if_flags
& IFF_MONITOR
) != 0) {
4378 m
->m_pkthdr
.rcvif
= bifp
;
4379 BRIDGE_BPF_MTAP_INPUT(sc
, m
);
4380 (void) ifnet_stat_increment_in(bifp
, 1, m
->m_pkthdr
.len
, 0);
4382 return (EJUSTRETURN
);
4384 #endif /* IFF_MONITOR */
4387 * Need to clear the promiscous flags otherwise it will be
4388 * dropped by DLIL after processing filters
4390 if ((mbuf_flags(m
) & MBUF_PROMISC
))
4391 mbuf_setflags_mask(m
, 0, MBUF_PROMISC
);
4394 bif
= bridge_lookup_member_if(sc
, ifp
);
4398 if (if_bridge_debug
& BR_DBGF_INPUT
)
4399 printf("%s: %s bridge_lookup_member_if failed\n",
4400 __func__
, sc
->sc_ifp
->if_xname
);
4401 #endif /* BRIDGE_DEBUG */
4405 if (bif
->bif_flags
& BIFF_HOST_FILTER
) {
4406 error
= bridge_host_filter(bif
, m
);
4408 if (if_bridge_debug
& BR_DBGF_INPUT
)
4409 printf("%s: %s bridge_host_filter failed\n",
4410 __func__
, bif
->bif_ifp
->if_xname
);
4412 return (EJUSTRETURN
);
4416 eh
= mtod(m
, struct ether_header
*);
4420 if (m
->m_flags
& (M_BCAST
|M_MCAST
)) {
4423 if (if_bridge_debug
& BR_DBGF_MCAST
)
4424 if ((m
->m_flags
& M_MCAST
))
4425 printf("%s: multicast: "
4426 "%02x:%02x:%02x:%02x:%02x:%02x\n",
4428 eh
->ether_dhost
[0], eh
->ether_dhost
[1],
4429 eh
->ether_dhost
[2], eh
->ether_dhost
[3],
4430 eh
->ether_dhost
[4], eh
->ether_dhost
[5]);
4431 #endif /* BRIDGE_DEBUG */
4433 /* Tap off 802.1D packets; they do not get forwarded. */
4434 if (memcmp(eh
->ether_dhost
, bstp_etheraddr
,
4435 ETHER_ADDR_LEN
) == 0) {
4437 m
= bstp_input(&bif
->bif_stp
, ifp
, m
);
4438 #else /* !BRIDGESTP */
4441 #endif /* !BRIDGESTP */
4444 return (EJUSTRETURN
);
4448 if ((bif
->bif_ifflags
& IFBIF_STP
) &&
4449 bif
->bif_stp
.bp_state
== BSTP_IFSTATE_DISCARDING
) {
4455 * Make a deep copy of the packet and enqueue the copy
4456 * for bridge processing; return the original packet for
4459 mc
= m_dup(m
, M_DONTWAIT
);
4466 * Perform the bridge forwarding function with the copy.
4468 * Note that bridge_forward calls BRIDGE_UNLOCK
4470 bridge_forward(sc
, bif
, mc
);
4473 * Reinject the mbuf as arriving on the bridge so we have a
4474 * chance at claiming multicast packets. We can not loop back
4475 * here from ether_input as a bridge is never a member of a
4478 VERIFY(bifp
->if_bridge
== NULL
);
4479 mc2
= m_dup(m
, M_DONTWAIT
);
4481 /* Keep the layer3 header aligned */
4482 int i
= min(mc2
->m_pkthdr
.len
, max_protohdr
);
4483 mc2
= m_copyup(mc2
, i
, ETHER_ALIGN
);
4486 /* mark packet as arriving on the bridge */
4487 mc2
->m_pkthdr
.rcvif
= bifp
;
4488 mc2
->m_pkthdr
.pkt_hdr
= mbuf_data(mc2
);
4491 if (sc
->sc_bpf_input
)
4492 bridge_bpf_input(bifp
, mc2
);
4493 #endif /* NBPFILTER */
4494 (void) mbuf_setdata(mc2
,
4495 (char *)mbuf_data(mc2
) + ETHER_HDR_LEN
,
4496 mbuf_len(mc2
) - ETHER_HDR_LEN
);
4497 (void) mbuf_pkthdr_adjustlen(mc2
, - ETHER_HDR_LEN
);
4499 (void) ifnet_stat_increment_in(bifp
, 1,
4500 mbuf_pkthdr_len(mc2
), 0);
4503 if (if_bridge_debug
& BR_DBGF_MCAST
)
4504 printf("%s: %s mcast for us\n", __func__
,
4505 sc
->sc_ifp
->if_xname
);
4506 #endif /* BRIDGE_DEBUG */
4508 dlil_input_packet_list(bifp
, mc2
);
4511 /* Return the original packet for local processing. */
4515 if ((bif
->bif_ifflags
& IFBIF_STP
) &&
4516 bif
->bif_stp
.bp_state
== BSTP_IFSTATE_DISCARDING
) {
4522 #define CARP_CHECK_WE_ARE_DST(iface) \
4523 ((iface)->if_carp &&\
4524 carp_forus((iface)->if_carp, eh->ether_dhost))
4525 #define CARP_CHECK_WE_ARE_SRC(iface) \
4526 ((iface)->if_carp &&\
4527 carp_forus((iface)->if_carp, eh->ether_shost))
4529 #define CARP_CHECK_WE_ARE_DST(iface) 0
4530 #define CARP_CHECK_WE_ARE_SRC(iface) 0
4534 #define PFIL_HOOKED_INET6 PFIL_HOOKED(&inet6_pfil_hook)
4536 #define PFIL_HOOKED_INET6 0
4539 #if defined(PFIL_HOOKS)
4540 #define PFIL_PHYS(sc, ifp, m) do { \
4541 if (pfil_local_phys && \
4542 (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) { \
4543 if (bridge_pfil(&m, NULL, ifp, \
4544 PFIL_IN) != 0 || m == NULL) { \
4545 BRIDGE_UNLOCK(sc); \
4550 #else /* PFIL_HOOKS */
4551 #define PFIL_PHYS(sc, ifp, m)
4552 #endif /* PFIL_HOOKS */
4554 #define GRAB_OUR_PACKETS(iface) \
4555 if ((iface)->if_type == IFT_GIF) \
4557 /* It is destined for us. */ \
4558 if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, \
4559 ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST((iface))) { \
4560 if ((iface)->if_type == IFT_BRIDGE) { \
4561 BRIDGE_BPF_MTAP_INPUT(sc, m); \
4562 /* Filter on the physical interface. */ \
4563 PFIL_PHYS(sc, iface, m); \
4565 if (bif->bif_ifflags & IFBIF_LEARNING) { \
4566 error = bridge_rtupdate(sc, eh->ether_shost, \
4567 vlan, bif, 0, IFBAF_DYNAMIC); \
4568 if (error && bif->bif_addrmax) { \
4569 BRIDGE_UNLOCK(sc); \
4570 return (EJUSTRETURN); \
4573 m->m_pkthdr.rcvif = iface; \
4574 BRIDGE_UNLOCK(sc); \
4578 /* We just received a packet that we sent out. */ \
4579 if (memcmp(IF_LLADDR((iface)), eh->ether_shost, \
4580 ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_SRC((iface))) { \
4581 BRIDGE_UNLOCK(sc); \
4582 return (EJUSTRETURN); \
4589 * If the packet is for us, set the packets source as the
4590 * bridge, and return the packet back to ether_input for
4593 if (memcmp(eh
->ether_dhost
, IF_LLADDR(bifp
),
4594 ETHER_ADDR_LEN
) == 0 || CARP_CHECK_WE_ARE_DST(bifp
)) {
4596 /* Mark the packet as arriving on the bridge interface */
4597 (void) mbuf_pkthdr_setrcvif(m
, bifp
);
4598 mbuf_pkthdr_setheader(m
, frame_header
);
4601 * If the interface is learning, and the source
4602 * address is valid and not multicast, record
4605 if (bif
->bif_ifflags
& IFBIF_LEARNING
)
4606 (void) bridge_rtupdate(sc
, eh
->ether_shost
,
4607 vlan
, bif
, 0, IFBAF_DYNAMIC
);
4609 BRIDGE_BPF_MTAP_INPUT(sc
, m
);
4611 (void) mbuf_setdata(m
, (char *)mbuf_data(m
) + ETHER_HDR_LEN
,
4612 mbuf_len(m
) - ETHER_HDR_LEN
);
4613 (void) mbuf_pkthdr_adjustlen(m
, - ETHER_HDR_LEN
);
4615 (void) ifnet_stat_increment_in(bifp
, 1, mbuf_pkthdr_len(m
), 0);
4620 if (if_bridge_debug
& BR_DBGF_INPUT
)
4621 printf("%s: %s packet for bridge\n", __func__
,
4622 sc
->sc_ifp
->if_xname
);
4623 #endif /* BRIDGE_DEBUG */
4625 dlil_input_packet_list(bifp
, m
);
4627 return (EJUSTRETURN
);
4631 * if the destination of the packet is for the MAC address of
4632 * the member interface itself, then we don't need to forward
4633 * it -- just pass it back. Note that it'll likely just be
4634 * dropped by the stack, but if something else is bound to
4635 * the interface directly (for example, the wireless stats
4636 * protocol -- although that actually uses BPF right now),
4637 * then it will consume the packet
4639 * ALSO, note that we do this check AFTER checking for the
4640 * bridge's own MAC address, because the bridge may be
4641 * using the SAME MAC address as one of its interfaces
4643 if (memcmp(eh
->ether_dhost
, IF_LLADDR(ifp
), ETHER_ADDR_LEN
) == 0) {
4645 #ifdef VERY_VERY_VERY_DIAGNOSTIC
4646 printf("%s: not forwarding packet bound for member "
4647 "interface\n", __func__
);
4653 /* Now check the all bridge members. */
4654 TAILQ_FOREACH(bif2
, &sc
->sc_iflist
, bif_next
) {
4655 GRAB_OUR_PACKETS(bif2
->bif_ifp
)
4658 #undef CARP_CHECK_WE_ARE_DST
4659 #undef CARP_CHECK_WE_ARE_SRC
4660 #undef GRAB_OUR_PACKETS
4663 * Perform the bridge forwarding function.
4665 * Note that bridge_forward calls BRIDGE_UNLOCK
4667 bridge_forward(sc
, bif
, m
);
4669 return (EJUSTRETURN
);
/*
 * bridge_broadcast:
 *
 *	Send a frame to all interfaces that are members of
 *	the bridge, except for the one on which the packet
 *	arrived.
 *
 *	NOTE: Releases the lock on return.
 */
static void
bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
    struct mbuf *m, int runfilt)
{
#ifndef PFIL_HOOKS
#pragma unused(runfilt)
#endif /* PFIL_HOOKS */
	struct bridge_iflist *dbif, *sbif;
	struct mbuf *mc;
	struct ifnet *dst_if;
	int error = 0, used = 0;

	sbif = bridge_lookup_member_if(sc, src_if);

	BRIDGE_LOCK2REF(sc, error);
	if (error) {
		m_freem(m);
		return;
	}

#ifdef PFIL_HOOKS
	/* Filter on the bridge interface before broadcasting */
	if (runfilt && (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) {
		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
			goto out;
		if (m == NULL)
			goto out;
	}
#endif /* PFIL_HOOKS */

	TAILQ_FOREACH(dbif, &sc->sc_iflist, bif_next) {
		dst_if = dbif->bif_ifp;
		if (dst_if == src_if)
			continue;

		/* Private segments can not talk to each other */
		if (sbif &&
		    (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE))
			continue;

		if ((dbif->bif_ifflags & IFBIF_STP) &&
		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
			continue;

		if ((dbif->bif_ifflags & IFBIF_DISCOVER) == 0 &&
		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
			continue;

		if ((dst_if->if_flags & IFF_RUNNING) == 0)
			continue;

		if (!(dbif->bif_flags & BIFF_MEDIA_ACTIVE)) {
			continue;
		}

		if (TAILQ_NEXT(dbif, bif_next) == NULL) {
			mc = m;
			used = 1;
		} else {
			mc = m_dup(m, M_DONTWAIT);
			if (mc == NULL) {
				(void) ifnet_stat_increment_out(sc->sc_ifp,
				    0, 0, 1);
				continue;
			}
		}

#ifdef PFIL_HOOKS
		/*
		 * Filter on the output interface. Pass a NULL bridge interface
		 * pointer so we do not redundantly filter on the bridge for
		 * each interface we broadcast on.
		 */
		if (runfilt &&
		    (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) {
			if (used == 0) {
				/* Keep the layer3 header aligned */
				int i = min(mc->m_pkthdr.len, max_protohdr);
				mc = m_copyup(mc, i, ETHER_ALIGN);
				if (mc == NULL) {
					(void) ifnet_stat_increment_out(
					    sc->sc_ifp, 0, 0, 1);
					continue;
				}
			}
			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
				continue;
			if (mc == NULL)
				continue;
		}
#endif /* PFIL_HOOKS */

		(void) bridge_enqueue(sc, dst_if, mc);
	}
	if (used == 0)
		m_freem(m);

#ifdef PFIL_HOOKS
out:
#endif /* PFIL_HOOKS */

	BRIDGE_UNREF(sc);
}

/*
 * bridge_span:
 *
 *	Duplicate a packet out one or more interfaces that are in span mode,
 *	the original mbuf is unmodified.
 */
static void
bridge_span(struct bridge_softc *sc, struct mbuf *m)
{
	struct bridge_iflist *bif;
	struct ifnet *dst_if;
	struct mbuf *mc;

	if (TAILQ_EMPTY(&sc->sc_spanlist))
		return;

	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) {
		dst_if = bif->bif_ifp;

		if ((dst_if->if_flags & IFF_RUNNING) == 0)
			continue;

		mc = m_copypacket(m, M_DONTWAIT);
		if (mc == NULL) {
			(void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
			continue;
		}

		(void) bridge_enqueue(sc, dst_if, mc);
	}
}

/*
 * bridge_rtupdate:
 *
 *	Add a bridge routing entry.
 */
static int
bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
    struct bridge_iflist *bif, int setflags, uint8_t flags)
{
	struct bridge_rtnode *brt;
	int error;

	BRIDGE_LOCK_ASSERT_HELD(sc);
	ASSERT(bridge_in_bsd_mode(sc));

	/* Check the source address is valid and not multicast. */
	if (ETHER_IS_MULTICAST(dst) ||
	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
	    dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0)
		return (EINVAL);

	/* 802.1p frames map to vlan 1 */
	if (vlan == 0)
		vlan = 1;

	/*
	 * A route for this destination might already exist.  If so,
	 * update it, otherwise create a new one.
	 */
	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
		if (sc->sc_brtcnt >= sc->sc_brtmax) {
			sc->sc_brtexceeded++;
			return (ENOSPC);
		}
		/* Check per interface address limits (if enabled) */
		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
			bif->bif_addrexceeded++;
			return (ENOSPC);
		}

		/*
		 * Allocate a new bridge forwarding node, and
		 * initialize the expiration time and Ethernet
		 * address.
		 */
		brt = zalloc_noblock(bridge_rtnode_pool);
		if (brt == NULL)
			return (ENOMEM);
		bzero(brt, sizeof(struct bridge_rtnode));

		if (bif->bif_ifflags & IFBIF_STICKY)
			brt->brt_flags = IFBAF_STICKY;
		else
			brt->brt_flags = IFBAF_DYNAMIC;

		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
		brt->brt_vlan = vlan;

		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
			zfree(bridge_rtnode_pool, brt);
			return (error);
		}
		brt->brt_dst = bif;
		bif->bif_addrcnt++;
#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_RT_TABLE)
			printf("%s: added %02x:%02x:%02x:%02x:%02x:%02x "
			    "on %s count %u hashsize %u\n", __func__,
			    dst[0], dst[1], dst[2], dst[3], dst[4], dst[5],
			    sc->sc_ifp->if_xname, sc->sc_brtcnt,
			    sc->sc_rthash_size);
#endif /* BRIDGE_DEBUG */
	}

	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
	    brt->brt_dst != bif) {
		brt->brt_dst->bif_addrcnt--;
		brt->brt_dst = bif;
		brt->brt_dst->bif_addrcnt++;
	}

	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
		unsigned long now;

		now = (unsigned long) net_uptime();
		brt->brt_expire = now + sc->sc_brttimeout;
	}
	if (setflags)
		brt->brt_flags = flags;

	return (0);
}

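/*
 * Illustrative note (an assumption added for clarity, not taken from the
 * original source): a frame learned on a member from, say, 00:11:22:33:44:55
 * creates or refreshes a dynamic entry whose brt_expire is pushed out to
 * now + sc_brttimeout each time the address is seen again, while an entry
 * learned on an IFBIF_STICKY member keeps IFBAF_STICKY and is never
 * re-pointed at another member by the dynamic-move check above.
 */
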
/*
 * bridge_rtlookup:
 *
 *	Lookup the destination interface for an address.
 */
static struct ifnet *
bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
{
	struct bridge_rtnode *brt;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
		return (NULL);

	return (brt->brt_ifp);
}

/*
 * bridge_rttrim:
 *
 *	Trim the routing table so that we have a number
 *	of routing entries less than or equal to the
 *	maximum number.
 */
static void
bridge_rttrim(struct bridge_softc *sc)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	/* Make sure we actually need to do this. */
	if (sc->sc_brtcnt <= sc->sc_brtmax)
		return;

	/* Force an aging cycle; this might trim enough addresses. */
	bridge_rtage(sc);
	if (sc->sc_brtcnt <= sc->sc_brtmax)
		return;

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
			bridge_rtnode_destroy(sc, brt);
			if (sc->sc_brtcnt <= sc->sc_brtmax)
				return;
		}
	}
}

/*
 * bridge_aging_timer:
 *
 *	Aging periodic timer for the bridge routing table.
 */
static void
bridge_aging_timer(struct bridge_softc *sc)
{
	BRIDGE_LOCK_ASSERT_HELD(sc);

	bridge_rtage(sc);

	if ((sc->sc_ifp->if_flags & IFF_RUNNING) &&
	    (sc->sc_flags & SCF_DETACHING) == 0) {
		sc->sc_aging_timer.bdc_sc = sc;
		sc->sc_aging_timer.bdc_func = bridge_aging_timer;
		sc->sc_aging_timer.bdc_ts.tv_sec = bridge_rtable_prune_period;
		bridge_schedule_delayed_call(&sc->sc_aging_timer);
	}
}

/*
 * bridge_rtage:
 *
 *	Perform an aging cycle.
 */
static void
bridge_rtage(struct bridge_softc *sc)
{
	struct bridge_rtnode *brt, *nbrt;
	unsigned long now;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	now = (unsigned long) net_uptime();

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
			if (now >= brt->brt_expire)
				bridge_rtnode_destroy(sc, brt);
		}
	}
}

/*
 * bridge_rtflush:
 *
 *	Remove all dynamic addresses from the bridge.
 */
static void
bridge_rtflush(struct bridge_softc *sc, int full)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
			bridge_rtnode_destroy(sc, brt);
	}
}

/*
 * bridge_rtdaddr:
 *
 *	Remove an address from the table.
 */
static int
bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
{
	struct bridge_rtnode *brt;
	int found = 0;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	/*
	 * If vlan is zero then we want to delete for all vlans so the lookup
	 * may return more than one.
	 */
	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
		bridge_rtnode_destroy(sc, brt);
		found = 1;
	}

	return (found ? 0 : ENOENT);
}

/*
 * bridge_rtdelete:
 *
 *	Delete routes to a specific member interface.
 */
static void
bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if (brt->brt_ifp == ifp && (full ||
		    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
			bridge_rtnode_destroy(sc, brt);
	}
}

/*
 * bridge_rtable_init:
 *
 *	Initialize the route table for this bridge.
 */
static int
bridge_rtable_init(struct bridge_softc *sc)
{
	u_int32_t i;

	ASSERT(bridge_in_bsd_mode(sc));

	sc->sc_rthash = _MALLOC(sizeof (*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (sc->sc_rthash == NULL) {
		printf("%s: no memory\n", __func__);
		return (ENOMEM);
	}
	sc->sc_rthash_size = BRIDGE_RTHASH_SIZE;

	for (i = 0; i < sc->sc_rthash_size; i++)
		LIST_INIT(&sc->sc_rthash[i]);

	sc->sc_rthash_key = RandomULong();

	LIST_INIT(&sc->sc_rtlist);

	return (0);
}

/*
 * bridge_rthash_delayed_resize:
 *
 *	Resize the routing table hash on a delayed thread call.
 */
static void
bridge_rthash_delayed_resize(struct bridge_softc *sc)
{
	u_int32_t new_rthash_size;
	struct _bridge_rtnode_list *new_rthash = NULL;
	struct _bridge_rtnode_list *old_rthash = NULL;
	u_int32_t i;
	struct bridge_rtnode *brt;
	int error = 0;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	/*
	 * Four entries per hash bucket is our ideal load factor
	 */
	if (sc->sc_brtcnt < sc->sc_rthash_size * 4)
		goto out;

	/*
	 * Doubling the number of hash buckets may be too simplistic
	 * especially when facing a spike of new entries
	 */
	new_rthash_size = sc->sc_rthash_size * 2;

	sc->sc_flags |= SCF_RESIZING;
	BRIDGE_UNLOCK(sc);

	new_rthash = _MALLOC(sizeof (*sc->sc_rthash) * new_rthash_size,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	BRIDGE_LOCK(sc);
	sc->sc_flags &= ~SCF_RESIZING;

	if (new_rthash == NULL) {
		error = ENOMEM;
		goto out;
	}
	if ((sc->sc_flags & SCF_DETACHING)) {
		error = ENODEV;
		goto out;
	}
	/*
	 * Fail safe from here on
	 */
	old_rthash = sc->sc_rthash;
	sc->sc_rthash = new_rthash;
	sc->sc_rthash_size = new_rthash_size;

	/*
	 * Get a new key to force entries to be shuffled around to reduce
	 * the likelihood they will land in the same buckets
	 */
	sc->sc_rthash_key = RandomULong();

	for (i = 0; i < sc->sc_rthash_size; i++)
		LIST_INIT(&sc->sc_rthash[i]);

	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
		LIST_REMOVE(brt, brt_hash);
		(void) bridge_rtnode_hash(sc, brt);
	}
out:
	if (error == 0) {
#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_RT_TABLE)
			printf("%s: %s new size %u\n", __func__,
			    sc->sc_ifp->if_xname, sc->sc_rthash_size);
#endif /* BRIDGE_DEBUG */
		if (old_rthash)
			_FREE(old_rthash, M_DEVBUF);
	} else {
#if BRIDGE_DEBUG
		printf("%s: %s failed %d\n", __func__,
		    sc->sc_ifp->if_xname, error);
#endif /* BRIDGE_DEBUG */
		if (new_rthash != NULL)
			_FREE(new_rthash, M_DEVBUF);
	}
}

/*
 * bridge_rthash_resize:
 *
 *	Resize the number of hash buckets based on the load factor.
 *	Currently we only grow the table; failing to resize it is not fatal.
 */
static void
bridge_rthash_resize(struct bridge_softc *sc)
{
	BRIDGE_LOCK_ASSERT_HELD(sc);

	if ((sc->sc_flags & SCF_DETACHING) || (sc->sc_flags & SCF_RESIZING))
		return;

	/*
	 * Four entries per hash bucket is our ideal load factor
	 */
	if (sc->sc_brtcnt < sc->sc_rthash_size * 4)
		return;

	/*
	 * Hard limit on the size of the routing hash table
	 */
	if (sc->sc_rthash_size >= bridge_rtable_hash_size_max)
		return;

	sc->sc_resize_call.bdc_sc = sc;
	sc->sc_resize_call.bdc_func = bridge_rthash_delayed_resize;
	bridge_schedule_delayed_call(&sc->sc_resize_call);
}

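/*
 * Worked example (assumed figures, for illustration only): if the table
 * currently has 256 buckets, the load-factor test above only schedules a
 * resize once sc_brtcnt reaches 256 * 4 = 1024 entries; the delayed call
 * then doubles the bucket count to 512, subject to
 * bridge_rtable_hash_size_max.
 */
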
/*
 * bridge_rtable_fini:
 *
 *	Deconstruct the route table for this bridge.
 */
static void
bridge_rtable_fini(struct bridge_softc *sc)
{
	KASSERT(sc->sc_brtcnt == 0,
	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
	if (sc->sc_rthash) {
		_FREE(sc->sc_rthash, M_DEVBUF);
		sc->sc_rthash = NULL;
	}
}

/*
 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
 * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
 */
#define	mix(a, b, c)						\
do {								\
	a -= b; a -= c; a ^= (c >> 13);				\
	b -= c; b -= a; b ^= (a << 8);				\
	c -= a; c -= b; c ^= (b >> 13);				\
	a -= b; a -= c; a ^= (c >> 12);				\
	b -= c; b -= a; b ^= (a << 16);				\
	c -= a; c -= b; c ^= (b >> 5);				\
	a -= b; a -= c; a ^= (c >> 3);				\
	b -= c; b -= a; b ^= (a << 10);				\
	c -= a; c -= b; c ^= (b >> 15);				\
} while (/*CONSTCOND*/0)

static __inline uint32_t
bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;

	b += addr[5] << 8;
	b += addr[4];
	a += addr[3] << 24;
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];

	mix(a, b, c);

	return (c & BRIDGE_RTHASH_MASK(sc));
}

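/*
 * Illustrative sketch (compiled out, not part of the original source): the
 * helper below mirrors bridge_rthash() for a caller-supplied key and a
 * power-of-two bucket count, assuming BRIDGE_RTHASH_MASK(sc) is
 * (sc->sc_rthash_size - 1).
 */
#if 0
static uint32_t
bridge_rthash_example(uint32_t key, uint32_t nbuckets, const uint8_t *addr)
{
	/* Same seeding as bridge_rthash(): two golden-ratio constants + key */
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key;

	b += addr[5] << 8;
	b += addr[4];
	a += addr[3] << 24;
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];

	mix(a, b, c);

	/* With a power-of-two table, masking is equivalent to c % nbuckets */
	return (c & (nbuckets - 1));
}
#endif
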
static int
bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
{
	int i, d;

	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
		d = ((int)a[i]) - ((int)b[i]);
	}

	return (d);
}

/*
 * bridge_rtnode_lookup:
 *
 *	Look up a bridge route node for the specified destination. Compare the
 *	vlan id or if zero then just return the first match.
 */
static struct bridge_rtnode *
bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr,
    uint16_t vlan)
{
	struct bridge_rtnode *brt;
	uint32_t hash;
	int dir;

	BRIDGE_LOCK_ASSERT_HELD(sc);
	ASSERT(bridge_in_bsd_mode(sc));

	hash = bridge_rthash(sc, addr);
	LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
		if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
			return (brt);
		if (dir > 0)
			return (NULL);
	}

	return (NULL);
}

/*
 * bridge_rtnode_hash:
 *
 *	Insert the specified bridge node into the route hash table.
 *	This is used when adding a new node or to rehash when resizing
 *	the hash table.
 */
static int
bridge_rtnode_hash(struct bridge_softc *sc, struct bridge_rtnode *brt)
{
	struct bridge_rtnode *lbrt;
	uint32_t hash;
	int dir;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	hash = bridge_rthash(sc, brt->brt_addr);

	lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
	if (lbrt == NULL) {
		LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
		goto out;
	}

	do {
		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan) {
#if BRIDGE_DEBUG
			if (if_bridge_debug & BR_DBGF_RT_TABLE)
				printf("%s: %s EEXIST "
				    "%02x:%02x:%02x:%02x:%02x:%02x\n",
				    __func__, sc->sc_ifp->if_xname,
				    brt->brt_addr[0], brt->brt_addr[1],
				    brt->brt_addr[2], brt->brt_addr[3],
				    brt->brt_addr[4], brt->brt_addr[5]);
#endif /* BRIDGE_DEBUG */
			return (EEXIST);
		}
		if (dir > 0) {
			LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
			goto out;
		}
		if (LIST_NEXT(lbrt, brt_hash) == NULL) {
			LIST_INSERT_AFTER(lbrt, brt, brt_hash);
			goto out;
		}
		lbrt = LIST_NEXT(lbrt, brt_hash);
	} while (lbrt != NULL);

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_RT_TABLE)
		printf("%s: %s impossible %02x:%02x:%02x:%02x:%02x:%02x\n",
		    __func__, sc->sc_ifp->if_xname,
		    brt->brt_addr[0], brt->brt_addr[1], brt->brt_addr[2],
		    brt->brt_addr[3], brt->brt_addr[4], brt->brt_addr[5]);
#endif /* BRIDGE_DEBUG */

out:
	return (0);
}

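/*
 * Illustrative note (not from the original source): each hash bucket is kept
 * sorted by bridge_rtnode_addr_cmp(), so a bucket holding both
 * 00:11:22:33:44:55 and 00:11:22:33:44:77 stores them in that order and
 * bridge_rtnode_lookup() can stop scanning as soon as it reaches an address
 * that compares greater than the one being looked up.
 */
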
/*
 * bridge_rtnode_insert:
 *
 *	Insert the specified bridge node into the route table.  We
 *	assume the entry is not already in the table.
 */
static int
bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
{
	int error;

	error = bridge_rtnode_hash(sc, brt);
	if (error != 0)
		return (error);

	LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
	sc->sc_brtcnt++;

	bridge_rthash_resize(sc);

	return (0);
}

/*
 * bridge_rtnode_destroy:
 *
 *	Destroy a bridge rtnode.
 */
static void
bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
{
	BRIDGE_LOCK_ASSERT_HELD(sc);

	LIST_REMOVE(brt, brt_hash);

	LIST_REMOVE(brt, brt_list);
	sc->sc_brtcnt--;
	brt->brt_dst->bif_addrcnt--;
	zfree(bridge_rtnode_pool, brt);
}

/*
 * bridge_rtable_expire:
 *
 *	Set the expiry time for all routes on an interface.
 */
static void
bridge_rtable_expire(struct ifnet *ifp, int age)
{
	struct bridge_softc *sc = ifp->if_bridge;
	struct bridge_rtnode *brt;

	BRIDGE_LOCK(sc);

	/*
	 * If the age is zero then flush, otherwise set all the expiry times to
	 * age for the interface
	 */
	if (age == 0) {
		bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
	} else {
		unsigned long now;

		now = (unsigned long) net_uptime();

		LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
			/* Cap the expiry time to 'age' */
			if (brt->brt_ifp == ifp &&
			    brt->brt_expire > now + age &&
			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
				brt->brt_expire = now + age;
		}
	}
	BRIDGE_UNLOCK(sc);
}

#if BRIDGESTP
/*
 * bridge_state_change:
 *
 *	Callback from the bridgestp code when a port changes states.
 */
static void
bridge_state_change(struct ifnet *ifp, int state)
{
	struct bridge_softc *sc = ifp->if_bridge;
	static const char *stpstates[] = {
		"disabled",
		"listening",
		"learning",
		"forwarding",
		"blocking",
		"discarding"
	};

	if (log_stp)
		log(LOG_NOTICE, "%s: state changed to %s on %s\n",
		    sc->sc_ifp->if_xname,
		    stpstates[state], ifp->if_xname);
}
#endif /* BRIDGESTP */

#ifdef PFIL_HOOKS
/*
 * Send bridge packets through pfil if they are one of the types pfil can deal
 * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
 * question.)  If *bifp or *ifp are NULL then packet filtering is skipped for
 * that interface.
 */
static int
bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
{
	int snap, error, i, hlen;
	struct ether_header *eh1, eh2;
	struct ip_fw_args args;
	struct ip *ip;
	struct llc llc1;
	u_int16_t ether_type;

	snap = 0;
	error = -1;	/* Default error if not error == 0 */

	/* we may return with the IP fields swapped, ensure its not shared */
	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));

	if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0)
		return (0); /* filtering is disabled */

	i = min((*mp)->m_pkthdr.len, max_protohdr);
	if ((*mp)->m_len < i) {
		*mp = m_pullup(*mp, i);
		if (*mp == NULL) {
			printf("%s: m_pullup failed\n", __func__);
			return (-1);
		}
	}

	eh1 = mtod(*mp, struct ether_header *);
	ether_type = ntohs(eh1->ether_type);

	/*
	 * Check for SNAP/LLC.
	 */
	if (ether_type < ETHERMTU) {
		struct llc *llc2 = (struct llc *)(eh1 + 1);

		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
		    llc2->llc_dsap == LLC_SNAP_LSAP &&
		    llc2->llc_ssap == LLC_SNAP_LSAP &&
		    llc2->llc_control == LLC_UI) {
			ether_type = htons(llc2->llc_un.type_snap.ether_type);
			snap = 1;
		}
	}

	/*
	 * If we're trying to filter bridge traffic, don't look at anything
	 * other than IP and ARP traffic.  If the filter doesn't understand
	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
	 * but of course we don't have an AppleTalk filter to begin with.
	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
	 * ARP traffic.)
	 */
	switch (ether_type) {
	case ETHERTYPE_ARP:
	case ETHERTYPE_REVARP:
		if (pfil_ipfw_arp == 0)
			return (0); /* Automatically pass */
		break;

	case ETHERTYPE_IP:
#if INET6
	case ETHERTYPE_IPV6:
#endif /* INET6 */
		break;
	default:
		/*
		 * Check to see if the user wants to pass non-ip
		 * packets, these will not be checked by pfil(9) and
		 * passed unconditionally so the default is to drop.
		 */
		if (pfil_onlyip)
			goto bad;
	}

	/* Strip off the Ethernet header and keep a copy. */
	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t)&eh2);
	m_adj(*mp, ETHER_HDR_LEN);

	/* Strip off snap header, if present */
	if (snap) {
		m_copydata(*mp, 0, sizeof (struct llc), (caddr_t)&llc1);
		m_adj(*mp, sizeof (struct llc));
	}

	/*
	 * Check the IP header for alignment and errors
	 */
	if (dir == PFIL_IN) {
		switch (ether_type) {
		case ETHERTYPE_IP:
			error = bridge_ip_checkbasic(mp);
			break;
#if INET6
		case ETHERTYPE_IPV6:
			error = bridge_ip6_checkbasic(mp);
			break;
#endif /* INET6 */
		default:
			error = 0;
		}
		if (error)
			goto bad;
	}

	if (IPFW_LOADED && pfil_ipfw != 0 && dir == PFIL_OUT && ifp != NULL) {
		args.rule = ip_dn_claim_rule(*mp);
		if (args.rule != NULL && fw_one_pass)
			goto ipfwpass; /* packet already partially processed */

		args.m = *mp;
		args.oif = ifp;
		args.next_hop = NULL;
		args.eh = &eh2;
		args.inp = NULL;	/* used by ipfw uid/gid/jail rules */
		i = ip_fw_chk_ptr(&args);
		*mp = args.m;

		if (*mp == NULL)
			return (error);

		if (DUMMYNET_LOADED && (i == IP_FW_DUMMYNET)) {

			/* put the Ethernet header back on */
			M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT, 0);
			if (*mp == NULL)
				return (error);
			bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);

			/*
			 * Pass the pkt to dummynet, which consumes it. The
			 * packet will return to us via bridge_dummynet().
			 */
			args.oif = ifp;
			ip_dn_io_ptr(mp, DN_TO_IFB_FWD, &args, DN_CLIENT_IPFW);
			return (error);
		}

		if (i != IP_FW_PASS) /* drop */
			goto bad;
	}

ipfwpass:
	error = 0;

	/*
	 * Run the packet through pfil
	 */
	switch (ether_type) {
	case ETHERTYPE_IP:
		/*
		 * before calling the firewall, swap fields the same as
		 * IP does. here we assume the header is contiguous
		 */
		ip = mtod(*mp, struct ip *);

		ip->ip_len = ntohs(ip->ip_len);
		ip->ip_off = ntohs(ip->ip_off);

		/*
		 * Run pfil on the member interface and the bridge, both can
		 * be skipped by clearing pfil_member or pfil_bridge.
		 *
		 * Keep the order:
		 *   in_if -> bridge_if -> out_if
		 */
		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
			    dir, NULL);

		if (*mp == NULL || error != 0) /* filter may consume */
			break;

		if (pfil_member && ifp != NULL)
			error = pfil_run_hooks(&inet_pfil_hook, mp, ifp,
			    dir, NULL);

		if (*mp == NULL || error != 0) /* filter may consume */
			break;

		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
			    dir, NULL);

		if (*mp == NULL || error != 0) /* filter may consume */
			break;

		/* check if we need to fragment the packet */
		if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
			i = (*mp)->m_pkthdr.len;
			if (i > ifp->if_mtu) {
				error = bridge_fragment(ifp, *mp, &eh2, snap,
				    &llc1);
				return (error);
			}
		}

		/* Recalculate the ip checksum and restore byte ordering */
		ip = mtod(*mp, struct ip *);
		hlen = ip->ip_hl << 2;
		if (hlen < sizeof (struct ip))
			goto bad;
		if (hlen > (*mp)->m_len) {
			if ((*mp = m_pullup(*mp, hlen)) == 0)
				goto bad;
			ip = mtod(*mp, struct ip *);
			if (ip == NULL)
				goto bad;
		}
		ip->ip_len = htons(ip->ip_len);
		ip->ip_off = htons(ip->ip_off);
		ip->ip_sum = 0;
		if (hlen == sizeof (struct ip))
			ip->ip_sum = in_cksum_hdr(ip);
		else
			ip->ip_sum = in_cksum(*mp, hlen);
		break;

#if INET6
	case ETHERTYPE_IPV6:
		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
			    dir, NULL);

		if (*mp == NULL || error != 0) /* filter may consume */
			break;

		if (pfil_member && ifp != NULL)
			error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
			    dir, NULL);

		if (*mp == NULL || error != 0) /* filter may consume */
			break;

		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
			    dir, NULL);
		break;
#endif /* INET6 */

	default:
		error = 0;
		break;
	}

	if (*mp == NULL)
		return (error);
	if (error != 0)
		goto bad;

	error = -1;

	/*
	 * Finally, put everything back the way it was and return
	 */
	if (snap) {
		M_PREPEND(*mp, sizeof (struct llc), M_DONTWAIT, 0);
		if (*mp == NULL)
			return (error);
		bcopy(&llc1, mtod(*mp, caddr_t), sizeof (struct llc));
	}

	M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT, 0);
	if (*mp == NULL)
		return (error);
	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);

	return (0);

bad:
	m_freem(*mp);
	*mp = NULL;
	return (error);
}

/*
 * Perform basic checks on header size since
 * pfil assumes ip_input has already processed
 * it for it.  Cut-and-pasted from ip_input.c.
 * Given how simple the IPv6 version is,
 * does the IPv4 version really need to be
 * this complicated?
 *
 * XXX Should we update ipstat here, or not?
 * XXX Right now we update ipstat but not
 * ip6stat.
 */
static int
bridge_ip_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip *ip;
	int len, hlen;
	u_short sum;

	if (*mp == NULL)
		return (-1);

	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		/* max_linkhdr is already rounded up to nearest 4-byte */
		if ((m = m_copyup(m, sizeof (struct ip),
		    max_linkhdr)) == NULL) {
			/* XXXJRT new stat, please */
			ipstat.ips_toosmall++;
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
			ipstat.ips_toosmall++;
			goto bad;
		}
	}
	ip = mtod(m, struct ip *);
	if (ip == NULL) goto bad;

	if (ip->ip_v != IPVERSION) {
		ipstat.ips_badvers++;
		goto bad;
	}
	hlen = ip->ip_hl << 2;
	if (hlen < sizeof (struct ip)) { /* minimum header length */
		ipstat.ips_badhlen++;
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == 0) {
			ipstat.ips_badhlen++;
			goto bad;
		}
		ip = mtod(m, struct ip *);
		if (ip == NULL) goto bad;
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof (struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		ipstat.ips_badsum++;
		goto bad;
	}

	/* Retrieve the packet length. */
	len = ntohs(ip->ip_len);

	/*
	 * Check for additional length bogosity
	 */
	if (len < hlen) {
		ipstat.ips_badlen++;
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < len) {
		ipstat.ips_tooshort++;
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return (0);

bad:
	*mp = m;
	return (-1);
}

/*
 * Same as above, but for IPv6.
 * Cut-and-pasted from ip6_input.c.
 * XXX Should we update ip6stat, or not?
 */
static int
bridge_ip6_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip6_hdr *ip6;

	/*
	 * If the IPv6 header is not aligned, slurp it up into a new
	 * mbuf with space for link headers, in the event we forward
	 * it.  Otherwise, if it is aligned, make sure the entire base
	 * IPv6 header is in the first mbuf of the chain.
	 */
	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;
		/* max_linkhdr is already rounded up to nearest 4-byte */
		if ((m = m_copyup(m, sizeof (struct ip6_hdr),
		    max_linkhdr)) == NULL) {
			/* XXXJRT new stat, please */
			ip6stat.ip6s_toosmall++;
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof (struct ip6_hdr))) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;
		if ((m = m_pullup(m, sizeof (struct ip6_hdr))) == NULL) {
			ip6stat.ip6s_toosmall++;
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	}

	ip6 = mtod(m, struct ip6_hdr *);

	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
		ip6stat.ip6s_badvers++;
		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return (0);

bad:
	*mp = m;
	return (-1);
}

/*
 * bridge_fragment:
 *
 *	Return a fragmented mbuf chain.
 */
static int
bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
    int snap, struct llc *llc)
{
	struct mbuf *m0;
	struct ip *ip;
	int error = -1;

	if (m->m_len < sizeof (struct ip) &&
	    (m = m_pullup(m, sizeof (struct ip))) == NULL)
		goto out;
	ip = mtod(m, struct ip *);

	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
	    CSUM_DELAY_IP);
	if (error)
		goto out;

	/* walk the chain and re-add the Ethernet header */
	for (m0 = m; m0; m0 = m0->m_nextpkt) {
		if (error == 0) {
			if (snap) {
				M_PREPEND(m0, sizeof (struct llc), M_DONTWAIT, 0);
				if (m0 == NULL) {
					error = ENOBUFS;
					continue;
				}
				bcopy(llc, mtod(m0, caddr_t),
				    sizeof (struct llc));
			}
			M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT, 0);
			if (m0 == NULL) {
				error = ENOBUFS;
				continue;
			}
			bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
		} else {
			m_freem(m0);
		}
	}

	if (error == 0)
		ipstat.ips_fragmented++;

	return (error);

out:
	if (m != NULL)
		m_freem(m);
	return (error);
}
#endif /* PFIL_HOOKS */

/*
 * bridge_set_bpf_tap:
 *
 *	Sets up the BPF callbacks.
 */
static errno_t
bridge_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, bpf_packet_func bpf_callback)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);

	if (sc == NULL || (sc->sc_flags & SCF_DETACHING)) {
		return (ENODEV);
	}
	ASSERT(bridge_in_bsd_mode(sc));
	switch (mode) {
	case BPF_TAP_DISABLE:
		sc->sc_bpf_input = sc->sc_bpf_output = NULL;
		break;

	case BPF_TAP_INPUT:
		sc->sc_bpf_input = bpf_callback;
		break;

	case BPF_TAP_OUTPUT:
		sc->sc_bpf_output = bpf_callback;
		break;

	case BPF_TAP_INPUT_OUTPUT:
		sc->sc_bpf_input = sc->sc_bpf_output = bpf_callback;
		break;

	default:
		break;
	}

	return (0);
}

/*
 * bridge_detach:
 *
 *	Callback when interface has been detached.
 */
static void
bridge_detach(ifnet_t ifp)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);

#if BRIDGESTP
	bstp_detach(&sc->sc_stp);
#endif /* BRIDGESTP */

	if (bridge_in_bsd_mode(sc)) {
		/* Tear down the routing table. */
		bridge_rtable_fini(sc);
	}

	lck_mtx_lock(&bridge_list_mtx);
	LIST_REMOVE(sc, sc_list);
	lck_mtx_unlock(&bridge_list_mtx);

	ifnet_release(ifp);

	lck_mtx_destroy(&sc->sc_mtx, bridge_lock_grp);

	_FREE(sc, M_DEVBUF);
}

/*
 * bridge_bpf_input:
 *
 *	Invoke the input BPF callback if enabled
 */
__private_extern__ errno_t
bridge_bpf_input(ifnet_t ifp, struct mbuf *m)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);

	ASSERT(bridge_in_bsd_mode(sc));
	if (sc->sc_bpf_input) {
		if (mbuf_pkthdr_rcvif(m) != ifp) {
			printf("%s: rcvif: 0x%llx != ifp 0x%llx\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m)),
			    (uint64_t)VM_KERNEL_ADDRPERM(ifp));
		}
		(*sc->sc_bpf_input)(ifp, m);
	}
	return (0);
}

/*
 * bridge_bpf_output:
 *
 *	Invoke the output BPF callback if enabled
 */
__private_extern__ errno_t
bridge_bpf_output(ifnet_t ifp, struct mbuf *m)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);

	ASSERT(bridge_in_bsd_mode(sc));
	if (sc->sc_bpf_output) {
		(*sc->sc_bpf_output)(ifp, m);
	}
	return (0);
}

/*
 * bridge_link_event:
 *
 *	Report a data link event on an interface
 */
static void
bridge_link_event(struct ifnet *ifp, u_int32_t event_code)
{
	struct {
		struct kern_event_msg	header;
		u_int32_t		unit;
		char			if_name[IFNAMSIZ];
	} event;

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
		printf("%s: %s event_code %u - %s\n", __func__, ifp->if_xname,
		    event_code, dlil_kev_dl_code_str(event_code));
#endif /* BRIDGE_DEBUG */

	bzero(&event, sizeof (event));
	event.header.total_size = sizeof (event);
	event.header.vendor_code = KEV_VENDOR_APPLE;
	event.header.kev_class = KEV_NETWORK_CLASS;
	event.header.kev_subclass = KEV_DL_SUBCLASS;
	event.header.event_code = event_code;
	event.header.event_data[0] = ifnet_family(ifp);
	event.unit = (u_int32_t)ifnet_unit(ifp);
	strlcpy(event.if_name, ifnet_name(ifp), IFNAMSIZ);
	ifnet_event(ifp, &event.header);
}

#define	BRIDGE_HF_DROP(reason, func, line) { \
	bridge_hostfilter_stats.reason++; \
	if (if_bridge_debug & BR_DBGF_HOSTFILTER) \
		printf("%s.%d" #reason, func, line); \
	error = EINVAL; \
}

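/*
 * Illustrative expansion (an assumption for clarity): a call such as
 *
 *	BRIDGE_HF_DROP(brhf_bad_ether_type, __func__, __LINE__);
 *
 * bumps bridge_hostfilter_stats.brhf_bad_ether_type, optionally logs
 * "<function>.<line>brhf_bad_ether_type" when BR_DBGF_HOSTFILTER debugging
 * is enabled, and sets the local "error" to EINVAL so the caller can jump
 * to its cleanup label.
 */
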
/*
 * Make sure this is a DHCP or Bootp request that matches the host filter
 */
static int
bridge_dhcp_filter(struct bridge_iflist *bif, struct mbuf *m, size_t offset)
{
	int error = EINVAL;
	struct dhcp dhcp;

	/*
	 * Note: We use the dhcp structure because bootp structure definition
	 * is larger and some vendors do not pad the request
	 */
	error = mbuf_copydata(m, offset, sizeof(struct dhcp), &dhcp);
	if (error != 0) {
		BRIDGE_HF_DROP(brhf_dhcp_too_small, __func__, __LINE__);
		goto done;
	}
	if (dhcp.dp_op != BOOTREQUEST) {
		BRIDGE_HF_DROP(brhf_dhcp_bad_op, __func__, __LINE__);
		goto done;
	}
	/*
	 * The hardware address must be an exact match
	 */
	if (dhcp.dp_htype != ARPHRD_ETHER) {
		BRIDGE_HF_DROP(brhf_dhcp_bad_htype, __func__, __LINE__);
		goto done;
	}
	if (dhcp.dp_hlen != ETHER_ADDR_LEN) {
		BRIDGE_HF_DROP(brhf_dhcp_bad_hlen, __func__, __LINE__);
		goto done;
	}
	if (bcmp(dhcp.dp_chaddr, bif->bif_hf_hwsrc,
	    ETHER_ADDR_LEN) != 0) {
		BRIDGE_HF_DROP(brhf_dhcp_bad_chaddr, __func__, __LINE__);
		goto done;
	}
	/*
	 * Client address must match the host address or be not specified
	 */
	if (dhcp.dp_ciaddr.s_addr != bif->bif_hf_ipsrc.s_addr &&
	    dhcp.dp_ciaddr.s_addr != INADDR_ANY) {
		BRIDGE_HF_DROP(brhf_dhcp_bad_ciaddr, __func__, __LINE__);
		goto done;
	}
	error = 0;
done:
	return (error);
}

static int
bridge_host_filter(struct bridge_iflist *bif, struct mbuf *m)
{
	int error = EINVAL;
	struct ether_header *eh;
	static struct in_addr inaddr_any = { .s_addr = INADDR_ANY };

	/*
	 * Check the Ethernet header is large enough
	 */
	if (mbuf_pkthdr_len(m) < sizeof(struct ether_header)) {
		BRIDGE_HF_DROP(brhf_ether_too_small, __func__, __LINE__);
		goto done;
	}
	if (mbuf_len(m) < sizeof(struct ether_header) &&
	    mbuf_pullup(&m, sizeof(struct ether_header)) != 0) {
		BRIDGE_HF_DROP(brhf_ether_pullup_failed, __func__, __LINE__);
		goto done;
	}
	eh = mtod(m, struct ether_header *);

	/*
	 * Restrict the source hardware address
	 */
	if ((bif->bif_flags & BIFF_HF_HWSRC) == 0 ||
	    bcmp(eh->ether_shost, bif->bif_hf_hwsrc,
	    ETHER_ADDR_LEN) != 0) {
		BRIDGE_HF_DROP(brhf_bad_ether_srchw_addr, __func__, __LINE__);
		goto done;
	}

	/*
	 * Restrict Ethernet protocols to ARP and IP
	 */
	if (eh->ether_type == htons(ETHERTYPE_ARP)) {
		struct ether_arp *ea;
		size_t minlen = sizeof(struct ether_header) +
		    sizeof(struct ether_arp);

		/*
		 * Make the Ethernet and ARP headers contiguous
		 */
		if (mbuf_pkthdr_len(m) < minlen) {
			BRIDGE_HF_DROP(brhf_arp_too_small, __func__, __LINE__);
			goto done;
		}
		if (mbuf_len(m) < minlen && mbuf_pullup(&m, minlen) != 0) {
			BRIDGE_HF_DROP(brhf_arp_pullup_failed,
			    __func__, __LINE__);
			goto done;
		}

		/*
		 * Verify this is an ethernet/ip arp
		 */
		eh = mtod(m, struct ether_header *);
		ea = (struct ether_arp *)(eh + 1);
		if (ea->arp_hrd != htons(ARPHRD_ETHER)) {
			BRIDGE_HF_DROP(brhf_arp_bad_hw_type,
			    __func__, __LINE__);
			goto done;
		}
		if (ea->arp_pro != htons(ETHERTYPE_IP)) {
			BRIDGE_HF_DROP(brhf_arp_bad_pro_type,
			    __func__, __LINE__);
			goto done;
		}

		/*
		 * Verify the address lengths are correct
		 */
		if (ea->arp_hln != ETHER_ADDR_LEN) {
			BRIDGE_HF_DROP(brhf_arp_bad_hw_len, __func__, __LINE__);
			goto done;
		}
		if (ea->arp_pln != sizeof(struct in_addr)) {
			BRIDGE_HF_DROP(brhf_arp_bad_pro_len,
			    __func__, __LINE__);
			goto done;
		}

		/*
		 * Allow only ARP request or ARP reply
		 */
		if (ea->arp_op != htons(ARPOP_REQUEST) &&
		    ea->arp_op != htons(ARPOP_REPLY)) {
			BRIDGE_HF_DROP(brhf_arp_bad_op, __func__, __LINE__);
			goto done;
		}

		/*
		 * Verify source hardware address matches
		 */
		if (bcmp(ea->arp_sha, bif->bif_hf_hwsrc,
		    ETHER_ADDR_LEN) != 0) {
			BRIDGE_HF_DROP(brhf_arp_bad_sha, __func__, __LINE__);
			goto done;
		}

		/*
		 * Verify source protocol address:
		 * May be null for an ARP probe
		 */
		if (bcmp(ea->arp_spa, &bif->bif_hf_ipsrc.s_addr,
		    sizeof(struct in_addr)) != 0 &&
		    bcmp(ea->arp_spa, &inaddr_any,
		    sizeof(struct in_addr)) != 0) {
			BRIDGE_HF_DROP(brhf_arp_bad_spa, __func__, __LINE__);
			goto done;
		}

		bridge_hostfilter_stats.brhf_arp_ok += 1;
		error = 0;
	} else if (eh->ether_type == htons(ETHERTYPE_IP)) {
		size_t minlen = sizeof(struct ether_header) + sizeof(struct ip);
		struct ip iphdr;
		size_t offset;

		/*
		 * Make the Ethernet and IP headers contiguous
		 */
		if (mbuf_pkthdr_len(m) < minlen) {
			BRIDGE_HF_DROP(brhf_ip_too_small, __func__, __LINE__);
			goto done;
		}
		offset = sizeof(struct ether_header);
		error = mbuf_copydata(m, offset, sizeof(struct ip), &iphdr);
		if (error != 0) {
			BRIDGE_HF_DROP(brhf_ip_too_small, __func__, __LINE__);
			goto done;
		}

		/*
		 * Verify the source IP address
		 */
		if (iphdr.ip_p == IPPROTO_UDP) {
			struct udphdr udp;

			minlen += sizeof(struct udphdr);
			if (mbuf_pkthdr_len(m) < minlen) {
				BRIDGE_HF_DROP(brhf_ip_too_small,
				    __func__, __LINE__);
				goto done;
			}

			/*
			 * Allow all zero addresses for DHCP requests
			 */
			if (iphdr.ip_src.s_addr != bif->bif_hf_ipsrc.s_addr &&
			    iphdr.ip_src.s_addr != INADDR_ANY) {
				BRIDGE_HF_DROP(brhf_ip_bad_srcaddr,
				    __func__, __LINE__);
				goto done;
			}
			offset = sizeof(struct ether_header) +
			    (IP_VHL_HL(iphdr.ip_vhl) << 2);
			error = mbuf_copydata(m, offset,
			    sizeof(struct udphdr), &udp);
			if (error != 0) {
				BRIDGE_HF_DROP(brhf_ip_too_small,
				    __func__, __LINE__);
				goto done;
			}

			/*
			 * Either it's a Bootp/DHCP packet that we like or
			 * it's a UDP packet from the host IP as source address
			 */
			if (udp.uh_sport == htons(IPPORT_BOOTPC) &&
			    udp.uh_dport == htons(IPPORT_BOOTPS)) {
				minlen += sizeof(struct dhcp);
				if (mbuf_pkthdr_len(m) < minlen) {
					BRIDGE_HF_DROP(brhf_ip_too_small,
					    __func__, __LINE__);
					goto done;
				}
				offset += sizeof(struct udphdr);
				error = bridge_dhcp_filter(bif, m, offset);
				if (error != 0)
					goto done;
			} else if (iphdr.ip_src.s_addr == INADDR_ANY) {
				BRIDGE_HF_DROP(brhf_ip_bad_srcaddr,
				    __func__, __LINE__);
				goto done;
			}
		} else if (iphdr.ip_src.s_addr != bif->bif_hf_ipsrc.s_addr ||
		    bif->bif_hf_ipsrc.s_addr == INADDR_ANY) {
			BRIDGE_HF_DROP(brhf_ip_bad_srcaddr, __func__, __LINE__);
			goto done;
		}

		/*
		 * Allow only boring IP protocols
		 */
		if (iphdr.ip_p != IPPROTO_TCP &&
		    iphdr.ip_p != IPPROTO_UDP &&
		    iphdr.ip_p != IPPROTO_ICMP &&
		    iphdr.ip_p != IPPROTO_ESP &&
		    iphdr.ip_p != IPPROTO_AH &&
		    iphdr.ip_p != IPPROTO_GRE) {
			BRIDGE_HF_DROP(brhf_ip_bad_proto, __func__, __LINE__);
			goto done;
		}
		bridge_hostfilter_stats.brhf_ip_ok += 1;
		error = 0;
	} else {
		BRIDGE_HF_DROP(brhf_bad_ether_type, __func__, __LINE__);
		goto done;
	}
done:
	if (error != 0) {
		if (if_bridge_debug & BR_DBGF_HOSTFILTER) {
			if (m) {
				printf_mbuf_data(m, 0,
				    sizeof(struct ether_header) +