2 * Copyright (c) 2004-2018 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 /* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */
31 * Copyright 2001 Wasabi Systems, Inc.
32 * All rights reserved.
34 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed for the NetBSD Project by
47 * Wasabi Systems, Inc.
48 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
49 * or promote products derived from this software without specific prior
52 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
54 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
55 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
56 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
57 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
58 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
59 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
60 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
61 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
62 * POSSIBILITY OF SUCH DAMAGE.
66 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
67 * All rights reserved.
69 * Redistribution and use in source and binary forms, with or without
70 * modification, are permitted provided that the following conditions
72 * 1. Redistributions of source code must retain the above copyright
73 * notice, this list of conditions and the following disclaimer.
74 * 2. Redistributions in binary form must reproduce the above copyright
75 * notice, this list of conditions and the following disclaimer in the
76 * documentation and/or other materials provided with the distribution.
78 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
79 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
80 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
81 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
82 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
83 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
84 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
85 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
86 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
87 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
88 * POSSIBILITY OF SUCH DAMAGE.
90 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
94 * Network interface bridge support.
98 * - Currently only supports Ethernet-like interfaces (Ethernet,
99 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
100 * to bridge other types of interfaces (FDDI-FDDI, and maybe
101 * consider heterogeneous bridges).
103 * - GIF isn't handled due to the lack of IPPROTO_ETHERIP support.
106 #include <sys/cdefs.h>
108 #define BRIDGE_DEBUG 1
110 #include <sys/param.h>
111 #include <sys/mbuf.h>
112 #include <sys/malloc.h>
113 #include <sys/protosw.h>
114 #include <sys/systm.h>
115 #include <sys/time.h>
116 #include <sys/socket.h> /* for net/if.h */
117 #include <sys/sockio.h>
118 #include <sys/kernel.h>
119 #include <sys/random.h>
120 #include <sys/syslog.h>
121 #include <sys/sysctl.h>
122 #include <sys/proc.h>
123 #include <sys/lock.h>
124 #include <sys/mcache.h>
126 #include <sys/kauth.h>
128 #include <kern/thread_call.h>
130 #include <libkern/libkern.h>
132 #include <kern/zalloc.h>
138 #include <net/if_dl.h>
139 #include <net/if_types.h>
140 #include <net/if_var.h>
141 #include <net/if_media.h>
142 #include <net/net_api_stats.h>
144 #include <netinet/in.h> /* for struct arpcom */
145 #include <netinet/in_systm.h>
146 #include <netinet/in_var.h>
148 #include <netinet/ip.h>
149 #include <netinet/ip_var.h>
151 #include <netinet/ip6.h>
152 #include <netinet6/ip6_var.h>
155 #include <netinet/ip_carp.h>
157 #include <netinet/if_ether.h> /* for struct arpcom */
158 #include <net/bridgestp.h>
159 #include <net/if_bridgevar.h>
160 #include <net/if_llc.h>
162 #include <net/if_vlan_var.h>
163 #endif /* NVLAN > 0 */
165 #include <net/if_ether.h>
166 #include <net/dlil.h>
167 #include <net/kpi_interfacefilter.h>
169 #include <net/route.h>
171 #include <netinet/ip_fw2.h>
172 #include <netinet/ip_dummynet.h>
173 #endif /* PFIL_HOOKS */
174 #include <dev/random/randomdev.h>
176 #include <netinet/bootp.h>
177 #include <netinet/dhcp.h>
/*
 * Debug flags: each bit selects a bridge subsystem whose debug
 * logging is enabled (checked against if_bridge_debug).
 */
#define BR_DBGF_LIFECYCLE	0x0001
#define BR_DBGF_INPUT		0x0002
#define BR_DBGF_OUTPUT		0x0004
#define BR_DBGF_RT_TABLE	0x0008
#define BR_DBGF_DELAYED_CALL	0x0010
#define BR_DBGF_IOCTL		0x0020
#define BR_DBGF_MBUF		0x0040
#define BR_DBGF_MCAST		0x0080
#define BR_DBGF_HOSTFILTER	0x0100
190 #endif /* BRIDGE_DEBUG */
192 #define _BRIDGE_LOCK(_sc) lck_mtx_lock(&(_sc)->sc_mtx)
193 #define _BRIDGE_UNLOCK(_sc) lck_mtx_unlock(&(_sc)->sc_mtx)
194 #define BRIDGE_LOCK_ASSERT_HELD(_sc) \
195 LCK_MTX_ASSERT(&(_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED)
196 #define BRIDGE_LOCK_ASSERT_NOTHELD(_sc) \
197 LCK_MTX_ASSERT(&(_sc)->sc_mtx, LCK_MTX_ASSERT_NOTOWNED)
201 #define BR_LCKDBG_MAX 4
203 #define BRIDGE_LOCK(_sc) bridge_lock(_sc)
204 #define BRIDGE_UNLOCK(_sc) bridge_unlock(_sc)
205 #define BRIDGE_LOCK2REF(_sc, _err) _err = bridge_lock2ref(_sc)
206 #define BRIDGE_UNREF(_sc) bridge_unref(_sc)
207 #define BRIDGE_XLOCK(_sc) bridge_xlock(_sc)
208 #define BRIDGE_XDROP(_sc) bridge_xdrop(_sc)
210 #else /* !BRIDGE_DEBUG */
212 #define BRIDGE_LOCK(_sc) _BRIDGE_LOCK(_sc)
213 #define BRIDGE_UNLOCK(_sc) _BRIDGE_UNLOCK(_sc)
214 #define BRIDGE_LOCK2REF(_sc, _err) do { \
215 BRIDGE_LOCK_ASSERT_HELD(_sc); \
216 if ((_sc)->sc_iflist_xcnt > 0) \
219 (_sc)->sc_iflist_ref++; \
220 _BRIDGE_UNLOCK(_sc); \
222 #define BRIDGE_UNREF(_sc) do { \
224 (_sc)->sc_iflist_ref--; \
225 if (((_sc)->sc_iflist_xcnt > 0) && ((_sc)->sc_iflist_ref == 0)) { \
226 _BRIDGE_UNLOCK(_sc); \
227 wakeup(&(_sc)->sc_cv); \
229 _BRIDGE_UNLOCK(_sc); \
231 #define BRIDGE_XLOCK(_sc) do { \
232 BRIDGE_LOCK_ASSERT_HELD(_sc); \
233 (_sc)->sc_iflist_xcnt++; \
234 while ((_sc)->sc_iflist_ref > 0) \
235 msleep(&(_sc)->sc_cv, &(_sc)->sc_mtx, PZERO, \
236 "BRIDGE_XLOCK", NULL); \
238 #define BRIDGE_XDROP(_sc) do { \
239 BRIDGE_LOCK_ASSERT_HELD(_sc); \
240 (_sc)->sc_iflist_xcnt--; \
243 #endif /* BRIDGE_DEBUG */
246 #define BRIDGE_BPF_MTAP_INPUT(sc, m) \
247 if (sc->sc_bpf_input) \
248 bridge_bpf_input(sc->sc_ifp, m)
249 #else /* NBPFILTER */
250 #define BRIDGE_BPF_MTAP_INPUT(ifp, m)
251 #endif /* NBPFILTER */
254 * Initial size of the route hash table. Must be a power of two.
256 #ifndef BRIDGE_RTHASH_SIZE
257 #define BRIDGE_RTHASH_SIZE 16
261 * Maximum size of the routing hash table
263 #define BRIDGE_RTHASH_SIZE_MAX 2048
265 #define BRIDGE_RTHASH_MASK(sc) ((sc)->sc_rthash_size - 1)
268 * Maximum number of addresses to cache.
270 #ifndef BRIDGE_RTABLE_MAX
271 #define BRIDGE_RTABLE_MAX 100
276 * Timeout (in seconds) for entries learned dynamically.
278 #ifndef BRIDGE_RTABLE_TIMEOUT
279 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
283 * Number of seconds between walks of the route list.
285 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
286 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
290 * List of capabilities to possibly mask on the member interface.
292 #define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM)
294 * List of capabilities to disable on the member interface.
296 #define BRIDGE_IFCAPS_STRIP IFCAP_LRO
299 * Bridge interface list entry.
301 struct bridge_iflist
{
302 TAILQ_ENTRY(bridge_iflist
) bif_next
;
303 struct ifnet
*bif_ifp
; /* member if */
304 struct bstp_port bif_stp
; /* STP state */
305 uint32_t bif_ifflags
; /* member if flags */
306 int bif_savedcaps
; /* saved capabilities */
307 uint32_t bif_addrmax
; /* max # of addresses */
308 uint32_t bif_addrcnt
; /* cur. # of addresses */
309 uint32_t bif_addrexceeded
; /* # of address violations */
311 interface_filter_t bif_iff_ref
;
312 struct bridge_softc
*bif_sc
;
315 struct in_addr bif_hf_ipsrc
;
316 uint8_t bif_hf_hwsrc
[ETHER_ADDR_LEN
];
/*
 * Per-member-interface flag bits (bridge_iflist).
 * NOTE(review): the flags field itself was not visible in this chunk
 * of the struct — confirm the member name against the full source.
 */
#define BIFF_PROMISC		0x01	/* promiscuous mode set */
#define BIFF_PROTO_ATTACHED	0x02	/* protocol attached */
#define BIFF_FILTER_ATTACHED	0x04	/* interface filter attached */
#define BIFF_MEDIA_ACTIVE	0x08	/* interface media active */
#define BIFF_HOST_FILTER	0x10	/* host filter enabled */
#define BIFF_HF_HWSRC		0x20	/* host filter source MAC is set */
#define BIFF_HF_IPSRC		0x40	/* host filter source IP is set */
/*
 * Bridge route (forwarding-table) node: one learned MAC address,
 * linked both into the hash table and onto the linear list.
 */
struct bridge_rtnode {
	LIST_ENTRY(bridge_rtnode) brt_hash;	/* hash table linkage */
	LIST_ENTRY(bridge_rtnode) brt_list;	/* list linkage */
	struct bridge_iflist	*brt_dst;	/* destination if */
	unsigned long		brt_expire;	/* expiration time */
	uint8_t			brt_flags;	/* address flags */
	uint8_t			brt_addr[ETHER_ADDR_LEN];
	uint16_t		brt_vlan;	/* vlan id */
};
340 #define brt_ifp brt_dst->bif_ifp
343 * Bridge delayed function call context
/* Signature of a function invoked via a bridge delayed call. */
typedef void (*bridge_delayed_func_t)(struct bridge_softc *);
347 struct bridge_delayed_call
{
348 struct bridge_softc
*bdc_sc
;
349 bridge_delayed_func_t bdc_func
; /* Function to call */
350 struct timespec bdc_ts
; /* Time to call */
352 thread_call_t bdc_thread_call
;
/* Delayed-call state bits (bridge_delayed_call). */
#define BDCF_OUTSTANDING	0x01	/* Delayed call has been scheduled */
#define BDCF_CANCELLING		0x02	/* May be waiting for call completion */
360 * Software state for each bridge.
362 LIST_HEAD(_bridge_rtnode_list
, bridge_rtnode
);
365 struct _bridge_rtnode_list
*bb_rthash
; /* our forwarding table */
366 struct _bridge_rtnode_list bb_rtlist
; /* list version of above */
367 uint32_t bb_rthash_key
; /* key for hash */
368 uint32_t bb_rthash_size
; /* size of the hash table */
369 struct bridge_delayed_call bb_aging_timer
;
370 struct bridge_delayed_call bb_resize_call
;
371 TAILQ_HEAD(, bridge_iflist
) bb_spanlist
; /* span ports list */
372 struct bstp_state bb_stp
; /* STP state */
373 bpf_packet_func bb_bpf_input
;
374 bpf_packet_func bb_bpf_output
;
375 } bridge_bsd
, *bridge_bsd_t
;
/*
 * Accessor macros mapping the sc_* names used throughout this file
 * onto the BSD-mode state stored in the sc_u.scu_bsd union member.
 */
#define sc_rthash	sc_u.scu_bsd.bb_rthash
#define sc_rtlist	sc_u.scu_bsd.bb_rtlist
#define sc_rthash_key	sc_u.scu_bsd.bb_rthash_key
#define sc_rthash_size	sc_u.scu_bsd.bb_rthash_size
#define sc_aging_timer	sc_u.scu_bsd.bb_aging_timer
#define sc_resize_call	sc_u.scu_bsd.bb_resize_call
#define sc_spanlist	sc_u.scu_bsd.bb_spanlist
#define sc_stp		sc_u.scu_bsd.bb_stp
#define sc_bpf_input	sc_u.scu_bsd.bb_bpf_input
#define sc_bpf_output	sc_u.scu_bsd.bb_bpf_output
388 struct bridge_softc
{
389 struct ifnet
*sc_ifp
; /* make this an interface */
394 LIST_ENTRY(bridge_softc
) sc_list
;
395 decl_lck_mtx_data(, sc_mtx
);
397 uint32_t sc_brtmax
; /* max # of addresses */
398 uint32_t sc_brtcnt
; /* cur. # of addresses */
399 uint32_t sc_brttimeout
; /* rt timeout in seconds */
400 uint32_t sc_iflist_ref
; /* refcount for sc_iflist */
401 uint32_t sc_iflist_xcnt
; /* refcount for sc_iflist */
402 TAILQ_HEAD(, bridge_iflist
) sc_iflist
; /* member interface list */
403 uint32_t sc_brtexceeded
; /* # of cache drops */
404 uint32_t sc_filter_flags
; /* ipf and flags */
405 struct ifnet
*sc_ifaddr
; /* member mac copied from */
406 u_char sc_defaddr
[6]; /* Default MAC address */
407 char sc_if_xname
[IFNAMSIZ
];
411 * Locking and unlocking calling history
413 void *lock_lr
[BR_LCKDBG_MAX
];
415 void *unlock_lr
[BR_LCKDBG_MAX
];
417 #endif /* BRIDGE_DEBUG */
/* bridge_softc sc_flags bits (see bridge_set_bsd_mode usage below). */
#define SCF_DETACHING		0x01
#define SCF_RESIZING		0x02
#define SCF_MEDIA_ACTIVE	0x04
#define SCF_BSD_MODE		0x08
426 bridge_set_bsd_mode(struct bridge_softc
* sc
)
428 sc
->sc_flags
|= SCF_BSD_MODE
;
431 static inline boolean_t
432 bridge_in_bsd_mode(const struct bridge_softc
* sc
)
434 return (sc
->sc_flags
& SCF_BSD_MODE
) != 0;
437 struct bridge_hostfilter_stats bridge_hostfilter_stats
;
439 decl_lck_mtx_data(static, bridge_list_mtx
);
441 static int bridge_rtable_prune_period
= BRIDGE_RTABLE_PRUNE_PERIOD
;
443 static zone_t bridge_rtnode_pool
= NULL
;
445 static int bridge_clone_create(struct if_clone
*, uint32_t, void *);
446 static int bridge_clone_destroy(struct ifnet
*);
448 static errno_t
bridge_ioctl(struct ifnet
*, u_long
, void *);
450 static void bridge_mutecaps(struct bridge_softc
*);
451 static void bridge_set_ifcap(struct bridge_softc
*, struct bridge_iflist
*,
454 static errno_t
bridge_set_tso(struct bridge_softc
*);
455 __private_extern__
void bridge_ifdetach(struct bridge_iflist
*, struct ifnet
*);
456 static int bridge_init(struct ifnet
*);
457 #if HAS_BRIDGE_DUMMYNET
458 static void bridge_dummynet(struct mbuf
*, struct ifnet
*);
460 static void bridge_ifstop(struct ifnet
*, int);
461 static int bridge_output(struct ifnet
*, struct mbuf
*);
462 static void bridge_finalize_cksum(struct ifnet
*, struct mbuf
*);
463 static void bridge_start(struct ifnet
*);
464 __private_extern__ errno_t
bridge_input(struct ifnet
*, struct mbuf
*, void *);
465 #if BRIDGE_MEMBER_OUT_FILTER
466 static errno_t
bridge_iff_output(void *, ifnet_t
, protocol_family_t
,
468 static int bridge_member_output(struct ifnet
*, struct mbuf
*,
469 struct sockaddr
*, struct rtentry
*);
471 static int bridge_enqueue(struct bridge_softc
*, struct ifnet
*,
473 static void bridge_rtdelete(struct bridge_softc
*, struct ifnet
*ifp
, int);
475 static void bridge_forward(struct bridge_softc
*, struct bridge_iflist
*,
478 static void bridge_aging_timer(struct bridge_softc
*sc
);
480 static void bridge_broadcast(struct bridge_softc
*, struct ifnet
*,
482 static void bridge_span(struct bridge_softc
*, struct mbuf
*);
484 static int bridge_rtupdate(struct bridge_softc
*, const uint8_t *,
485 uint16_t, struct bridge_iflist
*, int, uint8_t);
486 static struct ifnet
*bridge_rtlookup(struct bridge_softc
*, const uint8_t *,
488 static void bridge_rttrim(struct bridge_softc
*);
489 static void bridge_rtage(struct bridge_softc
*);
490 static void bridge_rtflush(struct bridge_softc
*, int);
491 static int bridge_rtdaddr(struct bridge_softc
*, const uint8_t *,
494 static int bridge_rtable_init(struct bridge_softc
*);
495 static void bridge_rtable_fini(struct bridge_softc
*);
497 static void bridge_rthash_resize(struct bridge_softc
*);
499 static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
500 static struct bridge_rtnode
*bridge_rtnode_lookup(struct bridge_softc
*,
501 const uint8_t *, uint16_t);
502 static int bridge_rtnode_hash(struct bridge_softc
*,
503 struct bridge_rtnode
*);
504 static int bridge_rtnode_insert(struct bridge_softc
*,
505 struct bridge_rtnode
*);
506 static void bridge_rtnode_destroy(struct bridge_softc
*,
507 struct bridge_rtnode
*);
509 static void bridge_rtable_expire(struct ifnet
*, int);
510 static void bridge_state_change(struct ifnet
*, int);
511 #endif /* BRIDGESTP */
513 static struct bridge_iflist
*bridge_lookup_member(struct bridge_softc
*,
515 static struct bridge_iflist
*bridge_lookup_member_if(struct bridge_softc
*,
517 static void bridge_delete_member(struct bridge_softc
*,
518 struct bridge_iflist
*, int);
519 static void bridge_delete_span(struct bridge_softc
*,
520 struct bridge_iflist
*);
522 static int bridge_ioctl_add(struct bridge_softc
*, void *);
523 static int bridge_ioctl_del(struct bridge_softc
*, void *);
524 static int bridge_ioctl_gifflags(struct bridge_softc
*, void *);
525 static int bridge_ioctl_sifflags(struct bridge_softc
*, void *);
526 static int bridge_ioctl_scache(struct bridge_softc
*, void *);
527 static int bridge_ioctl_gcache(struct bridge_softc
*, void *);
528 static int bridge_ioctl_gifs32(struct bridge_softc
*, void *);
529 static int bridge_ioctl_gifs64(struct bridge_softc
*, void *);
530 static int bridge_ioctl_rts32(struct bridge_softc
*, void *);
531 static int bridge_ioctl_rts64(struct bridge_softc
*, void *);
532 static int bridge_ioctl_saddr32(struct bridge_softc
*, void *);
533 static int bridge_ioctl_saddr64(struct bridge_softc
*, void *);
534 static int bridge_ioctl_sto(struct bridge_softc
*, void *);
535 static int bridge_ioctl_gto(struct bridge_softc
*, void *);
536 static int bridge_ioctl_daddr32(struct bridge_softc
*, void *);
537 static int bridge_ioctl_daddr64(struct bridge_softc
*, void *);
538 static int bridge_ioctl_flush(struct bridge_softc
*, void *);
539 static int bridge_ioctl_gpri(struct bridge_softc
*, void *);
540 static int bridge_ioctl_spri(struct bridge_softc
*, void *);
541 static int bridge_ioctl_ght(struct bridge_softc
*, void *);
542 static int bridge_ioctl_sht(struct bridge_softc
*, void *);
543 static int bridge_ioctl_gfd(struct bridge_softc
*, void *);
544 static int bridge_ioctl_sfd(struct bridge_softc
*, void *);
545 static int bridge_ioctl_gma(struct bridge_softc
*, void *);
546 static int bridge_ioctl_sma(struct bridge_softc
*, void *);
547 static int bridge_ioctl_sifprio(struct bridge_softc
*, void *);
548 static int bridge_ioctl_sifcost(struct bridge_softc
*, void *);
549 static int bridge_ioctl_sifmaxaddr(struct bridge_softc
*, void *);
550 static int bridge_ioctl_addspan(struct bridge_softc
*, void *);
551 static int bridge_ioctl_delspan(struct bridge_softc
*, void *);
552 static int bridge_ioctl_gbparam32(struct bridge_softc
*, void *);
553 static int bridge_ioctl_gbparam64(struct bridge_softc
*, void *);
554 static int bridge_ioctl_grte(struct bridge_softc
*, void *);
555 static int bridge_ioctl_gifsstp32(struct bridge_softc
*, void *);
556 static int bridge_ioctl_gifsstp64(struct bridge_softc
*, void *);
557 static int bridge_ioctl_sproto(struct bridge_softc
*, void *);
558 static int bridge_ioctl_stxhc(struct bridge_softc
*, void *);
559 static int bridge_ioctl_purge(struct bridge_softc
*sc
, void *);
560 static int bridge_ioctl_gfilt(struct bridge_softc
*, void *);
561 static int bridge_ioctl_sfilt(struct bridge_softc
*, void *);
562 static int bridge_ioctl_ghostfilter(struct bridge_softc
*, void *);
563 static int bridge_ioctl_shostfilter(struct bridge_softc
*, void *);
565 static int bridge_pfil(struct mbuf
**, struct ifnet
*, struct ifnet
*,
567 static int bridge_ip_checkbasic(struct mbuf
**);
569 static int bridge_ip6_checkbasic(struct mbuf
**);
571 static int bridge_fragment(struct ifnet
*, struct mbuf
*,
572 struct ether_header
*, int, struct llc
*);
573 #endif /* PFIL_HOOKS */
575 static errno_t
bridge_set_bpf_tap(ifnet_t
, bpf_tap_mode
, bpf_packet_func
);
576 __private_extern__ errno_t
bridge_bpf_input(ifnet_t
, struct mbuf
*);
577 __private_extern__ errno_t
bridge_bpf_output(ifnet_t
, struct mbuf
*);
579 static void bridge_detach(ifnet_t
);
580 static void bridge_link_event(struct ifnet
*, u_int32_t
);
581 static void bridge_iflinkevent(struct ifnet
*);
582 static u_int32_t
bridge_updatelinkstatus(struct bridge_softc
*);
583 static int interface_media_active(struct ifnet
*);
584 static void bridge_schedule_delayed_call(struct bridge_delayed_call
*);
585 static void bridge_cancel_delayed_call(struct bridge_delayed_call
*);
586 static void bridge_cleanup_delayed_call(struct bridge_delayed_call
*);
587 static int bridge_host_filter(struct bridge_iflist
*, struct mbuf
*);
590 #define m_copypacket(m, how) m_copym(m, 0, M_COPYALL, how)
592 /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
593 #define VLANTAGOF(_m) 0
/* 802.1D STP destination MAC address (01:80:C2:00:00:00). */
u_int8_t bstp_etheraddr[ETHER_ADDR_LEN] =
    { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };

/* All-zeroes Ethernet address, used for comparisons. */
static u_int8_t ethernulladdr[ETHER_ADDR_LEN] =
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
602 static struct bstp_cb_ops bridge_ops
= {
603 .bcb_state
= bridge_state_change
,
604 .bcb_rtage
= bridge_rtable_expire
606 #endif /* BRIDGESTP */
608 SYSCTL_DECL(_net_link
);
609 SYSCTL_NODE(_net_link
, IFT_BRIDGE
, bridge
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0,
612 static int bridge_inherit_mac
= 0; /* share MAC with first bridge member */
613 SYSCTL_INT(_net_link_bridge
, OID_AUTO
, inherit_mac
,
614 CTLFLAG_RW
| CTLFLAG_LOCKED
,
615 &bridge_inherit_mac
, 0,
616 "Inherit MAC address from the first bridge member");
618 SYSCTL_INT(_net_link_bridge
, OID_AUTO
, rtable_prune_period
,
619 CTLFLAG_RW
| CTLFLAG_LOCKED
,
620 &bridge_rtable_prune_period
, 0,
621 "Interval between pruning of routing table");
623 static unsigned int bridge_rtable_hash_size_max
= BRIDGE_RTHASH_SIZE_MAX
;
624 SYSCTL_UINT(_net_link_bridge
, OID_AUTO
, rtable_hash_size_max
,
625 CTLFLAG_RW
| CTLFLAG_LOCKED
,
626 &bridge_rtable_hash_size_max
, 0,
627 "Maximum size of the routing hash table");
629 #if BRIDGE_DEBUG_DELAYED_CALLBACK
630 static int bridge_delayed_callback_delay
= 0;
631 SYSCTL_INT(_net_link_bridge
, OID_AUTO
, delayed_callback_delay
,
632 CTLFLAG_RW
| CTLFLAG_LOCKED
,
633 &bridge_delayed_callback_delay
, 0,
634 "Delay before calling delayed function");
637 static int bridge_bsd_mode
= 1;
638 #if (DEVELOPMENT || DEBUG)
639 SYSCTL_INT(_net_link_bridge
, OID_AUTO
, bsd_mode
,
640 CTLFLAG_RW
| CTLFLAG_LOCKED
,
642 "Bridge using bsd mode");
643 #endif /* (DEVELOPMENT || DEBUG) */
645 SYSCTL_STRUCT(_net_link_bridge
, OID_AUTO
,
646 hostfilterstats
, CTLFLAG_RD
| CTLFLAG_LOCKED
,
647 &bridge_hostfilter_stats
, bridge_hostfilter_stats
, "");
649 #if defined(PFIL_HOOKS)
650 static int pfil_onlyip
= 1; /* only pass IP[46] packets when pfil is enabled */
651 static int pfil_bridge
= 1; /* run pfil hooks on the bridge interface */
652 static int pfil_member
= 1; /* run pfil hooks on the member interface */
653 static int pfil_ipfw
= 0; /* layer2 filter with ipfw */
654 static int pfil_ipfw_arp
= 0; /* layer2 filter with ipfw */
655 static int pfil_local_phys
= 0; /* run pfil hooks on the physical interface */
656 /* for locally destined packets */
657 SYSCTL_INT(_net_link_bridge
, OID_AUTO
, pfil_onlyip
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
658 &pfil_onlyip
, 0, "Only pass IP packets when pfil is enabled");
659 SYSCTL_INT(_net_link_bridge
, OID_AUTO
, ipfw_arp
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
660 &pfil_ipfw_arp
, 0, "Filter ARP packets through IPFW layer2");
661 SYSCTL_INT(_net_link_bridge
, OID_AUTO
, pfil_bridge
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
662 &pfil_bridge
, 0, "Packet filter on the bridge interface");
663 SYSCTL_INT(_net_link_bridge
, OID_AUTO
, pfil_member
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
664 &pfil_member
, 0, "Packet filter on the member interface");
665 SYSCTL_INT(_net_link_bridge
, OID_AUTO
, pfil_local_phys
,
666 CTLFLAG_RW
| CTLFLAG_LOCKED
, &pfil_local_phys
, 0,
667 "Packet filter on the physical interface for locally destined packets");
668 #endif /* PFIL_HOOKS */
671 static int log_stp
= 0; /* log STP state changes */
672 SYSCTL_INT(_net_link_bridge
, OID_AUTO
, log_stp
, CTLFLAG_RW
,
673 &log_stp
, 0, "Log STP state changes");
674 #endif /* BRIDGESTP */
676 struct bridge_control
{
677 int (*bc_func
)(struct bridge_softc
*, void *);
678 unsigned int bc_argsize
;
679 unsigned int bc_flags
;
/* bridge_control bc_flags bits: how an ioctl's arguments are handled. */
#define BC_F_COPYIN		0x01	/* copy arguments in */
#define BC_F_COPYOUT		0x02	/* copy arguments out */
#define BC_F_SUSER		0x04	/* do super-user check */
686 static const struct bridge_control bridge_control_table32
[] = {
687 { bridge_ioctl_add
, sizeof(struct ifbreq
), /* 0 */
688 BC_F_COPYIN
| BC_F_SUSER
},
689 { bridge_ioctl_del
, sizeof(struct ifbreq
),
690 BC_F_COPYIN
| BC_F_SUSER
},
692 { bridge_ioctl_gifflags
, sizeof(struct ifbreq
),
693 BC_F_COPYIN
| BC_F_COPYOUT
},
694 { bridge_ioctl_sifflags
, sizeof(struct ifbreq
),
695 BC_F_COPYIN
| BC_F_SUSER
},
697 { bridge_ioctl_scache
, sizeof(struct ifbrparam
),
698 BC_F_COPYIN
| BC_F_SUSER
},
699 { bridge_ioctl_gcache
, sizeof(struct ifbrparam
),
702 { bridge_ioctl_gifs32
, sizeof(struct ifbifconf32
),
703 BC_F_COPYIN
| BC_F_COPYOUT
},
704 { bridge_ioctl_rts32
, sizeof(struct ifbaconf32
),
705 BC_F_COPYIN
| BC_F_COPYOUT
},
707 { bridge_ioctl_saddr32
, sizeof(struct ifbareq32
),
708 BC_F_COPYIN
| BC_F_SUSER
},
710 { bridge_ioctl_sto
, sizeof(struct ifbrparam
),
711 BC_F_COPYIN
| BC_F_SUSER
},
712 { bridge_ioctl_gto
, sizeof(struct ifbrparam
), /* 10 */
715 { bridge_ioctl_daddr32
, sizeof(struct ifbareq32
),
716 BC_F_COPYIN
| BC_F_SUSER
},
718 { bridge_ioctl_flush
, sizeof(struct ifbreq
),
719 BC_F_COPYIN
| BC_F_SUSER
},
721 { bridge_ioctl_gpri
, sizeof(struct ifbrparam
),
723 { bridge_ioctl_spri
, sizeof(struct ifbrparam
),
724 BC_F_COPYIN
| BC_F_SUSER
},
726 { bridge_ioctl_ght
, sizeof(struct ifbrparam
),
728 { bridge_ioctl_sht
, sizeof(struct ifbrparam
),
729 BC_F_COPYIN
| BC_F_SUSER
},
731 { bridge_ioctl_gfd
, sizeof(struct ifbrparam
),
733 { bridge_ioctl_sfd
, sizeof(struct ifbrparam
),
734 BC_F_COPYIN
| BC_F_SUSER
},
736 { bridge_ioctl_gma
, sizeof(struct ifbrparam
),
738 { bridge_ioctl_sma
, sizeof(struct ifbrparam
), /* 20 */
739 BC_F_COPYIN
| BC_F_SUSER
},
741 { bridge_ioctl_sifprio
, sizeof(struct ifbreq
),
742 BC_F_COPYIN
| BC_F_SUSER
},
744 { bridge_ioctl_sifcost
, sizeof(struct ifbreq
),
745 BC_F_COPYIN
| BC_F_SUSER
},
747 { bridge_ioctl_gfilt
, sizeof(struct ifbrparam
),
749 { bridge_ioctl_sfilt
, sizeof(struct ifbrparam
),
750 BC_F_COPYIN
| BC_F_SUSER
},
752 { bridge_ioctl_purge
, sizeof(struct ifbreq
),
753 BC_F_COPYIN
| BC_F_SUSER
},
755 { bridge_ioctl_addspan
, sizeof(struct ifbreq
),
756 BC_F_COPYIN
| BC_F_SUSER
},
757 { bridge_ioctl_delspan
, sizeof(struct ifbreq
),
758 BC_F_COPYIN
| BC_F_SUSER
},
760 { bridge_ioctl_gbparam32
, sizeof(struct ifbropreq32
),
763 { bridge_ioctl_grte
, sizeof(struct ifbrparam
),
766 { bridge_ioctl_gifsstp32
, sizeof(struct ifbpstpconf32
), /* 30 */
767 BC_F_COPYIN
| BC_F_COPYOUT
},
769 { bridge_ioctl_sproto
, sizeof(struct ifbrparam
),
770 BC_F_COPYIN
| BC_F_SUSER
},
772 { bridge_ioctl_stxhc
, sizeof(struct ifbrparam
),
773 BC_F_COPYIN
| BC_F_SUSER
},
775 { bridge_ioctl_sifmaxaddr
, sizeof(struct ifbreq
),
776 BC_F_COPYIN
| BC_F_SUSER
},
778 { bridge_ioctl_ghostfilter
, sizeof(struct ifbrhostfilter
),
779 BC_F_COPYIN
| BC_F_COPYOUT
},
780 { bridge_ioctl_shostfilter
, sizeof(struct ifbrhostfilter
),
781 BC_F_COPYIN
| BC_F_SUSER
},
784 static const struct bridge_control bridge_control_table64
[] = {
785 { bridge_ioctl_add
, sizeof(struct ifbreq
), /* 0 */
786 BC_F_COPYIN
| BC_F_SUSER
},
787 { bridge_ioctl_del
, sizeof(struct ifbreq
),
788 BC_F_COPYIN
| BC_F_SUSER
},
790 { bridge_ioctl_gifflags
, sizeof(struct ifbreq
),
791 BC_F_COPYIN
| BC_F_COPYOUT
},
792 { bridge_ioctl_sifflags
, sizeof(struct ifbreq
),
793 BC_F_COPYIN
| BC_F_SUSER
},
795 { bridge_ioctl_scache
, sizeof(struct ifbrparam
),
796 BC_F_COPYIN
| BC_F_SUSER
},
797 { bridge_ioctl_gcache
, sizeof(struct ifbrparam
),
800 { bridge_ioctl_gifs64
, sizeof(struct ifbifconf64
),
801 BC_F_COPYIN
| BC_F_COPYOUT
},
802 { bridge_ioctl_rts64
, sizeof(struct ifbaconf64
),
803 BC_F_COPYIN
| BC_F_COPYOUT
},
805 { bridge_ioctl_saddr64
, sizeof(struct ifbareq64
),
806 BC_F_COPYIN
| BC_F_SUSER
},
808 { bridge_ioctl_sto
, sizeof(struct ifbrparam
),
809 BC_F_COPYIN
| BC_F_SUSER
},
810 { bridge_ioctl_gto
, sizeof(struct ifbrparam
), /* 10 */
813 { bridge_ioctl_daddr64
, sizeof(struct ifbareq64
),
814 BC_F_COPYIN
| BC_F_SUSER
},
816 { bridge_ioctl_flush
, sizeof(struct ifbreq
),
817 BC_F_COPYIN
| BC_F_SUSER
},
819 { bridge_ioctl_gpri
, sizeof(struct ifbrparam
),
821 { bridge_ioctl_spri
, sizeof(struct ifbrparam
),
822 BC_F_COPYIN
| BC_F_SUSER
},
824 { bridge_ioctl_ght
, sizeof(struct ifbrparam
),
826 { bridge_ioctl_sht
, sizeof(struct ifbrparam
),
827 BC_F_COPYIN
| BC_F_SUSER
},
829 { bridge_ioctl_gfd
, sizeof(struct ifbrparam
),
831 { bridge_ioctl_sfd
, sizeof(struct ifbrparam
),
832 BC_F_COPYIN
| BC_F_SUSER
},
834 { bridge_ioctl_gma
, sizeof(struct ifbrparam
),
836 { bridge_ioctl_sma
, sizeof(struct ifbrparam
), /* 20 */
837 BC_F_COPYIN
| BC_F_SUSER
},
839 { bridge_ioctl_sifprio
, sizeof(struct ifbreq
),
840 BC_F_COPYIN
| BC_F_SUSER
},
842 { bridge_ioctl_sifcost
, sizeof(struct ifbreq
),
843 BC_F_COPYIN
| BC_F_SUSER
},
845 { bridge_ioctl_gfilt
, sizeof(struct ifbrparam
),
847 { bridge_ioctl_sfilt
, sizeof(struct ifbrparam
),
848 BC_F_COPYIN
| BC_F_SUSER
},
850 { bridge_ioctl_purge
, sizeof(struct ifbreq
),
851 BC_F_COPYIN
| BC_F_SUSER
},
853 { bridge_ioctl_addspan
, sizeof(struct ifbreq
),
854 BC_F_COPYIN
| BC_F_SUSER
},
855 { bridge_ioctl_delspan
, sizeof(struct ifbreq
),
856 BC_F_COPYIN
| BC_F_SUSER
},
858 { bridge_ioctl_gbparam64
, sizeof(struct ifbropreq64
),
861 { bridge_ioctl_grte
, sizeof(struct ifbrparam
),
864 { bridge_ioctl_gifsstp64
, sizeof(struct ifbpstpconf64
), /* 30 */
865 BC_F_COPYIN
| BC_F_COPYOUT
},
867 { bridge_ioctl_sproto
, sizeof(struct ifbrparam
),
868 BC_F_COPYIN
| BC_F_SUSER
},
870 { bridge_ioctl_stxhc
, sizeof(struct ifbrparam
),
871 BC_F_COPYIN
| BC_F_SUSER
},
873 { bridge_ioctl_sifmaxaddr
, sizeof(struct ifbreq
),
874 BC_F_COPYIN
| BC_F_SUSER
},
876 { bridge_ioctl_ghostfilter
, sizeof(struct ifbrhostfilter
),
877 BC_F_COPYIN
| BC_F_COPYOUT
},
878 { bridge_ioctl_shostfilter
, sizeof(struct ifbrhostfilter
),
879 BC_F_COPYIN
| BC_F_SUSER
},
882 static const unsigned int bridge_control_table_size
=
883 sizeof(bridge_control_table32
) / sizeof(bridge_control_table32
[0]);
885 static LIST_HEAD(, bridge_softc
) bridge_list
=
886 LIST_HEAD_INITIALIZER(bridge_list
);
888 static lck_grp_t
*bridge_lock_grp
= NULL
;
889 static lck_attr_t
*bridge_lock_attr
= NULL
;
891 #define BRIDGENAME "bridge"
892 #define BRIDGES_MAX IF_MAXUNIT
893 #define BRIDGE_ZONE_MAX_ELEM MIN(IFNETS_MAX, BRIDGES_MAX)
895 static struct if_clone bridge_cloner
=
896 IF_CLONE_INITIALIZER(BRIDGENAME
, bridge_clone_create
, bridge_clone_destroy
,
897 0, BRIDGES_MAX
, BRIDGE_ZONE_MAX_ELEM
, sizeof(struct bridge_softc
));
899 static int if_bridge_txstart
= 0;
900 SYSCTL_INT(_net_link_bridge
, OID_AUTO
, txstart
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
901 &if_bridge_txstart
, 0, "Bridge interface uses TXSTART model");
904 static int if_bridge_debug
= 0;
905 SYSCTL_INT(_net_link_bridge
, OID_AUTO
, debug
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
906 &if_bridge_debug
, 0, "Bridge debug");
908 static void printf_ether_header(struct ether_header
*);
909 static void printf_mbuf_data(mbuf_t
, size_t, size_t);
910 static void printf_mbuf_pkthdr(mbuf_t
, const char *, const char *);
911 static void printf_mbuf(mbuf_t
, const char *, const char *);
912 static void link_print(struct bridge_softc
* sc
);
914 static void bridge_lock(struct bridge_softc
*);
915 static void bridge_unlock(struct bridge_softc
*);
916 static int bridge_lock2ref(struct bridge_softc
*);
917 static void bridge_unref(struct bridge_softc
*);
918 static void bridge_xlock(struct bridge_softc
*);
919 static void bridge_xdrop(struct bridge_softc
*);
922 bridge_lock(struct bridge_softc
*sc
)
924 void *lr_saved
= __builtin_return_address(0);
926 BRIDGE_LOCK_ASSERT_NOTHELD(sc
);
930 sc
->lock_lr
[sc
->next_lock_lr
] = lr_saved
;
931 sc
->next_lock_lr
= (sc
->next_lock_lr
+ 1) % SO_LCKDBG_MAX
;
935 bridge_unlock(struct bridge_softc
*sc
)
937 void *lr_saved
= __builtin_return_address(0);
939 BRIDGE_LOCK_ASSERT_HELD(sc
);
941 sc
->unlock_lr
[sc
->next_unlock_lr
] = lr_saved
;
942 sc
->next_unlock_lr
= (sc
->next_unlock_lr
+ 1) % SO_LCKDBG_MAX
;
948 bridge_lock2ref(struct bridge_softc
*sc
)
951 void *lr_saved
= __builtin_return_address(0);
953 BRIDGE_LOCK_ASSERT_HELD(sc
);
955 if (sc
->sc_iflist_xcnt
> 0) {
961 sc
->unlock_lr
[sc
->next_unlock_lr
] = lr_saved
;
962 sc
->next_unlock_lr
= (sc
->next_unlock_lr
+ 1) % SO_LCKDBG_MAX
;
970 bridge_unref(struct bridge_softc
*sc
)
972 void *lr_saved
= __builtin_return_address(0);
974 BRIDGE_LOCK_ASSERT_NOTHELD(sc
);
977 sc
->lock_lr
[sc
->next_lock_lr
] = lr_saved
;
978 sc
->next_lock_lr
= (sc
->next_lock_lr
+ 1) % SO_LCKDBG_MAX
;
982 sc
->unlock_lr
[sc
->next_unlock_lr
] = lr_saved
;
983 sc
->next_unlock_lr
= (sc
->next_unlock_lr
+ 1) % SO_LCKDBG_MAX
;
984 if ((sc
->sc_iflist_xcnt
> 0) && (sc
->sc_iflist_ref
== 0)) {
993 bridge_xlock(struct bridge_softc
*sc
)
995 void *lr_saved
= __builtin_return_address(0);
997 BRIDGE_LOCK_ASSERT_HELD(sc
);
999 sc
->sc_iflist_xcnt
++;
1000 while (sc
->sc_iflist_ref
> 0) {
1001 sc
->unlock_lr
[sc
->next_unlock_lr
] = lr_saved
;
1002 sc
->next_unlock_lr
= (sc
->next_unlock_lr
+ 1) % SO_LCKDBG_MAX
;
1004 msleep(&sc
->sc_cv
, &sc
->sc_mtx
, PZERO
, "BRIDGE_XLOCK", NULL
);
1006 sc
->lock_lr
[sc
->next_lock_lr
] = lr_saved
;
1007 sc
->next_lock_lr
= (sc
->next_lock_lr
+ 1) % SO_LCKDBG_MAX
;
1012 bridge_xdrop(struct bridge_softc
*sc
)
1014 BRIDGE_LOCK_ASSERT_HELD(sc
);
1016 sc
->sc_iflist_xcnt
--;
1020 printf_mbuf_pkthdr(mbuf_t m
, const char *prefix
, const char *suffix
)
1023 printf("%spktlen: %u rcvif: 0x%llx header: 0x%llx "
1024 "nextpkt: 0x%llx%s",
1025 prefix
? prefix
: "", (unsigned int)mbuf_pkthdr_len(m
),
1026 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m
)),
1027 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_header(m
)),
1028 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_nextpkt(m
)),
1029 suffix
? suffix
: "");
1031 printf("%s<NULL>%s\n", prefix
, suffix
);
1036 printf_mbuf(mbuf_t m
, const char *prefix
, const char *suffix
)
1039 printf("%s0x%llx type: %u flags: 0x%x len: %u data: 0x%llx "
1040 "maxlen: %u datastart: 0x%llx next: 0x%llx%s",
1041 prefix
? prefix
: "", (uint64_t)VM_KERNEL_ADDRPERM(m
),
1042 mbuf_type(m
), mbuf_flags(m
), (unsigned int)mbuf_len(m
),
1043 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m
)),
1044 (unsigned int)mbuf_maxlen(m
),
1045 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_datastart(m
)),
1046 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_next(m
)),
1047 !suffix
|| (mbuf_flags(m
) & MBUF_PKTHDR
) ? "" : suffix
);
1048 if ((mbuf_flags(m
) & MBUF_PKTHDR
)) {
1049 printf_mbuf_pkthdr(m
, " ", suffix
);
1052 printf("%s<NULL>%s\n", prefix
, suffix
);
1057 printf_mbuf_data(mbuf_t m
, size_t offset
, size_t len
)
1061 size_t pktlen
, mlen
, maxlen
;
1064 pktlen
= mbuf_pkthdr_len(m
);
1066 if (offset
> pktlen
) {
1070 maxlen
= (pktlen
- offset
> len
) ? len
: pktlen
- offset
;
1074 for (i
= 0, j
= 0; i
< maxlen
; i
++, j
++) {
1085 printf("%02x%s", ptr
[j
], i
% 2 ? " " : "");
1091 printf_ether_header(struct ether_header
*eh
)
1093 printf("%02x:%02x:%02x:%02x:%02x:%02x > "
1094 "%02x:%02x:%02x:%02x:%02x:%02x 0x%04x ",
1095 eh
->ether_shost
[0], eh
->ether_shost
[1], eh
->ether_shost
[2],
1096 eh
->ether_shost
[3], eh
->ether_shost
[4], eh
->ether_shost
[5],
1097 eh
->ether_dhost
[0], eh
->ether_dhost
[1], eh
->ether_dhost
[2],
1098 eh
->ether_dhost
[3], eh
->ether_dhost
[4], eh
->ether_dhost
[5],
1099 ntohs(eh
->ether_type
));
1103 link_print(struct bridge_softc
* sc
)
1106 uint32_t sdl_buffer
[offsetof(struct sockaddr_dl
, sdl_data
) +
1107 IFNAMSIZ
+ ETHER_ADDR_LEN
];
1108 struct sockaddr_dl
*sdl
= (struct sockaddr_dl
*)sdl_buffer
;
1110 memset(sdl
, 0, sizeof(sdl_buffer
));
1111 sdl
->sdl_family
= AF_LINK
;
1112 sdl
->sdl_nlen
= strlen(sc
->sc_if_xname
);
1113 sdl
->sdl_alen
= ETHER_ADDR_LEN
;
1114 sdl
->sdl_len
= offsetof(struct sockaddr_dl
, sdl_data
);
1115 memcpy(sdl
->sdl_data
, sc
->sc_if_xname
, sdl
->sdl_nlen
);
1116 memcpy(LLADDR(sdl
), sc
->sc_defaddr
, ETHER_ADDR_LEN
);
1119 printf("sdl len %d index %d family %d type 0x%x nlen %d alen %d"
1120 " slen %d addr ", sdl
->sdl_len
, sdl
->sdl_index
,
1121 sdl
->sdl_family
, sdl
->sdl_type
, sdl
->sdl_nlen
,
1122 sdl
->sdl_alen
, sdl
->sdl_slen
);
1124 for (i
= 0; i
< sdl
->sdl_alen
; i
++) {
1125 printf("%s%x", i
? ":" : "", (CONST_LLADDR(sdl
))[i
]);
1130 #endif /* BRIDGE_DEBUG */
1135 * Pseudo-device attach routine.
1137 __private_extern__
int
1142 lck_grp_attr_t
*lck_grp_attr
= NULL
;
1144 bridge_rtnode_pool
= zinit(sizeof(struct bridge_rtnode
),
1145 1024 * sizeof(struct bridge_rtnode
), 0, "bridge_rtnode");
1146 zone_change(bridge_rtnode_pool
, Z_CALLERACCT
, FALSE
);
1148 lck_grp_attr
= lck_grp_attr_alloc_init();
1150 bridge_lock_grp
= lck_grp_alloc_init("if_bridge", lck_grp_attr
);
1152 bridge_lock_attr
= lck_attr_alloc_init();
1155 lck_attr_setdebug(bridge_lock_attr
);
1158 lck_mtx_init(&bridge_list_mtx
, bridge_lock_grp
, bridge_lock_attr
);
1160 /* can free the attributes once we've allocated the group lock */
1161 lck_grp_attr_free(lck_grp_attr
);
1163 LIST_INIT(&bridge_list
);
1167 #endif /* BRIDGESTP */
1169 error
= if_clone_attach(&bridge_cloner
);
1171 printf("%s: ifnet_clone_attach failed %d\n", __func__
, error
);
1177 #if defined(PFIL_HOOKS)
1179 * handler for net.link.bridge.pfil_ipfw
1182 sysctl_pfil_ipfw SYSCTL_HANDLER_ARGS
1184 #pragma unused(arg1, arg2)
1185 int enable
= pfil_ipfw
;
1188 error
= sysctl_handle_int(oidp
, &enable
, 0, req
);
1189 enable
= (enable
) ? 1 : 0;
1191 if (enable
!= pfil_ipfw
) {
1195 * Disable pfil so that ipfw doesnt run twice, if the user
1196 * really wants both then they can re-enable pfil_bridge and/or
1197 * pfil_member. Also allow non-ip packets as ipfw can filter by
1210 SYSCTL_PROC(_net_link_bridge
, OID_AUTO
, ipfw
, CTLTYPE_INT
| CTLFLAG_RW
,
1211 &pfil_ipfw
, 0, &sysctl_pfil_ipfw
, "I", "Layer2 filter with IPFW");
1212 #endif /* PFIL_HOOKS */
1215 bridge_ifnet_set_attrs(struct ifnet
* ifp
)
1219 error
= ifnet_set_mtu(ifp
, ETHERMTU
);
1221 printf("%s: ifnet_set_mtu failed %d\n", __func__
, error
);
1224 error
= ifnet_set_addrlen(ifp
, ETHER_ADDR_LEN
);
1226 printf("%s: ifnet_set_addrlen failed %d\n", __func__
, error
);
1229 error
= ifnet_set_hdrlen(ifp
, ETHER_HDR_LEN
);
1231 printf("%s: ifnet_set_hdrlen failed %d\n", __func__
, error
);
1234 error
= ifnet_set_flags(ifp
,
1235 IFF_BROADCAST
| IFF_SIMPLEX
| IFF_NOTRAILERS
| IFF_MULTICAST
,
1239 printf("%s: ifnet_set_flags failed %d\n", __func__
, error
);
1247 * bridge_clone_create:
1249 * Create a new bridge instance.
1252 bridge_clone_create(struct if_clone
*ifc
, uint32_t unit
, void *params
)
1254 #pragma unused(params)
1255 struct ifnet
*ifp
= NULL
;
1256 struct bridge_softc
*sc
= NULL
;
1257 struct bridge_softc
*sc2
= NULL
;
1258 struct ifnet_init_eparams init_params
;
1260 uint8_t eth_hostid
[ETHER_ADDR_LEN
];
1261 int fb
, retry
, has_hostid
;
1263 sc
= if_clone_softc_allocate(&bridge_cloner
);
1269 lck_mtx_init(&sc
->sc_mtx
, bridge_lock_grp
, bridge_lock_attr
);
1270 sc
->sc_brtmax
= BRIDGE_RTABLE_MAX
;
1271 sc
->sc_brttimeout
= BRIDGE_RTABLE_TIMEOUT
;
1272 sc
->sc_filter_flags
= IFBF_FILT_DEFAULT
;
1275 * For backwards compatibility with previous behaviour...
1276 * Switch off filtering on the bridge itself if BRIDGE_IPF is
1279 sc
->sc_filter_flags
&= ~IFBF_FILT_USEIPF
;
1282 if (bridge_bsd_mode
!= 0) {
1283 bridge_set_bsd_mode(sc
);
1286 TAILQ_INIT(&sc
->sc_iflist
);
1288 /* use the interface name as the unique id for ifp recycle */
1289 snprintf(sc
->sc_if_xname
, sizeof(sc
->sc_if_xname
), "%s%d",
1290 ifc
->ifc_name
, unit
);
1291 bzero(&init_params
, sizeof(init_params
));
1292 init_params
.ver
= IFNET_INIT_CURRENT_VERSION
;
1293 init_params
.len
= sizeof(init_params
);
1294 if (bridge_in_bsd_mode(sc
)) {
1295 /* Initialize our routing table. */
1296 error
= bridge_rtable_init(sc
);
1298 printf("%s: bridge_rtable_init failed %d\n",
1302 TAILQ_INIT(&sc
->sc_spanlist
);
1303 if (if_bridge_txstart
) {
1304 init_params
.start
= bridge_start
;
1306 init_params
.flags
= IFNET_INIT_LEGACY
;
1307 init_params
.output
= bridge_output
;
1309 init_params
.set_bpf_tap
= bridge_set_bpf_tap
;
1311 init_params
.uniqueid
= sc
->sc_if_xname
;
1312 init_params
.uniqueid_len
= strlen(sc
->sc_if_xname
);
1313 init_params
.sndq_maxlen
= IFQ_MAXLEN
;
1314 init_params
.name
= ifc
->ifc_name
;
1315 init_params
.unit
= unit
;
1316 init_params
.family
= IFNET_FAMILY_ETHERNET
;
1317 init_params
.type
= IFT_BRIDGE
;
1318 init_params
.demux
= ether_demux
;
1319 init_params
.add_proto
= ether_add_proto
;
1320 init_params
.del_proto
= ether_del_proto
;
1321 init_params
.check_multi
= ether_check_multi
;
1322 init_params
.framer_extended
= ether_frameout_extended
;
1323 init_params
.softc
= sc
;
1324 init_params
.ioctl
= bridge_ioctl
;
1325 init_params
.detach
= bridge_detach
;
1326 init_params
.broadcast_addr
= etherbroadcastaddr
;
1327 init_params
.broadcast_len
= ETHER_ADDR_LEN
;
1329 if (bridge_in_bsd_mode(sc
)) {
1330 error
= ifnet_allocate_extended(&init_params
, &ifp
);
1332 printf("%s: ifnet_allocate failed %d\n",
1337 error
= bridge_ifnet_set_attrs(ifp
);
1339 printf("%s: bridge_ifnet_set_attrs failed %d\n",
1346 * Generate an ethernet address with a locally administered address.
1348 * Since we are using random ethernet addresses for the bridge, it is
1349 * possible that we might have address collisions, so make sure that
1350 * this hardware address isn't already in use on another bridge.
1351 * The first try uses the "hostid" and falls back to read_frandom();
1352 * for "hostid", we use the MAC address of the first-encountered
1353 * Ethernet-type interface that is currently configured.
1356 has_hostid
= (uuid_get_ethernet(ð_hostid
[0]) == 0);
1357 for (retry
= 1; retry
!= 0;) {
1358 if (fb
|| has_hostid
== 0) {
1359 read_frandom(&sc
->sc_defaddr
, ETHER_ADDR_LEN
);
1360 sc
->sc_defaddr
[0] &= ~1; /* clear multicast bit */
1361 sc
->sc_defaddr
[0] |= 2; /* set the LAA bit */
1363 bcopy(ð_hostid
[0], &sc
->sc_defaddr
,
1365 sc
->sc_defaddr
[0] &= ~1; /* clear multicast bit */
1366 sc
->sc_defaddr
[0] |= 2; /* set the LAA bit */
1367 sc
->sc_defaddr
[3] = /* stir it up a bit */
1368 ((sc
->sc_defaddr
[3] & 0x0f) << 4) |
1369 ((sc
->sc_defaddr
[3] & 0xf0) >> 4);
1371 * Mix in the LSB as it's actually pretty significant,
1372 * see rdar://14076061
1375 (((sc
->sc_defaddr
[4] & 0x0f) << 4) |
1376 ((sc
->sc_defaddr
[4] & 0xf0) >> 4)) ^
1378 sc
->sc_defaddr
[5] = ifp
->if_unit
& 0xff;
1383 lck_mtx_lock(&bridge_list_mtx
);
1384 LIST_FOREACH(sc2
, &bridge_list
, sc_list
) {
1385 if (memcmp(sc
->sc_defaddr
,
1386 IF_LLADDR(sc2
->sc_ifp
), ETHER_ADDR_LEN
) == 0) {
1390 lck_mtx_unlock(&bridge_list_mtx
);
1393 sc
->sc_flags
&= ~SCF_MEDIA_ACTIVE
;
1396 if (if_bridge_debug
& BR_DBGF_LIFECYCLE
) {
1400 if (bridge_in_bsd_mode(sc
)) {
1401 error
= ifnet_attach(ifp
, NULL
);
1403 printf("%s: ifnet_attach failed %d\n", __func__
, error
);
1408 error
= ifnet_set_lladdr_and_type(ifp
, sc
->sc_defaddr
, ETHER_ADDR_LEN
,
1411 printf("%s: ifnet_set_lladdr_and_type failed %d\n", __func__
,
1416 if (bridge_in_bsd_mode(sc
)) {
1417 ifnet_set_offload(ifp
,
1418 IFNET_CSUM_IP
| IFNET_CSUM_TCP
| IFNET_CSUM_UDP
|
1419 IFNET_CSUM_TCPIPV6
| IFNET_CSUM_UDPIPV6
| IFNET_MULTIPAGES
);
1420 error
= bridge_set_tso(sc
);
1422 printf("%s: bridge_set_tso failed %d\n",
1427 bstp_attach(&sc
->sc_stp
, &bridge_ops
);
1428 #endif /* BRIDGESTP */
1431 lck_mtx_lock(&bridge_list_mtx
);
1432 LIST_INSERT_HEAD(&bridge_list
, sc
, sc_list
);
1433 lck_mtx_unlock(&bridge_list_mtx
);
1435 /* attach as ethernet */
1436 error
= bpf_attach(ifp
, DLT_EN10MB
, sizeof(struct ether_header
),
1441 printf("%s failed error %d\n", __func__
, error
);
1442 /* TBD: Clean up: sc, sc_rthash etc */
1449 * bridge_clone_destroy:
1451 * Destroy a bridge instance.
1454 bridge_clone_destroy(struct ifnet
*ifp
)
1456 struct bridge_softc
*sc
= ifp
->if_softc
;
1457 struct bridge_iflist
*bif
;
1461 if ((sc
->sc_flags
& SCF_DETACHING
)) {
1465 sc
->sc_flags
|= SCF_DETACHING
;
1467 bridge_ifstop(ifp
, 1);
1469 if (bridge_in_bsd_mode(sc
)) {
1470 bridge_cancel_delayed_call(&sc
->sc_resize_call
);
1472 bridge_cleanup_delayed_call(&sc
->sc_resize_call
);
1473 bridge_cleanup_delayed_call(&sc
->sc_aging_timer
);
1476 error
= ifnet_set_flags(ifp
, 0, IFF_UP
);
1478 printf("%s: ifnet_set_flags failed %d\n", __func__
, error
);
1481 while ((bif
= TAILQ_FIRST(&sc
->sc_iflist
)) != NULL
) {
1482 bridge_delete_member(sc
, bif
, 0);
1485 if (bridge_in_bsd_mode(sc
)) {
1486 while ((bif
= TAILQ_FIRST(&sc
->sc_spanlist
)) != NULL
) {
1487 bridge_delete_span(sc
, bif
);
1492 error
= ifnet_detach(ifp
);
1494 panic("%s: ifnet_detach(%p) failed %d\n",
1495 __func__
, ifp
, error
);
1500 #define DRVSPEC do { \
1501 if (ifd->ifd_cmd >= bridge_control_table_size) { \
1505 bc = &bridge_control_table[ifd->ifd_cmd]; \
1507 if (cmd == SIOCGDRVSPEC && \
1508 (bc->bc_flags & BC_F_COPYOUT) == 0) { \
1511 } else if (cmd == SIOCSDRVSPEC && \
1512 (bc->bc_flags & BC_F_COPYOUT) != 0) { \
1517 if (bc->bc_flags & BC_F_SUSER) { \
1518 error = kauth_authorize_generic(kauth_cred_get(), \
1519 KAUTH_GENERIC_ISSUSER); \
1524 if (ifd->ifd_len != bc->bc_argsize || \
1525 ifd->ifd_len > sizeof (args)) { \
1530 bzero(&args, sizeof (args)); \
1531 if (bc->bc_flags & BC_F_COPYIN) { \
1532 error = copyin(ifd->ifd_data, &args, ifd->ifd_len); \
1538 error = (*bc->bc_func)(sc, &args); \
1539 BRIDGE_UNLOCK(sc); \
1543 if (bc->bc_flags & BC_F_COPYOUT) \
1544 error = copyout(&args, ifd->ifd_data, ifd->ifd_len); \
1550 * Handle a control request from the operator.
1553 bridge_ioctl(struct ifnet
*ifp
, u_long cmd
, void *data
)
1555 struct bridge_softc
*sc
= ifp
->if_softc
;
1556 struct ifreq
*ifr
= (struct ifreq
*)data
;
1557 struct bridge_iflist
*bif
;
1560 BRIDGE_LOCK_ASSERT_NOTHELD(sc
);
1563 if (if_bridge_debug
& BR_DBGF_IOCTL
) {
1564 printf("%s: ifp %s cmd 0x%08lx (%c%c [%lu] %c %lu)\n",
1565 __func__
, ifp
->if_xname
, cmd
, (cmd
& IOC_IN
) ? 'I' : ' ',
1566 (cmd
& IOC_OUT
) ? 'O' : ' ', IOCPARM_LEN(cmd
),
1567 (char)IOCGROUP(cmd
), cmd
& 0xff);
1569 #endif /* BRIDGE_DEBUG */
1574 ifnet_set_flags(ifp
, IFF_UP
, IFF_UP
);
1577 case SIOCGIFMEDIA32
:
1578 case SIOCGIFMEDIA64
: {
1579 struct ifmediareq
*ifmr
= (struct ifmediareq
*)data
;
1580 user_addr_t user_addr
;
1582 user_addr
= (cmd
== SIOCGIFMEDIA64
) ?
1583 ((struct ifmediareq64
*)ifmr
)->ifmu_ulist
:
1584 CAST_USER_ADDR_T(((struct ifmediareq32
*)ifmr
)->ifmu_ulist
);
1586 ifmr
->ifm_status
= IFM_AVALID
;
1588 ifmr
->ifm_count
= 1;
1591 if (!(sc
->sc_flags
& SCF_DETACHING
) &&
1592 (sc
->sc_flags
& SCF_MEDIA_ACTIVE
)) {
1593 ifmr
->ifm_status
|= IFM_ACTIVE
;
1594 ifmr
->ifm_active
= ifmr
->ifm_current
=
1595 IFM_ETHER
| IFM_AUTO
;
1597 ifmr
->ifm_active
= ifmr
->ifm_current
= IFM_NONE
;
1601 if (user_addr
!= USER_ADDR_NULL
) {
1602 error
= copyout(&ifmr
->ifm_current
, user_addr
,
1612 case SIOCSDRVSPEC32
:
1613 case SIOCGDRVSPEC32
: {
1615 struct ifbreq ifbreq
;
1616 struct ifbifconf32 ifbifconf
;
1617 struct ifbareq32 ifbareq
;
1618 struct ifbaconf32 ifbaconf
;
1619 struct ifbrparam ifbrparam
;
1620 struct ifbropreq32 ifbropreq
;
1622 struct ifdrv32
*ifd
= (struct ifdrv32
*)data
;
1623 const struct bridge_control
*bridge_control_table
=
1624 bridge_control_table32
, *bc
;
1630 case SIOCSDRVSPEC64
:
1631 case SIOCGDRVSPEC64
: {
1633 struct ifbreq ifbreq
;
1634 struct ifbifconf64 ifbifconf
;
1635 struct ifbareq64 ifbareq
;
1636 struct ifbaconf64 ifbaconf
;
1637 struct ifbrparam ifbrparam
;
1638 struct ifbropreq64 ifbropreq
;
1640 struct ifdrv64
*ifd
= (struct ifdrv64
*)data
;
1641 const struct bridge_control
*bridge_control_table
=
1642 bridge_control_table64
, *bc
;
1650 if (!(ifp
->if_flags
& IFF_UP
) &&
1651 (ifp
->if_flags
& IFF_RUNNING
)) {
1653 * If interface is marked down and it is running,
1654 * then stop and disable it.
1657 bridge_ifstop(ifp
, 1);
1659 } else if ((ifp
->if_flags
& IFF_UP
) &&
1660 !(ifp
->if_flags
& IFF_RUNNING
)) {
1662 * If interface is marked up and it is stopped, then
1666 error
= bridge_init(ifp
);
1672 error
= ifnet_set_lladdr(ifp
, ifr
->ifr_addr
.sa_data
,
1673 ifr
->ifr_addr
.sa_len
);
1675 printf("%s: SIOCSIFLLADDR error %d\n", ifp
->if_xname
,
1681 if (ifr
->ifr_mtu
< 576) {
1686 if (TAILQ_EMPTY(&sc
->sc_iflist
)) {
1687 sc
->sc_ifp
->if_mtu
= ifr
->ifr_mtu
;
1691 TAILQ_FOREACH(bif
, &sc
->sc_iflist
, bif_next
) {
1692 if (bif
->bif_ifp
->if_mtu
!= (unsigned)ifr
->ifr_mtu
) {
1693 printf("%s: invalid MTU: %u(%s) != %d\n",
1694 sc
->sc_ifp
->if_xname
,
1695 bif
->bif_ifp
->if_mtu
,
1696 bif
->bif_ifp
->if_xname
, ifr
->ifr_mtu
);
1702 sc
->sc_ifp
->if_mtu
= ifr
->ifr_mtu
;
1708 error
= ether_ioctl(ifp
, cmd
, data
);
1710 if (error
!= 0 && error
!= EOPNOTSUPP
) {
1711 printf("%s: ifp %s cmd 0x%08lx "
1712 "(%c%c [%lu] %c %lu) failed error: %d\n",
1713 __func__
, ifp
->if_xname
, cmd
,
1714 (cmd
& IOC_IN
) ? 'I' : ' ',
1715 (cmd
& IOC_OUT
) ? 'O' : ' ',
1716 IOCPARM_LEN(cmd
), (char)IOCGROUP(cmd
),
1719 #endif /* BRIDGE_DEBUG */
1722 BRIDGE_LOCK_ASSERT_NOTHELD(sc
);
1731 * Clear or restore unwanted capabilities on the member interface
1734 bridge_mutecaps(struct bridge_softc
*sc
)
1736 struct bridge_iflist
*bif
;
1739 /* Initial bitmask of capabilities to test */
1740 mask
= BRIDGE_IFCAPS_MASK
;
1742 TAILQ_FOREACH(bif
, &sc
->sc_iflist
, bif_next
) {
1743 /* Every member must support it or its disabled */
1744 mask
&= bif
->bif_savedcaps
;
1747 TAILQ_FOREACH(bif
, &sc
->sc_iflist
, bif_next
) {
1748 enabled
= bif
->bif_ifp
->if_capenable
;
1749 enabled
&= ~BRIDGE_IFCAPS_STRIP
;
1750 /* strip off mask bits and enable them again if allowed */
1751 enabled
&= ~BRIDGE_IFCAPS_MASK
;
1754 bridge_set_ifcap(sc
, bif
, enabled
);
1759 bridge_set_ifcap(struct bridge_softc
*sc
, struct bridge_iflist
*bif
, int set
)
1761 struct ifnet
*ifp
= bif
->bif_ifp
;
1765 bzero(&ifr
, sizeof(ifr
));
1766 ifr
.ifr_reqcap
= set
;
1768 if (ifp
->if_capenable
!= set
) {
1770 error
= (*ifp
->if_ioctl
)(ifp
, SIOCSIFCAP
, (caddr_t
)&ifr
);
1771 IFF_UNLOCKGIANT(ifp
);
1773 printf("%s: %s error setting interface capabilities "
1774 "on %s\n", __func__
, sc
->sc_ifp
->if_xname
,
1779 #endif /* HAS_IF_CAP */
1782 bridge_set_tso(struct bridge_softc
*sc
)
1784 struct bridge_iflist
*bif
;
1785 u_int32_t tso_v4_mtu
;
1786 u_int32_t tso_v6_mtu
;
1787 ifnet_offload_t offload
;
1790 /* By default, support TSO */
1791 offload
= sc
->sc_ifp
->if_hwassist
| IFNET_TSO_IPV4
| IFNET_TSO_IPV6
;
1792 tso_v4_mtu
= IP_MAXPACKET
;
1793 tso_v6_mtu
= IP_MAXPACKET
;
1795 /* Use the lowest common denominator of the members */
1796 TAILQ_FOREACH(bif
, &sc
->sc_iflist
, bif_next
) {
1797 ifnet_t ifp
= bif
->bif_ifp
;
1803 if (offload
& IFNET_TSO_IPV4
) {
1804 if (ifp
->if_hwassist
& IFNET_TSO_IPV4
) {
1805 if (tso_v4_mtu
> ifp
->if_tso_v4_mtu
) {
1806 tso_v4_mtu
= ifp
->if_tso_v4_mtu
;
1809 offload
&= ~IFNET_TSO_IPV4
;
1813 if (offload
& IFNET_TSO_IPV6
) {
1814 if (ifp
->if_hwassist
& IFNET_TSO_IPV6
) {
1815 if (tso_v6_mtu
> ifp
->if_tso_v6_mtu
) {
1816 tso_v6_mtu
= ifp
->if_tso_v6_mtu
;
1819 offload
&= ~IFNET_TSO_IPV6
;
1825 if (offload
!= sc
->sc_ifp
->if_hwassist
) {
1826 error
= ifnet_set_offload(sc
->sc_ifp
, offload
);
1829 if (if_bridge_debug
& BR_DBGF_LIFECYCLE
) {
1830 printf("%s: ifnet_set_offload(%s, 0x%x) "
1831 "failed %d\n", __func__
,
1832 sc
->sc_ifp
->if_xname
, offload
, error
);
1834 #endif /* BRIDGE_DEBUG */
1838 * For ifnet_set_tso_mtu() sake, the TSO MTU must be at least
1839 * as large as the interface MTU
1841 if (sc
->sc_ifp
->if_hwassist
& IFNET_TSO_IPV4
) {
1842 if (tso_v4_mtu
< sc
->sc_ifp
->if_mtu
) {
1843 tso_v4_mtu
= sc
->sc_ifp
->if_mtu
;
1845 error
= ifnet_set_tso_mtu(sc
->sc_ifp
, AF_INET
,
1849 if (if_bridge_debug
& BR_DBGF_LIFECYCLE
) {
1850 printf("%s: ifnet_set_tso_mtu(%s, "
1851 "AF_INET, %u) failed %d\n",
1852 __func__
, sc
->sc_ifp
->if_xname
,
1855 #endif /* BRIDGE_DEBUG */
1859 if (sc
->sc_ifp
->if_hwassist
& IFNET_TSO_IPV6
) {
1860 if (tso_v6_mtu
< sc
->sc_ifp
->if_mtu
) {
1861 tso_v6_mtu
= sc
->sc_ifp
->if_mtu
;
1863 error
= ifnet_set_tso_mtu(sc
->sc_ifp
, AF_INET6
,
1867 if (if_bridge_debug
& BR_DBGF_LIFECYCLE
) {
1868 printf("%s: ifnet_set_tso_mtu(%s, "
1869 "AF_INET6, %u) failed %d\n",
1870 __func__
, sc
->sc_ifp
->if_xname
,
1873 #endif /* BRIDGE_DEBUG */
1883 * bridge_lookup_member:
1885 * Lookup a bridge member interface.
1887 static struct bridge_iflist
*
1888 bridge_lookup_member(struct bridge_softc
*sc
, const char *name
)
1890 struct bridge_iflist
*bif
;
1893 BRIDGE_LOCK_ASSERT_HELD(sc
);
1895 TAILQ_FOREACH(bif
, &sc
->sc_iflist
, bif_next
) {
1897 if (strcmp(ifp
->if_xname
, name
) == 0) {
1906 * bridge_lookup_member_if:
1908 * Lookup a bridge member interface by ifnet*.
1910 static struct bridge_iflist
*
1911 bridge_lookup_member_if(struct bridge_softc
*sc
, struct ifnet
*member_ifp
)
1913 struct bridge_iflist
*bif
;
1915 BRIDGE_LOCK_ASSERT_HELD(sc
);
1917 TAILQ_FOREACH(bif
, &sc
->sc_iflist
, bif_next
) {
1918 if (bif
->bif_ifp
== member_ifp
) {
1927 bridge_iff_input(void *cookie
, ifnet_t ifp
, protocol_family_t protocol
,
1928 mbuf_t
*data
, char **frame_ptr
)
1930 #pragma unused(protocol)
1932 struct bridge_iflist
*bif
= (struct bridge_iflist
*)cookie
;
1933 struct bridge_softc
*sc
= bif
->bif_sc
;
1938 if ((m
->m_flags
& M_PROTO1
)) {
1942 if (*frame_ptr
>= (char *)mbuf_datastart(m
) &&
1943 *frame_ptr
<= (char *)mbuf_data(m
)) {
1945 frmlen
= (char *)mbuf_data(m
) - *frame_ptr
;
1948 if (if_bridge_debug
& BR_DBGF_INPUT
) {
1949 printf("%s: %s from %s m 0x%llx data 0x%llx frame 0x%llx %s "
1950 "frmlen %lu\n", __func__
, sc
->sc_ifp
->if_xname
,
1951 ifp
->if_xname
, (uint64_t)VM_KERNEL_ADDRPERM(m
),
1952 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m
)),
1953 (uint64_t)VM_KERNEL_ADDRPERM(*frame_ptr
),
1954 included
? "inside" : "outside", frmlen
);
1956 if (if_bridge_debug
& BR_DBGF_MBUF
) {
1957 printf_mbuf(m
, "bridge_iff_input[", "\n");
1958 printf_ether_header((struct ether_header
*)
1959 (void *)*frame_ptr
);
1960 printf_mbuf_data(m
, 0, 20);
1964 #endif /* BRIDGE_DEBUG */
1966 /* Move data pointer to start of frame to the link layer header */
1968 (void) mbuf_setdata(m
, (char *)mbuf_data(m
) - frmlen
,
1969 mbuf_len(m
) + frmlen
);
1970 (void) mbuf_pkthdr_adjustlen(m
, frmlen
);
1972 printf("%s: frame_ptr outside mbuf\n", __func__
);
1976 error
= bridge_input(ifp
, m
, *frame_ptr
);
1978 /* Adjust packet back to original */
1980 (void) mbuf_setdata(m
, (char *)mbuf_data(m
) + frmlen
,
1981 mbuf_len(m
) - frmlen
);
1982 (void) mbuf_pkthdr_adjustlen(m
, -frmlen
);
1985 if ((if_bridge_debug
& BR_DBGF_INPUT
) &&
1986 (if_bridge_debug
& BR_DBGF_MBUF
)) {
1988 printf_mbuf(m
, "bridge_iff_input]", "\n");
1990 #endif /* BRIDGE_DEBUG */
1993 BRIDGE_LOCK_ASSERT_NOTHELD(sc
);
1998 #if BRIDGE_MEMBER_OUT_FILTER
2000 bridge_iff_output(void *cookie
, ifnet_t ifp
, protocol_family_t protocol
,
2003 #pragma unused(protocol)
2005 struct bridge_iflist
*bif
= (struct bridge_iflist
*)cookie
;
2006 struct bridge_softc
*sc
= bif
->bif_sc
;
2009 if ((m
->m_flags
& M_PROTO1
)) {
2014 if (if_bridge_debug
& BR_DBGF_OUTPUT
) {
2015 printf("%s: %s from %s m 0x%llx data 0x%llx\n", __func__
,
2016 sc
->sc_ifp
->if_xname
, ifp
->if_xname
,
2017 (uint64_t)VM_KERNEL_ADDRPERM(m
),
2018 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m
)));
2020 #endif /* BRIDGE_DEBUG */
2022 error
= bridge_member_output(sc
, ifp
, m
);
2024 printf("%s: bridge_member_output failed error %d\n", __func__
,
2029 BRIDGE_LOCK_ASSERT_NOTHELD(sc
);
2033 #endif /* BRIDGE_MEMBER_OUT_FILTER */
2036 bridge_iff_event(void *cookie
, ifnet_t ifp
, protocol_family_t protocol
,
2037 const struct kev_msg
*event_msg
)
2039 #pragma unused(protocol)
2040 struct bridge_iflist
*bif
= (struct bridge_iflist
*)cookie
;
2041 struct bridge_softc
*sc
= bif
->bif_sc
;
2043 if (event_msg
->vendor_code
== KEV_VENDOR_APPLE
&&
2044 event_msg
->kev_class
== KEV_NETWORK_CLASS
&&
2045 event_msg
->kev_subclass
== KEV_DL_SUBCLASS
) {
2047 if (if_bridge_debug
& BR_DBGF_LIFECYCLE
) {
2048 printf("%s: %s event_code %u - %s\n", __func__
,
2049 ifp
->if_xname
, event_msg
->event_code
,
2050 dlil_kev_dl_code_str(event_msg
->event_code
));
2052 #endif /* BRIDGE_DEBUG */
2054 switch (event_msg
->event_code
) {
2055 case KEV_DL_IF_DETACHING
:
2056 case KEV_DL_IF_DETACHED
: {
2057 bridge_ifdetach(bif
, ifp
);
2060 case KEV_DL_LINK_OFF
:
2061 case KEV_DL_LINK_ON
: {
2062 bridge_iflinkevent(ifp
);
2064 bstp_linkstate(ifp
, event_msg
->event_code
);
2065 #endif /* BRIDGESTP */
2068 case KEV_DL_SIFFLAGS
: {
2069 if ((bif
->bif_flags
& BIFF_PROMISC
) == 0 &&
2070 (ifp
->if_flags
& IFF_UP
)) {
2073 error
= ifnet_set_promiscuous(ifp
, 1);
2076 "ifnet_set_promiscuous (%s)"
2078 __func__
, ifp
->if_xname
,
2081 bif
->bif_flags
|= BIFF_PROMISC
;
2086 case KEV_DL_IFCAP_CHANGED
: {
2099 * bridge_iff_detached:
2101 * Detach an interface from a bridge. Called when a member
2102 * interface is detaching.
2105 bridge_iff_detached(void *cookie
, ifnet_t ifp
)
2107 struct bridge_iflist
*bif
= (struct bridge_iflist
*)cookie
;
2110 if (if_bridge_debug
& BR_DBGF_LIFECYCLE
) {
2111 printf("%s: %s\n", __func__
, ifp
->if_xname
);
2113 #endif /* BRIDGE_DEBUG */
2115 bridge_ifdetach(bif
, ifp
);
2117 _FREE(bif
, M_DEVBUF
);
2121 bridge_proto_input(ifnet_t ifp
, protocol_family_t protocol
, mbuf_t packet
,
2124 #pragma unused(protocol, packet, header)
2126 printf("%s: unexpected packet from %s\n", __func__
,
2128 #endif /* BRIDGE_DEBUG */
2133 bridge_attach_protocol(struct ifnet
*ifp
)
2136 struct ifnet_attach_proto_param reg
;
2139 if (if_bridge_debug
& BR_DBGF_LIFECYCLE
) {
2140 printf("%s: %s\n", __func__
, ifp
->if_xname
);
2142 #endif /* BRIDGE_DEBUG */
2144 bzero(®
, sizeof(reg
));
2145 reg
.input
= bridge_proto_input
;
2147 error
= ifnet_attach_protocol(ifp
, PF_BRIDGE
, ®
);
2149 printf("%s: ifnet_attach_protocol(%s) failed, %d\n",
2150 __func__
, ifp
->if_xname
, error
);
2157 bridge_detach_protocol(struct ifnet
*ifp
)
2162 if (if_bridge_debug
& BR_DBGF_LIFECYCLE
) {
2163 printf("%s: %s\n", __func__
, ifp
->if_xname
);
2165 #endif /* BRIDGE_DEBUG */
2166 error
= ifnet_detach_protocol(ifp
, PF_BRIDGE
);
2168 printf("%s: ifnet_detach_protocol(%s) failed, %d\n",
2169 __func__
, ifp
->if_xname
, error
);
2176 * bridge_delete_member:
2178 * Delete the specified member interface.
2181 bridge_delete_member(struct bridge_softc
*sc
, struct bridge_iflist
*bif
,
2184 struct ifnet
*ifs
= bif
->bif_ifp
, *bifp
= sc
->sc_ifp
;
2185 int lladdr_changed
= 0, error
, filt_attached
;
2186 uint8_t eaddr
[ETHER_ADDR_LEN
];
2187 u_int32_t event_code
= 0;
2190 BRIDGE_LOCK_ASSERT_HELD(sc
);
2191 VERIFY(ifs
!= NULL
);
2193 bsd_mode
= bridge_in_bsd_mode(sc
);
2196 * First, remove the member from the list first so it cannot be found anymore
2197 * when we release the bridge lock below
2200 TAILQ_REMOVE(&sc
->sc_iflist
, bif
, bif_next
);
2204 switch (ifs
->if_type
) {
2208 * Take the interface out of promiscuous mode.
2210 if (bif
->bif_flags
& BIFF_PROMISC
) {
2212 * Unlock to prevent deadlock with bridge_iff_event() in
2213 * case the driver generates an interface event
2216 (void) ifnet_set_promiscuous(ifs
, 0);
2222 /* currently not supported */
2230 /* reneable any interface capabilities */
2231 bridge_set_ifcap(sc
, bif
, bif
->bif_savedcaps
);
2235 if (bif
->bif_flags
& BIFF_PROTO_ATTACHED
) {
2236 /* Respect lock ordering with DLIL lock */
2238 (void) bridge_detach_protocol(ifs
);
2242 if (bsd_mode
&& (bif
->bif_ifflags
& IFBIF_STP
) != 0) {
2243 bstp_disable(&bif
->bif_stp
);
2245 #endif /* BRIDGESTP */
2248 * If removing the interface that gave the bridge its mac address, set
2249 * the mac address of the bridge to the address of the next member, or
2250 * to its default address if no members are left.
2252 if (bridge_inherit_mac
&& sc
->sc_ifaddr
== ifs
) {
2253 ifnet_release(sc
->sc_ifaddr
);
2254 if (TAILQ_EMPTY(&sc
->sc_iflist
)) {
2255 bcopy(sc
->sc_defaddr
, eaddr
, ETHER_ADDR_LEN
);
2256 sc
->sc_ifaddr
= NULL
;
2259 TAILQ_FIRST(&sc
->sc_iflist
)->bif_ifp
;
2260 bcopy(IF_LLADDR(fif
), eaddr
, ETHER_ADDR_LEN
);
2261 sc
->sc_ifaddr
= fif
;
2262 ifnet_reference(fif
); /* for sc_ifaddr */
2268 bridge_mutecaps(sc
); /* recalculate now this interface is removed */
2269 #endif /* HAS_IF_CAP */
2271 error
= bridge_set_tso(sc
);
2273 printf("%s: bridge_set_tso failed %d\n", __func__
, error
);
2277 bridge_rtdelete(sc
, ifs
, IFBF_FLUSHALL
);
2280 KASSERT(bif
->bif_addrcnt
== 0,
2281 ("%s: %d bridge routes referenced", __func__
, bif
->bif_addrcnt
));
2283 filt_attached
= bif
->bif_flags
& BIFF_FILTER_ATTACHED
;
2286 * Update link status of the bridge based on its remaining members
2288 event_code
= bridge_updatelinkstatus(sc
);
2294 if (lladdr_changed
&&
2295 (error
= ifnet_set_lladdr(bifp
, eaddr
, ETHER_ADDR_LEN
)) != 0) {
2296 printf("%s: ifnet_set_lladdr failed %d\n", __func__
, error
);
2299 if (event_code
!= 0) {
2300 bridge_link_event(bifp
, event_code
);
2305 bstp_destroy(&bif
->bif_stp
); /* prepare to free */
2307 #endif /* BRIDGESTP */
2309 if (filt_attached
) {
2310 iflt_detach(bif
->bif_iff_ref
);
2312 _FREE(bif
, M_DEVBUF
);
2315 ifs
->if_bridge
= NULL
;
2322 * bridge_delete_span:
2324 * Delete the specified span interface.
2327 bridge_delete_span(struct bridge_softc
*sc
, struct bridge_iflist
*bif
)
2329 BRIDGE_LOCK_ASSERT_HELD(sc
);
2331 KASSERT(bif
->bif_ifp
->if_bridge
== NULL
,
2332 ("%s: not a span interface", __func__
));
2334 ifnet_release(bif
->bif_ifp
);
2336 TAILQ_REMOVE(&sc
->sc_spanlist
, bif
, bif_next
);
2337 _FREE(bif
, M_DEVBUF
);
2341 bridge_ioctl_add(struct bridge_softc
*sc
, void *arg
)
2343 struct ifbreq
*req
= arg
;
2344 struct bridge_iflist
*bif
= NULL
;
2345 struct ifnet
*ifs
, *bifp
= sc
->sc_ifp
;
2346 int error
= 0, lladdr_changed
= 0;
2347 uint8_t eaddr
[ETHER_ADDR_LEN
];
2348 struct iff_filter iff
;
2349 u_int32_t event_code
= 0;
2350 boolean_t bsd_mode
= bridge_in_bsd_mode(sc
);
2352 ifs
= ifunit(req
->ifbr_ifsname
);
2356 if (ifs
->if_ioctl
== NULL
) { /* must be supported */
2360 if (IFNET_IS_INTCOPROC(ifs
)) {
2365 /* If it's in the span list, it can't be a member. */
2366 TAILQ_FOREACH(bif
, &sc
->sc_spanlist
, bif_next
)
2367 if (ifs
== bif
->bif_ifp
) {
2372 if (ifs
->if_bridge
== sc
) {
2376 if (ifs
->if_bridge
!= NULL
) {
2380 switch (ifs
->if_type
) {
2383 /* permitted interface types */
2386 /* currently not supported */
2392 bif
= _MALLOC(sizeof(*bif
), M_DEVBUF
, M_WAITOK
| M_ZERO
);
2398 ifnet_reference(ifs
);
2399 bif
->bif_ifflags
= IFBIF_LEARNING
| IFBIF_DISCOVER
;
2401 bif
->bif_savedcaps
= ifs
->if_capenable
;
2402 #endif /* HAS_IF_CAP */
2405 /* Allow the first Ethernet member to define the MTU */
2406 if (TAILQ_EMPTY(&sc
->sc_iflist
)) {
2407 sc
->sc_ifp
->if_mtu
= ifs
->if_mtu
;
2408 } else if (sc
->sc_ifp
->if_mtu
!= ifs
->if_mtu
) {
2409 printf("%s: %s: invalid MTU for %s", __func__
,
2410 sc
->sc_ifp
->if_xname
,
2416 * Assign the interface's MAC address to the bridge if it's the first
2417 * member and the MAC address of the bridge has not been changed from
2418 * the default (randomly) generated one.
2420 if (bridge_inherit_mac
&& TAILQ_EMPTY(&sc
->sc_iflist
) &&
2421 !memcmp(IF_LLADDR(sc
->sc_ifp
), sc
->sc_defaddr
, ETHER_ADDR_LEN
)) {
2422 bcopy(IF_LLADDR(ifs
), eaddr
, ETHER_ADDR_LEN
);
2423 sc
->sc_ifaddr
= ifs
;
2424 ifnet_reference(ifs
); /* for sc_ifaddr */
2428 ifs
->if_bridge
= sc
;
2431 bstp_create(&sc
->sc_stp
, &bif
->bif_stp
, bif
->bif_ifp
);
2433 #endif /* BRIDGESTP */
2436 * XXX: XLOCK HERE!?!
2438 TAILQ_INSERT_TAIL(&sc
->sc_iflist
, bif
, bif_next
);
2441 /* Set interface capabilities to the intersection set of all members */
2442 bridge_mutecaps(sc
);
2443 #endif /* HAS_IF_CAP */
2449 * Place the interface into promiscuous mode.
2451 switch (ifs
->if_type
) {
2454 error
= ifnet_set_promiscuous(ifs
, 1);
2456 /* Ignore error when device is not up */
2457 if (error
!= ENETDOWN
) {
2462 bif
->bif_flags
|= BIFF_PROMISC
;
2471 * The new member may change the link status of the bridge interface
2473 if (interface_media_active(ifs
)) {
2474 bif
->bif_flags
|= BIFF_MEDIA_ACTIVE
;
2476 bif
->bif_flags
&= ~BIFF_MEDIA_ACTIVE
;
2479 event_code
= bridge_updatelinkstatus(sc
);
2482 * Respect lock ordering with DLIL lock for the following operations
2489 * install an interface filter
2491 memset(&iff
, 0, sizeof(struct iff_filter
));
2492 iff
.iff_cookie
= bif
;
2493 iff
.iff_name
= "com.apple.kernel.bsd.net.if_bridge";
2495 iff
.iff_input
= bridge_iff_input
;
2496 #if BRIDGE_MEMBER_OUT_FILTER
2497 iff
.iff_output
= bridge_iff_output
;
2498 #endif /* BRIDGE_MEMBER_OUT_FILTER */
2500 iff
.iff_event
= bridge_iff_event
;
2501 iff
.iff_detached
= bridge_iff_detached
;
2502 error
= dlil_attach_filter(ifs
, &iff
, &bif
->bif_iff_ref
,
2503 DLIL_IFF_TSO
| DLIL_IFF_INTERNAL
);
2505 printf("%s: iflt_attach failed %d\n", __func__
, error
);
2509 bif
->bif_flags
|= BIFF_FILTER_ATTACHED
;
2512 * install an dummy "bridge" protocol
2514 if ((error
= bridge_attach_protocol(ifs
)) != 0) {
2516 printf("%s: bridge_attach_protocol failed %d\n",
2522 bif
->bif_flags
|= BIFF_PROTO_ATTACHED
;
2524 if (lladdr_changed
&&
2525 (error
= ifnet_set_lladdr(bifp
, eaddr
, ETHER_ADDR_LEN
)) != 0) {
2526 printf("%s: ifnet_set_lladdr failed %d\n", __func__
, error
);
2529 if (event_code
!= 0) {
2530 bridge_link_event(bifp
, event_code
);
2536 if (error
&& bif
!= NULL
) {
2537 bridge_delete_member(sc
, bif
, 1);
2544 bridge_ioctl_del(struct bridge_softc
*sc
, void *arg
)
2546 struct ifbreq
*req
= arg
;
2547 struct bridge_iflist
*bif
;
2549 bif
= bridge_lookup_member(sc
, req
->ifbr_ifsname
);
2554 bridge_delete_member(sc
, bif
, 0);
2560 bridge_ioctl_purge(struct bridge_softc
*sc
, void *arg
)
2562 #pragma unused(sc, arg)
2567 bridge_ioctl_gifflags(struct bridge_softc
*sc
, void *arg
)
2569 struct ifbreq
*req
= arg
;
2570 struct bridge_iflist
*bif
;
2572 bif
= bridge_lookup_member(sc
, req
->ifbr_ifsname
);
2577 if (bridge_in_bsd_mode(sc
)) {
2578 struct bstp_port
*bp
;
2581 req
->ifbr_state
= bp
->bp_state
;
2582 req
->ifbr_priority
= bp
->bp_priority
;
2583 req
->ifbr_path_cost
= bp
->bp_path_cost
;
2584 req
->ifbr_proto
= bp
->bp_protover
;
2585 req
->ifbr_role
= bp
->bp_role
;
2586 req
->ifbr_stpflags
= bp
->bp_flags
;
2587 /* Copy STP state options as flags */
2588 if (bp
->bp_operedge
) {
2589 req
->ifbr_ifsflags
|= IFBIF_BSTP_EDGE
;
2591 if (bp
->bp_flags
& BSTP_PORT_AUTOEDGE
) {
2592 req
->ifbr_ifsflags
|= IFBIF_BSTP_AUTOEDGE
;
2594 if (bp
->bp_ptp_link
) {
2595 req
->ifbr_ifsflags
|= IFBIF_BSTP_PTP
;
2597 if (bp
->bp_flags
& BSTP_PORT_AUTOPTP
) {
2598 req
->ifbr_ifsflags
|= IFBIF_BSTP_AUTOPTP
;
2600 if (bp
->bp_flags
& BSTP_PORT_ADMEDGE
) {
2601 req
->ifbr_ifsflags
|= IFBIF_BSTP_ADMEDGE
;
2603 if (bp
->bp_flags
& BSTP_PORT_ADMCOST
) {
2604 req
->ifbr_ifsflags
|= IFBIF_BSTP_ADMCOST
;
2607 req
->ifbr_ifsflags
= bif
->bif_ifflags
;
2608 req
->ifbr_portno
= bif
->bif_ifp
->if_index
& 0xfff;
2609 req
->ifbr_addrcnt
= bif
->bif_addrcnt
;
2610 req
->ifbr_addrmax
= bif
->bif_addrmax
;
2611 req
->ifbr_addrexceeded
= bif
->bif_addrexceeded
;
2617 bridge_ioctl_sifflags(struct bridge_softc
*sc
, void *arg
)
2619 struct ifbreq
*req
= arg
;
2620 struct bridge_iflist
*bif
;
2622 struct bstp_port
*bp
;
2624 #endif /* BRIDGESTP */
2626 if (!bridge_in_bsd_mode(sc
)) {
2630 bif
= bridge_lookup_member(sc
, req
->ifbr_ifsname
);
2635 if (req
->ifbr_ifsflags
& IFBIF_SPAN
) {
2636 /* SPAN is readonly */
2642 if (req
->ifbr_ifsflags
& IFBIF_STP
) {
2643 if ((bif
->bif_ifflags
& IFBIF_STP
) == 0) {
2644 error
= bstp_enable(&bif
->bif_stp
);
2650 if ((bif
->bif_ifflags
& IFBIF_STP
) != 0) {
2651 bstp_disable(&bif
->bif_stp
);
2655 /* Pass on STP flags */
2657 bstp_set_edge(bp
, req
->ifbr_ifsflags
& IFBIF_BSTP_EDGE
? 1 : 0);
2658 bstp_set_autoedge(bp
, req
->ifbr_ifsflags
& IFBIF_BSTP_AUTOEDGE
? 1 : 0);
2659 bstp_set_ptp(bp
, req
->ifbr_ifsflags
& IFBIF_BSTP_PTP
? 1 : 0);
2660 bstp_set_autoptp(bp
, req
->ifbr_ifsflags
& IFBIF_BSTP_AUTOPTP
? 1 : 0);
2661 #else /* !BRIDGESTP */
2662 if (req
->ifbr_ifsflags
& IFBIF_STP
) {
2665 #endif /* !BRIDGESTP */
2667 /* Save the bits relating to the bridge */
2668 bif
->bif_ifflags
= req
->ifbr_ifsflags
& IFBIFMASK
;
2675 bridge_ioctl_scache(struct bridge_softc
*sc
, void *arg
)
2677 struct ifbrparam
*param
= arg
;
2679 sc
->sc_brtmax
= param
->ifbrp_csize
;
2680 if (bridge_in_bsd_mode(sc
)) {
2687 bridge_ioctl_gcache(struct bridge_softc
*sc
, void *arg
)
2689 struct ifbrparam
*param
= arg
;
2691 param
->ifbrp_csize
= sc
->sc_brtmax
;
2696 #define BRIDGE_IOCTL_GIFS do { \
2697 struct bridge_iflist *bif; \
2698 struct ifbreq breq; \
2699 char *buf, *outbuf; \
2700 unsigned int count, buflen, len; \
2703 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) \
2705 if (bridge_in_bsd_mode(sc)) { \
2706 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) \
2710 buflen = sizeof (breq) * count; \
2711 if (bifc->ifbic_len == 0) { \
2712 bifc->ifbic_len = buflen; \
2715 BRIDGE_UNLOCK(sc); \
2716 outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
2721 len = min(bifc->ifbic_len, buflen); \
2722 bzero(&breq, sizeof (breq)); \
2723 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
2724 if (len < sizeof (breq)) \
2727 snprintf(breq.ifbr_ifsname, sizeof (breq.ifbr_ifsname), \
2728 "%s", bif->bif_ifp->if_xname); \
2729 /* Fill in the ifbreq structure */ \
2730 error = bridge_ioctl_gifflags(sc, &breq); \
2733 memcpy(buf, &breq, sizeof (breq)); \
2735 buf += sizeof (breq); \
2736 len -= sizeof (breq); \
2738 if (bridge_in_bsd_mode(sc)) { \
2739 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) { \
2740 if (len < sizeof (breq)) \
2743 snprintf(breq.ifbr_ifsname, \
2744 sizeof (breq.ifbr_ifsname), \
2745 "%s", bif->bif_ifp->if_xname); \
2746 breq.ifbr_ifsflags = bif->bif_ifflags; \
2748 = bif->bif_ifp->if_index & 0xfff; \
2749 memcpy(buf, &breq, sizeof (breq)); \
2751 buf += sizeof (breq); \
2752 len -= sizeof (breq); \
2756 BRIDGE_UNLOCK(sc); \
2757 bifc->ifbic_len = sizeof (breq) * count; \
2758 error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); \
2760 _FREE(outbuf, M_TEMP); \
2764 bridge_ioctl_gifs64(struct bridge_softc
*sc
, void *arg
)
2766 struct ifbifconf64
*bifc
= arg
;
2775 bridge_ioctl_gifs32(struct bridge_softc
*sc
, void *arg
)
2777 struct ifbifconf32
*bifc
= arg
;
2785 #define BRIDGE_IOCTL_RTS do { \
2786 struct bridge_rtnode *brt; \
2788 char *outbuf = NULL; \
2789 unsigned int count, buflen, len; \
2790 unsigned long now; \
2792 if (bac->ifbac_len == 0) \
2795 bzero(&bareq, sizeof (bareq)); \
2797 if (!bridge_in_bsd_mode(sc)) { \
2800 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) \
2802 buflen = sizeof (bareq) * count; \
2804 BRIDGE_UNLOCK(sc); \
2805 outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
2810 len = min(bac->ifbac_len, buflen); \
2811 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { \
2812 if (len < sizeof (bareq)) \
2814 snprintf(bareq.ifba_ifsname, sizeof (bareq.ifba_ifsname), \
2815 "%s", brt->brt_ifp->if_xname); \
2816 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof (brt->brt_addr)); \
2817 bareq.ifba_vlan = brt->brt_vlan; \
2818 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { \
2819 now = (unsigned long) net_uptime(); \
2820 if (now < brt->brt_expire) \
2821 bareq.ifba_expire = \
2822 brt->brt_expire - now; \
2824 bareq.ifba_expire = 0; \
2825 bareq.ifba_flags = brt->brt_flags; \
2827 memcpy(buf, &bareq, sizeof (bareq)); \
2829 buf += sizeof (bareq); \
2830 len -= sizeof (bareq); \
2833 bac->ifbac_len = sizeof (bareq) * count; \
2834 if (outbuf != NULL) { \
2835 BRIDGE_UNLOCK(sc); \
2836 error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); \
2837 _FREE(outbuf, M_TEMP); \
2844 bridge_ioctl_rts64(struct bridge_softc
*sc
, void *arg
)
2846 struct ifbaconf64
*bac
= arg
;
2847 struct ifbareq64 bareq
;
2855 bridge_ioctl_rts32(struct bridge_softc
*sc
, void *arg
)
2857 struct ifbaconf32
*bac
= arg
;
2858 struct ifbareq32 bareq
;
2866 bridge_ioctl_saddr32(struct bridge_softc
*sc
, void *arg
)
2868 struct ifbareq32
*req
= arg
;
2869 struct bridge_iflist
*bif
;
2872 if (!bridge_in_bsd_mode(sc
)) {
2876 bif
= bridge_lookup_member(sc
, req
->ifba_ifsname
);
2881 error
= bridge_rtupdate(sc
, req
->ifba_dst
, req
->ifba_vlan
, bif
, 1,
2888 bridge_ioctl_saddr64(struct bridge_softc
*sc
, void *arg
)
2890 struct ifbareq64
*req
= arg
;
2891 struct bridge_iflist
*bif
;
2894 if (!bridge_in_bsd_mode(sc
)) {
2898 bif
= bridge_lookup_member(sc
, req
->ifba_ifsname
);
2903 error
= bridge_rtupdate(sc
, req
->ifba_dst
, req
->ifba_vlan
, bif
, 1,
2910 bridge_ioctl_sto(struct bridge_softc
*sc
, void *arg
)
2912 struct ifbrparam
*param
= arg
;
2914 sc
->sc_brttimeout
= param
->ifbrp_ctime
;
2919 bridge_ioctl_gto(struct bridge_softc
*sc
, void *arg
)
2921 struct ifbrparam
*param
= arg
;
2923 param
->ifbrp_ctime
= sc
->sc_brttimeout
;
2928 bridge_ioctl_daddr32(struct bridge_softc
*sc
, void *arg
)
2930 struct ifbareq32
*req
= arg
;
2932 if (!bridge_in_bsd_mode(sc
)) {
2935 return bridge_rtdaddr(sc
, req
->ifba_dst
, req
->ifba_vlan
);
2939 bridge_ioctl_daddr64(struct bridge_softc
*sc
, void *arg
)
2941 struct ifbareq64
*req
= arg
;
2943 if (!bridge_in_bsd_mode(sc
)) {
2946 return bridge_rtdaddr(sc
, req
->ifba_dst
, req
->ifba_vlan
);
2950 bridge_ioctl_flush(struct bridge_softc
*sc
, void *arg
)
2952 struct ifbreq
*req
= arg
;
2954 if (!bridge_in_bsd_mode(sc
)) {
2957 bridge_rtflush(sc
, req
->ifbr_ifsflags
);
2962 bridge_ioctl_gpri(struct bridge_softc
*sc
, void *arg
)
2964 struct ifbrparam
*param
= arg
;
2965 struct bstp_state
*bs
= &sc
->sc_stp
;
2967 if (!bridge_in_bsd_mode(sc
)) {
2970 param
->ifbrp_prio
= bs
->bs_bridge_priority
;
2975 bridge_ioctl_spri(struct bridge_softc
*sc
, void *arg
)
2978 struct ifbrparam
*param
= arg
;
2980 if (!bridge_in_bsd_mode(sc
)) {
2983 return bstp_set_priority(&sc
->sc_stp
, param
->ifbrp_prio
);
2984 #else /* !BRIDGESTP */
2985 #pragma unused(sc, arg)
2987 #endif /* !BRIDGESTP */
2991 bridge_ioctl_ght(struct bridge_softc
*sc
, void *arg
)
2993 struct ifbrparam
*param
= arg
;
2994 struct bstp_state
*bs
= &sc
->sc_stp
;
2996 if (!bridge_in_bsd_mode(sc
)) {
2999 param
->ifbrp_hellotime
= bs
->bs_bridge_htime
>> 8;
3004 bridge_ioctl_sht(struct bridge_softc
*sc
, void *arg
)
3007 struct ifbrparam
*param
= arg
;
3009 if (!bridge_in_bsd_mode(sc
)) {
3012 return bstp_set_htime(&sc
->sc_stp
, param
->ifbrp_hellotime
);
3013 #else /* !BRIDGESTP */
3014 #pragma unused(sc, arg)
3016 #endif /* !BRIDGESTP */
3020 bridge_ioctl_gfd(struct bridge_softc
*sc
, void *arg
)
3022 struct ifbrparam
*param
;
3023 struct bstp_state
*bs
;
3025 if (!bridge_in_bsd_mode(sc
)) {
3030 param
->ifbrp_fwddelay
= bs
->bs_bridge_fdelay
>> 8;
3035 bridge_ioctl_sfd(struct bridge_softc
*sc
, void *arg
)
3038 struct ifbrparam
*param
= arg
;
3040 if (!bridge_in_bsd_mode(sc
)) {
3043 return bstp_set_fdelay(&sc
->sc_stp
, param
->ifbrp_fwddelay
);
3044 #else /* !BRIDGESTP */
3045 #pragma unused(sc, arg)
3047 #endif /* !BRIDGESTP */
3051 bridge_ioctl_gma(struct bridge_softc
*sc
, void *arg
)
3053 struct ifbrparam
*param
;
3054 struct bstp_state
*bs
;
3056 if (!bridge_in_bsd_mode(sc
)) {
3061 param
->ifbrp_maxage
= bs
->bs_bridge_max_age
>> 8;
3066 bridge_ioctl_sma(struct bridge_softc
*sc
, void *arg
)
3069 struct ifbrparam
*param
= arg
;
3071 if (!bridge_in_bsd_mode(sc
)) {
3074 return bstp_set_maxage(&sc
->sc_stp
, param
->ifbrp_maxage
);
3075 #else /* !BRIDGESTP */
3076 #pragma unused(sc, arg)
3078 #endif /* !BRIDGESTP */
3082 bridge_ioctl_sifprio(struct bridge_softc
*sc
, void *arg
)
3085 struct ifbreq
*req
= arg
;
3086 struct bridge_iflist
*bif
;
3088 if (!bridge_in_bsd_mode(sc
)) {
3091 bif
= bridge_lookup_member(sc
, req
->ifbr_ifsname
);
3096 return bstp_set_port_priority(&bif
->bif_stp
, req
->ifbr_priority
);
3097 #else /* !BRIDGESTP */
3098 #pragma unused(sc, arg)
3100 #endif /* !BRIDGESTP */
3104 bridge_ioctl_sifcost(struct bridge_softc
*sc
, void *arg
)
3107 struct ifbreq
*req
= arg
;
3108 struct bridge_iflist
*bif
;
3110 if (!bridge_in_bsd_mode(sc
)) {
3113 bif
= bridge_lookup_member(sc
, req
->ifbr_ifsname
);
3118 return bstp_set_path_cost(&bif
->bif_stp
, req
->ifbr_path_cost
);
3119 #else /* !BRIDGESTP */
3120 #pragma unused(sc, arg)
3122 #endif /* !BRIDGESTP */
3126 bridge_ioctl_gfilt(struct bridge_softc
*sc
, void *arg
)
3128 struct ifbrparam
*param
= arg
;
3130 param
->ifbrp_filter
= sc
->sc_filter_flags
;
3136 bridge_ioctl_sfilt(struct bridge_softc
*sc
, void *arg
)
3138 struct ifbrparam
*param
= arg
;
3140 if (param
->ifbrp_filter
& ~IFBF_FILT_MASK
) {
3145 if (param
->ifbrp_filter
& IFBF_FILT_USEIPF
) {
3150 sc
->sc_filter_flags
= param
->ifbrp_filter
;
3156 bridge_ioctl_sifmaxaddr(struct bridge_softc
*sc
, void *arg
)
3158 struct ifbreq
*req
= arg
;
3159 struct bridge_iflist
*bif
;
3161 bif
= bridge_lookup_member(sc
, req
->ifbr_ifsname
);
3166 bif
->bif_addrmax
= req
->ifbr_addrmax
;
3171 bridge_ioctl_addspan(struct bridge_softc
*sc
, void *arg
)
3173 struct ifbreq
*req
= arg
;
3174 struct bridge_iflist
*bif
= NULL
;
3177 if (!bridge_in_bsd_mode(sc
)) {
3180 ifs
= ifunit(req
->ifbr_ifsname
);
3185 if (IFNET_IS_INTCOPROC(ifs
)) {
3189 TAILQ_FOREACH(bif
, &sc
->sc_spanlist
, bif_next
)
3190 if (ifs
== bif
->bif_ifp
) {
3194 if (ifs
->if_bridge
!= NULL
) {
3198 switch (ifs
->if_type
) {
3203 /* currently not supported */
3209 bif
= _MALLOC(sizeof(*bif
), M_DEVBUF
, M_WAITOK
| M_ZERO
);
3215 bif
->bif_ifflags
= IFBIF_SPAN
;
3217 ifnet_reference(bif
->bif_ifp
);
3219 TAILQ_INSERT_HEAD(&sc
->sc_spanlist
, bif
, bif_next
);
3225 bridge_ioctl_delspan(struct bridge_softc
*sc
, void *arg
)
3227 struct ifbreq
*req
= arg
;
3228 struct bridge_iflist
*bif
;
3231 if (!bridge_in_bsd_mode(sc
)) {
3234 ifs
= ifunit(req
->ifbr_ifsname
);
3239 TAILQ_FOREACH(bif
, &sc
->sc_spanlist
, bif_next
)
3240 if (ifs
== bif
->bif_ifp
) {
3248 bridge_delete_span(sc
, bif
);
3253 #define BRIDGE_IOCTL_GBPARAM do { \
3254 struct bstp_state *bs = &sc->sc_stp; \
3255 struct bstp_port *root_port; \
3257 req->ifbop_maxage = bs->bs_bridge_max_age >> 8; \
3258 req->ifbop_hellotime = bs->bs_bridge_htime >> 8; \
3259 req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8; \
3261 root_port = bs->bs_root_port; \
3262 if (root_port == NULL) \
3263 req->ifbop_root_port = 0; \
3265 req->ifbop_root_port = root_port->bp_ifp->if_index; \
3267 req->ifbop_holdcount = bs->bs_txholdcount; \
3268 req->ifbop_priority = bs->bs_bridge_priority; \
3269 req->ifbop_protocol = bs->bs_protover; \
3270 req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; \
3271 req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; \
3272 req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; \
3273 req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; \
3274 req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; \
3275 req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; \
3279 bridge_ioctl_gbparam32(struct bridge_softc
*sc
, void *arg
)
3281 struct ifbropreq32
*req
= arg
;
3283 if (bridge_in_bsd_mode(sc
)) {
3284 BRIDGE_IOCTL_GBPARAM
;
3290 bridge_ioctl_gbparam64(struct bridge_softc
*sc
, void *arg
)
3292 struct ifbropreq64
*req
= arg
;
3294 if (bridge_in_bsd_mode(sc
)) {
3295 BRIDGE_IOCTL_GBPARAM
;
3301 bridge_ioctl_grte(struct bridge_softc
*sc
, void *arg
)
3303 struct ifbrparam
*param
= arg
;
3305 param
->ifbrp_cexceeded
= sc
->sc_brtexceeded
;
3309 #define BRIDGE_IOCTL_GIFSSTP do { \
3310 struct bridge_iflist *bif; \
3311 struct bstp_port *bp; \
3312 struct ifbpstpreq bpreq; \
3313 char *buf, *outbuf; \
3314 unsigned int count, buflen, len; \
3317 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
3318 if ((bif->bif_ifflags & IFBIF_STP) != 0) \
3322 buflen = sizeof (bpreq) * count; \
3323 if (bifstp->ifbpstp_len == 0) { \
3324 bifstp->ifbpstp_len = buflen; \
3328 BRIDGE_UNLOCK(sc); \
3329 outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
3334 len = min(bifstp->ifbpstp_len, buflen); \
3335 bzero(&bpreq, sizeof (bpreq)); \
3336 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
3337 if (len < sizeof (bpreq)) \
3340 if ((bif->bif_ifflags & IFBIF_STP) == 0) \
3343 bp = &bif->bif_stp; \
3344 bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff; \
3345 bpreq.ifbp_fwd_trans = bp->bp_forward_transitions; \
3346 bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost; \
3347 bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id; \
3348 bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; \
3349 bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id; \
3351 memcpy(buf, &bpreq, sizeof (bpreq)); \
3353 buf += sizeof (bpreq); \
3354 len -= sizeof (bpreq); \
3357 BRIDGE_UNLOCK(sc); \
3358 bifstp->ifbpstp_len = sizeof (bpreq) * count; \
3359 error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); \
3361 _FREE(outbuf, M_TEMP); \
3366 bridge_ioctl_gifsstp32(struct bridge_softc
*sc
, void *arg
)
3368 struct ifbpstpconf32
*bifstp
= arg
;
3371 if (bridge_in_bsd_mode(sc
)) {
3372 BRIDGE_IOCTL_GIFSSTP
;
3378 bridge_ioctl_gifsstp64(struct bridge_softc
*sc
, void *arg
)
3380 struct ifbpstpconf64
*bifstp
= arg
;
3383 if (bridge_in_bsd_mode(sc
)) {
3384 BRIDGE_IOCTL_GIFSSTP
;
3390 bridge_ioctl_sproto(struct bridge_softc
*sc
, void *arg
)
3393 struct ifbrparam
*param
= arg
;
3395 if (!bridge_in_bsd_mode(sc
)) {
3398 return bstp_set_protocol(&sc
->sc_stp
, param
->ifbrp_proto
);
3399 #else /* !BRIDGESTP */
3400 #pragma unused(sc, arg)
3402 #endif /* !BRIDGESTP */
3406 bridge_ioctl_stxhc(struct bridge_softc
*sc
, void *arg
)
3409 struct ifbrparam
*param
= arg
;
3411 if (!bridge_in_bsd_mode(sc
)) {
3414 return bstp_set_holdcount(&sc
->sc_stp
, param
->ifbrp_txhc
);
3415 #else /* !BRIDGESTP */
3416 #pragma unused(sc, arg)
3418 #endif /* !BRIDGESTP */
3423 bridge_ioctl_ghostfilter(struct bridge_softc
*sc
, void *arg
)
3425 struct ifbrhostfilter
*req
= arg
;
3426 struct bridge_iflist
*bif
;
3428 bif
= bridge_lookup_member(sc
, req
->ifbrhf_ifsname
);
3433 bzero(req
, sizeof(struct ifbrhostfilter
));
3434 if (bif
->bif_flags
& BIFF_HOST_FILTER
) {
3435 req
->ifbrhf_flags
|= IFBRHF_ENABLED
;
3436 bcopy(bif
->bif_hf_hwsrc
, req
->ifbrhf_hwsrca
,
3438 req
->ifbrhf_ipsrc
= bif
->bif_hf_ipsrc
.s_addr
;
3444 bridge_ioctl_shostfilter(struct bridge_softc
*sc
, void *arg
)
3446 struct ifbrhostfilter
*req
= arg
;
3447 struct bridge_iflist
*bif
;
3449 bif
= bridge_lookup_member(sc
, req
->ifbrhf_ifsname
);
3454 INC_ATOMIC_INT64_LIM(net_api_stats
.nas_vmnet_total
);
3456 if (req
->ifbrhf_flags
& IFBRHF_ENABLED
) {
3457 bif
->bif_flags
|= BIFF_HOST_FILTER
;
3459 if (req
->ifbrhf_flags
& IFBRHF_HWSRC
) {
3460 bcopy(req
->ifbrhf_hwsrca
, bif
->bif_hf_hwsrc
,
3462 if (bcmp(req
->ifbrhf_hwsrca
, ethernulladdr
,
3463 ETHER_ADDR_LEN
) != 0) {
3464 bif
->bif_flags
|= BIFF_HF_HWSRC
;
3466 bif
->bif_flags
&= ~BIFF_HF_HWSRC
;
3469 if (req
->ifbrhf_flags
& IFBRHF_IPSRC
) {
3470 bif
->bif_hf_ipsrc
.s_addr
= req
->ifbrhf_ipsrc
;
3471 if (bif
->bif_hf_ipsrc
.s_addr
!= INADDR_ANY
) {
3472 bif
->bif_flags
|= BIFF_HF_IPSRC
;
3474 bif
->bif_flags
&= ~BIFF_HF_IPSRC
;
3478 bif
->bif_flags
&= ~(BIFF_HOST_FILTER
| BIFF_HF_HWSRC
|
3480 bzero(bif
->bif_hf_hwsrc
, ETHER_ADDR_LEN
);
3481 bif
->bif_hf_ipsrc
.s_addr
= INADDR_ANY
;
3491 * Detach an interface from a bridge. Called when a member
3492 * interface is detaching.
3494 __private_extern__
void
3495 bridge_ifdetach(struct bridge_iflist
*bif
, struct ifnet
*ifp
)
3497 struct bridge_softc
*sc
= ifp
->if_bridge
;
3500 if (if_bridge_debug
& BR_DBGF_LIFECYCLE
) {
3501 printf("%s: %s\n", __func__
, ifp
->if_xname
);
3503 #endif /* BRIDGE_DEBUG */
3505 /* Check if the interface is a bridge member */
3508 bif
= bridge_lookup_member_if(sc
, ifp
);
3510 bridge_delete_member(sc
, bif
, 1);
3515 /* Check if the interface is a span port */
3516 lck_mtx_lock(&bridge_list_mtx
);
3517 LIST_FOREACH(sc
, &bridge_list
, sc_list
) {
3518 if (bridge_in_bsd_mode(sc
)) {
3520 TAILQ_FOREACH(bif
, &sc
->sc_spanlist
, bif_next
)
3521 if (ifp
== bif
->bif_ifp
) {
3522 bridge_delete_span(sc
, bif
);
3528 lck_mtx_unlock(&bridge_list_mtx
);
3532 * interface_media_active:
3534 * Tells if an interface media is active.
3537 interface_media_active(struct ifnet
*ifp
)
3539 struct ifmediareq ifmr
;
3542 bzero(&ifmr
, sizeof(ifmr
));
3543 if (ifnet_ioctl(ifp
, 0, SIOCGIFMEDIA
, &ifmr
) == 0) {
3544 if ((ifmr
.ifm_status
& IFM_AVALID
) && ifmr
.ifm_count
> 0) {
3545 status
= ifmr
.ifm_status
& IFM_ACTIVE
? 1 : 0;
3553 * bridge_updatelinkstatus:
3555 * Update the media active status of the bridge based on the
3556 * media active status of its member.
3557 * If changed, return the corresponding onf/off link event.
3560 bridge_updatelinkstatus(struct bridge_softc
*sc
)
3562 struct bridge_iflist
*bif
;
3563 int active_member
= 0;
3564 u_int32_t event_code
= 0;
3566 BRIDGE_LOCK_ASSERT_HELD(sc
);
3569 * Find out if we have an active interface
3571 TAILQ_FOREACH(bif
, &sc
->sc_iflist
, bif_next
) {
3572 if (bif
->bif_flags
& BIFF_MEDIA_ACTIVE
) {
3578 if (active_member
&& !(sc
->sc_flags
& SCF_MEDIA_ACTIVE
)) {
3579 sc
->sc_flags
|= SCF_MEDIA_ACTIVE
;
3580 event_code
= KEV_DL_LINK_ON
;
3581 } else if (!active_member
&& (sc
->sc_flags
& SCF_MEDIA_ACTIVE
)) {
3582 sc
->sc_flags
&= ~SCF_MEDIA_ACTIVE
;
3583 event_code
= KEV_DL_LINK_OFF
;
3590 * bridge_iflinkevent:
3593 bridge_iflinkevent(struct ifnet
*ifp
)
3595 struct bridge_softc
*sc
= ifp
->if_bridge
;
3596 struct bridge_iflist
*bif
;
3597 u_int32_t event_code
= 0;
3600 if (if_bridge_debug
& BR_DBGF_LIFECYCLE
) {
3601 printf("%s: %s\n", __func__
, ifp
->if_xname
);
3603 #endif /* BRIDGE_DEBUG */
3605 /* Check if the interface is a bridge member */
3611 bif
= bridge_lookup_member_if(sc
, ifp
);
3613 if (interface_media_active(ifp
)) {
3614 bif
->bif_flags
|= BIFF_MEDIA_ACTIVE
;
3616 bif
->bif_flags
&= ~BIFF_MEDIA_ACTIVE
;
3619 event_code
= bridge_updatelinkstatus(sc
);
3623 if (event_code
!= 0) {
3624 bridge_link_event(sc
->sc_ifp
, event_code
);
3629 * bridge_delayed_callback:
3631 * Makes a delayed call
3634 bridge_delayed_callback(void *param
)
3636 struct bridge_delayed_call
*call
= (struct bridge_delayed_call
*)param
;
3637 struct bridge_softc
*sc
= call
->bdc_sc
;
3639 #if BRIDGE_DEBUG_DELAYED_CALLBACK
3640 if (bridge_delayed_callback_delay
> 0) {
3643 ts
.tv_sec
= bridge_delayed_callback_delay
;
3646 printf("%s: sleeping for %d seconds\n",
3647 __func__
, bridge_delayed_callback_delay
);
3649 msleep(&bridge_delayed_callback_delay
, NULL
, PZERO
,
3652 printf("%s: awoken\n", __func__
);
3654 #endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */
3658 #if BRIDGE_DEBUG_DELAYED_CALLBACK
3659 if (if_bridge_debug
& BR_DBGF_DELAYED_CALL
) {
3660 printf("%s: %s call 0x%llx flags 0x%x\n", __func__
,
3661 sc
->sc_if_xname
, (uint64_t)VM_KERNEL_ADDRPERM(call
),
3664 #endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */
3666 if (call
->bdc_flags
& BDCF_CANCELLING
) {
3669 if ((sc
->sc_flags
& SCF_DETACHING
) == 0) {
3670 (*call
->bdc_func
)(sc
);
3673 call
->bdc_flags
&= ~BDCF_OUTSTANDING
;
3678 * bridge_schedule_delayed_call:
3680 * Schedule a function to be called on a separate thread
3681 * The actual call may be scheduled to run at a given time or ASAP.
3684 bridge_schedule_delayed_call(struct bridge_delayed_call
*call
)
3686 uint64_t deadline
= 0;
3687 struct bridge_softc
*sc
= call
->bdc_sc
;
3689 BRIDGE_LOCK_ASSERT_HELD(sc
);
3691 if ((sc
->sc_flags
& SCF_DETACHING
) ||
3692 (call
->bdc_flags
& (BDCF_OUTSTANDING
| BDCF_CANCELLING
))) {
3696 if (call
->bdc_ts
.tv_sec
|| call
->bdc_ts
.tv_nsec
) {
3697 nanoseconds_to_absolutetime(
3698 (uint64_t)call
->bdc_ts
.tv_sec
* NSEC_PER_SEC
+
3699 call
->bdc_ts
.tv_nsec
, &deadline
);
3700 clock_absolutetime_interval_to_deadline(deadline
, &deadline
);
3703 call
->bdc_flags
= BDCF_OUTSTANDING
;
3705 #if BRIDGE_DEBUG_DELAYED_CALLBACK
3706 if (if_bridge_debug
& BR_DBGF_DELAYED_CALL
) {
3707 printf("%s: %s call 0x%llx flags 0x%x\n", __func__
,
3708 sc
->sc_if_xname
, (uint64_t)VM_KERNEL_ADDRPERM(call
),
3711 #endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */
3713 if (call
->bdc_ts
.tv_sec
|| call
->bdc_ts
.tv_nsec
) {
3714 thread_call_func_delayed(
3715 (thread_call_func_t
)bridge_delayed_callback
,
3718 if (call
->bdc_thread_call
== NULL
) {
3719 call
->bdc_thread_call
= thread_call_allocate(
3720 (thread_call_func_t
)bridge_delayed_callback
,
3723 thread_call_enter(call
->bdc_thread_call
);
3728 * bridge_cancel_delayed_call:
3730 * Cancel a queued or running delayed call.
3731 * If call is running, does not return until the call is done to
3732 * prevent race condition with the brigde interface getting destroyed
3735 bridge_cancel_delayed_call(struct bridge_delayed_call
*call
)
3738 struct bridge_softc
*sc
= call
->bdc_sc
;
3741 * The call was never scheduled
3747 BRIDGE_LOCK_ASSERT_HELD(sc
);
3749 call
->bdc_flags
|= BDCF_CANCELLING
;
3751 while (call
->bdc_flags
& BDCF_OUTSTANDING
) {
3753 if (if_bridge_debug
& BR_DBGF_DELAYED_CALL
) {
3754 printf("%s: %s call 0x%llx flags 0x%x\n", __func__
,
3755 sc
->sc_if_xname
, (uint64_t)VM_KERNEL_ADDRPERM(call
),
3758 #endif /* BRIDGE_DEBUG */
3759 result
= thread_call_func_cancel(
3760 (thread_call_func_t
)bridge_delayed_callback
, call
, FALSE
);
3764 * We managed to dequeue the delayed call
3766 call
->bdc_flags
&= ~BDCF_OUTSTANDING
;
3769 * Wait for delayed call do be done running
3771 msleep(call
, &sc
->sc_mtx
, PZERO
, __func__
, NULL
);
3774 call
->bdc_flags
&= ~BDCF_CANCELLING
;
3778 * bridge_cleanup_delayed_call:
3780 * Dispose resource allocated for a delayed call
3781 * Assume the delayed call is not queued or running .
3784 bridge_cleanup_delayed_call(struct bridge_delayed_call
*call
)
3787 struct bridge_softc
*sc
= call
->bdc_sc
;
3790 * The call was never scheduled
3796 BRIDGE_LOCK_ASSERT_HELD(sc
);
3798 VERIFY((call
->bdc_flags
& BDCF_OUTSTANDING
) == 0);
3799 VERIFY((call
->bdc_flags
& BDCF_CANCELLING
) == 0);
3801 if (call
->bdc_thread_call
!= NULL
) {
3802 result
= thread_call_free(call
->bdc_thread_call
);
3803 if (result
== FALSE
) {
3804 panic("%s thread_call_free() failed for call %p",
3807 call
->bdc_thread_call
= NULL
;
3814 * Initialize a bridge interface.
3817 bridge_init(struct ifnet
*ifp
)
3819 struct bridge_softc
*sc
= (struct bridge_softc
*)ifp
->if_softc
;
3822 BRIDGE_LOCK_ASSERT_HELD(sc
);
3824 if ((ifnet_flags(ifp
) & IFF_RUNNING
)) {
3828 error
= ifnet_set_flags(ifp
, IFF_RUNNING
, IFF_RUNNING
);
3830 if (bridge_in_bsd_mode(sc
)) {
3832 * Calling bridge_aging_timer() is OK as there are no entries to
3833 * age so we're just going to arm the timer
3835 bridge_aging_timer(sc
);
3838 bstp_init(&sc
->sc_stp
); /* Initialize Spanning Tree */
3840 #endif /* BRIDGESTP */
3848 * Stop the bridge interface.
3851 bridge_ifstop(struct ifnet
*ifp
, int disable
)
3853 #pragma unused(disable)
3854 struct bridge_softc
*sc
= ifp
->if_softc
;
3856 BRIDGE_LOCK_ASSERT_HELD(sc
);
3858 if ((ifnet_flags(ifp
) & IFF_RUNNING
) == 0) {
3862 if (bridge_in_bsd_mode(sc
)) {
3863 bridge_cancel_delayed_call(&sc
->sc_aging_timer
);
3866 bstp_stop(&sc
->sc_stp
);
3867 #endif /* BRIDGESTP */
3869 bridge_rtflush(sc
, IFBF_FLUSHDYN
);
3871 (void) ifnet_set_flags(ifp
, 0, IFF_RUNNING
);
3877 * Enqueue a packet on a bridge member interface.
3881 bridge_enqueue(struct bridge_softc
*sc
, struct ifnet
*dst_ifp
, struct mbuf
*m
)
3887 VERIFY(dst_ifp
!= NULL
);
3890 * We may be sending a fragment so traverse the mbuf
3892 * NOTE: bridge_fragment() is called only when PFIL_HOOKS is enabled.
3896 struct flowadv adv
= { FADV_SUCCESS
};
3899 m
->m_nextpkt
= NULL
;
3901 len
= m
->m_pkthdr
.len
;
3902 mflags
= m
->m_flags
;
3903 m
->m_flags
|= M_PROTO1
; /* set to avoid loops */
3905 bridge_finalize_cksum(dst_ifp
, m
);
3909 * If underlying interface can not do VLAN tag insertion itself
3910 * then attach a packet tag that holds it.
3912 if ((m
->m_flags
& M_VLANTAG
) &&
3913 (dst_ifp
->if_capenable
& IFCAP_VLAN_HWTAGGING
) == 0) {
3914 m
= ether_vlanencap(m
, m
->m_pkthdr
.ether_vtag
);
3916 printf("%s: %s: unable to prepend VLAN "
3917 "header\n", __func__
, dst_ifp
->if_xname
);
3918 (void) ifnet_stat_increment_out(dst_ifp
,
3922 m
->m_flags
&= ~M_VLANTAG
;
3924 #endif /* HAS_IF_CAP */
3926 _error
= dlil_output(dst_ifp
, 0, m
, NULL
, NULL
, 1, &adv
);
3928 /* Preserve existing error value */
3932 } else if (adv
.code
== FADV_FLOW_CONTROLLED
) {
3934 } else if (adv
.code
== FADV_SUSPENDED
) {
3935 error
= EQSUSPENDED
;
3940 (void) ifnet_stat_increment_out(sc
->sc_ifp
, 1, len
, 0);
3942 (void) ifnet_stat_increment_out(sc
->sc_ifp
, 0, 0, 1);
3949 #if HAS_BRIDGE_DUMMYNET
3953 * Receive a queued packet from dummynet and pass it on to the output
3956 * The mbuf has the Ethernet header already attached.
3959 bridge_dummynet(struct mbuf
*m
, struct ifnet
*ifp
)
3961 struct bridge_softc
*sc
;
3963 sc
= ifp
->if_bridge
;
3966 * The packet didnt originate from a member interface. This should only
3967 * ever happen if a member interface is removed while packets are
3975 if (PFIL_HOOKED(&inet_pfil_hook
) || PFIL_HOOKED_INET6
) {
3976 if (bridge_pfil(&m
, sc
->sc_ifp
, ifp
, PFIL_OUT
) != 0) {
3984 (void) bridge_enqueue(sc
, ifp
, m
);
3986 #endif /* HAS_BRIDGE_DUMMYNET */
3988 #if BRIDGE_MEMBER_OUT_FILTER
3990 * bridge_member_output:
3992 * Send output from a bridge member interface. This
3993 * performs the bridging function for locally originated
3996 * The mbuf has the Ethernet header already attached. We must
3997 * enqueue or free the mbuf before returning.
4000 bridge_member_output(struct ifnet
*ifp
, struct mbuf
*m
, struct sockaddr
*sa
,
4003 #pragma unused(sa, rt)
4004 struct ether_header
*eh
;
4005 struct ifnet
*dst_if
;
4006 struct bridge_softc
*sc
;
4010 if (if_bridge_debug
& BR_DBGF_OUTPUT
) {
4011 printf("%s: ifp %s\n", __func__
, ifp
->if_xname
);
4013 #endif /* BRIDGE_DEBUG */
4015 if (m
->m_len
< ETHER_HDR_LEN
) {
4016 m
= m_pullup(m
, ETHER_HDR_LEN
);
4022 eh
= mtod(m
, struct ether_header
*);
4023 sc
= ifp
->if_bridge
;
4024 vlan
= VLANTAGOF(m
);
4029 * APPLE MODIFICATION
4030 * If the packet is an 802.1X ethertype, then only send on the
4031 * original output interface.
4033 if (eh
->ether_type
== htons(ETHERTYPE_PAE
)) {
4039 * If bridge is down, but the original output interface is up,
4040 * go ahead and send out that interface. Otherwise, the packet
4043 if ((sc
->sc_ifp
->if_flags
& IFF_RUNNING
) == 0) {
4049 * If the packet is a multicast, or we don't know a better way to
4050 * get there, send to all interfaces.
4052 if (ETHER_IS_MULTICAST(eh
->ether_dhost
)) {
4055 dst_if
= bridge_rtlookup(sc
, eh
->ether_dhost
, vlan
);
4057 if (dst_if
== NULL
) {
4058 struct bridge_iflist
*bif
;
4060 int error
= 0, used
= 0;
4064 BRIDGE_LOCK2REF(sc
, error
);
4070 TAILQ_FOREACH(bif
, &sc
->sc_iflist
, bif_next
) {
4071 dst_if
= bif
->bif_ifp
;
4073 if (dst_if
->if_type
== IFT_GIF
) {
4076 if ((dst_if
->if_flags
& IFF_RUNNING
) == 0) {
4081 * If this is not the original output interface,
4082 * and the interface is participating in spanning
4083 * tree, make sure the port is in a state that
4084 * allows forwarding.
4086 if (dst_if
!= ifp
&& (bif
->bif_ifflags
& IFBIF_STP
) &&
4087 bif
->bif_stp
.bp_state
== BSTP_IFSTATE_DISCARDING
) {
4091 if (LIST_NEXT(bif
, bif_next
) == NULL
) {
4095 mc
= m_copypacket(m
, M_DONTWAIT
);
4097 (void) ifnet_stat_increment_out(
4098 sc
->sc_ifp
, 0, 0, 1);
4103 (void) bridge_enqueue(sc
, dst_if
, mc
);
4114 * XXX Spanning tree consideration here?
4118 if ((dst_if
->if_flags
& IFF_RUNNING
) == 0) {
4125 (void) bridge_enqueue(sc
, dst_if
, m
);
4128 #endif /* BRIDGE_MEMBER_OUT_FILTER */
4133 * This routine is called externally from above only when if_bridge_txstart
4134 * is disabled; otherwise it is called internally by bridge_start().
4137 bridge_output(struct ifnet
*ifp
, struct mbuf
*m
)
4139 struct bridge_softc
*sc
= ifnet_softc(ifp
);
4140 struct ether_header
*eh
;
4141 struct ifnet
*dst_if
;
4144 eh
= mtod(m
, struct ether_header
*);
4148 ASSERT(bridge_in_bsd_mode(sc
));
4150 if (!(m
->m_flags
& (M_BCAST
| M_MCAST
))) {
4151 dst_if
= bridge_rtlookup(sc
, eh
->ether_dhost
, 0);
4154 (void) ifnet_stat_increment_out(ifp
, 1, m
->m_pkthdr
.len
, 0);
4157 if (sc
->sc_bpf_output
) {
4158 bridge_bpf_output(ifp
, m
);
4162 if (dst_if
== NULL
) {
4163 /* callee will unlock */
4164 bridge_broadcast(sc
, ifp
, m
, 0);
4167 error
= bridge_enqueue(sc
, dst_if
, m
);
4174 bridge_finalize_cksum(struct ifnet
*ifp
, struct mbuf
*m
)
4176 struct ether_header
*eh
= mtod(m
, struct ether_header
*);
4177 uint32_t sw_csum
, hwcap
;
4180 hwcap
= (ifp
->if_hwassist
| CSUM_DATA_VALID
);
4185 /* do in software what the hardware cannot */
4186 sw_csum
= m
->m_pkthdr
.csum_flags
& ~IF_HWASSIST_CSUM_FLAGS(hwcap
);
4187 sw_csum
&= IF_HWASSIST_CSUM_MASK
;
4189 switch (ntohs(eh
->ether_type
)) {
4191 if ((hwcap
& CSUM_PARTIAL
) && !(sw_csum
& CSUM_DELAY_DATA
) &&
4192 (m
->m_pkthdr
.csum_flags
& CSUM_DELAY_DATA
)) {
4193 if (m
->m_pkthdr
.csum_flags
& CSUM_TCP
) {
4195 sizeof(*eh
) + sizeof(struct ip
);
4197 m
->m_pkthdr
.csum_data
& 0xffff;
4198 m
->m_pkthdr
.csum_flags
|=
4199 (CSUM_DATA_VALID
| CSUM_PARTIAL
);
4200 m
->m_pkthdr
.csum_tx_stuff
= (ulpoff
+ start
);
4201 m
->m_pkthdr
.csum_tx_start
= start
;
4203 sw_csum
|= (CSUM_DELAY_DATA
&
4204 m
->m_pkthdr
.csum_flags
);
4207 (void) in_finalize_cksum(m
, sizeof(*eh
), sw_csum
);
4211 case ETHERTYPE_IPV6
:
4212 if ((hwcap
& CSUM_PARTIAL
) &&
4213 !(sw_csum
& CSUM_DELAY_IPV6_DATA
) &&
4214 (m
->m_pkthdr
.csum_flags
& CSUM_DELAY_IPV6_DATA
)) {
4215 if (m
->m_pkthdr
.csum_flags
& CSUM_TCPIPV6
) {
4217 sizeof(*eh
) + sizeof(struct ip6_hdr
);
4219 m
->m_pkthdr
.csum_data
& 0xffff;
4220 m
->m_pkthdr
.csum_flags
|=
4221 (CSUM_DATA_VALID
| CSUM_PARTIAL
);
4222 m
->m_pkthdr
.csum_tx_stuff
= (ulpoff
+ start
);
4223 m
->m_pkthdr
.csum_tx_start
= start
;
4225 sw_csum
|= (CSUM_DELAY_IPV6_DATA
&
4226 m
->m_pkthdr
.csum_flags
);
4229 (void) in6_finalize_cksum(m
, sizeof(*eh
), -1, -1, sw_csum
);
/*
 * bridge_start:
 *
 *	Start output on a bridge.
 *
 *	This routine is invoked by the start worker thread; because we never
 *	call it directly, there is no need to deploy any serialization
 *	mechanism other than what's already used by the worker thread, i.e.
 *	this is already single threaded.
 *
 *	This routine is called only when if_bridge_txstart is enabled.
 */
static void
bridge_start(struct ifnet *ifp)
{
	struct mbuf *m;

	for (;;) {
		/* Drain the interface send queue until it is empty. */
		if (ifnet_dequeue(ifp, &m) != 0) {
			break;
		}

		(void) bridge_output(ifp, m);
	}
}
4264 * The forwarding function of the bridge.
4266 * NOTE: Releases the lock on return.
4269 bridge_forward(struct bridge_softc
*sc
, struct bridge_iflist
*sbif
,
4272 struct bridge_iflist
*dbif
;
4273 struct ifnet
*src_if
, *dst_if
, *ifp
;
4274 struct ether_header
*eh
;
4279 BRIDGE_LOCK_ASSERT_HELD(sc
);
4280 ASSERT(bridge_in_bsd_mode(sc
));
4283 if (if_bridge_debug
& BR_DBGF_OUTPUT
) {
4284 printf("%s: %s m 0x%llx\n", __func__
, sc
->sc_ifp
->if_xname
,
4285 (uint64_t)VM_KERNEL_ADDRPERM(m
));
4287 #endif /* BRIDGE_DEBUG */
4289 src_if
= m
->m_pkthdr
.rcvif
;
4292 (void) ifnet_stat_increment_in(ifp
, 1, m
->m_pkthdr
.len
, 0);
4293 vlan
= VLANTAGOF(m
);
4296 if ((sbif
->bif_ifflags
& IFBIF_STP
) &&
4297 sbif
->bif_stp
.bp_state
== BSTP_IFSTATE_DISCARDING
) {
4301 eh
= mtod(m
, struct ether_header
*);
4302 dst
= eh
->ether_dhost
;
4304 /* If the interface is learning, record the address. */
4305 if (sbif
->bif_ifflags
& IFBIF_LEARNING
) {
4306 error
= bridge_rtupdate(sc
, eh
->ether_shost
, vlan
,
4307 sbif
, 0, IFBAF_DYNAMIC
);
4309 * If the interface has addresses limits then deny any source
4310 * that is not in the cache.
4312 if (error
&& sbif
->bif_addrmax
) {
4317 if ((sbif
->bif_ifflags
& IFBIF_STP
) != 0 &&
4318 sbif
->bif_stp
.bp_state
== BSTP_IFSTATE_LEARNING
) {
4323 * At this point, the port either doesn't participate
4324 * in spanning tree or it is in the forwarding state.
4328 * If the packet is unicast, destined for someone on
4329 * "this" side of the bridge, drop it.
4331 if ((m
->m_flags
& (M_BCAST
| M_MCAST
)) == 0) {
4332 dst_if
= bridge_rtlookup(sc
, dst
, vlan
);
4333 if (src_if
== dst_if
) {
4338 * Check if its a reserved multicast address, any address
4339 * listed in 802.1D section 7.12.6 may not be forwarded by the
4341 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
4343 if (dst
[0] == 0x01 && dst
[1] == 0x80 &&
4344 dst
[2] == 0xc2 && dst
[3] == 0x00 &&
4345 dst
[4] == 0x00 && dst
[5] <= 0x0f) {
4350 /* ...forward it to all interfaces. */
4351 atomic_add_64(&ifp
->if_imcasts
, 1);
4356 * If we have a destination interface which is a member of our bridge,
4357 * OR this is a unicast packet, push it through the bpf(4) machinery.
4358 * For broadcast or multicast packets, don't bother because it will
4359 * be reinjected into ether_input. We do this before we pass the packets
4360 * through the pfil(9) framework, as it is possible that pfil(9) will
4361 * drop the packet, or possibly modify it, making it difficult to debug
4362 * firewall issues on the bridge.
4365 if (eh
->ether_type
== htons(ETHERTYPE_RSN_PREAUTH
) ||
4366 dst_if
!= NULL
|| (m
->m_flags
& (M_BCAST
| M_MCAST
)) == 0) {
4367 m
->m_pkthdr
.rcvif
= ifp
;
4368 if (sc
->sc_bpf_input
) {
4369 bridge_bpf_input(ifp
, m
);
4372 #endif /* NBPFILTER */
4374 #if defined(PFIL_HOOKS)
4375 /* run the packet filter */
4376 if (PFIL_HOOKED(&inet_pfil_hook
) || PFIL_HOOKED_INET6
) {
4378 if (bridge_pfil(&m
, ifp
, src_if
, PFIL_IN
) != 0) {
4386 #endif /* PFIL_HOOKS */
4388 if (dst_if
== NULL
) {
4389 bridge_broadcast(sc
, src_if
, m
, 1);
4394 * At this point, we're dealing with a unicast frame
4395 * going to a different interface.
4397 if ((dst_if
->if_flags
& IFF_RUNNING
) == 0) {
4401 dbif
= bridge_lookup_member_if(sc
, dst_if
);
4403 /* Not a member of the bridge (anymore?) */
4407 /* Private segments can not talk to each other */
4408 if (sbif
->bif_ifflags
& dbif
->bif_ifflags
& IFBIF_PRIVATE
) {
4412 if ((dbif
->bif_ifflags
& IFBIF_STP
) &&
4413 dbif
->bif_stp
.bp_state
== BSTP_IFSTATE_DISCARDING
) {
4418 /* APPLE MODIFICATION <rdar:6985737> */
4419 if ((dst_if
->if_extflags
& IFEXTF_DHCPRA_MASK
) != 0) {
4420 m
= ip_xdhcpra_output(dst_if
, m
);
4422 ++sc
->sc_sc
.sc_ifp
.if_xdhcpra
;
4426 #endif /* HAS_DHCPRA_MASK */
4430 #if defined(PFIL_HOOKS)
4431 if (PFIL_HOOKED(&inet_pfil_hook
) || PFIL_HOOKED_INET6
) {
4432 if (bridge_pfil(&m
, ifp
, dst_if
, PFIL_OUT
) != 0) {
4439 #endif /* PFIL_HOOKS */
4441 (void) bridge_enqueue(sc
, dst_if
, m
);
4451 char *ether_ntop(char *, size_t, const u_char
*);
4453 __private_extern__
char *
4454 ether_ntop(char *buf
, size_t len
, const u_char
*ap
)
4456 snprintf(buf
, len
, "%02x:%02x:%02x:%02x:%02x:%02x",
4457 ap
[0], ap
[1], ap
[2], ap
[3], ap
[4], ap
[5]);
4462 #endif /* BRIDGE_DEBUG */
4467 * Filter input from a member interface. Queue the packet for
4468 * bridging if it is not for us.
4470 __private_extern__ errno_t
4471 bridge_input(struct ifnet
*ifp
, struct mbuf
*m
, void *frame_header
)
4473 struct bridge_softc
*sc
= ifp
->if_bridge
;
4474 struct bridge_iflist
*bif
, *bif2
;
4476 struct ether_header
*eh
;
4477 struct mbuf
*mc
, *mc2
;
4481 ASSERT(bridge_in_bsd_mode(sc
));
4483 if (if_bridge_debug
& BR_DBGF_INPUT
) {
4484 printf("%s: %s from %s m 0x%llx data 0x%llx\n", __func__
,
4485 sc
->sc_ifp
->if_xname
, ifp
->if_xname
,
4486 (uint64_t)VM_KERNEL_ADDRPERM(m
),
4487 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m
)));
4489 #endif /* BRIDGE_DEBUG */
4491 if ((sc
->sc_ifp
->if_flags
& IFF_RUNNING
) == 0) {
4493 if (if_bridge_debug
& BR_DBGF_INPUT
) {
4494 printf("%s: %s not running passing along\n",
4495 __func__
, sc
->sc_ifp
->if_xname
);
4497 #endif /* BRIDGE_DEBUG */
4502 vlan
= VLANTAGOF(m
);
4506 * Implement support for bridge monitoring. If this flag has been
4507 * set on this interface, discard the packet once we push it through
4508 * the bpf(4) machinery, but before we do, increment the byte and
4509 * packet counters associated with this interface.
4511 if ((bifp
->if_flags
& IFF_MONITOR
) != 0) {
4512 m
->m_pkthdr
.rcvif
= bifp
;
4513 BRIDGE_BPF_MTAP_INPUT(sc
, m
);
4514 (void) ifnet_stat_increment_in(bifp
, 1, m
->m_pkthdr
.len
, 0);
4518 #endif /* IFF_MONITOR */
4521 * Need to clear the promiscous flags otherwise it will be
4522 * dropped by DLIL after processing filters
4524 if ((mbuf_flags(m
) & MBUF_PROMISC
)) {
4525 mbuf_setflags_mask(m
, 0, MBUF_PROMISC
);
4529 bif
= bridge_lookup_member_if(sc
, ifp
);
4533 if (if_bridge_debug
& BR_DBGF_INPUT
) {
4534 printf("%s: %s bridge_lookup_member_if failed\n",
4535 __func__
, sc
->sc_ifp
->if_xname
);
4537 #endif /* BRIDGE_DEBUG */
4541 if (bif
->bif_flags
& BIFF_HOST_FILTER
) {
4542 error
= bridge_host_filter(bif
, m
);
4544 if (if_bridge_debug
& BR_DBGF_INPUT
) {
4545 printf("%s: %s bridge_host_filter failed\n",
4546 __func__
, bif
->bif_ifp
->if_xname
);
4553 eh
= mtod(m
, struct ether_header
*);
4557 if (m
->m_flags
& (M_BCAST
| M_MCAST
)) {
4559 if (if_bridge_debug
& BR_DBGF_MCAST
) {
4560 if ((m
->m_flags
& M_MCAST
)) {
4561 printf("%s: multicast: "
4562 "%02x:%02x:%02x:%02x:%02x:%02x\n",
4564 eh
->ether_dhost
[0], eh
->ether_dhost
[1],
4565 eh
->ether_dhost
[2], eh
->ether_dhost
[3],
4566 eh
->ether_dhost
[4], eh
->ether_dhost
[5]);
4569 #endif /* BRIDGE_DEBUG */
4571 /* Tap off 802.1D packets; they do not get forwarded. */
4572 if (memcmp(eh
->ether_dhost
, bstp_etheraddr
,
4573 ETHER_ADDR_LEN
) == 0) {
4575 m
= bstp_input(&bif
->bif_stp
, ifp
, m
);
4576 #else /* !BRIDGESTP */
4579 #endif /* !BRIDGESTP */
4586 if ((bif
->bif_ifflags
& IFBIF_STP
) &&
4587 bif
->bif_stp
.bp_state
== BSTP_IFSTATE_DISCARDING
) {
4593 * Make a deep copy of the packet and enqueue the copy
4594 * for bridge processing; return the original packet for
4597 mc
= m_dup(m
, M_DONTWAIT
);
4604 * Perform the bridge forwarding function with the copy.
4606 * Note that bridge_forward calls BRIDGE_UNLOCK
4608 bridge_forward(sc
, bif
, mc
);
4611 * Reinject the mbuf as arriving on the bridge so we have a
4612 * chance at claiming multicast packets. We can not loop back
4613 * here from ether_input as a bridge is never a member of a
4616 VERIFY(bifp
->if_bridge
== NULL
);
4617 mc2
= m_dup(m
, M_DONTWAIT
);
4619 /* Keep the layer3 header aligned */
4620 int i
= min(mc2
->m_pkthdr
.len
, max_protohdr
);
4621 mc2
= m_copyup(mc2
, i
, ETHER_ALIGN
);
4624 /* mark packet as arriving on the bridge */
4625 mc2
->m_pkthdr
.rcvif
= bifp
;
4626 mc2
->m_pkthdr
.pkt_hdr
= mbuf_data(mc2
);
4629 if (sc
->sc_bpf_input
) {
4630 bridge_bpf_input(bifp
, mc2
);
4632 #endif /* NBPFILTER */
4633 (void) mbuf_setdata(mc2
,
4634 (char *)mbuf_data(mc2
) + ETHER_HDR_LEN
,
4635 mbuf_len(mc2
) - ETHER_HDR_LEN
);
4636 (void) mbuf_pkthdr_adjustlen(mc2
, -ETHER_HDR_LEN
);
4638 (void) ifnet_stat_increment_in(bifp
, 1,
4639 mbuf_pkthdr_len(mc2
), 0);
4642 if (if_bridge_debug
& BR_DBGF_MCAST
) {
4643 printf("%s: %s mcast for us\n", __func__
,
4644 sc
->sc_ifp
->if_xname
);
4646 #endif /* BRIDGE_DEBUG */
4648 dlil_input_packet_list(bifp
, mc2
);
4651 /* Return the original packet for local processing. */
4655 if ((bif
->bif_ifflags
& IFBIF_STP
) &&
4656 bif
->bif_stp
.bp_state
== BSTP_IFSTATE_DISCARDING
) {
4662 #define CARP_CHECK_WE_ARE_DST(iface) \
4663 ((iface)->if_carp &&\
4664 carp_forus((iface)->if_carp, eh->ether_dhost))
4665 #define CARP_CHECK_WE_ARE_SRC(iface) \
4666 ((iface)->if_carp &&\
4667 carp_forus((iface)->if_carp, eh->ether_shost))
4669 #define CARP_CHECK_WE_ARE_DST(iface) 0
4670 #define CARP_CHECK_WE_ARE_SRC(iface) 0
4674 #define PFIL_HOOKED_INET6 PFIL_HOOKED(&inet6_pfil_hook)
4676 #define PFIL_HOOKED_INET6 0
4679 #if defined(PFIL_HOOKS)
4680 #define PFIL_PHYS(sc, ifp, m) do { \
4681 if (pfil_local_phys && \
4682 (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) { \
4683 if (bridge_pfil(&m, NULL, ifp, \
4684 PFIL_IN) != 0 || m == NULL) { \
4685 BRIDGE_UNLOCK(sc); \
4690 #else /* PFIL_HOOKS */
4691 #define PFIL_PHYS(sc, ifp, m)
4692 #endif /* PFIL_HOOKS */
4694 #define GRAB_OUR_PACKETS(iface) \
4695 if ((iface)->if_type == IFT_GIF) \
4697 /* It is destined for us. */ \
4698 if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, \
4699 ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST((iface))) { \
4700 if ((iface)->if_type == IFT_BRIDGE) { \
4701 BRIDGE_BPF_MTAP_INPUT(sc, m); \
4702 /* Filter on the physical interface. */ \
4703 PFIL_PHYS(sc, iface, m); \
4705 if (bif->bif_ifflags & IFBIF_LEARNING) { \
4706 error = bridge_rtupdate(sc, eh->ether_shost, \
4707 vlan, bif, 0, IFBAF_DYNAMIC); \
4708 if (error && bif->bif_addrmax) { \
4709 BRIDGE_UNLOCK(sc); \
4710 return (EJUSTRETURN); \
4713 m->m_pkthdr.rcvif = iface; \
4714 BRIDGE_UNLOCK(sc); \
4718 /* We just received a packet that we sent out. */ \
4719 if (memcmp(IF_LLADDR((iface)), eh->ether_shost, \
4720 ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_SRC((iface))) { \
4721 BRIDGE_UNLOCK(sc); \
4722 return (EJUSTRETURN); \
4729 * If the packet is for us, set the packets source as the
4730 * bridge, and return the packet back to ether_input for
4733 if (memcmp(eh
->ether_dhost
, IF_LLADDR(bifp
),
4734 ETHER_ADDR_LEN
) == 0 || CARP_CHECK_WE_ARE_DST(bifp
)) {
4735 /* Mark the packet as arriving on the bridge interface */
4736 (void) mbuf_pkthdr_setrcvif(m
, bifp
);
4737 mbuf_pkthdr_setheader(m
, frame_header
);
4740 * If the interface is learning, and the source
4741 * address is valid and not multicast, record
4744 if (bif
->bif_ifflags
& IFBIF_LEARNING
) {
4745 (void) bridge_rtupdate(sc
, eh
->ether_shost
,
4746 vlan
, bif
, 0, IFBAF_DYNAMIC
);
4749 BRIDGE_BPF_MTAP_INPUT(sc
, m
);
4751 (void) mbuf_setdata(m
, (char *)mbuf_data(m
) + ETHER_HDR_LEN
,
4752 mbuf_len(m
) - ETHER_HDR_LEN
);
4753 (void) mbuf_pkthdr_adjustlen(m
, -ETHER_HDR_LEN
);
4755 (void) ifnet_stat_increment_in(bifp
, 1, mbuf_pkthdr_len(m
), 0);
4760 if (if_bridge_debug
& BR_DBGF_INPUT
) {
4761 printf("%s: %s packet for bridge\n", __func__
,
4762 sc
->sc_ifp
->if_xname
);
4764 #endif /* BRIDGE_DEBUG */
4766 dlil_input_packet_list(bifp
, m
);
4772 * if the destination of the packet is for the MAC address of
4773 * the member interface itself, then we don't need to forward
4774 * it -- just pass it back. Note that it'll likely just be
4775 * dropped by the stack, but if something else is bound to
4776 * the interface directly (for example, the wireless stats
4777 * protocol -- although that actually uses BPF right now),
4778 * then it will consume the packet
4780 * ALSO, note that we do this check AFTER checking for the
4781 * bridge's own MAC address, because the bridge may be
4782 * using the SAME MAC address as one of its interfaces
4784 if (memcmp(eh
->ether_dhost
, IF_LLADDR(ifp
), ETHER_ADDR_LEN
) == 0) {
4786 #ifdef VERY_VERY_VERY_DIAGNOSTIC
4787 printf("%s: not forwarding packet bound for member "
4788 "interface\n", __func__
);
4794 /* Now check the all bridge members. */
4795 TAILQ_FOREACH(bif2
, &sc
->sc_iflist
, bif_next
) {
4796 GRAB_OUR_PACKETS(bif2
->bif_ifp
)
4799 #undef CARP_CHECK_WE_ARE_DST
4800 #undef CARP_CHECK_WE_ARE_SRC
4801 #undef GRAB_OUR_PACKETS
4804 * Perform the bridge forwarding function.
4806 * Note that bridge_forward calls BRIDGE_UNLOCK
4808 bridge_forward(sc
, bif
, m
);
4816 * Send a frame to all interfaces that are members of
4817 * the bridge, except for the one on which the packet
4820 * NOTE: Releases the lock on return.
4823 bridge_broadcast(struct bridge_softc
*sc
, struct ifnet
*src_if
,
4824 struct mbuf
*m
, int runfilt
)
4827 #pragma unused(runfilt)
4829 struct bridge_iflist
*dbif
, *sbif
;
4831 struct ifnet
*dst_if
;
4832 int error
= 0, used
= 0;
4834 sbif
= bridge_lookup_member_if(sc
, src_if
);
4836 BRIDGE_LOCK2REF(sc
, error
);
4843 /* Filter on the bridge interface before broadcasting */
4844 if (runfilt
&& (PFIL_HOOKED(&inet_pfil_hook
) || PFIL_HOOKED_INET6
)) {
4845 if (bridge_pfil(&m
, sc
->sc_ifp
, NULL
, PFIL_OUT
) != 0) {
4852 #endif /* PFIL_HOOKS */
4854 TAILQ_FOREACH(dbif
, &sc
->sc_iflist
, bif_next
) {
4855 dst_if
= dbif
->bif_ifp
;
4856 if (dst_if
== src_if
) {
4860 /* Private segments can not talk to each other */
4862 (sbif
->bif_ifflags
& dbif
->bif_ifflags
& IFBIF_PRIVATE
)) {
4866 if ((dbif
->bif_ifflags
& IFBIF_STP
) &&
4867 dbif
->bif_stp
.bp_state
== BSTP_IFSTATE_DISCARDING
) {
4871 if ((dbif
->bif_ifflags
& IFBIF_DISCOVER
) == 0 &&
4872 (m
->m_flags
& (M_BCAST
| M_MCAST
)) == 0) {
4876 if ((dst_if
->if_flags
& IFF_RUNNING
) == 0) {
4880 if (!(dbif
->bif_flags
& BIFF_MEDIA_ACTIVE
)) {
4884 if (TAILQ_NEXT(dbif
, bif_next
) == NULL
) {
4888 mc
= m_dup(m
, M_DONTWAIT
);
4890 (void) ifnet_stat_increment_out(sc
->sc_ifp
,
4898 * Filter on the output interface. Pass a NULL bridge interface
4899 * pointer so we do not redundantly filter on the bridge for
4900 * each interface we broadcast on.
4903 (PFIL_HOOKED(&inet_pfil_hook
) || PFIL_HOOKED_INET6
)) {
4905 /* Keep the layer3 header aligned */
4906 int i
= min(mc
->m_pkthdr
.len
, max_protohdr
);
4907 mc
= m_copyup(mc
, i
, ETHER_ALIGN
);
4909 (void) ifnet_stat_increment_out(
4910 sc
->sc_ifp
, 0, 0, 1);
4914 if (bridge_pfil(&mc
, NULL
, dst_if
, PFIL_OUT
) != 0) {
4921 #endif /* PFIL_HOOKS */
4923 (void) bridge_enqueue(sc
, dst_if
, mc
);
4931 #endif /* PFIL_HOOKS */
4939 * Duplicate a packet out one or more interfaces that are in span mode,
4940 * the original mbuf is unmodified.
4943 bridge_span(struct bridge_softc
*sc
, struct mbuf
*m
)
4945 struct bridge_iflist
*bif
;
4946 struct ifnet
*dst_if
;
4949 if (TAILQ_EMPTY(&sc
->sc_spanlist
)) {
4953 TAILQ_FOREACH(bif
, &sc
->sc_spanlist
, bif_next
) {
4954 dst_if
= bif
->bif_ifp
;
4956 if ((dst_if
->if_flags
& IFF_RUNNING
) == 0) {
4960 mc
= m_copypacket(m
, M_DONTWAIT
);
4962 (void) ifnet_stat_increment_out(sc
->sc_ifp
, 0, 0, 1);
4966 (void) bridge_enqueue(sc
, dst_if
, mc
);
4974 * Add a bridge routing entry.
4977 bridge_rtupdate(struct bridge_softc
*sc
, const uint8_t *dst
, uint16_t vlan
,
4978 struct bridge_iflist
*bif
, int setflags
, uint8_t flags
)
4980 struct bridge_rtnode
*brt
;
4983 BRIDGE_LOCK_ASSERT_HELD(sc
);
4984 ASSERT(bridge_in_bsd_mode(sc
));
4986 /* Check the source address is valid and not multicast. */
4987 if (ETHER_IS_MULTICAST(dst
) ||
4988 (dst
[0] == 0 && dst
[1] == 0 && dst
[2] == 0 &&
4989 dst
[3] == 0 && dst
[4] == 0 && dst
[5] == 0) != 0) {
4994 /* 802.1p frames map to vlan 1 */
5000 * A route for this destination might already exist. If so,
5001 * update it, otherwise create a new one.
5003 if ((brt
= bridge_rtnode_lookup(sc
, dst
, vlan
)) == NULL
) {
5004 if (sc
->sc_brtcnt
>= sc
->sc_brtmax
) {
5005 sc
->sc_brtexceeded
++;
5008 /* Check per interface address limits (if enabled) */
5009 if (bif
->bif_addrmax
&& bif
->bif_addrcnt
>= bif
->bif_addrmax
) {
5010 bif
->bif_addrexceeded
++;
5015 * Allocate a new bridge forwarding node, and
5016 * initialize the expiration time and Ethernet
5019 brt
= zalloc_noblock(bridge_rtnode_pool
);
5023 bzero(brt
, sizeof(struct bridge_rtnode
));
5025 if (bif
->bif_ifflags
& IFBIF_STICKY
) {
5026 brt
->brt_flags
= IFBAF_STICKY
;
5028 brt
->brt_flags
= IFBAF_DYNAMIC
;
5031 memcpy(brt
->brt_addr
, dst
, ETHER_ADDR_LEN
);
5032 brt
->brt_vlan
= vlan
;
5035 if ((error
= bridge_rtnode_insert(sc
, brt
)) != 0) {
5036 zfree(bridge_rtnode_pool
, brt
);
5042 if (if_bridge_debug
& BR_DBGF_RT_TABLE
) {
5043 printf("%s: added %02x:%02x:%02x:%02x:%02x:%02x "
5044 "on %s count %u hashsize %u\n", __func__
,
5045 dst
[0], dst
[1], dst
[2], dst
[3], dst
[4], dst
[5],
5046 sc
->sc_ifp
->if_xname
, sc
->sc_brtcnt
,
5047 sc
->sc_rthash_size
);
5052 if ((brt
->brt_flags
& IFBAF_TYPEMASK
) == IFBAF_DYNAMIC
&&
5053 brt
->brt_dst
!= bif
) {
5054 brt
->brt_dst
->bif_addrcnt
--;
5056 brt
->brt_dst
->bif_addrcnt
++;
5059 if ((flags
& IFBAF_TYPEMASK
) == IFBAF_DYNAMIC
) {
5062 now
= (unsigned long) net_uptime();
5063 brt
->brt_expire
= now
+ sc
->sc_brttimeout
;
5066 brt
->brt_flags
= flags
;
5076 * Lookup the destination interface for an address.
5078 static struct ifnet
*
5079 bridge_rtlookup(struct bridge_softc
*sc
, const uint8_t *addr
, uint16_t vlan
)
5081 struct bridge_rtnode
*brt
;
5083 BRIDGE_LOCK_ASSERT_HELD(sc
);
5085 if ((brt
= bridge_rtnode_lookup(sc
, addr
, vlan
)) == NULL
) {
5089 return brt
->brt_ifp
;
5095 * Trim the routine table so that we have a number
5096 * of routing entries less than or equal to the
5100 bridge_rttrim(struct bridge_softc
*sc
)
5102 struct bridge_rtnode
*brt
, *nbrt
;
5104 BRIDGE_LOCK_ASSERT_HELD(sc
);
5106 /* Make sure we actually need to do this. */
5107 if (sc
->sc_brtcnt
<= sc
->sc_brtmax
) {
5111 /* Force an aging cycle; this might trim enough addresses. */
5113 if (sc
->sc_brtcnt
<= sc
->sc_brtmax
) {
5117 LIST_FOREACH_SAFE(brt
, &sc
->sc_rtlist
, brt_list
, nbrt
) {
5118 if ((brt
->brt_flags
& IFBAF_TYPEMASK
) == IFBAF_DYNAMIC
) {
5119 bridge_rtnode_destroy(sc
, brt
);
5120 if (sc
->sc_brtcnt
<= sc
->sc_brtmax
) {
5128 * bridge_aging_timer:
5130 * Aging periodic timer for the bridge routing table.
5133 bridge_aging_timer(struct bridge_softc
*sc
)
5135 BRIDGE_LOCK_ASSERT_HELD(sc
);
5139 if ((sc
->sc_ifp
->if_flags
& IFF_RUNNING
) &&
5140 (sc
->sc_flags
& SCF_DETACHING
) == 0) {
5141 sc
->sc_aging_timer
.bdc_sc
= sc
;
5142 sc
->sc_aging_timer
.bdc_func
= bridge_aging_timer
;
5143 sc
->sc_aging_timer
.bdc_ts
.tv_sec
= bridge_rtable_prune_period
;
5144 bridge_schedule_delayed_call(&sc
->sc_aging_timer
);
5151 * Perform an aging cycle.
5154 bridge_rtage(struct bridge_softc
*sc
)
5156 struct bridge_rtnode
*brt
, *nbrt
;
5159 BRIDGE_LOCK_ASSERT_HELD(sc
);
5161 now
= (unsigned long) net_uptime();
5163 LIST_FOREACH_SAFE(brt
, &sc
->sc_rtlist
, brt_list
, nbrt
) {
5164 if ((brt
->brt_flags
& IFBAF_TYPEMASK
) == IFBAF_DYNAMIC
) {
5165 if (now
>= brt
->brt_expire
) {
5166 bridge_rtnode_destroy(sc
, brt
);
5175 * Remove all dynamic addresses from the bridge.
5178 bridge_rtflush(struct bridge_softc
*sc
, int full
)
5180 struct bridge_rtnode
*brt
, *nbrt
;
5182 BRIDGE_LOCK_ASSERT_HELD(sc
);
5184 LIST_FOREACH_SAFE(brt
, &sc
->sc_rtlist
, brt_list
, nbrt
) {
5185 if (full
|| (brt
->brt_flags
& IFBAF_TYPEMASK
) == IFBAF_DYNAMIC
) {
5186 bridge_rtnode_destroy(sc
, brt
);
5194 * Remove an address from the table.
5197 bridge_rtdaddr(struct bridge_softc
*sc
, const uint8_t *addr
, uint16_t vlan
)
5199 struct bridge_rtnode
*brt
;
5202 BRIDGE_LOCK_ASSERT_HELD(sc
);
5205 * If vlan is zero then we want to delete for all vlans so the lookup
5206 * may return more than one.
5208 while ((brt
= bridge_rtnode_lookup(sc
, addr
, vlan
)) != NULL
) {
5209 bridge_rtnode_destroy(sc
, brt
);
5213 return found
? 0 : ENOENT
;
5219 * Delete routes to a speicifc member interface.
5222 bridge_rtdelete(struct bridge_softc
*sc
, struct ifnet
*ifp
, int full
)
5224 struct bridge_rtnode
*brt
, *nbrt
;
5226 BRIDGE_LOCK_ASSERT_HELD(sc
);
5228 LIST_FOREACH_SAFE(brt
, &sc
->sc_rtlist
, brt_list
, nbrt
) {
5229 if (brt
->brt_ifp
== ifp
&& (full
||
5230 (brt
->brt_flags
& IFBAF_TYPEMASK
) == IFBAF_DYNAMIC
)) {
5231 bridge_rtnode_destroy(sc
, brt
);
5237 * bridge_rtable_init:
5239 * Initialize the route table for this bridge.
5242 bridge_rtable_init(struct bridge_softc
*sc
)
5246 ASSERT(bridge_in_bsd_mode(sc
));
5248 sc
->sc_rthash
= _MALLOC(sizeof(*sc
->sc_rthash
) * BRIDGE_RTHASH_SIZE
,
5249 M_DEVBUF
, M_WAITOK
| M_ZERO
);
5250 if (sc
->sc_rthash
== NULL
) {
5251 printf("%s: no memory\n", __func__
);
5254 sc
->sc_rthash_size
= BRIDGE_RTHASH_SIZE
;
5256 for (i
= 0; i
< sc
->sc_rthash_size
; i
++) {
5257 LIST_INIT(&sc
->sc_rthash
[i
]);
5260 sc
->sc_rthash_key
= RandomULong();
5262 LIST_INIT(&sc
->sc_rtlist
);
5268 * bridge_rthash_delayed_resize:
5270 * Resize the routing table hash on a delayed thread call.
5273 bridge_rthash_delayed_resize(struct bridge_softc
*sc
)
5275 u_int32_t new_rthash_size
;
5276 struct _bridge_rtnode_list
*new_rthash
= NULL
;
5277 struct _bridge_rtnode_list
*old_rthash
= NULL
;
5279 struct bridge_rtnode
*brt
;
5282 BRIDGE_LOCK_ASSERT_HELD(sc
);
5285 * Four entries per hash bucket is our ideal load factor
5287 if (sc
->sc_brtcnt
< sc
->sc_rthash_size
* 4) {
5292 * Doubling the number of hash buckets may be too simplistic
5293 * especially when facing a spike of new entries
5295 new_rthash_size
= sc
->sc_rthash_size
* 2;
5297 sc
->sc_flags
|= SCF_RESIZING
;
5300 new_rthash
= _MALLOC(sizeof(*sc
->sc_rthash
) * new_rthash_size
,
5301 M_DEVBUF
, M_WAITOK
| M_ZERO
);
5304 sc
->sc_flags
&= ~SCF_RESIZING
;
5306 if (new_rthash
== NULL
) {
5310 if ((sc
->sc_flags
& SCF_DETACHING
)) {
5315 * Fail safe from here on
5317 old_rthash
= sc
->sc_rthash
;
5318 sc
->sc_rthash
= new_rthash
;
5319 sc
->sc_rthash_size
= new_rthash_size
;
5322 * Get a new key to force entries to be shuffled around to reduce
5323 * the likelihood they will land in the same buckets
5325 sc
->sc_rthash_key
= RandomULong();
5327 for (i
= 0; i
< sc
->sc_rthash_size
; i
++) {
5328 LIST_INIT(&sc
->sc_rthash
[i
]);
5331 LIST_FOREACH(brt
, &sc
->sc_rtlist
, brt_list
) {
5332 LIST_REMOVE(brt
, brt_hash
);
5333 (void) bridge_rtnode_hash(sc
, brt
);
5338 if (if_bridge_debug
& BR_DBGF_RT_TABLE
) {
5339 printf("%s: %s new size %u\n", __func__
,
5340 sc
->sc_ifp
->if_xname
, sc
->sc_rthash_size
);
5342 #endif /* BRIDGE_DEBUG */
5344 _FREE(old_rthash
, M_DEVBUF
);
5348 printf("%s: %s failed %d\n", __func__
,
5349 sc
->sc_ifp
->if_xname
, error
);
5350 #endif /* BRIDGE_DEBUG */
5351 if (new_rthash
!= NULL
) {
5352 _FREE(new_rthash
, M_DEVBUF
);
5358 * Resize the number of hash buckets based on the load factor
5359 * Currently only grow
5360 * Failing to resize the hash table is not fatal
5363 bridge_rthash_resize(struct bridge_softc
*sc
)
5365 BRIDGE_LOCK_ASSERT_HELD(sc
);
5367 if ((sc
->sc_flags
& SCF_DETACHING
) || (sc
->sc_flags
& SCF_RESIZING
)) {
5372 * Four entries per hash bucket is our ideal load factor
5374 if (sc
->sc_brtcnt
< sc
->sc_rthash_size
* 4) {
5378 * Hard limit on the size of the routing hash table
5380 if (sc
->sc_rthash_size
>= bridge_rtable_hash_size_max
) {
5384 sc
->sc_resize_call
.bdc_sc
= sc
;
5385 sc
->sc_resize_call
.bdc_func
= bridge_rthash_delayed_resize
;
5386 bridge_schedule_delayed_call(&sc
->sc_resize_call
);
5390 * bridge_rtable_fini:
5392 * Deconstruct the route table for this bridge.
5395 bridge_rtable_fini(struct bridge_softc
*sc
)
5397 KASSERT(sc
->sc_brtcnt
== 0,
5398 ("%s: %d bridge routes referenced", __func__
, sc
->sc_brtcnt
));
5399 if (sc
->sc_rthash
) {
5400 _FREE(sc
->sc_rthash
, M_DEVBUF
);
5401 sc
->sc_rthash
= NULL
;
5406 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
5407 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
5409 #define mix(a, b, c) \
5411 a -= b; a -= c; a ^= (c >> 13); \
5412 b -= c; b -= a; b ^= (a << 8); \
5413 c -= a; c -= b; c ^= (b >> 13); \
5414 a -= b; a -= c; a ^= (c >> 12); \
5415 b -= c; b -= a; b ^= (a << 16); \
5416 c -= a; c -= b; c ^= (b >> 5); \
5417 a -= b; a -= c; a ^= (c >> 3); \
5418 b -= c; b -= a; b ^= (a << 10); \
5419 c -= a; c -= b; c ^= (b >> 15); \
5420 } while ( /*CONSTCOND*/ 0)
5422 static __inline
uint32_t
5423 bridge_rthash(struct bridge_softc
*sc
, const uint8_t *addr
)
5425 uint32_t a
= 0x9e3779b9, b
= 0x9e3779b9, c
= sc
->sc_rthash_key
;
5436 return c
& BRIDGE_RTHASH_MASK(sc
);
/*
 * bridge_rtnode_addr_cmp:
 *
 *	Lexicographically compare two 6-byte Ethernet addresses.
 *	Returns <0, 0, or >0 like memcmp; stops at the first differing byte.
 */
static int
bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
{
	int i, d;

	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
		d = ((int)a[i]) - ((int)b[i]);
	}

	return d;
}
5454 * bridge_rtnode_lookup:
5456 * Look up a bridge route node for the specified destination. Compare the
5457 * vlan id or if zero then just return the first match.
5459 static struct bridge_rtnode
*
5460 bridge_rtnode_lookup(struct bridge_softc
*sc
, const uint8_t *addr
,
5463 struct bridge_rtnode
*brt
;
5467 BRIDGE_LOCK_ASSERT_HELD(sc
);
5468 ASSERT(bridge_in_bsd_mode(sc
));
5470 hash
= bridge_rthash(sc
, addr
);
5471 LIST_FOREACH(brt
, &sc
->sc_rthash
[hash
], brt_hash
) {
5472 dir
= bridge_rtnode_addr_cmp(addr
, brt
->brt_addr
);
5473 if (dir
== 0 && (brt
->brt_vlan
== vlan
|| vlan
== 0)) {
5485 * bridge_rtnode_hash:
5487 * Insert the specified bridge node into the route hash table.
5488 * This is used when adding a new node or to rehash when resizing
5492 bridge_rtnode_hash(struct bridge_softc
*sc
, struct bridge_rtnode
*brt
)
5494 struct bridge_rtnode
*lbrt
;
5498 BRIDGE_LOCK_ASSERT_HELD(sc
);
5500 hash
= bridge_rthash(sc
, brt
->brt_addr
);
5502 lbrt
= LIST_FIRST(&sc
->sc_rthash
[hash
]);
5504 LIST_INSERT_HEAD(&sc
->sc_rthash
[hash
], brt
, brt_hash
);
5509 dir
= bridge_rtnode_addr_cmp(brt
->brt_addr
, lbrt
->brt_addr
);
5510 if (dir
== 0 && brt
->brt_vlan
== lbrt
->brt_vlan
) {
5512 if (if_bridge_debug
& BR_DBGF_RT_TABLE
) {
5513 printf("%s: %s EEXIST "
5514 "%02x:%02x:%02x:%02x:%02x:%02x\n",
5515 __func__
, sc
->sc_ifp
->if_xname
,
5516 brt
->brt_addr
[0], brt
->brt_addr
[1],
5517 brt
->brt_addr
[2], brt
->brt_addr
[3],
5518 brt
->brt_addr
[4], brt
->brt_addr
[5]);
5524 LIST_INSERT_BEFORE(lbrt
, brt
, brt_hash
);
5527 if (LIST_NEXT(lbrt
, brt_hash
) == NULL
) {
5528 LIST_INSERT_AFTER(lbrt
, brt
, brt_hash
);
5531 lbrt
= LIST_NEXT(lbrt
, brt_hash
);
5532 } while (lbrt
!= NULL
);
5535 if (if_bridge_debug
& BR_DBGF_RT_TABLE
) {
5536 printf("%s: %s impossible %02x:%02x:%02x:%02x:%02x:%02x\n",
5537 __func__
, sc
->sc_ifp
->if_xname
,
5538 brt
->brt_addr
[0], brt
->brt_addr
[1], brt
->brt_addr
[2],
5539 brt
->brt_addr
[3], brt
->brt_addr
[4], brt
->brt_addr
[5]);
5548 * bridge_rtnode_insert:
5550 * Insert the specified bridge node into the route table. We
5551 * assume the entry is not already in the table.
5554 bridge_rtnode_insert(struct bridge_softc
*sc
, struct bridge_rtnode
*brt
)
5558 error
= bridge_rtnode_hash(sc
, brt
);
5563 LIST_INSERT_HEAD(&sc
->sc_rtlist
, brt
, brt_list
);
5566 bridge_rthash_resize(sc
);
5572 * bridge_rtnode_destroy:
5574 * Destroy a bridge rtnode.
5577 bridge_rtnode_destroy(struct bridge_softc
*sc
, struct bridge_rtnode
*brt
)
5579 BRIDGE_LOCK_ASSERT_HELD(sc
);
5581 LIST_REMOVE(brt
, brt_hash
);
5583 LIST_REMOVE(brt
, brt_list
);
5585 brt
->brt_dst
->bif_addrcnt
--;
5586 zfree(bridge_rtnode_pool
, brt
);
5591 * bridge_rtable_expire:
5593 * Set the expiry time for all routes on an interface.
5596 bridge_rtable_expire(struct ifnet
*ifp
, int age
)
5598 struct bridge_softc
*sc
= ifp
->if_bridge
;
5599 struct bridge_rtnode
*brt
;
5604 * If the age is zero then flush, otherwise set all the expiry times to
5605 * age for the interface
5608 bridge_rtdelete(sc
, ifp
, IFBF_FLUSHDYN
);
5612 now
= (unsigned long) net_uptime();
5614 LIST_FOREACH(brt
, &sc
->sc_rtlist
, brt_list
) {
5615 /* Cap the expiry time to 'age' */
5616 if (brt
->brt_ifp
== ifp
&&
5617 brt
->brt_expire
> now
+ age
&&
5618 (brt
->brt_flags
& IFBAF_TYPEMASK
) == IFBAF_DYNAMIC
) {
5619 brt
->brt_expire
= now
+ age
;
5627 * bridge_state_change:
5629 * Callback from the bridgestp code when a port changes states.
5632 bridge_state_change(struct ifnet
*ifp
, int state
)
5634 struct bridge_softc
*sc
= ifp
->if_bridge
;
5635 static const char *stpstates
[] = {
5645 log(LOG_NOTICE
, "%s: state changed to %s on %s\n",
5646 sc
->sc_ifp
->if_xname
,
5647 stpstates
[state
], ifp
->if_xname
);
5650 #endif /* BRIDGESTP */
/*
 * NOTE(review): this function is a corrupted extraction — statements are
 * split across lines and many original lines (braces, returns, labels) are
 * missing.  Comments only are added; code bytes are left untouched.
 */
5654 * Send bridge packets through pfil if they are one of the types pfil can deal
5655 * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without
5656 * question.) If *bifp or *ifp are NULL then packet filtering is skipped for
5660 bridge_pfil(struct mbuf
**mp
, struct ifnet
*bifp
, struct ifnet
*ifp
, int dir
)
5662 int snap
, error
, i
, hlen
;
5663 struct ether_header
*eh1
, eh2
;
5664 struct ip_fw_args args
;
5667 u_int16_t ether_type
;
5670 error
= -1; /* Default error if not error == 0 */
5673 /* we may return with the IP fields swapped, ensure its not shared */
5674 KASSERT(M_WRITABLE(*mp
), ("%s: modifying a shared mbuf", __func__
));
/* Fast exit when no filtering subsystem is enabled. */
5677 if (pfil_bridge
== 0 && pfil_member
== 0 && pfil_ipfw
== 0) {
5678 return 0; /* filtering is disabled */
/* Make the protocol headers contiguous in the first mbuf. */
5680 i
= min((*mp
)->m_pkthdr
.len
, max_protohdr
);
5681 if ((*mp
)->m_len
< i
) {
5682 *mp
= m_pullup(*mp
, i
);
5684 printf("%s: m_pullup failed\n", __func__
);
/* Read the Ethernet header and its type field (host order). */
5689 eh1
= mtod(*mp
, struct ether_header
*);
5690 ether_type
= ntohs(eh1
->ether_type
);
5693 * Check for SNAP/LLC.
/* A type field below ETHERMTU is an 802.3 length — look for SNAP/LLC. */
5695 if (ether_type
< ETHERMTU
) {
5696 struct llc
*llc2
= (struct llc
*)(eh1
+ 1);
5698 if ((*mp
)->m_len
>= ETHER_HDR_LEN
+ 8 &&
5699 llc2
->llc_dsap
== LLC_SNAP_LSAP
&&
5700 llc2
->llc_ssap
== LLC_SNAP_LSAP
&&
5701 llc2
->llc_control
== LLC_UI
) {
/* Use the inner ethertype carried in the SNAP header. */
5702 ether_type
= htons(llc2
->llc_un
.type_snap
.ether_type
);
5708 * If we're trying to filter bridge traffic, don't look at anything
5709 * other than IP and ARP traffic. If the filter doesn't understand
5710 * IPv6, don't allow IPv6 through the bridge either. This is lame
5711 * since if we really wanted, say, an AppleTalk filter, we are hosed,
5712 * but of course we don't have an AppleTalk filter to begin with.
5713 * (Note that since pfil doesn't understand ARP it will pass *ALL*
/* Dispatch on ethertype; non-IP/ARP handling elided in this extraction. */
5716 switch (ether_type
) {
5718 case ETHERTYPE_REVARP
:
5719 if (pfil_ipfw_arp
== 0) {
5720 return 0; /* Automatically pass */
5726 case ETHERTYPE_IPV6
:
5731 * Check to see if the user wants to pass non-ip
5732 * packets, these will not be checked by pfil(9) and
5733 * passed unconditionally so the default is to drop.
5740 /* Strip off the Ethernet header and keep a copy. */
5741 m_copydata(*mp
, 0, ETHER_HDR_LEN
, (caddr_t
)&eh2
);
5742 m_adj(*mp
, ETHER_HDR_LEN
);
5744 /* Strip off snap header, if present */
5746 m_copydata(*mp
, 0, sizeof(struct llc
), (caddr_t
)&llc1
);
5747 m_adj(*mp
, sizeof(struct llc
));
5751 * Check the IP header for alignment and errors
/* Inbound only: sanity-check IPv4/IPv6 headers before filtering. */
5753 if (dir
== PFIL_IN
) {
5754 switch (ether_type
) {
5756 error
= bridge_ip_checkbasic(mp
);
5759 case ETHERTYPE_IPV6
:
5760 error
= bridge_ip6_checkbasic(mp
);
/* ipfw processing for outbound packets on a member interface. */
5771 if (IPFW_LOADED
&& pfil_ipfw
!= 0 && dir
== PFIL_OUT
&& ifp
!= NULL
) {
5773 args
.rule
= ip_dn_claim_rule(*mp
);
5774 if (args
.rule
!= NULL
&& fw_one_pass
) {
5775 goto ipfwpass
; /* packet already partially processed */
5779 args
.next_hop
= NULL
;
5781 args
.inp
= NULL
; /* used by ipfw uid/gid/jail rules */
5782 i
= ip_fw_chk_ptr(&args
);
/* Dummynet verdict: re-add Ethernet header and hand the packet off. */
5789 if (DUMMYNET_LOADED
&& (i
== IP_FW_DUMMYNET
)) {
5790 /* put the Ethernet header back on */
5791 M_PREPEND(*mp
, ETHER_HDR_LEN
, M_DONTWAIT
, 0);
5795 bcopy(&eh2
, mtod(*mp
, caddr_t
), ETHER_HDR_LEN
);
5798 * Pass the pkt to dummynet, which consumes it. The
5799 * packet will return to us via bridge_dummynet().
5802 ip_dn_io_ptr(mp
, DN_TO_IFB_FWD
, &args
, DN_CLIENT_IPFW
);
5806 if (i
!= IP_FW_PASS
) { /* drop */
5815 * Run the packet through pfil
5817 switch (ether_type
) {
5820 * before calling the firewall, swap fields the same as
5821 * IP does. here we assume the header is contiguous
/* Convert ip_len/ip_off to host order for the hooks (restored below). */
5823 ip
= mtod(*mp
, struct ip
*);
5825 ip
->ip_len
= ntohs(ip
->ip_len
);
5826 ip
->ip_off
= ntohs(ip
->ip_off
);
5829 * Run pfil on the member interface and the bridge, both can
5830 * be skipped by clearing pfil_member or pfil_bridge.
5833 * in_if -> bridge_if -> out_if
5835 if (pfil_bridge
&& dir
== PFIL_OUT
&& bifp
!= NULL
) {
5836 error
= pfil_run_hooks(&inet_pfil_hook
, mp
, bifp
,
5840 if (*mp
== NULL
|| error
!= 0) { /* filter may consume */
5844 if (pfil_member
&& ifp
!= NULL
) {
5845 error
= pfil_run_hooks(&inet_pfil_hook
, mp
, ifp
,
5849 if (*mp
== NULL
|| error
!= 0) { /* filter may consume */
5853 if (pfil_bridge
&& dir
== PFIL_IN
&& bifp
!= NULL
) {
5854 error
= pfil_run_hooks(&inet_pfil_hook
, mp
, bifp
,
5858 if (*mp
== NULL
|| error
!= 0) { /* filter may consume */
5862 /* check if we need to fragment the packet */
5863 if (pfil_member
&& ifp
!= NULL
&& dir
== PFIL_OUT
) {
5864 i
= (*mp
)->m_pkthdr
.len
;
5865 if (i
> ifp
->if_mtu
) {
5866 error
= bridge_fragment(ifp
, *mp
, &eh2
, snap
,
5872 /* Recalculate the ip checksum and restore byte ordering */
5873 ip
= mtod(*mp
, struct ip
*);
5874 hlen
= ip
->ip_hl
<< 2;
5875 if (hlen
< sizeof(struct ip
)) {
5878 if (hlen
> (*mp
)->m_len
) {
5879 if ((*mp
= m_pullup(*mp
, hlen
)) == 0) {
5882 ip
= mtod(*mp
, struct ip
*);
/* Restore network byte order after the hooks ran. */
5887 ip
->ip_len
= htons(ip
->ip_len
);
5888 ip
->ip_off
= htons(ip
->ip_off
);
/* Recompute the header checksum; fast path for a no-options header. */
5890 if (hlen
== sizeof(struct ip
)) {
5891 ip
->ip_sum
= in_cksum_hdr(ip
);
5893 ip
->ip_sum
= in_cksum(*mp
, hlen
);
/* IPv6: same in_if -> bridge_if -> out_if hook ordering as IPv4. */
5898 case ETHERTYPE_IPV6
:
5899 if (pfil_bridge
&& dir
== PFIL_OUT
&& bifp
!= NULL
) {
5900 error
= pfil_run_hooks(&inet6_pfil_hook
, mp
, bifp
,
5904 if (*mp
== NULL
|| error
!= 0) { /* filter may consume */
5908 if (pfil_member
&& ifp
!= NULL
) {
5909 error
= pfil_run_hooks(&inet6_pfil_hook
, mp
, ifp
,
5913 if (*mp
== NULL
|| error
!= 0) { /* filter may consume */
5917 if (pfil_bridge
&& dir
== PFIL_IN
&& bifp
!= NULL
) {
5918 error
= pfil_run_hooks(&inet6_pfil_hook
, mp
, bifp
,
5938 * Finally, put everything back the way it was and return
/* Re-prepend SNAP header (if one was stripped) and the Ethernet header. */
5941 M_PREPEND(*mp
, sizeof(struct llc
), M_DONTWAIT
, 0);
5945 bcopy(&llc1
, mtod(*mp
, caddr_t
), sizeof(struct llc
));
5948 M_PREPEND(*mp
, ETHER_HDR_LEN
, M_DONTWAIT
, 0);
5952 bcopy(&eh2
, mtod(*mp
, caddr_t
), ETHER_HDR_LEN
);
/* NOTE(review): fragmented extraction — comments only, code bytes untouched. */
5963 * Perform basic checks on header size since
5964 * pfil assumes ip_input has already processed
5965 * it for it. Cut-and-pasted from ip_input.c.
5966 * Given how simple the IPv6 version is,
5967 * does the IPv4 version really need to be
5970 * XXX Should we update ipstat here, or not?
5971 * XXX Right now we update ipstat but not
5975 bridge_ip_checkbasic(struct mbuf
**mp
)
5977 struct mbuf
*m
= *mp
;
/* Misaligned header: copy up into a fresh, aligned mbuf. */
5986 if (IP_HDR_ALIGNED_P(mtod(m
, caddr_t
)) == 0) {
5987 /* max_linkhdr is already rounded up to nearest 4-byte */
5988 if ((m
= m_copyup(m
, sizeof(struct ip
),
5989 max_linkhdr
)) == NULL
) {
5990 /* XXXJRT new stat, please */
5991 ipstat
.ips_toosmall
++;
/* Aligned but short: pull the base IP header into the first mbuf. */
5994 } else if (__predict_false(m
->m_len
< sizeof(struct ip
))) {
5995 if ((m
= m_pullup(m
, sizeof(struct ip
))) == NULL
) {
5996 ipstat
.ips_toosmall
++;
6000 ip
= mtod(m
, struct ip
*);
/* Reject anything that is not IPv4. */
6005 if (ip
->ip_v
!= IPVERSION
) {
6006 ipstat
.ips_badvers
++;
/* Validate the header length field and pull up options if needed. */
6009 hlen
= ip
->ip_hl
<< 2;
6010 if (hlen
< sizeof(struct ip
)) { /* minimum header length */
6011 ipstat
.ips_badhlen
++;
6014 if (hlen
> m
->m_len
) {
6015 if ((m
= m_pullup(m
, hlen
)) == 0) {
6016 ipstat
.ips_badhlen
++;
6019 ip
= mtod(m
, struct ip
*);
/* Use a hardware-verified checksum when available, else compute one. */
6025 if (m
->m_pkthdr
.csum_flags
& CSUM_IP_CHECKED
) {
6026 sum
= !(m
->m_pkthdr
.csum_flags
& CSUM_IP_VALID
);
6028 if (hlen
== sizeof(struct ip
)) {
6029 sum
= in_cksum_hdr(ip
);
6031 sum
= in_cksum(m
, hlen
);
6035 ipstat
.ips_badsum
++;
6039 /* Retrieve the packet length. */
6040 len
= ntohs(ip
->ip_len
);
6043 * Check for additional length bogosity
6046 ipstat
.ips_badlen
++;
6051 * Check that the amount of data in the buffers
6052 * is as at least much as the IP header would have us expect.
6053 * Drop packet if shorter than we expect.
6055 if (m
->m_pkthdr
.len
< len
) {
6056 ipstat
.ips_tooshort
++;
6060 /* Checks out, proceed */
/* NOTE(review): fragmented extraction — comments only, code bytes untouched. */
6071 * Same as above, but for IPv6.
6072 * Cut-and-pasted from ip6_input.c.
6073 * XXX Should we update ip6stat, or not?
6076 bridge_ip6_checkbasic(struct mbuf
**mp
)
6078 struct mbuf
*m
= *mp
;
6079 struct ip6_hdr
*ip6
;
6082 * If the IPv6 header is not aligned, slurp it up into a new
6083 * mbuf with space for link headers, in the event we forward
6084 * it. Otherwise, if it is aligned, make sure the entire base
6085 * IPv6 header is in the first mbuf of the chain.
6087 if (IP6_HDR_ALIGNED_P(mtod(m
, caddr_t
)) == 0) {
6088 struct ifnet
*inifp
= m
->m_pkthdr
.rcvif
;
6089 /* max_linkhdr is already rounded up to nearest 4-byte */
6090 if ((m
= m_copyup(m
, sizeof(struct ip6_hdr
),
6091 max_linkhdr
)) == NULL
) {
6092 /* XXXJRT new stat, please */
6093 ip6stat
.ip6s_toosmall
++;
6094 in6_ifstat_inc(inifp
, ifs6_in_hdrerr
);
/* Aligned but short: pull the base IPv6 header into the first mbuf. */
6097 } else if (__predict_false(m
->m_len
< sizeof(struct ip6_hdr
))) {
6098 struct ifnet
*inifp
= m
->m_pkthdr
.rcvif
;
6099 if ((m
= m_pullup(m
, sizeof(struct ip6_hdr
))) == NULL
) {
6100 ip6stat
.ip6s_toosmall
++;
6101 in6_ifstat_inc(inifp
, ifs6_in_hdrerr
);
6106 ip6
= mtod(m
, struct ip6_hdr
*);
/* Reject anything whose version nibble is not IPv6. */
6108 if ((ip6
->ip6_vfc
& IPV6_VERSION_MASK
) != IPV6_VERSION
) {
6109 ip6stat
.ip6s_badvers
++;
6110 in6_ifstat_inc(m
->m_pkthdr
.rcvif
, ifs6_in_hdrerr
);
6114 /* Checks out, proceed */
/* NOTE(review): fragmented extraction — comments only, code bytes untouched. */
6127 * Return a fragmented mbuf chain.
6130 bridge_fragment(struct ifnet
*ifp
, struct mbuf
*m
, struct ether_header
*eh
,
6131 int snap
, struct llc
*llc
)
/* Ensure the base IP header is contiguous before fragmenting. */
6137 if (m
->m_len
< sizeof(struct ip
) &&
6138 (m
= m_pullup(m
, sizeof(struct ip
))) == NULL
) {
6141 ip
= mtod(m
, struct ip
*);
/* Let the IP layer split the payload to the interface MTU. */
6143 error
= ip_fragment(ip
, &m
, ifp
->if_mtu
, ifp
->if_hwassist
,
6149 /* walk the chain and re-add the Ethernet header */
6150 for (m0
= m
; m0
; m0
= m0
->m_nextpkt
) {
/* SNAP case: prepend the saved LLC header before the Ethernet header. */
6153 M_PREPEND(m0
, sizeof(struct llc
), M_DONTWAIT
, 0);
6158 bcopy(llc
, mtod(m0
, caddr_t
),
6159 sizeof(struct llc
));
6161 M_PREPEND(m0
, ETHER_HDR_LEN
, M_DONTWAIT
, 0);
6166 bcopy(eh
, mtod(m0
, caddr_t
), ETHER_HDR_LEN
);
/* Account the successful fragmentation. */
6173 ipstat
.ips_fragmented
++;
6184 #endif /* PFIL_HOOKS */
/* NOTE(review): fragmented extraction — comments only, code bytes untouched. */
6187 * bridge_set_bpf_tap:
6189 * Sets ups the BPF callbacks.
6192 bridge_set_bpf_tap(ifnet_t ifp
, bpf_tap_mode mode
, bpf_packet_func bpf_callback
)
6194 struct bridge_softc
*sc
= (struct bridge_softc
*)ifnet_softc(ifp
);
/* Bail out if the softc is gone or the bridge is being torn down. */
6197 if (sc
== NULL
|| (sc
->sc_flags
& SCF_DETACHING
)) {
6200 ASSERT(bridge_in_bsd_mode(sc
));
/* Install/clear the input/output taps according to 'mode'. */
6202 case BPF_TAP_DISABLE
:
6203 sc
->sc_bpf_input
= sc
->sc_bpf_output
= NULL
;
6207 sc
->sc_bpf_input
= bpf_callback
;
6210 case BPF_TAP_OUTPUT
:
6211 sc
->sc_bpf_output
= bpf_callback
;
6214 case BPF_TAP_INPUT_OUTPUT
:
6215 sc
->sc_bpf_input
= sc
->sc_bpf_output
= bpf_callback
;
/* NOTE(review): fragmented extraction — comments only, code bytes untouched. */
6228 * Callback when interface has been detached.
6231 bridge_detach(ifnet_t ifp
)
6233 struct bridge_softc
*sc
= (struct bridge_softc
*)ifnet_softc(ifp
);
/* Detach spanning-tree state first (when BRIDGESTP is compiled in). */
6236 bstp_detach(&sc
->sc_stp
);
6237 #endif /* BRIDGESTP */
6239 if (bridge_in_bsd_mode(sc
)) {
6240 /* Tear down the routing table. */
6241 bridge_rtable_fini(sc
);
/* Remove this bridge from the global list under the list mutex. */
6244 lck_mtx_lock(&bridge_list_mtx
);
6245 LIST_REMOVE(sc
, sc_list
);
6246 lck_mtx_unlock(&bridge_list_mtx
);
/* Release the softc lock and free the softc itself. */
6250 lck_mtx_destroy(&sc
->sc_mtx
, bridge_lock_grp
);
6251 if_clone_softc_deallocate(&bridge_cloner
, sc
);
/* NOTE(review): fragmented extraction — comments only, code bytes untouched. */
6257 * Invoke the input BPF callback if enabled
6259 __private_extern__ errno_t
6260 bridge_bpf_input(ifnet_t ifp
, struct mbuf
*m
)
6262 struct bridge_softc
*sc
= (struct bridge_softc
*)ifnet_softc(ifp
);
6264 ASSERT(bridge_in_bsd_mode(sc
));
6265 if (sc
->sc_bpf_input
) {
/* Diagnostic: flag packets whose rcvif does not match the bridge ifp. */
6266 if (mbuf_pkthdr_rcvif(m
) != ifp
) {
6267 printf("%s: rcvif: 0x%llx != ifp 0x%llx\n", __func__
,
6268 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m
)),
6269 (uint64_t)VM_KERNEL_ADDRPERM(ifp
));
/* Hand the packet to the registered input tap. */
6271 (*sc
->sc_bpf_input
)(ifp
, m
);
/* NOTE(review): fragmented extraction — comments only, code bytes untouched. */
6277 * bridge_bpf_output:
6279 * Invoke the output BPF callback if enabled
6281 __private_extern__ errno_t
6282 bridge_bpf_output(ifnet_t ifp
, struct mbuf
*m
)
6284 struct bridge_softc
*sc
= (struct bridge_softc
*)ifnet_softc(ifp
);
6286 ASSERT(bridge_in_bsd_mode(sc
));
/* Hand the packet to the registered output tap, if any. */
6287 if (sc
->sc_bpf_output
) {
6288 (*sc
->sc_bpf_output
)(ifp
, m
);
/* NOTE(review): fragmented extraction — comments only, code bytes untouched. */
6294 * bridge_link_event:
6296 * Report a data link event on an interface
6299 bridge_link_event(struct ifnet
*ifp
, u_int32_t event_code
)
6302 struct kern_event_msg header
;
6304 char if_name
[IFNAMSIZ
];
/* Optional lifecycle debug trace. */
6308 if (if_bridge_debug
& BR_DBGF_LIFECYCLE
) {
6309 printf("%s: %s event_code %u - %s\n", __func__
, ifp
->if_xname
,
6310 event_code
, dlil_kev_dl_code_str(event_code
));
6312 #endif /* BRIDGE_DEBUG */
/* Build a KEV_DL_SUBCLASS kernel event describing the link change. */
6314 bzero(&event
, sizeof(event
));
6315 event
.header
.total_size
= sizeof(event
);
6316 event
.header
.vendor_code
= KEV_VENDOR_APPLE
;
6317 event
.header
.kev_class
= KEV_NETWORK_CLASS
;
6318 event
.header
.kev_subclass
= KEV_DL_SUBCLASS
;
6319 event
.header
.event_code
= event_code
;
6320 event
.header
.event_data
[0] = ifnet_family(ifp
);
6321 event
.unit
= (u_int32_t
)ifnet_unit(ifp
);
6322 strlcpy(event
.if_name
, ifnet_name(ifp
), IFNAMSIZ
);
/* Post the event to interested listeners. */
6323 ifnet_event(ifp
, &event
.header
);
/*
 * BRIDGE_HF_DROP: bump the named host-filter drop counter and, when
 * host-filter debugging is on, log the reason with caller location.
 * NOTE(review): the tail of this macro is missing from this extraction.
 */
6326 #define BRIDGE_HF_DROP(reason, func, line) { \
6327 bridge_hostfilter_stats.reason++; \
6328 if (if_bridge_debug & BR_DBGF_HOSTFILTER) \
6329 printf("%s.%d" #reason, func, line); \
/* NOTE(review): fragmented extraction — comments only, code bytes untouched. */
6334 * Make sure this is a DHCP or Bootp request that match the host filter
6337 bridge_dhcp_filter(struct bridge_iflist
*bif
, struct mbuf
*m
, size_t offset
)
6343 * Note: We use the dhcp structure because bootp structure definition
6344 * is larger and some vendors do not pad the request
/* Copy the fixed DHCP header out of the mbuf at 'offset'. */
6346 error
= mbuf_copydata(m
, offset
, sizeof(struct dhcp
), &dhcp
)
6348 BRIDGE_HF_DROP(brhf_dhcp_too_small
, __func__
, __LINE__
);
/* Only client-to-server BOOTREQUESTs are permitted. */
6351 if (dhcp
.dp_op
!= BOOTREQUEST
) {
6352 BRIDGE_HF_DROP(brhf_dhcp_bad_op
, __func__
, __LINE__
);
6356 * The hardware address must be an exact match
6358 if (dhcp
.dp_htype
!= ARPHRD_ETHER
) {
6359 BRIDGE_HF_DROP(brhf_dhcp_bad_htype
, __func__
, __LINE__
);
6362 if (dhcp
.dp_hlen
!= ETHER_ADDR_LEN
) {
6363 BRIDGE_HF_DROP(brhf_dhcp_bad_hlen
, __func__
, __LINE__
);
6366 if (bcmp(dhcp
.dp_chaddr
, bif
->bif_hf_hwsrc
,
6367 ETHER_ADDR_LEN
) != 0) {
6368 BRIDGE_HF_DROP(brhf_dhcp_bad_chaddr
, __func__
, __LINE__
);
6372 * Client address must match the host address or be not specified
6374 if (dhcp
.dp_ciaddr
.s_addr
!= bif
->bif_hf_ipsrc
.s_addr
&&
6375 dhcp
.dp_ciaddr
.s_addr
!= INADDR_ANY
) {
6376 BRIDGE_HF_DROP(brhf_dhcp_bad_ciaddr
, __func__
, __LINE__
);
6385 bridge_host_filter(struct bridge_iflist
*bif
, struct mbuf
*m
)
6388 struct ether_header
*eh
;
6389 static struct in_addr inaddr_any
= { .s_addr
= INADDR_ANY
};
6392 * Check the Ethernet header is large enough
6394 if (mbuf_pkthdr_len(m
) < sizeof(struct ether_header
)) {
6395 BRIDGE_HF_DROP(brhf_ether_too_small
, __func__
, __LINE__
);
6398 if (mbuf_len(m
) < sizeof(struct ether_header
) &&
6399 mbuf_pullup(&m
, sizeof(struct ether_header
)) != 0) {
6400 BRIDGE_HF_DROP(brhf_ether_pullup_failed
, __func__
, __LINE__
);
6403 eh
= mtod(m
, struct ether_header
*);
6406 * Restrict the source hardware address
6408 if ((bif
->bif_flags
& BIFF_HF_HWSRC
) == 0 ||
6409 bcmp(eh
->ether_shost
, bif
->bif_hf_hwsrc
,
6410 ETHER_ADDR_LEN
) != 0) {
6411 BRIDGE_HF_DROP(brhf_bad_ether_srchw_addr
, __func__
, __LINE__
);
6416 * Restrict Ethernet protocols to ARP and IP
6418 if (eh
->ether_type
== htons(ETHERTYPE_ARP
)) {
6419 struct ether_arp
*ea
;
6420 size_t minlen
= sizeof(struct ether_header
) +
6421 sizeof(struct ether_arp
);
6424 * Make the Ethernet and ARP headers contiguous
6426 if (mbuf_pkthdr_len(m
) < minlen
) {
6427 BRIDGE_HF_DROP(brhf_arp_too_small
, __func__
, __LINE__
);
6430 if (mbuf_len(m
) < minlen
&& mbuf_pullup(&m
, minlen
) != 0) {
6431 BRIDGE_HF_DROP(brhf_arp_pullup_failed
,
6432 __func__
, __LINE__
);
6436 * Verify this is an ethernet/ip arp
6438 eh
= mtod(m
, struct ether_header
*);
6439 ea
= (struct ether_arp
*)(eh
+ 1);
6440 if (ea
->arp_hrd
!= htons(ARPHRD_ETHER
)) {
6441 BRIDGE_HF_DROP(brhf_arp_bad_hw_type
,
6442 __func__
, __LINE__
);
6445 if (ea
->arp_pro
!= htons(ETHERTYPE_IP
)) {
6446 BRIDGE_HF_DROP(brhf_arp_bad_pro_type
,
6447 __func__
, __LINE__
);
6451 * Verify the address lengths are correct
6453 if (ea
->arp_hln
!= ETHER_ADDR_LEN
) {
6454 BRIDGE_HF_DROP(brhf_arp_bad_hw_len
, __func__
, __LINE__
);
6457 if (ea
->arp_pln
!= sizeof(struct in_addr
)) {
6458 BRIDGE_HF_DROP(brhf_arp_bad_pro_len
,
6459 __func__
, __LINE__
);
6464 * Allow only ARP request or ARP reply
6466 if (ea
->arp_op
!= htons(ARPOP_REQUEST
) &&
6467 ea
->arp_op
!= htons(ARPOP_REPLY
)) {
6468 BRIDGE_HF_DROP(brhf_arp_bad_op
, __func__
, __LINE__
);
6472 * Verify source hardware address matches
6474 if (bcmp(ea
->arp_sha
, bif
->bif_hf_hwsrc
,
6475 ETHER_ADDR_LEN
) != 0) {
6476 BRIDGE_HF_DROP(brhf_arp_bad_sha
, __func__
, __LINE__
);
6480 * Verify source protocol address:
6481 * May be null for an ARP probe
6483 if (bcmp(ea
->arp_spa
, &bif
->bif_hf_ipsrc
.s_addr
,
6484 sizeof(struct in_addr
)) != 0 &&
6485 bcmp(ea
->arp_spa
, &inaddr_any
,
6486 sizeof(struct in_addr
)) != 0) {
6487 BRIDGE_HF_DROP(brhf_arp_bad_spa
, __func__
, __LINE__
);
6493 bridge_hostfilter_stats
.brhf_arp_ok
+= 1;
6495 } else if (eh
->ether_type
== htons(ETHERTYPE_IP
)) {
6496 size_t minlen
= sizeof(struct ether_header
) + sizeof(struct ip
);
6501 * Make the Ethernet and IP headers contiguous
6503 if (mbuf_pkthdr_len(m
) < minlen
) {
6504 BRIDGE_HF_DROP(brhf_ip_too_small
, __func__
, __LINE__
);
6507 offset
= sizeof(struct ether_header
);
6508 error
= mbuf_copydata(m
, offset
, sizeof(struct ip
), &iphdr
);
6510 BRIDGE_HF_DROP(brhf_ip_too_small
, __func__
, __LINE__
);
6514 * Verify the source IP address
6516 if (iphdr
.ip_p
== IPPROTO_UDP
) {
6519 minlen
+= sizeof(struct udphdr
);
6520 if (mbuf_pkthdr_len(m
) < minlen
) {
6521 BRIDGE_HF_DROP(brhf_ip_too_small
,
6522 __func__
, __LINE__
);
6527 * Allow all zero addresses for DHCP requests
6529 if (iphdr
.ip_src
.s_addr
!= bif
->bif_hf_ipsrc
.s_addr
&&
6530 iphdr
.ip_src
.s_addr
!= INADDR_ANY
) {
6531 BRIDGE_HF_DROP(brhf_ip_bad_srcaddr
,
6532 __func__
, __LINE__
);
6535 offset
= sizeof(struct ether_header
) +
6536 (IP_VHL_HL(iphdr
.ip_vhl
) << 2);
6537 error
= mbuf_copydata(m
, offset
,
6538 sizeof(struct udphdr
), &udp
);
6540 BRIDGE_HF_DROP(brhf_ip_too_small
,
6541 __func__
, __LINE__
);
6545 * Either it's a Bootp/DHCP packet that we like or
6546 * it's a UDP packet from the host IP as source address
6548 if (udp
.uh_sport
== htons(IPPORT_BOOTPC
) &&
6549 udp
.uh_dport
== htons(IPPORT_BOOTPS
)) {
6550 minlen
+= sizeof(struct dhcp
);
6551 if (mbuf_pkthdr_len(m
) < minlen
) {
6552 BRIDGE_HF_DROP(brhf_ip_too_small
,
6553 __func__
, __LINE__
);
6556 offset
+= sizeof(struct udphdr
);
6557 error
= bridge_dhcp_filter(bif
, m
, offset
);
6561 } else if (iphdr
.ip_src
.s_addr
== INADDR_ANY
) {
6562 BRIDGE_HF_DROP(brhf_ip_bad_srcaddr
,
6563 __func__
, __LINE__
);
6566 } else if (iphdr
.ip_src
.s_addr
!= bif
->bif_hf_ipsrc
.s_addr
||
6567 bif
->bif_hf_ipsrc
.s_addr
== INADDR_ANY
) {
6568 BRIDGE_HF_DROP(brhf_ip_bad_srcaddr
, __func__
, __LINE__
);
6572 * Allow only boring IP protocols
6574 if (iphdr
.ip_p
!= IPPROTO_TCP
&&
6575 iphdr
.ip_p
!= IPPROTO_UDP
&&
6576 iphdr
.ip_p
!= IPPROTO_ICMP
&&
6577 iphdr
.ip_p
!= IPPROTO_ESP
&&
6578 iphdr
.ip_p
!= IPPROTO_AH
&&
6579 iphdr
.ip_p
!= IPPROTO_GRE
) {
6580 BRIDGE_HF_DROP(brhf_ip_bad_proto
, __func__
, __LINE__
);
6583 bridge_hostfilter_stats
.brhf_ip_ok
+= 1;
6586 BRIDGE_HF_DROP(brhf_bad_ether_type
, __func__
, __LINE__
);
6591 if (if_bridge_debug
& BR_DBGF_HOSTFILTER
) {
6593 printf_mbuf_data(m
, 0,
6594 sizeof(struct ether_header
) +