/*
 * Copyright (c) 2004-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
 */
/*
 * Network interface bridge support.
 *
 *	- Currently only supports Ethernet-like interfaces (Ethernet,
 *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
 *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
 *	  consider heterogenous bridges).
 *
 *	- GIF isn't handled due to the lack of IPPROTO_ETHERIP support.
 */
#include <sys/cdefs.h>

#define BRIDGE_DEBUG 1

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/socket.h> /* for net/if.h */
#include <sys/sockio.h>
#include <sys/kernel.h>
#include <sys/random.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mcache.h>

#include <sys/kauth.h>

#include <kern/thread_call.h>

#include <libkern/libkern.h>

#include <kern/zalloc.h>

#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/net_api_stats.h>

#include <netinet/in.h> /* for struct arpcom */
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_carp.h>
#include <netinet/if_ether.h> /* for struct arpcom */
#include <net/bridgestp.h>
#include <net/if_bridgevar.h>
#include <net/if_llc.h>
#if NVLAN > 0
#include <net/if_vlan_var.h>
#endif /* NVLAN > 0 */

#include <net/if_ether.h>
#include <net/dlil.h>
#include <net/kpi_interfacefilter.h>

#include <net/route.h>
#if defined(PFIL_HOOKS)
#include <netinet/ip_fw2.h>
#include <netinet/ip_dummynet.h>
#endif /* PFIL_HOOKS */
#include <dev/random/randomdev.h>

#include <netinet/bootp.h>
#include <netinet/dhcp.h>
#if BRIDGE_DEBUG

#define BR_DBGF_LIFECYCLE       0x0001
#define BR_DBGF_INPUT           0x0002
#define BR_DBGF_OUTPUT          0x0004
#define BR_DBGF_RT_TABLE        0x0008
#define BR_DBGF_DELAYED_CALL    0x0010
#define BR_DBGF_IOCTL           0x0020
#define BR_DBGF_MBUF            0x0040
#define BR_DBGF_MCAST           0x0080
#define BR_DBGF_HOSTFILTER      0x0100
#define BR_DBGF_CHECKSUM        0x0200
#endif /* BRIDGE_DEBUG */
#define _BRIDGE_LOCK(_sc)       lck_mtx_lock(&(_sc)->sc_mtx)
#define _BRIDGE_UNLOCK(_sc)     lck_mtx_unlock(&(_sc)->sc_mtx)
#define BRIDGE_LOCK_ASSERT_HELD(_sc)                            \
    LCK_MTX_ASSERT(&(_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED)
#define BRIDGE_LOCK_ASSERT_NOTHELD(_sc)                         \
    LCK_MTX_ASSERT(&(_sc)->sc_mtx, LCK_MTX_ASSERT_NOTOWNED)

#if BRIDGE_DEBUG

#define BR_LCKDBG_MAX 4

#define BRIDGE_LOCK(_sc)                bridge_lock(_sc)
#define BRIDGE_UNLOCK(_sc)              bridge_unlock(_sc)
#define BRIDGE_LOCK2REF(_sc, _err)      _err = bridge_lock2ref(_sc)
#define BRIDGE_UNREF(_sc)               bridge_unref(_sc)
#define BRIDGE_XLOCK(_sc)               bridge_xlock(_sc)
#define BRIDGE_XDROP(_sc)               bridge_xdrop(_sc)

#else /* !BRIDGE_DEBUG */

#define BRIDGE_LOCK(_sc)        _BRIDGE_LOCK(_sc)
#define BRIDGE_UNLOCK(_sc)      _BRIDGE_UNLOCK(_sc)
#define BRIDGE_LOCK2REF(_sc, _err)      do {                    \
    BRIDGE_LOCK_ASSERT_HELD(_sc);                               \
    if ((_sc)->sc_iflist_xcnt > 0)                              \
            (_err) = EBUSY;                                     \
    else                                                        \
            (_sc)->sc_iflist_ref++;                             \
    _BRIDGE_UNLOCK(_sc);                                        \
} while (0)
#define BRIDGE_UNREF(_sc)       do {                            \
    _BRIDGE_LOCK(_sc);                                          \
    (_sc)->sc_iflist_ref--;                                     \
    if (((_sc)->sc_iflist_xcnt > 0) && ((_sc)->sc_iflist_ref == 0)) { \
            _BRIDGE_UNLOCK(_sc);                                \
            wakeup(&(_sc)->sc_cv);                              \
    } else                                                      \
            _BRIDGE_UNLOCK(_sc);                                \
} while (0)
#define BRIDGE_XLOCK(_sc)       do {                            \
    BRIDGE_LOCK_ASSERT_HELD(_sc);                               \
    (_sc)->sc_iflist_xcnt++;                                    \
    while ((_sc)->sc_iflist_ref > 0)                            \
            msleep(&(_sc)->sc_cv, &(_sc)->sc_mtx, PZERO,        \
                "BRIDGE_XLOCK", NULL);                          \
} while (0)
#define BRIDGE_XDROP(_sc)       do {                            \
    BRIDGE_LOCK_ASSERT_HELD(_sc);                               \
    (_sc)->sc_iflist_xcnt--;                                    \
} while (0)

#endif /* BRIDGE_DEBUG */
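/*
 * Usage sketch (informational, not part of the original source): a typical
 * reader path takes the bridge mutex, converts it to a reference while it
 * walks sc_iflist without the mutex held, and drops the reference when done:
 *
 *	BRIDGE_LOCK(sc);
 *	BRIDGE_LOCK2REF(sc, error);	// drops the mutex, takes a list ref
 *	if (error == 0) {
 *		// ... iterate sc_iflist ...
 *		BRIDGE_UNREF(sc);	// may wake a waiting BRIDGE_XLOCK()
 *	}
 */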
#if NBPFILTER > 0
#define BRIDGE_BPF_MTAP_INPUT(sc, m)                            \
    if (sc->sc_bpf_input)                                       \
            bridge_bpf_input(sc->sc_ifp, m)
#else /* NBPFILTER */
#define BRIDGE_BPF_MTAP_INPUT(ifp, m)
#endif /* NBPFILTER */
/*
 * Initial size of the route hash table.  Must be a power of two.
 */
#ifndef BRIDGE_RTHASH_SIZE
#define BRIDGE_RTHASH_SIZE 16
#endif

/*
 * Maximum size of the routing hash table
 */
#define BRIDGE_RTHASH_SIZE_MAX 2048

#define BRIDGE_RTHASH_MASK(sc) ((sc)->sc_rthash_size - 1)
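/*
 * Illustration (not part of the original source): because the table size is
 * a power of two, BRIDGE_RTHASH_MASK() reduces a hash value to a bucket
 * index without a modulo, e.g. bucket = hash & BRIDGE_RTHASH_MASK(sc); with
 * sc_rthash_size == 16 a hash of 0x12345 selects bucket 0x5.
 */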
/*
 * Maximum number of addresses to cache.
 */
#ifndef BRIDGE_RTABLE_MAX
#define BRIDGE_RTABLE_MAX 100
#endif

/*
 * Timeout (in seconds) for entries learned dynamically.
 */
#ifndef BRIDGE_RTABLE_TIMEOUT
#define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
#endif

/*
 * Number of seconds between walks of the route list.
 */
#ifndef BRIDGE_RTABLE_PRUNE_PERIOD
#define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
#endif

/*
 * List of capabilities to possibly mask on the member interface.
 */
#define BRIDGE_IFCAPS_MASK      (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM)

/*
 * List of capabilities to disable on the member interface.
 */
#define BRIDGE_IFCAPS_STRIP     IFCAP_LRO
/*
 * Bridge interface list entry.
 */
struct bridge_iflist {
    TAILQ_ENTRY(bridge_iflist) bif_next;
    struct ifnet            *bif_ifp;       /* member if */
    struct bstp_port        bif_stp;        /* STP state */
    uint32_t                bif_ifflags;    /* member if flags */
    int                     bif_savedcaps;  /* saved capabilities */
    uint32_t                bif_addrmax;    /* max # of addresses */
    uint32_t                bif_addrcnt;    /* cur. # of addresses */
    uint32_t                bif_addrexceeded; /* # of address violations */

    interface_filter_t      bif_iff_ref;
    struct bridge_softc     *bif_sc;
    uint32_t                bif_flags;

    struct in_addr          bif_hf_ipsrc;
    uint8_t                 bif_hf_hwsrc[ETHER_ADDR_LEN];
};

#define BIFF_PROMISC            0x01    /* promiscuous mode set */
#define BIFF_PROTO_ATTACHED     0x02    /* protocol attached */
#define BIFF_FILTER_ATTACHED    0x04    /* interface filter attached */
#define BIFF_MEDIA_ACTIVE       0x08    /* interface media active */
#define BIFF_HOST_FILTER        0x10    /* host filter enabled */
#define BIFF_HF_HWSRC           0x20    /* host filter source MAC is set */
#define BIFF_HF_IPSRC           0x40    /* host filter source IP is set */
#define BIFF_INPUT_BROADCAST    0x80    /* send broadcast packets in */
struct bridge_rtnode {
    LIST_ENTRY(bridge_rtnode) brt_hash;     /* hash table linkage */
    LIST_ENTRY(bridge_rtnode) brt_list;     /* list linkage */
    struct bridge_iflist    *brt_dst;       /* destination if */
    unsigned long           brt_expire;     /* expiration time */
    uint8_t                 brt_flags;      /* address flags */
    uint8_t                 brt_addr[ETHER_ADDR_LEN];
    uint16_t                brt_vlan;       /* vlan id */
};

#define brt_ifp brt_dst->bif_ifp
/*
 * Bridge delayed function call context
 */
typedef void (*bridge_delayed_func_t)(struct bridge_softc *);

struct bridge_delayed_call {
    struct bridge_softc     *bdc_sc;
    bridge_delayed_func_t   bdc_func;       /* Function to call */
    struct timespec         bdc_ts;         /* Time to call */
    u_int32_t               bdc_flags;
    thread_call_t           bdc_thread_call;
};

#define BDCF_OUTSTANDING        0x01    /* Delayed call has been scheduled */
#define BDCF_CANCELLING         0x02    /* May be waiting for call completion */
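/*
 * Lifecycle sketch (informational, not from the original source): a caller
 * fills in bdc_sc, bdc_func and bdc_ts and hands the structure to
 * bridge_schedule_delayed_call(); BDCF_OUTSTANDING marks a pending thread
 * call, and BDCF_CANCELLING marks a cancellation that may still be waiting
 * for the callout to finish (see bridge_cancel_delayed_call() and
 * bridge_cleanup_delayed_call() below).
 */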
/*
 * Software state for each bridge.
 */
LIST_HEAD(_bridge_rtnode_list, bridge_rtnode);

struct bridge_softc {
    struct ifnet            *sc_ifp;        /* make this an interface */
    u_int32_t               sc_flags;
    LIST_ENTRY(bridge_softc) sc_list;
    decl_lck_mtx_data(, sc_mtx);
    struct _bridge_rtnode_list *sc_rthash;  /* our forwarding table */
    struct _bridge_rtnode_list sc_rtlist;   /* list version of above */
    uint32_t                sc_rthash_key;  /* key for hash */
    uint32_t                sc_rthash_size; /* size of the hash table */
    struct bridge_delayed_call sc_aging_timer;
    struct bridge_delayed_call sc_resize_call;
    TAILQ_HEAD(, bridge_iflist) sc_spanlist;        /* span ports list */
    struct bstp_state       sc_stp;         /* STP state */
    bpf_packet_func         sc_bpf_input;
    bpf_packet_func         sc_bpf_output;
    uint32_t                sc_brtmax;      /* max # of addresses */
    uint32_t                sc_brtcnt;      /* cur. # of addresses */
    uint32_t                sc_brttimeout;  /* rt timeout in seconds */
    uint32_t                sc_iflist_ref;  /* refcount for sc_iflist */
    uint32_t                sc_iflist_xcnt; /* refcount for sc_iflist */
    TAILQ_HEAD(, bridge_iflist) sc_iflist;  /* member interface list */
    uint32_t                sc_brtexceeded; /* # of cache drops */
    uint32_t                sc_filter_flags; /* ipf and flags */
    struct ifnet            *sc_ifaddr;     /* member mac copied from */
    u_char                  sc_defaddr[6];  /* Default MAC address */
    char                    sc_if_xname[IFNAMSIZ];

#if BRIDGE_DEBUG
    /*
     * Locking and unlocking calling history
     */
    void                    *lock_lr[BR_LCKDBG_MAX];
    uint32_t                next_lock_lr;
    void                    *unlock_lr[BR_LCKDBG_MAX];
    uint32_t                next_unlock_lr;
#endif /* BRIDGE_DEBUG */
};

#define SCF_DETACHING           0x01
#define SCF_RESIZING            0x02
#define SCF_MEDIA_ACTIVE        0x04
typedef enum {
    kChecksumOperationNone = 0,
    kChecksumOperationClear = 1,
    kChecksumOperationFinalize = 2,
    kChecksumOperationCompute = 3,
} ChecksumOperation;
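/*
 * Note (assumption, not stated in this excerpt): the ChecksumOperation value
 * is passed to bridge_enqueue() to say what to do with a packet's pending
 * checksums before transmit -- leave them alone, clear the offload metadata,
 * finalize them in software, or compute them from scratch.
 */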
struct bridge_hostfilter_stats bridge_hostfilter_stats;

decl_lck_mtx_data(static, bridge_list_mtx);

static int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;

static zone_t bridge_rtnode_pool = NULL;
static int bridge_clone_create(struct if_clone *, uint32_t, void *);
static int bridge_clone_destroy(struct ifnet *);

static errno_t bridge_ioctl(struct ifnet *, u_long, void *);
static void bridge_mutecaps(struct bridge_softc *);
static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
    int);
static errno_t bridge_set_tso(struct bridge_softc *);
static void bridge_ifdetach(struct ifnet *);
static void bridge_proto_attach_changed(struct ifnet *);
static int bridge_init(struct ifnet *);
#if HAS_BRIDGE_DUMMYNET
static void bridge_dummynet(struct mbuf *, struct ifnet *);
#endif /* HAS_BRIDGE_DUMMYNET */
static void bridge_ifstop(struct ifnet *, int);
static int bridge_output(struct ifnet *, struct mbuf *);
static void bridge_finalize_cksum(struct ifnet *, struct mbuf *);
static void bridge_start(struct ifnet *);
__private_extern__ errno_t bridge_input(struct ifnet *, struct mbuf *, void *);
static errno_t bridge_iff_output(void *, ifnet_t, protocol_family_t,
    mbuf_t *);
static errno_t bridge_member_output(struct bridge_softc *sc, ifnet_t ifp,
    mbuf_t *m);
static int bridge_enqueue(struct bridge_softc *, struct ifnet *,
    struct ifnet *, struct mbuf *, ChecksumOperation);
static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);

static void bridge_forward(struct bridge_softc *, struct bridge_iflist *,
    struct mbuf *);

static void bridge_aging_timer(struct bridge_softc *sc);

static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
    struct mbuf *, int);
static void bridge_span(struct bridge_softc *, struct mbuf *);

static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
    uint16_t, struct bridge_iflist *, int, uint8_t);
static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
    uint16_t);
static void bridge_rttrim(struct bridge_softc *);
static void bridge_rtage(struct bridge_softc *);
static void bridge_rtflush(struct bridge_softc *, int);
static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
    uint16_t);

static int bridge_rtable_init(struct bridge_softc *);
static void bridge_rtable_fini(struct bridge_softc *);

static void bridge_rthash_resize(struct bridge_softc *);

static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
    const uint8_t *, uint16_t);
static int bridge_rtnode_hash(struct bridge_softc *,
    struct bridge_rtnode *);
static int bridge_rtnode_insert(struct bridge_softc *,
    struct bridge_rtnode *);
static void bridge_rtnode_destroy(struct bridge_softc *,
    struct bridge_rtnode *);
#if BRIDGESTP
static void bridge_rtable_expire(struct ifnet *, int);
static void bridge_state_change(struct ifnet *, int);
#endif /* BRIDGESTP */
static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
    const char *name);
static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
    struct ifnet *member_ifp);
static void bridge_delete_member(struct bridge_softc *,
    struct bridge_iflist *, int);
static void bridge_delete_span(struct bridge_softc *,
    struct bridge_iflist *);

static int bridge_ioctl_add(struct bridge_softc *, void *);
static int bridge_ioctl_del(struct bridge_softc *, void *);
static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
static int bridge_ioctl_scache(struct bridge_softc *, void *);
static int bridge_ioctl_gcache(struct bridge_softc *, void *);
static int bridge_ioctl_gifs32(struct bridge_softc *, void *);
static int bridge_ioctl_gifs64(struct bridge_softc *, void *);
static int bridge_ioctl_rts32(struct bridge_softc *, void *);
static int bridge_ioctl_rts64(struct bridge_softc *, void *);
static int bridge_ioctl_saddr32(struct bridge_softc *, void *);
static int bridge_ioctl_saddr64(struct bridge_softc *, void *);
static int bridge_ioctl_sto(struct bridge_softc *, void *);
static int bridge_ioctl_gto(struct bridge_softc *, void *);
static int bridge_ioctl_daddr32(struct bridge_softc *, void *);
static int bridge_ioctl_daddr64(struct bridge_softc *, void *);
static int bridge_ioctl_flush(struct bridge_softc *, void *);
static int bridge_ioctl_gpri(struct bridge_softc *, void *);
static int bridge_ioctl_spri(struct bridge_softc *, void *);
static int bridge_ioctl_ght(struct bridge_softc *, void *);
static int bridge_ioctl_sht(struct bridge_softc *, void *);
static int bridge_ioctl_gfd(struct bridge_softc *, void *);
static int bridge_ioctl_sfd(struct bridge_softc *, void *);
static int bridge_ioctl_gma(struct bridge_softc *, void *);
static int bridge_ioctl_sma(struct bridge_softc *, void *);
static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
static int bridge_ioctl_addspan(struct bridge_softc *, void *);
static int bridge_ioctl_delspan(struct bridge_softc *, void *);
static int bridge_ioctl_gbparam32(struct bridge_softc *, void *);
static int bridge_ioctl_gbparam64(struct bridge_softc *, void *);
static int bridge_ioctl_grte(struct bridge_softc *, void *);
static int bridge_ioctl_gifsstp32(struct bridge_softc *, void *);
static int bridge_ioctl_gifsstp64(struct bridge_softc *, void *);
static int bridge_ioctl_sproto(struct bridge_softc *, void *);
static int bridge_ioctl_stxhc(struct bridge_softc *, void *);
static int bridge_ioctl_purge(struct bridge_softc *sc, void *);
static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
static int bridge_ioctl_ghostfilter(struct bridge_softc *, void *);
static int bridge_ioctl_shostfilter(struct bridge_softc *, void *);
#if defined(PFIL_HOOKS)
static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
    int);
static int bridge_ip_checkbasic(struct mbuf **);
static int bridge_ip6_checkbasic(struct mbuf **);
static int bridge_fragment(struct ifnet *, struct mbuf *,
    struct ether_header *, int, struct llc *);
#endif /* PFIL_HOOKS */

static errno_t bridge_set_bpf_tap(ifnet_t, bpf_tap_mode, bpf_packet_func);
__private_extern__ errno_t bridge_bpf_input(ifnet_t, struct mbuf *);
__private_extern__ errno_t bridge_bpf_output(ifnet_t, struct mbuf *);

static void bridge_detach(ifnet_t);
static void bridge_link_event(struct ifnet *, u_int32_t);
static void bridge_iflinkevent(struct ifnet *);
static u_int32_t bridge_updatelinkstatus(struct bridge_softc *);
static int interface_media_active(struct ifnet *);
static void bridge_schedule_delayed_call(struct bridge_delayed_call *);
static void bridge_cancel_delayed_call(struct bridge_delayed_call *);
static void bridge_cleanup_delayed_call(struct bridge_delayed_call *);
static int bridge_host_filter(struct bridge_iflist *, struct mbuf *);

#define m_copypacket(m, how) m_copym(m, 0, M_COPYALL, how)
/* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
#define VLANTAGOF(_m) 0
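/*
 * Note (not in the original source): VLANTAGOF() is a stub that always
 * evaluates to 0 here, so address learning and lookups effectively treat
 * all traffic as belonging to a single VLAN in this build.
 */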
u_int8_t bstp_etheraddr[ETHER_ADDR_LEN] =
    { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };

static u_int8_t ethernulladdr[ETHER_ADDR_LEN] =
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
#if BRIDGESTP
static struct bstp_cb_ops bridge_ops = {
    .bcb_state = bridge_state_change,
    .bcb_rtage = bridge_rtable_expire
};
#endif /* BRIDGESTP */
SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "Bridge");

static int bridge_inherit_mac = 0;      /* share MAC with first bridge member */
SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &bridge_inherit_mac, 0,
    "Inherit MAC address from the first bridge member");

SYSCTL_INT(_net_link_bridge, OID_AUTO, rtable_prune_period,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &bridge_rtable_prune_period, 0,
    "Interval between pruning of routing table");

static unsigned int bridge_rtable_hash_size_max = BRIDGE_RTHASH_SIZE_MAX;
SYSCTL_UINT(_net_link_bridge, OID_AUTO, rtable_hash_size_max,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &bridge_rtable_hash_size_max, 0,
    "Maximum size of the routing hash table");

#if BRIDGE_DEBUG_DELAYED_CALLBACK
static int bridge_delayed_callback_delay = 0;
SYSCTL_INT(_net_link_bridge, OID_AUTO, delayed_callback_delay,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &bridge_delayed_callback_delay, 0,
    "Delay before calling delayed function");
#endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */

SYSCTL_STRUCT(_net_link_bridge, OID_AUTO,
    hostfilterstats, CTLFLAG_RD | CTLFLAG_LOCKED,
    &bridge_hostfilter_stats, bridge_hostfilter_stats, "");
#if defined(PFIL_HOOKS)
static int pfil_onlyip = 1;     /* only pass IP[46] packets when pfil is enabled */
static int pfil_bridge = 1;     /* run pfil hooks on the bridge interface */
static int pfil_member = 1;     /* run pfil hooks on the member interface */
static int pfil_ipfw = 0;       /* layer2 filter with ipfw */
static int pfil_ipfw_arp = 0;   /* layer2 filter with ipfw */
static int pfil_local_phys = 0; /* run pfil hooks on the physical interface */
                                /* for locally destined packets */
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW | CTLFLAG_LOCKED,
    &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW | CTLFLAG_LOCKED,
    &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2");
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW | CTLFLAG_LOCKED,
    &pfil_bridge, 0, "Packet filter on the bridge interface");
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW | CTLFLAG_LOCKED,
    &pfil_member, 0, "Packet filter on the member interface");
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
    CTLFLAG_RW | CTLFLAG_LOCKED, &pfil_local_phys, 0,
    "Packet filter on the physical interface for locally destined packets");
#endif /* PFIL_HOOKS */
#if BRIDGESTP
static int log_stp = 0;         /* log STP state changes */
SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW,
    &log_stp, 0, "Log STP state changes");
#endif /* BRIDGESTP */
struct bridge_control {
    int             (*bc_func)(struct bridge_softc *, void *);
    unsigned int    bc_argsize;
    unsigned int    bc_flags;
};

#define BC_F_COPYIN     0x01    /* copy arguments in */
#define BC_F_COPYOUT    0x02    /* copy arguments out */
#define BC_F_SUSER      0x04    /* do super-user check */
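/*
 * Usage sketch (informational): each table entry below pairs an ioctl
 * handler with the size of its argument structure.  BC_F_COPYIN and
 * BC_F_COPYOUT tell the SIOC[GS]DRVSPEC dispatch code (the DRVSPEC macro in
 * bridge_ioctl()) whether to copy the user argument in before the call
 * and/or back out afterwards, and BC_F_SUSER requests a super-user check
 * before the handler runs.
 */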
static const struct bridge_control bridge_control_table32[] = {
    { .bc_func = bridge_ioctl_add, .bc_argsize = sizeof(struct ifbreq), /* 0 */
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_del, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_gifflags, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_sifflags, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_scache, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_gcache, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_gifs32, .bc_argsize = sizeof(struct ifbifconf32),
      .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_rts32, .bc_argsize = sizeof(struct ifbaconf32),
      .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_saddr32, .bc_argsize = sizeof(struct ifbareq32),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_sto, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_gto, .bc_argsize = sizeof(struct ifbrparam), /* 10 */
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_daddr32, .bc_argsize = sizeof(struct ifbareq32),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_flush, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_gpri, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_spri, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_ght, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_sht, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_gfd, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_sfd, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_gma, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_sma, .bc_argsize = sizeof(struct ifbrparam), /* 20 */
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_sifprio, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_sifcost, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_gfilt, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_sfilt, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_purge, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_addspan, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_delspan, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_gbparam32, .bc_argsize = sizeof(struct ifbropreq32),
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_grte, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_gifsstp32, .bc_argsize = sizeof(struct ifbpstpconf32), /* 30 */
      .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_sproto, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_stxhc, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_sifmaxaddr, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_ghostfilter, .bc_argsize = sizeof(struct ifbrhostfilter),
      .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_shostfilter, .bc_argsize = sizeof(struct ifbrhostfilter),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
};
static const struct bridge_control bridge_control_table64[] = {
    { .bc_func = bridge_ioctl_add, .bc_argsize = sizeof(struct ifbreq), /* 0 */
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_del, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_gifflags, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_sifflags, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_scache, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_gcache, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_gifs64, .bc_argsize = sizeof(struct ifbifconf64),
      .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_rts64, .bc_argsize = sizeof(struct ifbaconf64),
      .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_saddr64, .bc_argsize = sizeof(struct ifbareq64),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_sto, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_gto, .bc_argsize = sizeof(struct ifbrparam), /* 10 */
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_daddr64, .bc_argsize = sizeof(struct ifbareq64),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_flush, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_gpri, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_spri, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_ght, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_sht, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_gfd, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_sfd, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_gma, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_sma, .bc_argsize = sizeof(struct ifbrparam), /* 20 */
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_sifprio, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_sifcost, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_gfilt, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_sfilt, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_purge, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_addspan, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_delspan, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_gbparam64, .bc_argsize = sizeof(struct ifbropreq64),
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_grte, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_gifsstp64, .bc_argsize = sizeof(struct ifbpstpconf64), /* 30 */
      .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_sproto, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_stxhc, .bc_argsize = sizeof(struct ifbrparam),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_sifmaxaddr, .bc_argsize = sizeof(struct ifbreq),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
    { .bc_func = bridge_ioctl_ghostfilter, .bc_argsize = sizeof(struct ifbrhostfilter),
      .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
    { .bc_func = bridge_ioctl_shostfilter, .bc_argsize = sizeof(struct ifbrhostfilter),
      .bc_flags = BC_F_COPYIN | BC_F_SUSER },
};
static const unsigned int bridge_control_table_size =
    sizeof(bridge_control_table32) / sizeof(bridge_control_table32[0]);
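/*
 * Illustration (not from the original source): a user request selects a
 * handler by index into one of the tables above, e.g. ifd_cmd == 2 maps to
 * bridge_ioctl_gifflags in both the 32-bit and 64-bit tables;
 * bridge_control_table_size bounds that index in the DRVSPEC dispatch code.
 */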
static LIST_HEAD(, bridge_softc) bridge_list =
    LIST_HEAD_INITIALIZER(bridge_list);

static lck_grp_t *bridge_lock_grp = NULL;
static lck_attr_t *bridge_lock_attr = NULL;

#define BRIDGENAME              "bridge"
#define BRIDGES_MAX             IF_MAXUNIT
#define BRIDGE_ZONE_MAX_ELEM    MIN(IFNETS_MAX, BRIDGES_MAX)

static struct if_clone bridge_cloner =
    IF_CLONE_INITIALIZER(BRIDGENAME, bridge_clone_create, bridge_clone_destroy,
        0, BRIDGES_MAX, BRIDGE_ZONE_MAX_ELEM, sizeof(struct bridge_softc));
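/*
 * Usage note (informational): creating an interface of this clone type from
 * user space (e.g. "ifconfig bridge0 create") ends up in
 * bridge_clone_create(), and destroying it ends up in bridge_clone_destroy().
 */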
static int if_bridge_txstart = 0;
SYSCTL_INT(_net_link_bridge, OID_AUTO, txstart, CTLFLAG_RW | CTLFLAG_LOCKED,
    &if_bridge_txstart, 0, "Bridge interface uses TXSTART model");

#if BRIDGE_DEBUG
static int if_bridge_debug = 0;
SYSCTL_INT(_net_link_bridge, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &if_bridge_debug, 0, "Bridge debug");
static void printf_ether_header(struct ether_header *);
static void printf_mbuf_data(mbuf_t, size_t, size_t);
static void printf_mbuf_pkthdr(mbuf_t, const char *, const char *);
static void printf_mbuf(mbuf_t, const char *, const char *);
static void link_print(struct bridge_softc * sc);

static void bridge_lock(struct bridge_softc *);
static void bridge_unlock(struct bridge_softc *);
static int bridge_lock2ref(struct bridge_softc *);
static void bridge_unref(struct bridge_softc *);
static void bridge_xlock(struct bridge_softc *);
static void bridge_xdrop(struct bridge_softc *);
static void
bridge_lock(struct bridge_softc *sc)
{
    void *lr_saved = __builtin_return_address(0);

    BRIDGE_LOCK_ASSERT_NOTHELD(sc);

    _BRIDGE_LOCK(sc);

    sc->lock_lr[sc->next_lock_lr] = lr_saved;
    sc->next_lock_lr = (sc->next_lock_lr + 1) % SO_LCKDBG_MAX;
}

static void
bridge_unlock(struct bridge_softc *sc)
{
    void *lr_saved = __builtin_return_address(0);

    BRIDGE_LOCK_ASSERT_HELD(sc);

    sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
    sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;

    _BRIDGE_UNLOCK(sc);
}

static int
bridge_lock2ref(struct bridge_softc *sc)
{
    int error = 0;
    void *lr_saved = __builtin_return_address(0);

    BRIDGE_LOCK_ASSERT_HELD(sc);

    if (sc->sc_iflist_xcnt > 0) {
        error = EBUSY;
    } else {
        sc->sc_iflist_ref++;
    }

    sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
    sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;

    _BRIDGE_UNLOCK(sc);

    return error;
}

static void
bridge_unref(struct bridge_softc *sc)
{
    void *lr_saved = __builtin_return_address(0);

    BRIDGE_LOCK_ASSERT_NOTHELD(sc);

    _BRIDGE_LOCK(sc);
    sc->lock_lr[sc->next_lock_lr] = lr_saved;
    sc->next_lock_lr = (sc->next_lock_lr + 1) % SO_LCKDBG_MAX;

    sc->sc_iflist_ref--;

    sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
    sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;
    if ((sc->sc_iflist_xcnt > 0) && (sc->sc_iflist_ref == 0)) {
        _BRIDGE_UNLOCK(sc);
        wakeup(&sc->sc_cv);
    } else {
        _BRIDGE_UNLOCK(sc);
    }
}

static void
bridge_xlock(struct bridge_softc *sc)
{
    void *lr_saved = __builtin_return_address(0);

    BRIDGE_LOCK_ASSERT_HELD(sc);

    sc->sc_iflist_xcnt++;
    while (sc->sc_iflist_ref > 0) {
        sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
        sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;

        msleep(&sc->sc_cv, &sc->sc_mtx, PZERO, "BRIDGE_XLOCK", NULL);

        sc->lock_lr[sc->next_lock_lr] = lr_saved;
        sc->next_lock_lr = (sc->next_lock_lr + 1) % SO_LCKDBG_MAX;
    }
}

static void
bridge_xdrop(struct bridge_softc *sc)
{
    BRIDGE_LOCK_ASSERT_HELD(sc);

    sc->sc_iflist_xcnt--;
}
static void
printf_mbuf_pkthdr(mbuf_t m, const char *prefix, const char *suffix)
{
    if (m) {
        printf("%spktlen: %u rcvif: 0x%llx header: 0x%llx "
            "nextpkt: 0x%llx%s",
            prefix ? prefix : "", (unsigned int)mbuf_pkthdr_len(m),
            (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m)),
            (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_header(m)),
            (uint64_t)VM_KERNEL_ADDRPERM(mbuf_nextpkt(m)),
            suffix ? suffix : "");
    } else {
        printf("%s<NULL>%s\n", prefix, suffix);
    }
}

static void
printf_mbuf(mbuf_t m, const char *prefix, const char *suffix)
{
    if (m) {
        printf("%s0x%llx type: %u flags: 0x%x len: %u data: 0x%llx "
            "maxlen: %u datastart: 0x%llx next: 0x%llx%s",
            prefix ? prefix : "", (uint64_t)VM_KERNEL_ADDRPERM(m),
            mbuf_type(m), mbuf_flags(m), (unsigned int)mbuf_len(m),
            (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)),
            (unsigned int)mbuf_maxlen(m),
            (uint64_t)VM_KERNEL_ADDRPERM(mbuf_datastart(m)),
            (uint64_t)VM_KERNEL_ADDRPERM(mbuf_next(m)),
            !suffix || (mbuf_flags(m) & MBUF_PKTHDR) ? "" : suffix);
        if ((mbuf_flags(m) & MBUF_PKTHDR)) {
            printf_mbuf_pkthdr(m, " ", suffix);
        }
    } else {
        printf("%s<NULL>%s\n", prefix, suffix);
    }
}
static void
printf_mbuf_data(mbuf_t m, size_t offset, size_t len)
{
    mbuf_t n;
    size_t i, j;
    size_t pktlen, mlen, maxlen;
    unsigned char *ptr;

    pktlen = mbuf_pkthdr_len(m);

    if (offset > pktlen) {
        return;
    }

    maxlen = (pktlen - offset > len) ? len : pktlen - offset;
    n = m;
    mlen = mbuf_len(n);
    ptr = mbuf_data(n);
    for (i = 0, j = 0; i < maxlen; i++, j++) {
        if (j >= mlen) {
            n = mbuf_next(n);
            if (n == 0) {
                break;
            }
            ptr = mbuf_data(n);
            mlen = mbuf_len(n);
            j = 0;
        }
        printf("%02x%s", ptr[j], i % 2 ? " " : "");
    }
}
static void
printf_ether_header(struct ether_header *eh)
{
    printf("%02x:%02x:%02x:%02x:%02x:%02x > "
        "%02x:%02x:%02x:%02x:%02x:%02x 0x%04x ",
        eh->ether_shost[0], eh->ether_shost[1], eh->ether_shost[2],
        eh->ether_shost[3], eh->ether_shost[4], eh->ether_shost[5],
        eh->ether_dhost[0], eh->ether_dhost[1], eh->ether_dhost[2],
        eh->ether_dhost[3], eh->ether_dhost[4], eh->ether_dhost[5],
        ntohs(eh->ether_type));
}
static void
link_print(struct bridge_softc * sc)
{
    int i;
    uint32_t sdl_buffer[offsetof(struct sockaddr_dl, sdl_data) +
        IFNAMSIZ + ETHER_ADDR_LEN];
    struct sockaddr_dl *sdl = (struct sockaddr_dl *)sdl_buffer;

    memset(sdl, 0, sizeof(sdl_buffer));
    sdl->sdl_family = AF_LINK;
    sdl->sdl_nlen = strlen(sc->sc_if_xname);
    sdl->sdl_alen = ETHER_ADDR_LEN;
    sdl->sdl_len = offsetof(struct sockaddr_dl, sdl_data);
    memcpy(sdl->sdl_data, sc->sc_if_xname, sdl->sdl_nlen);
    memcpy(LLADDR(sdl), sc->sc_defaddr, ETHER_ADDR_LEN);

    printf("sdl len %d index %d family %d type 0x%x nlen %d alen %d"
        " slen %d addr ", sdl->sdl_len, sdl->sdl_index,
        sdl->sdl_family, sdl->sdl_type, sdl->sdl_nlen,
        sdl->sdl_alen, sdl->sdl_slen);

    for (i = 0; i < sdl->sdl_alen; i++) {
        printf("%s%x", i ? ":" : "", (CONST_LLADDR(sdl))[i]);
    }
    printf("\n");
}

#endif /* BRIDGE_DEBUG */
/*
 * Pseudo-device attach routine.
 */
__private_extern__ int
bridgeattach(int n)
{
#pragma unused(n)
    int error;
    lck_grp_attr_t *lck_grp_attr = NULL;

    bridge_rtnode_pool = zinit(sizeof(struct bridge_rtnode),
        1024 * sizeof(struct bridge_rtnode), 0, "bridge_rtnode");
    zone_change(bridge_rtnode_pool, Z_CALLERACCT, FALSE);

    lck_grp_attr = lck_grp_attr_alloc_init();

    bridge_lock_grp = lck_grp_alloc_init("if_bridge", lck_grp_attr);

    bridge_lock_attr = lck_attr_alloc_init();

#if BRIDGE_DEBUG
    lck_attr_setdebug(bridge_lock_attr);
#endif

    lck_mtx_init(&bridge_list_mtx, bridge_lock_grp, bridge_lock_attr);

    /* can free the attributes once we've allocated the group lock */
    lck_grp_attr_free(lck_grp_attr);

    LIST_INIT(&bridge_list);

#if BRIDGESTP
    bstp_sys_init();
#endif /* BRIDGESTP */

    error = if_clone_attach(&bridge_cloner);
    if (error != 0) {
        printf("%s: ifnet_clone_attach failed %d\n", __func__, error);
    }

    return error;
}
#if defined(PFIL_HOOKS)
/*
 * handler for net.link.bridge.pfil_ipfw
 */
static int
sysctl_pfil_ipfw SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    int error, enable = pfil_ipfw;

    error = sysctl_handle_int(oidp, &enable, 0, req);
    enable = (enable) ? 1 : 0;

    if (enable != pfil_ipfw) {
        pfil_ipfw = enable;

        /*
         * Disable pfil so that ipfw doesnt run twice, if the user
         * really wants both then they can re-enable pfil_bridge and/or
         * pfil_member. Also allow non-ip packets as ipfw can filter by
         * layer2 type.
         */
    }

    return error;
}

SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT | CTLFLAG_RW,
    &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW");
#endif /* PFIL_HOOKS */
static int
bridge_ifnet_set_attrs(struct ifnet * ifp)
{
    errno_t error;

    error = ifnet_set_mtu(ifp, ETHERMTU);
    if (error != 0) {
        printf("%s: ifnet_set_mtu failed %d\n", __func__, error);
        goto done;
    }
    error = ifnet_set_addrlen(ifp, ETHER_ADDR_LEN);
    if (error != 0) {
        printf("%s: ifnet_set_addrlen failed %d\n", __func__, error);
        goto done;
    }
    error = ifnet_set_hdrlen(ifp, ETHER_HDR_LEN);
    if (error != 0) {
        printf("%s: ifnet_set_hdrlen failed %d\n", __func__, error);
        goto done;
    }
    error = ifnet_set_flags(ifp,
        IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST,
        0xffff);
    if (error != 0) {
        printf("%s: ifnet_set_flags failed %d\n", __func__, error);
        goto done;
    }
done:
    return error;
}
/*
 * bridge_clone_create:
 *
 *	Create a new bridge instance.
 */
static int
bridge_clone_create(struct if_clone *ifc, uint32_t unit, void *params)
{
#pragma unused(params)
    struct ifnet *ifp = NULL;
    struct bridge_softc *sc = NULL;
    struct bridge_softc *sc2 = NULL;
    struct ifnet_init_eparams init_params;
    errno_t error = 0;
    uint8_t eth_hostid[ETHER_ADDR_LEN];
    int fb, retry, has_hostid;

    sc = if_clone_softc_allocate(&bridge_cloner);
    if (sc == NULL) {
        error = ENOMEM;
        goto done;
    }

    lck_mtx_init(&sc->sc_mtx, bridge_lock_grp, bridge_lock_attr);
    sc->sc_brtmax = BRIDGE_RTABLE_MAX;
    sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
    sc->sc_filter_flags = IFBF_FILT_DEFAULT;

    /*
     * For backwards compatibility with previous behaviour...
     * Switch off filtering on the bridge itself if BRIDGE_IPF is
     * not defined.
     */
    sc->sc_filter_flags &= ~IFBF_FILT_USEIPF;

    TAILQ_INIT(&sc->sc_iflist);

    /* use the interface name as the unique id for ifp recycle */
    snprintf(sc->sc_if_xname, sizeof(sc->sc_if_xname), "%s%d",
        ifc->ifc_name, unit);
    bzero(&init_params, sizeof(init_params));
    init_params.ver = IFNET_INIT_CURRENT_VERSION;
    init_params.len = sizeof(init_params);
    /* Initialize our routing table. */
    error = bridge_rtable_init(sc);
    if (error != 0) {
        printf("%s: bridge_rtable_init failed %d\n",
            __func__, error);
        goto done;
    }
    TAILQ_INIT(&sc->sc_spanlist);
    if (if_bridge_txstart) {
        init_params.start = bridge_start;
    } else {
        init_params.flags = IFNET_INIT_LEGACY;
        init_params.output = bridge_output;
    }
    init_params.set_bpf_tap = bridge_set_bpf_tap;
    init_params.uniqueid = sc->sc_if_xname;
    init_params.uniqueid_len = strlen(sc->sc_if_xname);
    init_params.sndq_maxlen = IFQ_MAXLEN;
    init_params.name = ifc->ifc_name;
    init_params.unit = unit;
    init_params.family = IFNET_FAMILY_ETHERNET;
    init_params.type = IFT_BRIDGE;
    init_params.demux = ether_demux;
    init_params.add_proto = ether_add_proto;
    init_params.del_proto = ether_del_proto;
    init_params.check_multi = ether_check_multi;
    init_params.framer_extended = ether_frameout_extended;
    init_params.softc = sc;
    init_params.ioctl = bridge_ioctl;
    init_params.detach = bridge_detach;
    init_params.broadcast_addr = etherbroadcastaddr;
    init_params.broadcast_len = ETHER_ADDR_LEN;

    error = ifnet_allocate_extended(&init_params, &ifp);
    if (error != 0) {
        printf("%s: ifnet_allocate failed %d\n",
            __func__, error);
        goto done;
    }
    sc->sc_ifp = ifp;
    error = bridge_ifnet_set_attrs(ifp);
    if (error != 0) {
        printf("%s: bridge_ifnet_set_attrs failed %d\n",
            __func__, error);
        goto done;
    }

    /*
     * Generate an ethernet address with a locally administered address.
     *
     * Since we are using random ethernet addresses for the bridge, it is
     * possible that we might have address collisions, so make sure that
     * this hardware address isn't already in use on another bridge.
     * The first try uses the "hostid" and falls back to read_frandom();
     * for "hostid", we use the MAC address of the first-encountered
     * Ethernet-type interface that is currently configured.
     */
    fb = 0;
    has_hostid = (uuid_get_ethernet(&eth_hostid[0]) == 0);
    for (retry = 1; retry != 0;) {
        if (fb || has_hostid == 0) {
            read_frandom(&sc->sc_defaddr, ETHER_ADDR_LEN);
            sc->sc_defaddr[0] &= ~1;        /* clear multicast bit */
            sc->sc_defaddr[0] |= 2;         /* set the LAA bit */
        } else {
            bcopy(&eth_hostid[0], &sc->sc_defaddr,
                ETHER_ADDR_LEN);
            sc->sc_defaddr[0] &= ~1;        /* clear multicast bit */
            sc->sc_defaddr[0] |= 2;         /* set the LAA bit */
            sc->sc_defaddr[3] =             /* stir it up a bit */
                ((sc->sc_defaddr[3] & 0x0f) << 4) |
                ((sc->sc_defaddr[3] & 0xf0) >> 4);
            /*
             * Mix in the LSB as it's actually pretty significant,
             * see rdar://14076061
             */
            sc->sc_defaddr[4] =
                (((sc->sc_defaddr[4] & 0x0f) << 4) |
                ((sc->sc_defaddr[4] & 0xf0) >> 4)) ^
                sc->sc_defaddr[5];
            sc->sc_defaddr[5] = ifp->if_unit & 0xff;
        }

        fb = 1;
        retry = 0;
        lck_mtx_lock(&bridge_list_mtx);
        LIST_FOREACH(sc2, &bridge_list, sc_list) {
            if (memcmp(sc->sc_defaddr,
                IF_LLADDR(sc2->sc_ifp), ETHER_ADDR_LEN) == 0) {
                retry = 1;
            }
        }
        lck_mtx_unlock(&bridge_list_mtx);
    }

    sc->sc_flags &= ~SCF_MEDIA_ACTIVE;

#if BRIDGE_DEBUG
    if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
        link_print(sc);
    }
#endif
    error = ifnet_attach(ifp, NULL);
    if (error != 0) {
        printf("%s: ifnet_attach failed %d\n", __func__, error);
        goto done;
    }

    error = ifnet_set_lladdr_and_type(ifp, sc->sc_defaddr, ETHER_ADDR_LEN,
        IFT_ETHER);
    if (error != 0) {
        printf("%s: ifnet_set_lladdr_and_type failed %d\n", __func__,
            error);
        goto done;
    }

    ifnet_set_offload(ifp,
        IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP |
        IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 | IFNET_MULTIPAGES);
    error = bridge_set_tso(sc);
    if (error != 0) {
        printf("%s: bridge_set_tso failed %d\n",
            __func__, error);
        goto done;
    }
#if BRIDGESTP
    bstp_attach(&sc->sc_stp, &bridge_ops);
#endif /* BRIDGESTP */

    lck_mtx_lock(&bridge_list_mtx);
    LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
    lck_mtx_unlock(&bridge_list_mtx);

    /* attach as ethernet */
    error = bpf_attach(ifp, DLT_EN10MB, sizeof(struct ether_header),
        NULL, NULL);

done:
    if (error != 0) {
        printf("%s failed error %d\n", __func__, error);
        /* TBD: Clean up: sc, sc_rthash etc */
    }

    return error;
}
/*
 * bridge_clone_destroy:
 *
 *	Destroy a bridge instance.
 */
static int
bridge_clone_destroy(struct ifnet *ifp)
{
    struct bridge_softc *sc = ifp->if_softc;
    struct bridge_iflist *bif;
    errno_t error;

    BRIDGE_LOCK(sc);
    if ((sc->sc_flags & SCF_DETACHING)) {
        BRIDGE_UNLOCK(sc);
        return 0;
    }
    sc->sc_flags |= SCF_DETACHING;

    bridge_ifstop(ifp, 1);

    bridge_cancel_delayed_call(&sc->sc_resize_call);

    bridge_cleanup_delayed_call(&sc->sc_resize_call);
    bridge_cleanup_delayed_call(&sc->sc_aging_timer);

    error = ifnet_set_flags(ifp, 0, IFF_UP);
    if (error != 0) {
        printf("%s: ifnet_set_flags failed %d\n", __func__, error);
    }

    while ((bif = TAILQ_FIRST(&sc->sc_iflist)) != NULL) {
        bridge_delete_member(sc, bif, 0);
    }

    while ((bif = TAILQ_FIRST(&sc->sc_spanlist)) != NULL) {
        bridge_delete_span(sc, bif);
    }
    BRIDGE_UNLOCK(sc);

    error = ifnet_detach(ifp);
    if (error != 0) {
        panic("%s: ifnet_detach(%p) failed %d\n",
            __func__, ifp, error);
    }

    return 0;
}
#define DRVSPEC do {                                                    \
    if (ifd->ifd_cmd >= bridge_control_table_size) {                    \
            error = EINVAL;                                             \
            break;                                                      \
    }                                                                   \
    bc = &bridge_control_table[ifd->ifd_cmd];                           \
                                                                        \
    if (cmd == SIOCGDRVSPEC &&                                          \
        (bc->bc_flags & BC_F_COPYOUT) == 0) {                           \
            error = EINVAL;                                             \
            break;                                                      \
    } else if (cmd == SIOCSDRVSPEC &&                                   \
        (bc->bc_flags & BC_F_COPYOUT) != 0) {                           \
            error = EINVAL;                                             \
            break;                                                      \
    }                                                                   \
                                                                        \
    if (bc->bc_flags & BC_F_SUSER) {                                    \
            error = kauth_authorize_generic(kauth_cred_get(),           \
                KAUTH_GENERIC_ISSUSER);                                 \
            if (error)                                                  \
                    break;                                              \
    }                                                                   \
                                                                        \
    if (ifd->ifd_len != bc->bc_argsize ||                               \
        ifd->ifd_len > sizeof (args)) {                                 \
            error = EINVAL;                                             \
            break;                                                      \
    }                                                                   \
                                                                        \
    bzero(&args, sizeof (args));                                        \
    if (bc->bc_flags & BC_F_COPYIN) {                                   \
            error = copyin(ifd->ifd_data, &args, ifd->ifd_len);         \
            if (error)                                                  \
                    break;                                              \
    }                                                                   \
                                                                        \
    BRIDGE_LOCK(sc);                                                    \
    error = (*bc->bc_func)(sc, &args);                                  \
    BRIDGE_UNLOCK(sc);                                                  \
    if (error)                                                          \
            break;                                                      \
                                                                        \
    if (bc->bc_flags & BC_F_COPYOUT)                                    \
            error = copyout(&args, ifd->ifd_data, ifd->ifd_len);        \
} while (0)
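/*
 * Usage note (informational): each SIOC[GS]DRVSPEC32/64 case in
 * bridge_ioctl() below declares a local `args` buffer plus a table pointer
 * and then expands DRVSPEC, which validates ifd_cmd against the table size,
 * checks the copy direction and privileges, copies the argument in, calls
 * the handler under the bridge lock, and copies the result back out.
 */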
/*
 * Handle a control request from the operator.
 */
static errno_t
bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
    struct bridge_softc *sc = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *)data;
    struct bridge_iflist *bif;
    int error = 0;

    BRIDGE_LOCK_ASSERT_NOTHELD(sc);

#if BRIDGE_DEBUG
    if (if_bridge_debug & BR_DBGF_IOCTL) {
        printf("%s: ifp %s cmd 0x%08lx (%c%c [%lu] %c %lu)\n",
            __func__, ifp->if_xname, cmd, (cmd & IOC_IN) ? 'I' : ' ',
            (cmd & IOC_OUT) ? 'O' : ' ', IOCPARM_LEN(cmd),
            (char)IOCGROUP(cmd), cmd & 0xff);
    }
#endif /* BRIDGE_DEBUG */

    switch (cmd) {
    case SIOCSIFADDR:
    case SIOCAIFADDR:
        ifnet_set_flags(ifp, IFF_UP, IFF_UP);
        break;

    case SIOCGIFMEDIA32:
    case SIOCGIFMEDIA64: {
        struct ifmediareq *ifmr = (struct ifmediareq *)data;
        user_addr_t user_addr;

        user_addr = (cmd == SIOCGIFMEDIA64) ?
            ((struct ifmediareq64 *)ifmr)->ifmu_ulist :
            CAST_USER_ADDR_T(((struct ifmediareq32 *)ifmr)->ifmu_ulist);

        ifmr->ifm_status = IFM_AVALID;
        ifmr->ifm_count = 1;

        BRIDGE_LOCK(sc);
        if (!(sc->sc_flags & SCF_DETACHING) &&
            (sc->sc_flags & SCF_MEDIA_ACTIVE)) {
            ifmr->ifm_status |= IFM_ACTIVE;
            ifmr->ifm_active = ifmr->ifm_current =
                IFM_ETHER | IFM_AUTO;
        } else {
            ifmr->ifm_active = ifmr->ifm_current = IFM_NONE;
        }
        BRIDGE_UNLOCK(sc);

        if (user_addr != USER_ADDR_NULL) {
            error = copyout(&ifmr->ifm_current, user_addr,
                sizeof(int));
        }
        break;
    }

    case SIOCSDRVSPEC32:
    case SIOCGDRVSPEC32: {
        union {
            struct ifbreq ifbreq;
            struct ifbifconf32 ifbifconf;
            struct ifbareq32 ifbareq;
            struct ifbaconf32 ifbaconf;
            struct ifbrparam ifbrparam;
            struct ifbropreq32 ifbropreq;
        } args;
        struct ifdrv32 *ifd = (struct ifdrv32 *)data;
        const struct bridge_control *bridge_control_table =
            bridge_control_table32, *bc;

        DRVSPEC;

        break;
    }
    case SIOCSDRVSPEC64:
    case SIOCGDRVSPEC64: {
        union {
            struct ifbreq ifbreq;
            struct ifbifconf64 ifbifconf;
            struct ifbareq64 ifbareq;
            struct ifbaconf64 ifbaconf;
            struct ifbrparam ifbrparam;
            struct ifbropreq64 ifbropreq;
        } args;
        struct ifdrv64 *ifd = (struct ifdrv64 *)data;
        const struct bridge_control *bridge_control_table =
            bridge_control_table64, *bc;

        DRVSPEC;

        break;
    }

    case SIOCSIFFLAGS:
        if (!(ifp->if_flags & IFF_UP) &&
            (ifp->if_flags & IFF_RUNNING)) {
            /*
             * If interface is marked down and it is running,
             * then stop and disable it.
             */
            BRIDGE_LOCK(sc);
            bridge_ifstop(ifp, 1);
            BRIDGE_UNLOCK(sc);
        } else if ((ifp->if_flags & IFF_UP) &&
            !(ifp->if_flags & IFF_RUNNING)) {
            /*
             * If interface is marked up and it is stopped, then
             * start it.
             */
            BRIDGE_LOCK(sc);
            error = bridge_init(ifp);
            BRIDGE_UNLOCK(sc);
        }
        break;

    case SIOCSIFLLADDR:
        error = ifnet_set_lladdr(ifp, ifr->ifr_addr.sa_data,
            ifr->ifr_addr.sa_len);
        if (error != 0) {
            printf("%s: SIOCSIFLLADDR error %d\n", ifp->if_xname,
                error);
        }
        break;

    case SIOCSIFMTU:
        if (ifr->ifr_mtu < 576) {
            error = EINVAL;
            break;
        }
        BRIDGE_LOCK(sc);
        if (TAILQ_EMPTY(&sc->sc_iflist)) {
            sc->sc_ifp->if_mtu = ifr->ifr_mtu;
            BRIDGE_UNLOCK(sc);
            break;
        }
        TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
            if (bif->bif_ifp->if_mtu != (unsigned)ifr->ifr_mtu) {
                printf("%s: invalid MTU: %u(%s) != %d\n",
                    sc->sc_ifp->if_xname,
                    bif->bif_ifp->if_mtu,
                    bif->bif_ifp->if_xname, ifr->ifr_mtu);
                error = EINVAL;
                break;
            }
        }
        if (error == 0) {
            sc->sc_ifp->if_mtu = ifr->ifr_mtu;
        }
        BRIDGE_UNLOCK(sc);
        break;

    default:
        error = ether_ioctl(ifp, cmd, data);
#if BRIDGE_DEBUG
        if (error != 0 && error != EOPNOTSUPP) {
            printf("%s: ifp %s cmd 0x%08lx "
                "(%c%c [%lu] %c %lu) failed error: %d\n",
                __func__, ifp->if_xname, cmd,
                (cmd & IOC_IN) ? 'I' : ' ',
                (cmd & IOC_OUT) ? 'O' : ' ',
                IOCPARM_LEN(cmd), (char)IOCGROUP(cmd),
                cmd & 0xff, error);
        }
#endif /* BRIDGE_DEBUG */
        break;
    }
    BRIDGE_LOCK_ASSERT_NOTHELD(sc);

    return error;
}
#if HAS_IF_CAP
/*
 * Clear or restore unwanted capabilities on the member interface
 */
static void
bridge_mutecaps(struct bridge_softc *sc)
{
    struct bridge_iflist *bif;
    int enabled, mask;

    /* Initial bitmask of capabilities to test */
    mask = BRIDGE_IFCAPS_MASK;

    TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
        /* Every member must support it or its disabled */
        mask &= bif->bif_savedcaps;
    }

    TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
        enabled = bif->bif_ifp->if_capenable;
        enabled &= ~BRIDGE_IFCAPS_STRIP;
        /* strip off mask bits and enable them again if allowed */
        enabled &= ~BRIDGE_IFCAPS_MASK;
        enabled |= mask;

        bridge_set_ifcap(sc, bif, enabled);
    }
}

static void
bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
{
    struct ifnet *ifp = bif->bif_ifp;
    struct ifreq ifr;
    int error;

    bzero(&ifr, sizeof(ifr));
    ifr.ifr_reqcap = set;

    if (ifp->if_capenable != set) {
        IFF_LOCKGIANT(ifp);
        error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
        IFF_UNLOCKGIANT(ifp);
        if (error != 0) {
            printf("%s: %s error setting interface capabilities "
                "on %s\n", __func__, sc->sc_ifp->if_xname,
                ifp->if_xname);
        }
    }
}
#endif /* HAS_IF_CAP */
static errno_t
bridge_set_tso(struct bridge_softc *sc)
{
    struct bridge_iflist *bif;
    u_int32_t tso_v4_mtu;
    u_int32_t tso_v6_mtu;
    ifnet_offload_t offload;
    errno_t error = 0;

    /* By default, support TSO */
    offload = sc->sc_ifp->if_hwassist | IFNET_TSO_IPV4 | IFNET_TSO_IPV6;
    tso_v4_mtu = IP_MAXPACKET;
    tso_v6_mtu = IP_MAXPACKET;

    /* Use the lowest common denominator of the members */
    TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
        ifnet_t ifp = bif->bif_ifp;

        if (ifp == NULL) {
            continue;
        }

        if (offload & IFNET_TSO_IPV4) {
            if (ifp->if_hwassist & IFNET_TSO_IPV4) {
                if (tso_v4_mtu > ifp->if_tso_v4_mtu) {
                    tso_v4_mtu = ifp->if_tso_v4_mtu;
                }
            } else {
                offload &= ~IFNET_TSO_IPV4;
            }
        }
        if (offload & IFNET_TSO_IPV6) {
            if (ifp->if_hwassist & IFNET_TSO_IPV6) {
                if (tso_v6_mtu > ifp->if_tso_v6_mtu) {
                    tso_v6_mtu = ifp->if_tso_v6_mtu;
                }
            } else {
                offload &= ~IFNET_TSO_IPV6;
            }
        }
    }

    if (offload != sc->sc_ifp->if_hwassist) {
        error = ifnet_set_offload(sc->sc_ifp, offload);
        if (error != 0) {
#if BRIDGE_DEBUG
            if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
                printf("%s: ifnet_set_offload(%s, 0x%x) "
                    "failed %d\n", __func__,
                    sc->sc_ifp->if_xname, offload, error);
            }
#endif /* BRIDGE_DEBUG */
        }
    }

    /*
     * For ifnet_set_tso_mtu() sake, the TSO MTU must be at least
     * as large as the interface MTU
     */
    if (sc->sc_ifp->if_hwassist & IFNET_TSO_IPV4) {
        if (tso_v4_mtu < sc->sc_ifp->if_mtu) {
            tso_v4_mtu = sc->sc_ifp->if_mtu;
        }
        error = ifnet_set_tso_mtu(sc->sc_ifp, AF_INET,
            tso_v4_mtu);
        if (error != 0) {
#if BRIDGE_DEBUG
            if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
                printf("%s: ifnet_set_tso_mtu(%s, "
                    "AF_INET, %u) failed %d\n",
                    __func__, sc->sc_ifp->if_xname,
                    tso_v4_mtu, error);
            }
#endif /* BRIDGE_DEBUG */
        }
    }
    if (sc->sc_ifp->if_hwassist & IFNET_TSO_IPV6) {
        if (tso_v6_mtu < sc->sc_ifp->if_mtu) {
            tso_v6_mtu = sc->sc_ifp->if_mtu;
        }
        error = ifnet_set_tso_mtu(sc->sc_ifp, AF_INET6,
            tso_v6_mtu);
        if (error != 0) {
#if BRIDGE_DEBUG
            if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
                printf("%s: ifnet_set_tso_mtu(%s, "
                    "AF_INET6, %u) failed %d\n",
                    __func__, sc->sc_ifp->if_xname,
                    tso_v6_mtu, error);
            }
#endif /* BRIDGE_DEBUG */
        }
    }

    return error;
}
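/*
 * Example (informational, not from the original source): if one member
 * advertises IFNET_TSO_IPV4 with a 64 KB TSO MTU and another member does
 * not support TSO at all, the loop above clears IFNET_TSO_IPV4 for the
 * bridge interface; TSO is only kept when every member supports it, using
 * the smallest member TSO MTU (clamped to at least the bridge MTU).
 */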
/*
 * bridge_lookup_member:
 *
 *	Lookup a bridge member interface.
 */
static struct bridge_iflist *
bridge_lookup_member(struct bridge_softc *sc, const char *name)
{
    struct bridge_iflist *bif;
    struct ifnet *ifp;

    BRIDGE_LOCK_ASSERT_HELD(sc);

    TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
        ifp = bif->bif_ifp;
        if (strcmp(ifp->if_xname, name) == 0) {
            return bif;
        }
    }

    return NULL;
}

/*
 * bridge_lookup_member_if:
 *
 *	Lookup a bridge member interface by ifnet*.
 */
static struct bridge_iflist *
bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
{
    struct bridge_iflist *bif;

    BRIDGE_LOCK_ASSERT_HELD(sc);

    TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
        if (bif->bif_ifp == member_ifp) {
            return bif;
        }
    }

    return NULL;
}
static errno_t
bridge_iff_input(void *cookie, ifnet_t ifp, protocol_family_t protocol,
    mbuf_t *data, char **frame_ptr)
{
#pragma unused(protocol)
    errno_t error = 0;
    struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
    struct bridge_softc *sc = bif->bif_sc;
    int included = 0;
    size_t frmlen = 0;
    mbuf_t m = *data;

    if ((m->m_flags & M_PROTO1)) {
        goto out;
    }

    if (*frame_ptr >= (char *)mbuf_datastart(m) &&
        *frame_ptr <= (char *)mbuf_data(m)) {
        included = 1;
        frmlen = (char *)mbuf_data(m) - *frame_ptr;
    }
#if BRIDGE_DEBUG
    if (if_bridge_debug & BR_DBGF_INPUT) {
        printf("%s: %s from %s m 0x%llx data 0x%llx frame 0x%llx %s "
            "frmlen %lu\n", __func__, sc->sc_ifp->if_xname,
            ifp->if_xname, (uint64_t)VM_KERNEL_ADDRPERM(m),
            (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)),
            (uint64_t)VM_KERNEL_ADDRPERM(*frame_ptr),
            included ? "inside" : "outside", frmlen);

        if (if_bridge_debug & BR_DBGF_MBUF) {
            printf_mbuf(m, "bridge_iff_input[", "\n");
            printf_ether_header((struct ether_header *)
                (void *)*frame_ptr);
            printf_mbuf_data(m, 0, 20);
            printf("\n");
        }
    }
#endif /* BRIDGE_DEBUG */

    /* Move data pointer to start of frame to the link layer header */
    if (included) {
        (void) mbuf_setdata(m, (char *)mbuf_data(m) - frmlen,
            mbuf_len(m) + frmlen);
        (void) mbuf_pkthdr_adjustlen(m, frmlen);
    } else {
        printf("%s: frame_ptr outside mbuf\n", __func__);
        goto out;
    }

    error = bridge_input(ifp, m, *frame_ptr);

    /* Adjust packet back to original */
    if (error == 0) {
        (void) mbuf_setdata(m, (char *)mbuf_data(m) + frmlen,
            mbuf_len(m) - frmlen);
        (void) mbuf_pkthdr_adjustlen(m, -frmlen);
    }
#if BRIDGE_DEBUG
    if ((if_bridge_debug & BR_DBGF_INPUT) &&
        (if_bridge_debug & BR_DBGF_MBUF)) {
        printf("\n");
        printf_mbuf(m, "bridge_iff_input]", "\n");
    }
#endif /* BRIDGE_DEBUG */

out:
    BRIDGE_LOCK_ASSERT_NOTHELD(sc);

    return error;
}
1951 bridge_iff_output(void *cookie
, ifnet_t ifp
, protocol_family_t protocol
,
1954 #pragma unused(protocol)
1956 struct bridge_iflist
*bif
= (struct bridge_iflist
*)cookie
;
1957 struct bridge_softc
*sc
= bif
->bif_sc
;
1960 if ((m
->m_flags
& M_PROTO1
)) {
1965 if (if_bridge_debug
& BR_DBGF_OUTPUT
) {
1966 printf("%s: %s from %s m 0x%llx data 0x%llx\n", __func__
,
1967 sc
->sc_ifp
->if_xname
, ifp
->if_xname
,
1968 (uint64_t)VM_KERNEL_ADDRPERM(m
),
1969 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m
)));
1971 #endif /* BRIDGE_DEBUG */
1973 error
= bridge_member_output(sc
, ifp
, m
);
1974 if (error
!= 0 && error
!= EJUSTRETURN
) {
1975 printf("%s: bridge_member_output failed error %d\n", __func__
,
1979 BRIDGE_LOCK_ASSERT_NOTHELD(sc
);
static void
bridge_iff_event(void *cookie, ifnet_t ifp, protocol_family_t protocol,
    const struct kev_msg *event_msg)
{
#pragma unused(protocol)
    struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
    struct bridge_softc *sc = bif->bif_sc;

    if (event_msg->vendor_code == KEV_VENDOR_APPLE &&
        event_msg->kev_class == KEV_NETWORK_CLASS &&
        event_msg->kev_subclass == KEV_DL_SUBCLASS) {
#if BRIDGE_DEBUG
        if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
            printf("%s: %s event_code %u - %s\n", __func__,
                ifp->if_xname, event_msg->event_code,
                dlil_kev_dl_code_str(event_msg->event_code));
        }
#endif /* BRIDGE_DEBUG */

        switch (event_msg->event_code) {
        case KEV_DL_IF_DETACHING:
        case KEV_DL_IF_DETACHED: {
            bridge_ifdetach(ifp);
            break;
        }
        case KEV_DL_LINK_OFF:
        case KEV_DL_LINK_ON: {
            bridge_iflinkevent(ifp);
#if BRIDGESTP
            bstp_linkstate(ifp, event_msg->event_code);
#endif /* BRIDGESTP */
            break;
        }
        case KEV_DL_SIFFLAGS: {
            if ((bif->bif_flags & BIFF_PROMISC) == 0 &&
                (ifp->if_flags & IFF_UP)) {
                errno_t error;

                error = ifnet_set_promiscuous(ifp, 1);
                if (error != 0) {
                    printf("%s: "
                        "ifnet_set_promiscuous (%s)"
                        " failed %d\n",
                        __func__, ifp->if_xname,
                        error);
                } else {
                    bif->bif_flags |= BIFF_PROMISC;
                }
            }
            break;
        }
        case KEV_DL_IFCAP_CHANGED: {
            break;
        }
        case KEV_DL_PROTO_DETACHED:
        case KEV_DL_PROTO_ATTACHED: {
            bridge_proto_attach_changed(ifp);
            break;
        }
        default:
            break;
        }
    }
}

/*
 * bridge_iff_detached:
 *
 *	Detach an interface from a bridge. Called when a member
 *	interface is detaching.
 */
static void
bridge_iff_detached(void *cookie, ifnet_t ifp)
{
    struct bridge_iflist *bif = (struct bridge_iflist *)cookie;

#if BRIDGE_DEBUG
    if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
        printf("%s: %s\n", __func__, ifp->if_xname);
    }
#endif /* BRIDGE_DEBUG */

    bridge_ifdetach(ifp);

    _FREE(bif, M_DEVBUF);
}
static errno_t
bridge_proto_input(ifnet_t ifp, protocol_family_t protocol, mbuf_t packet,
    char *header)
{
#pragma unused(protocol, packet, header)
#if BRIDGE_DEBUG
    printf("%s: unexpected packet from %s\n", __func__,
        ifp->if_xname);
#endif /* BRIDGE_DEBUG */
    return 0;
}

static int
bridge_attach_protocol(struct ifnet *ifp)
{
    int error;
    struct ifnet_attach_proto_param reg;

#if BRIDGE_DEBUG
    if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
        printf("%s: %s\n", __func__, ifp->if_xname);
    }
#endif /* BRIDGE_DEBUG */

    bzero(&reg, sizeof(reg));
    reg.input = bridge_proto_input;

    error = ifnet_attach_protocol(ifp, PF_BRIDGE, &reg);
    if (error != 0) {
        printf("%s: ifnet_attach_protocol(%s) failed, %d\n",
            __func__, ifp->if_xname, error);
    }
    return error;
}

static int
bridge_detach_protocol(struct ifnet *ifp)
{
    int error;

#if BRIDGE_DEBUG
    if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
        printf("%s: %s\n", __func__, ifp->if_xname);
    }
#endif /* BRIDGE_DEBUG */
    error = ifnet_detach_protocol(ifp, PF_BRIDGE);
    if (error != 0) {
        printf("%s: ifnet_detach_protocol(%s) failed, %d\n",
            __func__, ifp->if_xname, error);
    }
    return error;
}
/*
 * bridge_delete_member:
 *
 *	Delete the specified member interface.
 */
static void
bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
    int gone)
{
    struct ifnet *ifs = bif->bif_ifp, *bifp = sc->sc_ifp;
    int lladdr_changed = 0, error, filt_attached;
    uint8_t eaddr[ETHER_ADDR_LEN];
    u_int32_t event_code = 0;

    BRIDGE_LOCK_ASSERT_HELD(sc);
    VERIFY(ifs != NULL);

    /*
     * First, remove the member from the list first so it cannot be found anymore
     * when we release the bridge lock below
     */
    TAILQ_REMOVE(&sc->sc_iflist, bif, bif_next);

    if (!gone) {
        switch (ifs->if_type) {
        case IFT_ETHER:
        case IFT_L2VLAN:
            /*
             * Take the interface out of promiscuous mode.
             */
            if (bif->bif_flags & BIFF_PROMISC) {
                /*
                 * Unlock to prevent deadlock with bridge_iff_event() in
                 * case the driver generates an interface event
                 */
                BRIDGE_UNLOCK(sc);
                (void) ifnet_set_promiscuous(ifs, 0);
                BRIDGE_LOCK(sc);
            }
            break;

        case IFT_GIF:
            /* currently not supported */
        /* FALLTHRU */
        default:
            break;
        }

        /* re-enable any interface capabilities */
        bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
    }

    if (bif->bif_flags & BIFF_PROTO_ATTACHED) {
        /* Respect lock ordering with DLIL lock */
        BRIDGE_UNLOCK(sc);
        (void) bridge_detach_protocol(ifs);
        BRIDGE_LOCK(sc);
    }
#if BRIDGESTP
    if ((bif->bif_ifflags & IFBIF_STP) != 0) {
        bstp_disable(&bif->bif_stp);
    }
#endif /* BRIDGESTP */

    /*
     * If removing the interface that gave the bridge its mac address, set
     * the mac address of the bridge to the address of the next member, or
     * to its default address if no members are left.
     */
    if (bridge_inherit_mac && sc->sc_ifaddr == ifs) {
        ifnet_release(sc->sc_ifaddr);
        if (TAILQ_EMPTY(&sc->sc_iflist)) {
            bcopy(sc->sc_defaddr, eaddr, ETHER_ADDR_LEN);
            sc->sc_ifaddr = NULL;
        } else {
            struct ifnet *fif =
                TAILQ_FIRST(&sc->sc_iflist)->bif_ifp;
            bcopy(IF_LLADDR(fif), eaddr, ETHER_ADDR_LEN);
            sc->sc_ifaddr = fif;
            ifnet_reference(fif);   /* for sc_ifaddr */
        }
        lladdr_changed = 1;
    }

#if HAS_IF_CAP
    bridge_mutecaps(sc);    /* recalculate now this interface is removed */
#endif /* HAS_IF_CAP */

    error = bridge_set_tso(sc);
    if (error != 0) {
        printf("%s: bridge_set_tso failed %d\n", __func__, error);
    }

    bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);

    KASSERT(bif->bif_addrcnt == 0,
        ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));

    filt_attached = bif->bif_flags & BIFF_FILTER_ATTACHED;

    /*
     * Update link status of the bridge based on its remaining members
     */
    event_code = bridge_updatelinkstatus(sc);

    BRIDGE_UNLOCK(sc);

    if (lladdr_changed &&
        (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0) {
        printf("%s: ifnet_set_lladdr failed %d\n", __func__, error);
    }

    if (event_code != 0) {
        bridge_link_event(bifp, event_code);
    }

#if BRIDGESTP
    bstp_destroy(&bif->bif_stp);    /* prepare to free */
#endif /* BRIDGESTP */

    if (filt_attached) {
        iflt_detach(bif->bif_iff_ref);
    } else {
        _FREE(bif, M_DEVBUF);
    }

    ifs->if_bridge = NULL;
    ifnet_release(ifs);

    BRIDGE_LOCK(sc);
}

/*
 * bridge_delete_span:
 *
 *	Delete the specified span interface.
 */
static void
bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
{
    BRIDGE_LOCK_ASSERT_HELD(sc);

    KASSERT(bif->bif_ifp->if_bridge == NULL,
        ("%s: not a span interface", __func__));

    ifnet_release(bif->bif_ifp);

    TAILQ_REMOVE(&sc->sc_spanlist, bif, bif_next);
    _FREE(bif, M_DEVBUF);
}
static int
bridge_ioctl_add(struct bridge_softc *sc, void *arg)
{
    struct ifbreq *req = arg;
    struct bridge_iflist *bif = NULL;
    struct ifnet *ifs, *bifp = sc->sc_ifp;
    int error = 0, lladdr_changed = 0;
    uint8_t eaddr[ETHER_ADDR_LEN];
    struct iff_filter iff;
    u_int32_t event_code = 0;

    ifs = ifunit(req->ifbr_ifsname);
    if (ifs == NULL) {
        return ENOENT;
    }
    if (ifs->if_ioctl == NULL) {        /* must be supported */
        return EINVAL;
    }

    if (IFNET_IS_INTCOPROC(ifs)) {
        return EINVAL;
    }

    /* If it's in the span list, it can't be a member. */
    TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
        if (ifs == bif->bif_ifp) {
            return EBUSY;
        }

    if (ifs->if_bridge == sc) {
        return EEXIST;
    }

    if (ifs->if_bridge != NULL) {
        return EBUSY;
    }

    switch (ifs->if_type) {
    case IFT_ETHER:
    case IFT_L2VLAN:
        /* permitted interface types */
        break;
    case IFT_GIF:
        /* currently not supported */
    /* FALLTHRU */
    default:
        return EINVAL;
    }

    bif = _MALLOC(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
    if (bif == NULL) {
        return ENOMEM;
    }

    bif->bif_ifp = ifs;
    ifnet_reference(ifs);
    bif->bif_ifflags = IFBIF_LEARNING | IFBIF_DISCOVER;
#if HAS_IF_CAP
    bif->bif_savedcaps = ifs->if_capenable;
#endif /* HAS_IF_CAP */
    bif->bif_sc = sc;

    /* Allow the first Ethernet member to define the MTU */
    if (TAILQ_EMPTY(&sc->sc_iflist)) {
        sc->sc_ifp->if_mtu = ifs->if_mtu;
    } else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
        printf("%s: %s: invalid MTU for %s", __func__,
            sc->sc_ifp->if_xname,
            ifs->if_xname);
        return EINVAL;
    }

    /*
     * Assign the interface's MAC address to the bridge if it's the first
     * member and the MAC address of the bridge has not been changed from
     * the default (randomly) generated one.
     */
    if (bridge_inherit_mac && TAILQ_EMPTY(&sc->sc_iflist) &&
        !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN)) {
        bcopy(IF_LLADDR(ifs), eaddr, ETHER_ADDR_LEN);
        sc->sc_ifaddr = ifs;
        ifnet_reference(ifs);   /* for sc_ifaddr */
        lladdr_changed = 1;
    }

    ifs->if_bridge = sc;
#if BRIDGESTP
    bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
#endif /* BRIDGESTP */

    /*
     * XXX: XLOCK HERE!?!
     */
    TAILQ_INSERT_TAIL(&sc->sc_iflist, bif, bif_next);

#if HAS_IF_CAP
    /* Set interface capabilities to the intersection set of all members */
    bridge_mutecaps(sc);
#endif /* HAS_IF_CAP */

    /*
     * Place the interface into promiscuous mode.
     */
    switch (ifs->if_type) {
    case IFT_ETHER:
    case IFT_L2VLAN:
        error = ifnet_set_promiscuous(ifs, 1);
        if (error) {
            /* Ignore error when device is not up */
            if (error != ENETDOWN) {
                goto out;
            }
            error = 0;
        } else {
            bif->bif_flags |= BIFF_PROMISC;
        }
        break;

    default:
        break;
    }

    /*
     * The new member may change the link status of the bridge interface
     */
    if (interface_media_active(ifs)) {
        bif->bif_flags |= BIFF_MEDIA_ACTIVE;
    } else {
        bif->bif_flags &= ~BIFF_MEDIA_ACTIVE;
    }

    event_code = bridge_updatelinkstatus(sc);

    /*
     * Respect lock ordering with DLIL lock for the following operations
     */
    BRIDGE_UNLOCK(sc);

    /*
     * install an interface filter
     */
    memset(&iff, 0, sizeof(struct iff_filter));
    iff.iff_cookie = bif;
    iff.iff_name = "com.apple.kernel.bsd.net.if_bridge";
    iff.iff_input = bridge_iff_input;
    iff.iff_output = bridge_iff_output;
    iff.iff_event = bridge_iff_event;
    iff.iff_detached = bridge_iff_detached;
    error = dlil_attach_filter(ifs, &iff, &bif->bif_iff_ref,
        DLIL_IFF_TSO | DLIL_IFF_INTERNAL);
    if (error != 0) {
        printf("%s: iflt_attach failed %d\n", __func__, error);
        BRIDGE_LOCK(sc);
        goto out;
    }
    bif->bif_flags |= BIFF_FILTER_ATTACHED;

    /*
     * install a dummy "bridge" protocol
     */
    if ((error = bridge_attach_protocol(ifs)) != 0) {
        printf("%s: bridge_attach_protocol failed %d\n",
            __func__, error);
        BRIDGE_LOCK(sc);
        goto out;
    }
    bif->bif_flags |= BIFF_PROTO_ATTACHED;

    if (lladdr_changed &&
        (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0) {
        printf("%s: ifnet_set_lladdr failed %d\n", __func__, error);
    }

    if (event_code != 0) {
        bridge_link_event(bifp, event_code);
    }

    BRIDGE_LOCK(sc);

out:
    if (error && bif != NULL) {
        bridge_delete_member(sc, bif, 1);
    }

    return error;
}

static int
bridge_ioctl_del(struct bridge_softc *sc, void *arg)
{
    struct ifbreq *req = arg;
    struct bridge_iflist *bif;

    bif = bridge_lookup_member(sc, req->ifbr_ifsname);
    if (bif == NULL) {
        return ENOENT;
    }

    bridge_delete_member(sc, bif, 0);

    return 0;
}

static int
bridge_ioctl_purge(struct bridge_softc *sc, void *arg)
{
#pragma unused(sc, arg)
    return 0;
}
static int
bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
{
    struct ifbreq *req = arg;
    struct bridge_iflist *bif;

    bif = bridge_lookup_member(sc, req->ifbr_ifsname);
    if (bif == NULL) {
        return ENOENT;
    }

    if (bif->bif_ifflags & IFBIF_STP) {
        struct bstp_port *bp;

        bp = &bif->bif_stp;
        req->ifbr_state = bp->bp_state;
        req->ifbr_priority = bp->bp_priority;
        req->ifbr_path_cost = bp->bp_path_cost;
        req->ifbr_proto = bp->bp_protover;
        req->ifbr_role = bp->bp_role;
        req->ifbr_stpflags = bp->bp_flags;
        /* Copy STP state options as flags */
        if (bp->bp_operedge) {
            req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
        }
        if (bp->bp_flags & BSTP_PORT_AUTOEDGE) {
            req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
        }
        if (bp->bp_ptp_link) {
            req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
        }
        if (bp->bp_flags & BSTP_PORT_AUTOPTP) {
            req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
        }
        if (bp->bp_flags & BSTP_PORT_ADMEDGE) {
            req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
        }
        if (bp->bp_flags & BSTP_PORT_ADMCOST) {
            req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
        }
    }
    req->ifbr_ifsflags = bif->bif_ifflags;
    req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
    req->ifbr_addrcnt = bif->bif_addrcnt;
    req->ifbr_addrmax = bif->bif_addrmax;
    req->ifbr_addrexceeded = bif->bif_addrexceeded;

    return 0;
}

static int
bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
{
    struct ifbreq *req = arg;
    struct bridge_iflist *bif;
#if BRIDGESTP
    struct bstp_port *bp;
    int error;
#endif /* BRIDGESTP */

    bif = bridge_lookup_member(sc, req->ifbr_ifsname);
    if (bif == NULL) {
        return ENOENT;
    }

    if (req->ifbr_ifsflags & IFBIF_SPAN) {
        /* SPAN is readonly */
        return EINVAL;
    }

#if BRIDGESTP
    if (req->ifbr_ifsflags & IFBIF_STP) {
        if ((bif->bif_ifflags & IFBIF_STP) == 0) {
            error = bstp_enable(&bif->bif_stp);
            if (error) {
                return error;
            }
        }
    } else {
        if ((bif->bif_ifflags & IFBIF_STP) != 0) {
            bstp_disable(&bif->bif_stp);
        }
    }

    /* Pass on STP flags */
    bp = &bif->bif_stp;
    bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
    bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
    bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
    bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
#else /* !BRIDGESTP */
    if (req->ifbr_ifsflags & IFBIF_STP) {
        return EOPNOTSUPP;
    }
#endif /* !BRIDGESTP */

    /* Save the bits relating to the bridge */
    bif->bif_ifflags = req->ifbr_ifsflags & IFBIFMASK;

    return 0;
}

static int
bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
{
    struct ifbrparam *param = arg;

    sc->sc_brtmax = param->ifbrp_csize;
    return 0;
}

static int
bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
{
    struct ifbrparam *param = arg;

    param->ifbrp_csize = sc->sc_brtmax;

    return 0;
}
#define BRIDGE_IOCTL_GIFS do { \
	struct bridge_iflist *bif; \
	struct ifbreq breq; \
	char *buf, *outbuf; \
	unsigned int count, buflen, len; \
	\
	count = 0; \
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) \
		count++; \
	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) \
		count++; \
	\
	buflen = sizeof (breq) * count; \
	if (bifc->ifbic_len == 0) { \
		bifc->ifbic_len = buflen; \
		return 0; \
	} \
	BRIDGE_UNLOCK(sc); \
	outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
	BRIDGE_LOCK(sc); \
	\
	count = 0; \
	buf = outbuf; \
	len = min(bifc->ifbic_len, buflen); \
	bzero(&breq, sizeof (breq)); \
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
		if (len < sizeof (breq)) \
			break; \
		snprintf(breq.ifbr_ifsname, sizeof (breq.ifbr_ifsname), \
		    "%s", bif->bif_ifp->if_xname); \
		/* Fill in the ifbreq structure */ \
		error = bridge_ioctl_gifflags(sc, &breq); \
		if (error) \
			break; \
		count++; \
		memcpy(buf, &breq, sizeof (breq)); \
		buf += sizeof (breq); \
		len -= sizeof (breq); \
	} \
	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) { \
		if (len < sizeof (breq)) \
			break; \
		snprintf(breq.ifbr_ifsname, \
		    sizeof (breq.ifbr_ifsname), \
		    "%s", bif->bif_ifp->if_xname); \
		breq.ifbr_ifsflags = bif->bif_ifflags; \
		breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff; \
		count++; \
		memcpy(buf, &breq, sizeof (breq)); \
		buf += sizeof (breq); \
		len -= sizeof (breq); \
	} \
	\
	BRIDGE_UNLOCK(sc); \
	bifc->ifbic_len = sizeof (breq) * count; \
	error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); \
	BRIDGE_LOCK(sc); \
	_FREE(outbuf, M_TEMP); \
} while (0)

static int
bridge_ioctl_gifs64(struct bridge_softc *sc, void *arg)
{
    struct ifbifconf64 *bifc = arg;
    int error = 0;

    BRIDGE_IOCTL_GIFS;

    return error;
}

static int
bridge_ioctl_gifs32(struct bridge_softc *sc, void *arg)
{
    struct ifbifconf32 *bifc = arg;
    int error = 0;

    BRIDGE_IOCTL_GIFS;

    return error;
}

#define BRIDGE_IOCTL_RTS do { \
	struct bridge_rtnode *brt; \
	char *buf; \
	char *outbuf = NULL; \
	unsigned int count, buflen, len; \
	unsigned long now; \
	\
	if (bac->ifbac_len == 0) \
		return 0; \
	\
	bzero(&bareq, sizeof (bareq)); \
	count = 0; \
	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) \
		count++; \
	buflen = sizeof (bareq) * count; \
	\
	BRIDGE_UNLOCK(sc); \
	outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
	BRIDGE_LOCK(sc); \
	\
	count = 0; \
	buf = outbuf; \
	len = min(bac->ifbac_len, buflen); \
	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { \
		if (len < sizeof (bareq)) \
			break; \
		snprintf(bareq.ifba_ifsname, sizeof (bareq.ifba_ifsname), \
		    "%s", brt->brt_ifp->if_xname); \
		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof (brt->brt_addr)); \
		bareq.ifba_vlan = brt->brt_vlan; \
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { \
			now = (unsigned long) net_uptime(); \
			if (now < brt->brt_expire) \
				bareq.ifba_expire = \
				    brt->brt_expire - now; \
		} else \
			bareq.ifba_expire = 0; \
		bareq.ifba_flags = brt->brt_flags; \
		count++; \
		memcpy(buf, &bareq, sizeof (bareq)); \
		buf += sizeof (bareq); \
		len -= sizeof (bareq); \
	} \
	\
	bac->ifbac_len = sizeof (bareq) * count; \
	if (outbuf != NULL) { \
		BRIDGE_UNLOCK(sc); \
		error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); \
		_FREE(outbuf, M_TEMP); \
		BRIDGE_LOCK(sc); \
	} \
} while (0)

static int
bridge_ioctl_rts64(struct bridge_softc *sc, void *arg)
{
    struct ifbaconf64 *bac = arg;
    struct ifbareq64 bareq;
    int error = 0;

    BRIDGE_IOCTL_RTS;

    return error;
}

static int
bridge_ioctl_rts32(struct bridge_softc *sc, void *arg)
{
    struct ifbaconf32 *bac = arg;
    struct ifbareq32 bareq;
    int error = 0;

    BRIDGE_IOCTL_RTS;

    return error;
}
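
/*
 * Example (illustrative sketch, hypothetical caller): both macros above
 * implement a two-pass convention -- a request whose length field is zero
 * only reports the required buffer size, and a second call with a buffer at
 * least that large receives the entries via copyout().  The sketch assumes
 * the bridge lock is held, like the ioctl handlers themselves.
 */
#if 0 /* example only */
static int
bridge_example_sizing_pass(struct bridge_softc *sc, struct ifbifconf64 *bifc)
{
    int error;

    bifc->ifbic_len = 0;                    /* pass 1: ask for the size */
    error = bridge_ioctl_gifs64(sc, bifc);
    if (error != 0) {
        return error;
    }
    /*
     * Pass 2: the caller provides an ifbic_req buffer of at least
     * ifbic_len bytes and calls bridge_ioctl_gifs64() again to have the
     * member entries copied out.
     */
    return 0;
}
#endif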
static int
bridge_ioctl_saddr32(struct bridge_softc *sc, void *arg)
{
    struct ifbareq32 *req = arg;
    struct bridge_iflist *bif;
    int error;

    bif = bridge_lookup_member(sc, req->ifba_ifsname);
    if (bif == NULL) {
        return ENOENT;
    }

    error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
        req->ifba_flags);

    return error;
}

static int
bridge_ioctl_saddr64(struct bridge_softc *sc, void *arg)
{
    struct ifbareq64 *req = arg;
    struct bridge_iflist *bif;
    int error;

    bif = bridge_lookup_member(sc, req->ifba_ifsname);
    if (bif == NULL) {
        return ENOENT;
    }

    error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
        req->ifba_flags);

    return error;
}

static int
bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
{
    struct ifbrparam *param = arg;

    sc->sc_brttimeout = param->ifbrp_ctime;
    return 0;
}

static int
bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
{
    struct ifbrparam *param = arg;

    param->ifbrp_ctime = sc->sc_brttimeout;
    return 0;
}

static int
bridge_ioctl_daddr32(struct bridge_softc *sc, void *arg)
{
    struct ifbareq32 *req = arg;

    return bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan);
}

static int
bridge_ioctl_daddr64(struct bridge_softc *sc, void *arg)
{
    struct ifbareq64 *req = arg;

    return bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan);
}

static int
bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
{
    struct ifbreq *req = arg;

    bridge_rtflush(sc, req->ifbr_ifsflags);
    return 0;
}
static int
bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
{
    struct ifbrparam *param = arg;
    struct bstp_state *bs = &sc->sc_stp;

    param->ifbrp_prio = bs->bs_bridge_priority;
    return 0;
}

static int
bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
{
#if BRIDGESTP
    struct ifbrparam *param = arg;

    return bstp_set_priority(&sc->sc_stp, param->ifbrp_prio);
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
    return EOPNOTSUPP;
#endif /* !BRIDGESTP */
}

static int
bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
{
    struct ifbrparam *param = arg;
    struct bstp_state *bs = &sc->sc_stp;

    param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
    return 0;
}

static int
bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
{
#if BRIDGESTP
    struct ifbrparam *param = arg;

    return bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime);
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
    return EOPNOTSUPP;
#endif /* !BRIDGESTP */
}

static int
bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
{
    struct ifbrparam *param;
    struct bstp_state *bs;

    param = arg;
    bs = &sc->sc_stp;
    param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
    return 0;
}

static int
bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
{
#if BRIDGESTP
    struct ifbrparam *param = arg;

    return bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay);
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
    return EOPNOTSUPP;
#endif /* !BRIDGESTP */
}

static int
bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
{
    struct ifbrparam *param;
    struct bstp_state *bs;

    param = arg;
    bs = &sc->sc_stp;
    param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
    return 0;
}

static int
bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
{
#if BRIDGESTP
    struct ifbrparam *param = arg;

    return bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage);
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
    return EOPNOTSUPP;
#endif /* !BRIDGESTP */
}

static int
bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
{
#if BRIDGESTP
    struct ifbreq *req = arg;
    struct bridge_iflist *bif;

    bif = bridge_lookup_member(sc, req->ifbr_ifsname);
    if (bif == NULL) {
        return ENOENT;
    }

    return bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority);
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
    return EOPNOTSUPP;
#endif /* !BRIDGESTP */
}

static int
bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
{
#if BRIDGESTP
    struct ifbreq *req = arg;
    struct bridge_iflist *bif;

    bif = bridge_lookup_member(sc, req->ifbr_ifsname);
    if (bif == NULL) {
        return ENOENT;
    }

    return bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost);
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
    return EOPNOTSUPP;
#endif /* !BRIDGESTP */
}
static int
bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
{
    struct ifbrparam *param = arg;

    param->ifbrp_filter = sc->sc_filter_flags;

    return 0;
}

static int
bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
{
    struct ifbrparam *param = arg;

    if (param->ifbrp_filter & ~IFBF_FILT_MASK) {
        return EINVAL;
    }

    if (param->ifbrp_filter & IFBF_FILT_USEIPF) {
        return EINVAL;
    }

    sc->sc_filter_flags = param->ifbrp_filter;

    return 0;
}

static int
bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
{
    struct ifbreq *req = arg;
    struct bridge_iflist *bif;

    bif = bridge_lookup_member(sc, req->ifbr_ifsname);
    if (bif == NULL) {
        return ENOENT;
    }

    bif->bif_addrmax = req->ifbr_addrmax;
    return 0;
}

static int
bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
{
    struct ifbreq *req = arg;
    struct bridge_iflist *bif = NULL;
    struct ifnet *ifs;

    ifs = ifunit(req->ifbr_ifsname);
    if (ifs == NULL) {
        return ENOENT;
    }

    if (IFNET_IS_INTCOPROC(ifs)) {
        return EINVAL;
    }

    TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
        if (ifs == bif->bif_ifp) {
            return EBUSY;
        }

    if (ifs->if_bridge != NULL) {
        return EBUSY;
    }

    switch (ifs->if_type) {
    case IFT_ETHER:
    case IFT_L2VLAN:
        break;
    case IFT_GIF:
        /* currently not supported */
    /* FALLTHRU */
    default:
        return EINVAL;
    }

    bif = _MALLOC(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
    if (bif == NULL) {
        return ENOMEM;
    }

    bif->bif_ifp = ifs;
    bif->bif_ifflags = IFBIF_SPAN;

    ifnet_reference(bif->bif_ifp);

    TAILQ_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);

    return 0;
}
static int
bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
{
    struct ifbreq *req = arg;
    struct bridge_iflist *bif;
    struct ifnet *ifs;

    ifs = ifunit(req->ifbr_ifsname);
    if (ifs == NULL) {
        return ENOENT;
    }

    TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
        if (ifs == bif->bif_ifp) {
            break;
        }

    if (bif == NULL) {
        return ENOENT;
    }

    bridge_delete_span(sc, bif);

    return 0;
}

#define BRIDGE_IOCTL_GBPARAM do { \
	struct bstp_state *bs = &sc->sc_stp; \
	struct bstp_port *root_port; \
	\
	req->ifbop_maxage = bs->bs_bridge_max_age >> 8; \
	req->ifbop_hellotime = bs->bs_bridge_htime >> 8; \
	req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8; \
	\
	root_port = bs->bs_root_port; \
	if (root_port == NULL) \
		req->ifbop_root_port = 0; \
	else \
		req->ifbop_root_port = root_port->bp_ifp->if_index; \
	\
	req->ifbop_holdcount = bs->bs_txholdcount; \
	req->ifbop_priority = bs->bs_bridge_priority; \
	req->ifbop_protocol = bs->bs_protover; \
	req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; \
	req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; \
	req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; \
	req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; \
	req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; \
	req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; \
} while (0)

static int
bridge_ioctl_gbparam32(struct bridge_softc *sc, void *arg)
{
    struct ifbropreq32 *req = arg;

    BRIDGE_IOCTL_GBPARAM;

    return 0;
}

static int
bridge_ioctl_gbparam64(struct bridge_softc *sc, void *arg)
{
    struct ifbropreq64 *req = arg;

    BRIDGE_IOCTL_GBPARAM;

    return 0;
}

static int
bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
{
    struct ifbrparam *param = arg;

    param->ifbrp_cexceeded = sc->sc_brtexceeded;
    return 0;
}
#define BRIDGE_IOCTL_GIFSSTP do { \
	struct bridge_iflist *bif; \
	struct bstp_port *bp; \
	struct ifbpstpreq bpreq; \
	char *buf, *outbuf; \
	unsigned int count, buflen, len; \
	\
	count = 0; \
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
		if ((bif->bif_ifflags & IFBIF_STP) != 0) \
			count++; \
	} \
	\
	buflen = sizeof (bpreq) * count; \
	if (bifstp->ifbpstp_len == 0) { \
		bifstp->ifbpstp_len = buflen; \
		return 0; \
	} \
	\
	BRIDGE_UNLOCK(sc); \
	outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
	BRIDGE_LOCK(sc); \
	\
	count = 0; \
	buf = outbuf; \
	len = min(bifstp->ifbpstp_len, buflen); \
	bzero(&bpreq, sizeof (bpreq)); \
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
		if (len < sizeof (bpreq)) \
			break; \
		\
		if ((bif->bif_ifflags & IFBIF_STP) == 0) \
			continue; \
		\
		bp = &bif->bif_stp; \
		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff; \
		bpreq.ifbp_fwd_trans = bp->bp_forward_transitions; \
		bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost; \
		bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id; \
		bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; \
		bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id; \
		\
		memcpy(buf, &bpreq, sizeof (bpreq)); \
		count++; \
		buf += sizeof (bpreq); \
		len -= sizeof (bpreq); \
	} \
	\
	BRIDGE_UNLOCK(sc); \
	bifstp->ifbpstp_len = sizeof (bpreq) * count; \
	error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); \
	BRIDGE_LOCK(sc); \
	_FREE(outbuf, M_TEMP); \
} while (0)

static int
bridge_ioctl_gifsstp32(struct bridge_softc *sc, void *arg)
{
    struct ifbpstpconf32 *bifstp = arg;
    int error = 0;

    BRIDGE_IOCTL_GIFSSTP;

    return error;
}

static int
bridge_ioctl_gifsstp64(struct bridge_softc *sc, void *arg)
{
    struct ifbpstpconf64 *bifstp = arg;
    int error = 0;

    BRIDGE_IOCTL_GIFSSTP;

    return error;
}

static int
bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
{
#if BRIDGESTP
    struct ifbrparam *param = arg;

    return bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto);
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
    return EOPNOTSUPP;
#endif /* !BRIDGESTP */
}

static int
bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
{
#if BRIDGESTP
    struct ifbrparam *param = arg;

    return bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc);
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
    return EOPNOTSUPP;
#endif /* !BRIDGESTP */
}
static int
bridge_ioctl_ghostfilter(struct bridge_softc *sc, void *arg)
{
    struct ifbrhostfilter *req = arg;
    struct bridge_iflist *bif;

    bif = bridge_lookup_member(sc, req->ifbrhf_ifsname);
    if (bif == NULL) {
        return ENOENT;
    }

    bzero(req, sizeof(struct ifbrhostfilter));
    if (bif->bif_flags & BIFF_HOST_FILTER) {
        req->ifbrhf_flags |= IFBRHF_ENABLED;
        bcopy(bif->bif_hf_hwsrc, req->ifbrhf_hwsrca,
            ETHER_ADDR_LEN);
        req->ifbrhf_ipsrc = bif->bif_hf_ipsrc.s_addr;
    }
    return 0;
}

static int
bridge_ioctl_shostfilter(struct bridge_softc *sc, void *arg)
{
    struct ifbrhostfilter *req = arg;
    struct bridge_iflist *bif;

    bif = bridge_lookup_member(sc, req->ifbrhf_ifsname);
    if (bif == NULL) {
        return ENOENT;
    }

    INC_ATOMIC_INT64_LIM(net_api_stats.nas_vmnet_total);

    if (req->ifbrhf_flags & IFBRHF_ENABLED) {
        bif->bif_flags |= BIFF_HOST_FILTER;

        if (req->ifbrhf_flags & IFBRHF_HWSRC) {
            bcopy(req->ifbrhf_hwsrca, bif->bif_hf_hwsrc,
                ETHER_ADDR_LEN);
            if (bcmp(req->ifbrhf_hwsrca, ethernulladdr,
                ETHER_ADDR_LEN) != 0) {
                bif->bif_flags |= BIFF_HF_HWSRC;
            } else {
                bif->bif_flags &= ~BIFF_HF_HWSRC;
            }
        }
        if (req->ifbrhf_flags & IFBRHF_IPSRC) {
            bif->bif_hf_ipsrc.s_addr = req->ifbrhf_ipsrc;
            if (bif->bif_hf_ipsrc.s_addr != INADDR_ANY) {
                bif->bif_flags |= BIFF_HF_IPSRC;
            } else {
                bif->bif_flags &= ~BIFF_HF_IPSRC;
            }
        }
    } else {
        bif->bif_flags &= ~(BIFF_HOST_FILTER | BIFF_HF_HWSRC |
            BIFF_HF_IPSRC);
        bzero(bif->bif_hf_hwsrc, ETHER_ADDR_LEN);
        bif->bif_hf_ipsrc.s_addr = INADDR_ANY;
    }
    return 0;
}
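
/*
 * Example (illustrative sketch): a set-host-filter request that pins a
 * member to a single hardware and IPv4 source sets the corresponding flag
 * bits; the interface name and addresses below are placeholders.
 */
#if 0 /* example only */
static void
bridge_example_hostfilter_request(struct ifbrhostfilter *req)
{
    bzero(req, sizeof(*req));
    snprintf(req->ifbrhf_ifsname, sizeof(req->ifbrhf_ifsname), "%s", "en0");
    req->ifbrhf_flags = IFBRHF_ENABLED | IFBRHF_HWSRC | IFBRHF_IPSRC;
    /* placeholder addresses -- real callers fill in the guest's values */
    req->ifbrhf_hwsrca[0] = 0x02;           /* locally administered MAC */
    req->ifbrhf_ipsrc = htonl(0xc0a80102);  /* 192.168.1.2 */
}
#endif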
/*
 * bridge_ifdetach:
 *
 *	Detach an interface from a bridge. Called when a member
 *	interface is detaching.
 */
static void
bridge_ifdetach(struct ifnet *ifp)
{
    struct bridge_iflist *bif;
    struct bridge_softc *sc = ifp->if_bridge;

#if BRIDGE_DEBUG
    if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
        printf("%s: %s\n", __func__, ifp->if_xname);
    }
#endif /* BRIDGE_DEBUG */

    /* Check if the interface is a bridge member */
    if (sc != NULL) {
        BRIDGE_LOCK(sc);
        bif = bridge_lookup_member_if(sc, ifp);
        if (bif != NULL) {
            bridge_delete_member(sc, bif, 1);
        }
        BRIDGE_UNLOCK(sc);
        return;
    }

    /* Check if the interface is a span port */
    lck_mtx_lock(&bridge_list_mtx);
    LIST_FOREACH(sc, &bridge_list, sc_list) {
        BRIDGE_LOCK(sc);
        TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
            if (ifp == bif->bif_ifp) {
                bridge_delete_span(sc, bif);
                break;
            }
        BRIDGE_UNLOCK(sc);
    }
    lck_mtx_unlock(&bridge_list_mtx);
}

/*
 * bridge_proto_attach_changed
 *
 *	Called when protocol attachment on the interface changes.
 */
static void
bridge_proto_attach_changed(struct ifnet *ifp)
{
    boolean_t changed = FALSE;
    struct bridge_iflist *bif;
    boolean_t input_broadcast;
    struct bridge_softc *sc = ifp->if_bridge;

#if BRIDGE_DEBUG
    if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
        printf("%s: %s\n", __func__, ifp->if_xname);
    }
#endif /* BRIDGE_DEBUG */
    if (sc == NULL) {
        return;
    }
    /*
     * Selectively enable input broadcast only when necessary.
     * The bridge interface itself attaches a fake protocol
     * so checking for at least two protocols means that the
     * interface is being used for something besides bridging.
     */
    input_broadcast = if_get_protolist(ifp, NULL, 0) >= 2;
    BRIDGE_LOCK(sc);
    bif = bridge_lookup_member_if(sc, ifp);
    if (bif != NULL) {
        if (input_broadcast) {
            if ((bif->bif_flags & BIFF_INPUT_BROADCAST) == 0) {
                bif->bif_flags |= BIFF_INPUT_BROADCAST;
                changed = TRUE;
            }
        } else if ((bif->bif_flags & BIFF_INPUT_BROADCAST) != 0) {
            changed = TRUE;
            bif->bif_flags &= ~BIFF_INPUT_BROADCAST;
        }
    }
    BRIDGE_UNLOCK(sc);
#if BRIDGE_DEBUG
    if ((if_bridge_debug & BR_DBGF_LIFECYCLE) != 0 && changed) {
        printf("%s: input broadcast %s", ifp->if_xname,
            input_broadcast ? "ENABLED" : "DISABLED");
    }
#endif /* BRIDGE_DEBUG */
}
/*
 * interface_media_active:
 *
 *	Tells if an interface media is active.
 */
static int
interface_media_active(struct ifnet *ifp)
{
    struct ifmediareq ifmr;
    int status = 0;

    bzero(&ifmr, sizeof(ifmr));
    if (ifnet_ioctl(ifp, 0, SIOCGIFMEDIA, &ifmr) == 0) {
        if ((ifmr.ifm_status & IFM_AVALID) && ifmr.ifm_count > 0) {
            status = ifmr.ifm_status & IFM_ACTIVE ? 1 : 0;
        }
    }

    return status;
}

/*
 * bridge_updatelinkstatus:
 *
 *	Update the media active status of the bridge based on the
 *	media active status of its members.
 *	If changed, return the corresponding on/off link event.
 */
static u_int32_t
bridge_updatelinkstatus(struct bridge_softc *sc)
{
    struct bridge_iflist *bif;
    int active_member = 0;
    u_int32_t event_code = 0;

    BRIDGE_LOCK_ASSERT_HELD(sc);

    /*
     * Find out if we have an active interface
     */
    TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
        if (bif->bif_flags & BIFF_MEDIA_ACTIVE) {
            active_member = 1;
            break;
        }
    }

    if (active_member && !(sc->sc_flags & SCF_MEDIA_ACTIVE)) {
        sc->sc_flags |= SCF_MEDIA_ACTIVE;
        event_code = KEV_DL_LINK_ON;
    } else if (!active_member && (sc->sc_flags & SCF_MEDIA_ACTIVE)) {
        sc->sc_flags &= ~SCF_MEDIA_ACTIVE;
        event_code = KEV_DL_LINK_OFF;
    }

    return event_code;
}

/*
 * bridge_iflinkevent:
 */
static void
bridge_iflinkevent(struct ifnet *ifp)
{
    struct bridge_softc *sc = ifp->if_bridge;
    struct bridge_iflist *bif;
    u_int32_t event_code = 0;

#if BRIDGE_DEBUG
    if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
        printf("%s: %s\n", __func__, ifp->if_xname);
    }
#endif /* BRIDGE_DEBUG */

    /* Check if the interface is a bridge member */
    if (sc == NULL) {
        return;
    }

    BRIDGE_LOCK(sc);
    bif = bridge_lookup_member_if(sc, ifp);
    if (bif != NULL) {
        if (interface_media_active(ifp)) {
            bif->bif_flags |= BIFF_MEDIA_ACTIVE;
        } else {
            bif->bif_flags &= ~BIFF_MEDIA_ACTIVE;
        }

        event_code = bridge_updatelinkstatus(sc);
    }
    BRIDGE_UNLOCK(sc);

    if (event_code != 0) {
        bridge_link_event(sc->sc_ifp, event_code);
    }
}
/*
 * bridge_delayed_callback:
 *
 *	Makes a delayed call
 */
static void
bridge_delayed_callback(void *param)
{
    struct bridge_delayed_call *call = (struct bridge_delayed_call *)param;
    struct bridge_softc *sc = call->bdc_sc;

#if BRIDGE_DEBUG_DELAYED_CALLBACK
    if (bridge_delayed_callback_delay > 0) {
        struct timespec ts;

        ts.tv_sec = bridge_delayed_callback_delay;
        ts.tv_nsec = 0;

        printf("%s: sleeping for %d seconds\n",
            __func__, bridge_delayed_callback_delay);

        msleep(&bridge_delayed_callback_delay, NULL, PZERO,
            __func__, &ts);

        printf("%s: awoken\n", __func__);
    }
#endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */

    BRIDGE_LOCK(sc);

#if BRIDGE_DEBUG_DELAYED_CALLBACK
    if (if_bridge_debug & BR_DBGF_DELAYED_CALL) {
        printf("%s: %s call 0x%llx flags 0x%x\n", __func__,
            sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call),
            call->bdc_flags);
    }
#endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */

    if (call->bdc_flags & BDCF_CANCELLING) {
        wakeup(call);
    } else {
        if ((sc->sc_flags & SCF_DETACHING) == 0) {
            (*call->bdc_func)(sc);
        }
    }
    call->bdc_flags &= ~BDCF_OUTSTANDING;
    BRIDGE_UNLOCK(sc);
}

/*
 * bridge_schedule_delayed_call:
 *
 *	Schedule a function to be called on a separate thread
 *	The actual call may be scheduled to run at a given time or ASAP.
 */
static void
bridge_schedule_delayed_call(struct bridge_delayed_call *call)
{
    uint64_t deadline = 0;
    struct bridge_softc *sc = call->bdc_sc;

    BRIDGE_LOCK_ASSERT_HELD(sc);

    if ((sc->sc_flags & SCF_DETACHING) ||
        (call->bdc_flags & (BDCF_OUTSTANDING | BDCF_CANCELLING))) {
        return;
    }

    if (call->bdc_ts.tv_sec || call->bdc_ts.tv_nsec) {
        nanoseconds_to_absolutetime(
            (uint64_t)call->bdc_ts.tv_sec * NSEC_PER_SEC +
            call->bdc_ts.tv_nsec, &deadline);
        clock_absolutetime_interval_to_deadline(deadline, &deadline);
    }

    call->bdc_flags = BDCF_OUTSTANDING;

#if BRIDGE_DEBUG_DELAYED_CALLBACK
    if (if_bridge_debug & BR_DBGF_DELAYED_CALL) {
        printf("%s: %s call 0x%llx flags 0x%x\n", __func__,
            sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call),
            call->bdc_flags);
    }
#endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */

    if (call->bdc_ts.tv_sec || call->bdc_ts.tv_nsec) {
        thread_call_func_delayed(
            (thread_call_func_t)bridge_delayed_callback,
            call, deadline);
    } else {
        if (call->bdc_thread_call == NULL) {
            call->bdc_thread_call = thread_call_allocate(
                (thread_call_func_t)bridge_delayed_callback,
                call);
        }
        thread_call_enter(call->bdc_thread_call);
    }
}

/*
 * bridge_cancel_delayed_call:
 *
 *	Cancel a queued or running delayed call.
 *	If call is running, does not return until the call is done to
 *	prevent race condition with the bridge interface getting destroyed
 */
static void
bridge_cancel_delayed_call(struct bridge_delayed_call *call)
{
    boolean_t result;
    struct bridge_softc *sc = call->bdc_sc;

    /*
     * The call was never scheduled
     */
    if (sc == NULL) {
        return;
    }

    BRIDGE_LOCK_ASSERT_HELD(sc);

    call->bdc_flags |= BDCF_CANCELLING;

    while (call->bdc_flags & BDCF_OUTSTANDING) {
#if BRIDGE_DEBUG
        if (if_bridge_debug & BR_DBGF_DELAYED_CALL) {
            printf("%s: %s call 0x%llx flags 0x%x\n", __func__,
                sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call),
                call->bdc_flags);
        }
#endif /* BRIDGE_DEBUG */
        result = thread_call_func_cancel(
            (thread_call_func_t)bridge_delayed_callback, call, FALSE);

        if (result) {
            /*
             * We managed to dequeue the delayed call
             */
            call->bdc_flags &= ~BDCF_OUTSTANDING;
        } else {
            /*
             * Wait for the delayed call to be done running
             */
            msleep(call, &sc->sc_mtx, PZERO, __func__, NULL);
        }
    }
    call->bdc_flags &= ~BDCF_CANCELLING;
}

/*
 * bridge_cleanup_delayed_call:
 *
 *	Dispose resource allocated for a delayed call
 *	Assume the delayed call is not queued or running.
 */
static void
bridge_cleanup_delayed_call(struct bridge_delayed_call *call)
{
    boolean_t result;
    struct bridge_softc *sc = call->bdc_sc;

    /*
     * The call was never scheduled
     */
    if (sc == NULL) {
        return;
    }

    BRIDGE_LOCK_ASSERT_HELD(sc);

    VERIFY((call->bdc_flags & BDCF_OUTSTANDING) == 0);
    VERIFY((call->bdc_flags & BDCF_CANCELLING) == 0);

    if (call->bdc_thread_call != NULL) {
        result = thread_call_free(call->bdc_thread_call);
        if (result == FALSE) {
            panic("%s thread_call_free() failed for call %p",
                __func__, call);
        }
        call->bdc_thread_call = NULL;
    }
}
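
/*
 * Example (illustrative sketch): arming a delayed call.  Only the fields
 * referenced above (bdc_sc, bdc_func, bdc_ts) are used; the callback
 * pointer type is an assumption based on how bdc_func is invoked.
 */
#if 0 /* example only */
static void
bridge_example_arm_call(struct bridge_softc *sc,
    struct bridge_delayed_call *call, void (*func)(struct bridge_softc *))
{
    BRIDGE_LOCK_ASSERT_HELD(sc);
    call->bdc_sc = sc;
    call->bdc_func = func;
    call->bdc_ts.tv_sec = 1;    /* run roughly one second from now */
    call->bdc_ts.tv_nsec = 0;
    bridge_schedule_delayed_call(call);
}
#endif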
/*
 * bridge_init:
 *
 *	Initialize a bridge interface.
 */
static int
bridge_init(struct ifnet *ifp)
{
    struct bridge_softc *sc = (struct bridge_softc *)ifp->if_softc;
    errno_t error;

    BRIDGE_LOCK_ASSERT_HELD(sc);

    if ((ifnet_flags(ifp) & IFF_RUNNING)) {
        return 0;
    }

    error = ifnet_set_flags(ifp, IFF_RUNNING, IFF_RUNNING);

    /*
     * Calling bridge_aging_timer() is OK as there are no entries to
     * age so we're just going to arm the timer
     */
    bridge_aging_timer(sc);

#if BRIDGESTP
    if (error == 0) {
        bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */
    }
#endif /* BRIDGESTP */

    return error;
}

/*
 * bridge_ifstop:
 *
 *	Stop the bridge interface.
 */
static void
bridge_ifstop(struct ifnet *ifp, int disable)
{
#pragma unused(disable)
    struct bridge_softc *sc = ifp->if_softc;

    BRIDGE_LOCK_ASSERT_HELD(sc);

    if ((ifnet_flags(ifp) & IFF_RUNNING) == 0) {
        return;
    }

    bridge_cancel_delayed_call(&sc->sc_aging_timer);

#if BRIDGESTP
    bstp_stop(&sc->sc_stp);
#endif /* BRIDGESTP */

    bridge_rtflush(sc, IFBF_FLUSHDYN);
    (void) ifnet_set_flags(ifp, 0, IFF_RUNNING);
}
/*
 * bridge_compute_cksum:
 *
 *	If the packet has checksum flags, compare the hardware checksum
 *	capabilities of the source and destination interfaces. If they
 *	are the same, there's nothing to do. If they are different,
 *	finalize the checksum so that it can be sent on the destination
 *	interface.
 */
static void
bridge_compute_cksum(struct ifnet *src_if, struct ifnet *dst_if, struct mbuf *m)
{
    uint32_t csum_flags;
    uint16_t dst_hw_csum;
    uint32_t did_sw = 0;
    struct ether_header *eh;
    uint16_t src_hw_csum;

    csum_flags = m->m_pkthdr.csum_flags & IF_HWASSIST_CSUM_MASK;
    if (csum_flags == 0) {
        /* no checksum offload */
        return;
    }

    /*
     * if destination/source differ in checksum offload
     * capabilities, finalize/compute the checksum
     */
    dst_hw_csum = IF_HWASSIST_CSUM_FLAGS(dst_if->if_hwassist);
    src_hw_csum = IF_HWASSIST_CSUM_FLAGS(src_if->if_hwassist);
    if (dst_hw_csum == src_hw_csum) {
        return;
    }
    eh = mtod(m, struct ether_header *);
    switch (ntohs(eh->ether_type)) {
    case ETHERTYPE_IP:
        did_sw = in_finalize_cksum(m, sizeof(*eh), csum_flags);
        break;
    case ETHERTYPE_IPV6:
        did_sw = in6_finalize_cksum(m, sizeof(*eh), -1, -1, csum_flags);
        break;
    }
#if BRIDGE_DEBUG
    if (if_bridge_debug & BR_DBGF_CHECKSUM) {
        printf("%s: [%s -> %s] before 0x%x did 0x%x after 0x%x\n",
            __func__,
            src_if->if_xname, dst_if->if_xname, csum_flags, did_sw,
            m->m_pkthdr.csum_flags);
    }
#endif /* BRIDGE_DEBUG */
}
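
/*
 * Example (illustrative sketch): the decision above reduces to "finalize in
 * software only when the two interfaces advertise different hardware
 * checksum capabilities".
 */
#if 0 /* example only */
static boolean_t
bridge_example_needs_sw_cksum(struct ifnet *src_if, struct ifnet *dst_if)
{
    return IF_HWASSIST_CSUM_FLAGS(src_if->if_hwassist) !=
        IF_HWASSIST_CSUM_FLAGS(dst_if->if_hwassist);
}
#endif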
/*
 * bridge_enqueue:
 *
 *	Enqueue a packet on a bridge member interface.
 */
static int
bridge_enqueue(struct bridge_softc *sc, struct ifnet *src_ifp,
    struct ifnet *dst_ifp, struct mbuf *m, ChecksumOperation cksum_op)
{
    int len, error = 0;
    struct mbuf *next_m;

    VERIFY(dst_ifp != NULL);

    /*
     * We may be sending a fragment so traverse the mbuf
     *
     * NOTE: bridge_fragment() is called only when PFIL_HOOKS is enabled.
     */
    for (; m; m = next_m) {
        errno_t _error;
        struct flowadv adv = { .code = FADV_SUCCESS };

        next_m = m->m_nextpkt;
        m->m_nextpkt = NULL;

        len = m->m_pkthdr.len;
        m->m_flags |= M_PROTO1; /* set to avoid loops */

        switch (cksum_op) {
        case kChecksumOperationClear:
            m->m_pkthdr.csum_flags = 0;
            break;
        case kChecksumOperationFinalize:
            /* the checksum might not be correct, finalize now */
            bridge_finalize_cksum(dst_ifp, m);
            break;
        case kChecksumOperationCompute:
            bridge_compute_cksum(src_ifp, dst_ifp, m);
            break;
        default:
            break;
        }
#if HAS_IF_CAP
        /*
         * If the underlying interface can not do VLAN tag insertion itself
         * then attach a packet tag that holds it.
         */
        if ((m->m_flags & M_VLANTAG) &&
            (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
            m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
            if (m == NULL) {
                printf("%s: %s: unable to prepend VLAN "
                    "header\n", __func__, dst_ifp->if_xname);
                (void) ifnet_stat_increment_out(dst_ifp,
                    0, 0, 1);
                continue;
            }
            m->m_flags &= ~M_VLANTAG;
        }
#endif /* HAS_IF_CAP */

        _error = dlil_output(dst_ifp, 0, m, NULL, NULL, 1, &adv);

        /* Preserve existing error value */
        if (error == 0) {
            if (_error != 0) {
                error = _error;
            } else if (adv.code == FADV_FLOW_CONTROLLED) {
                error = EQFULL;
            } else if (adv.code == FADV_SUSPENDED) {
                error = EQSUSPENDED;
            }
        }

        if (_error == 0) {
            (void) ifnet_stat_increment_out(sc->sc_ifp, 1, len, 0);
        } else {
            (void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
        }
    }

    return error;
}

#if HAS_BRIDGE_DUMMYNET
/*
 * bridge_dummynet:
 *
 *	Receive a queued packet from dummynet and pass it on to the output
 *	interface.
 *
 *	The mbuf has the Ethernet header already attached.
 */
static void
bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
{
    struct bridge_softc *sc;

    sc = ifp->if_bridge;

    /*
     * The packet didn't originate from a member interface. This should only
     * ever happen if a member interface is removed while packets are
     * queued for it.
     */
    if (sc == NULL) {
        m_freem(m);
        return;
    }

    if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
        if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0) {
            return;
        }
        if (m == NULL) {
            return;
        }
    }

    (void) bridge_enqueue(sc, NULL, ifp, m, kChecksumOperationNone);
}
#endif /* HAS_BRIDGE_DUMMYNET */
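
/*
 * Example (illustrative sketch): callers of bridge_enqueue() pick the
 * checksum operation based on where the packet came from -- forwarded
 * inbound traffic already carries a valid checksum (clear), while locally
 * originated traffic may still need one (compute/finalize), as seen in the
 * callers below.
 */
#if 0 /* example only */
static ChecksumOperation
bridge_example_cksum_op(boolean_t locally_originated)
{
    return locally_originated ? kChecksumOperationFinalize
        : kChecksumOperationClear;
}
#endif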
/*
 * bridge_member_output:
 *
 *	Send output from a bridge member interface.  This
 *	performs the bridging function for locally originated
 *	packets.
 *
 *	The mbuf has the Ethernet header already attached.
 */
static errno_t
bridge_member_output(struct bridge_softc *sc, ifnet_t ifp, mbuf_t m)
{
    struct ether_header *eh;
    struct ifnet *dst_if;
    uint16_t vlan;

#if BRIDGE_DEBUG
    if (if_bridge_debug & BR_DBGF_OUTPUT) {
        printf("%s: ifp %s\n", __func__, ifp->if_xname);
    }
#endif /* BRIDGE_DEBUG */

    if (m->m_len < ETHER_HDR_LEN) {
        m = m_pullup(m, ETHER_HDR_LEN);
        if (m == NULL) {
            return EJUSTRETURN;
        }
    }

    eh = mtod(m, struct ether_header *);
    vlan = VLANTAGOF(m);

    BRIDGE_LOCK(sc);

    /*
     * APPLE MODIFICATION
     * If the packet is an 802.1X ethertype, then only send on the
     * original output interface.
     */
    if (eh->ether_type == htons(ETHERTYPE_PAE)) {
        dst_if = ifp;
        goto sendunicast;
    }

    /*
     * If the bridge is down, but the original output interface is up,
     * go ahead and send out that interface. Otherwise, the packet
     * is dropped below.
     */
    if ((sc->sc_ifp->if_flags & IFF_RUNNING) == 0) {
        dst_if = ifp;
        goto sendunicast;
    }

    /*
     * If the packet is a multicast, or we don't know a better way to
     * get there, send to all interfaces.
     */
    if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
        dst_if = NULL;
    } else {
        dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
    }
    if (dst_if == NULL) {
        struct bridge_iflist *bif;
        struct mbuf *mc;
        int used = 0;
        errno_t error;

        BRIDGE_LOCK2REF(sc, error);
        if (error != 0) {
            m_freem(m);
            return EJUSTRETURN;
        }

        TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
            /* skip interface with inactive link status */
            if ((bif->bif_flags & BIFF_MEDIA_ACTIVE) == 0) {
                continue;
            }
            dst_if = bif->bif_ifp;

            if (dst_if->if_type == IFT_GIF) {
                continue;
            }
            if ((dst_if->if_flags & IFF_RUNNING) == 0) {
                continue;
            }

            /*
             * If this is not the original output interface,
             * and the interface is participating in spanning
             * tree, make sure the port is in a state that
             * allows forwarding.
             */
            if (dst_if != ifp && (bif->bif_ifflags & IFBIF_STP) &&
                bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
                continue;
            }

            if (TAILQ_NEXT(bif, bif_next) == NULL) {
                used = 1;
                mc = m;
            } else {
                mc = m_dup(m, M_DONTWAIT);
                if (mc == NULL) {
                    (void) ifnet_stat_increment_out(
                        sc->sc_ifp, 0, 0, 1);
                    continue;
                }
            }

            (void) bridge_enqueue(sc, ifp, dst_if, mc,
                kChecksumOperationCompute);
        }
        if (used == 0) {
            m_freem(m);
        }
        BRIDGE_UNREF(sc);
        return EJUSTRETURN;
    }

sendunicast:
    /*
     * XXX Spanning tree consideration here?
     */
    if ((dst_if->if_flags & IFF_RUNNING) == 0) {
        m_freem(m);
        BRIDGE_UNLOCK(sc);
        return EJUSTRETURN;
    }

    BRIDGE_UNLOCK(sc);
    if (dst_if == ifp) {
        /* just let the packet continue on its way */
        return 0;
    }
    (void) bridge_enqueue(sc, ifp, dst_if, m,
        kChecksumOperationCompute);

    return EJUSTRETURN;
}

/*
 * bridge_output:
 *
 *	This routine is called externally from above only when if_bridge_txstart
 *	is disabled; otherwise it is called internally by bridge_start().
 */
static int
bridge_output(struct ifnet *ifp, struct mbuf *m)
{
    struct bridge_softc *sc = ifnet_softc(ifp);
    struct ether_header *eh;
    struct ifnet *dst_if = NULL;
    int error = 0;

    eh = mtod(m, struct ether_header *);

    BRIDGE_LOCK(sc);

    if (!(m->m_flags & (M_BCAST | M_MCAST))) {
        dst_if = bridge_rtlookup(sc, eh->ether_dhost, 0);
    }

    (void) ifnet_stat_increment_out(ifp, 1, m->m_pkthdr.len, 0);

    if (sc->sc_bpf_output) {
        bridge_bpf_output(ifp, m);
    }

    if (dst_if == NULL) {
        /* callee will unlock */
        bridge_broadcast(sc, NULL, m, 0);
    } else {
        BRIDGE_UNLOCK(sc);
        error = bridge_enqueue(sc, NULL, dst_if, m,
            kChecksumOperationFinalize);
    }

    return error;
}
static void
bridge_finalize_cksum(struct ifnet *ifp, struct mbuf *m)
{
    struct ether_header *eh = mtod(m, struct ether_header *);
    uint32_t sw_csum, hwcap;

    hwcap = (ifp->if_hwassist | CSUM_DATA_VALID);

    /* do in software what the hardware cannot */
    sw_csum = m->m_pkthdr.csum_flags & ~IF_HWASSIST_CSUM_FLAGS(hwcap);
    sw_csum &= IF_HWASSIST_CSUM_MASK;

    switch (ntohs(eh->ether_type)) {
    case ETHERTYPE_IP:
        if ((hwcap & CSUM_PARTIAL) && !(sw_csum & CSUM_DELAY_DATA) &&
            (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
            if (m->m_pkthdr.csum_flags & CSUM_TCP) {
                uint16_t start =
                    sizeof(*eh) + sizeof(struct ip);
                uint16_t ulpoff =
                    m->m_pkthdr.csum_data & 0xffff;
                m->m_pkthdr.csum_flags |=
                    (CSUM_DATA_VALID | CSUM_PARTIAL);
                m->m_pkthdr.csum_tx_stuff = (ulpoff + start);
                m->m_pkthdr.csum_tx_start = start;
            } else {
                sw_csum |= (CSUM_DELAY_DATA &
                    m->m_pkthdr.csum_flags);
            }
        }
        (void) in_finalize_cksum(m, sizeof(*eh), sw_csum);
        break;

    case ETHERTYPE_IPV6:
        if ((hwcap & CSUM_PARTIAL) &&
            !(sw_csum & CSUM_DELAY_IPV6_DATA) &&
            (m->m_pkthdr.csum_flags & CSUM_DELAY_IPV6_DATA)) {
            if (m->m_pkthdr.csum_flags & CSUM_TCPIPV6) {
                uint16_t start =
                    sizeof(*eh) + sizeof(struct ip6_hdr);
                uint16_t ulpoff =
                    m->m_pkthdr.csum_data & 0xffff;
                m->m_pkthdr.csum_flags |=
                    (CSUM_DATA_VALID | CSUM_PARTIAL);
                m->m_pkthdr.csum_tx_stuff = (ulpoff + start);
                m->m_pkthdr.csum_tx_start = start;
            } else {
                sw_csum |= (CSUM_DELAY_IPV6_DATA &
                    m->m_pkthdr.csum_flags);
            }
        }
        (void) in6_finalize_cksum(m, sizeof(*eh), -1, -1, sw_csum);
        break;
    }
}

/*
 * bridge_start:
 *
 *	Start output on a bridge.
 *
 *	This routine is invoked by the start worker thread; because we never call
 *	it directly, there is no need to deploy any serialization mechanism other
 *	than what's already used by the worker thread, i.e. this is already single
 *	threaded.
 *
 *	This routine is called only when if_bridge_txstart is enabled.
 */
static void
bridge_start(struct ifnet *ifp)
{
    struct mbuf *m;

    for (;;) {
        if (ifnet_dequeue(ifp, &m) != 0) {
            break;
        }

        (void) bridge_output(ifp, m);
    }
}
/*
 * bridge_forward:
 *
 *	The forwarding function of the bridge.
 *
 *	NOTE: Releases the lock on return.
 */
static void
bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
    struct mbuf *m)
{
    struct bridge_iflist *dbif;
    struct ifnet *src_if, *dst_if, *ifp;
    struct ether_header *eh;
    uint16_t vlan;
    uint8_t *dst;
    int error;

    BRIDGE_LOCK_ASSERT_HELD(sc);

#if BRIDGE_DEBUG
    if (if_bridge_debug & BR_DBGF_OUTPUT) {
        printf("%s: %s m 0x%llx\n", __func__, sc->sc_ifp->if_xname,
            (uint64_t)VM_KERNEL_ADDRPERM(m));
    }
#endif /* BRIDGE_DEBUG */

    src_if = m->m_pkthdr.rcvif;
    ifp = sc->sc_ifp;

    (void) ifnet_stat_increment_in(ifp, 1, m->m_pkthdr.len, 0);
    vlan = VLANTAGOF(m);

    if ((sbif->bif_ifflags & IFBIF_STP) &&
        sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
        goto drop;
    }

    eh = mtod(m, struct ether_header *);
    dst = eh->ether_dhost;

    /* If the interface is learning, record the address. */
    if (sbif->bif_ifflags & IFBIF_LEARNING) {
        error = bridge_rtupdate(sc, eh->ether_shost, vlan,
            sbif, 0, IFBAF_DYNAMIC);
        /*
         * If the interface has address limits then deny any source
         * that is not in the cache.
         */
        if (error && sbif->bif_addrmax) {
            goto drop;
        }
    }

    if ((sbif->bif_ifflags & IFBIF_STP) != 0 &&
        sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING) {
        goto drop;
    }

    /*
     * At this point, the port either doesn't participate
     * in spanning tree or it is in the forwarding state.
     */

    /*
     * If the packet is unicast, destined for someone on
     * "this" side of the bridge, drop it.
     */
    if ((m->m_flags & (M_BCAST | M_MCAST)) == 0) {
        dst_if = bridge_rtlookup(sc, dst, vlan);
        if (src_if == dst_if) {
            goto drop;
        }
    } else {
        /* broadcast/multicast */

        /*
         * Check if it's a reserved multicast address; any address
         * listed in 802.1D section 7.12.6 may not be forwarded by the
         * bridge.
         * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
         */
        if (dst[0] == 0x01 && dst[1] == 0x80 &&
            dst[2] == 0xc2 && dst[3] == 0x00 &&
            dst[4] == 0x00 && dst[5] <= 0x0f) {
            goto drop;
        }

        /* ...forward it to all interfaces. */
        atomic_add_64(&ifp->if_imcasts, 1);
        dst_if = NULL;
    }

    /*
     * If we have a destination interface which is a member of our bridge,
     * OR this is a unicast packet, push it through the bpf(4) machinery.
     * For broadcast or multicast packets, don't bother because it will
     * be reinjected into ether_input. We do this before we pass the packets
     * through the pfil(9) framework, as it is possible that pfil(9) will
     * drop the packet, or possibly modify it, making it difficult to debug
     * firewall issues on the bridge.
     */
#if NBPFILTER > 0
    if (eh->ether_type == htons(ETHERTYPE_RSN_PREAUTH) ||
        dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0) {
        m->m_pkthdr.rcvif = ifp;
        if (sc->sc_bpf_input) {
            bridge_bpf_input(ifp, m);
        }
    }
#endif /* NBPFILTER */

#if defined(PFIL_HOOKS)
    /* run the packet filter */
    if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
        BRIDGE_UNLOCK(sc);
        if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0) {
            return;
        }
        if (m == NULL) {
            return;
        }
        BRIDGE_LOCK(sc);
    }
#endif /* PFIL_HOOKS */

    if (dst_if == NULL) {
        bridge_broadcast(sc, src_if, m, 1);
        return;
    }

    /*
     * At this point, we're dealing with a unicast frame
     * going to a different interface.
     */
    if ((dst_if->if_flags & IFF_RUNNING) == 0) {
        goto drop;
    }

    dbif = bridge_lookup_member_if(sc, dst_if);
    if (dbif == NULL) {
        /* Not a member of the bridge (anymore?) */
        goto drop;
    }

    /* Private segments can not talk to each other */
    if (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE) {
        goto drop;
    }

    if ((dbif->bif_ifflags & IFBIF_STP) &&
        dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
        goto drop;
    }

#if HAS_DHCPRA_MASK
    /* APPLE MODIFICATION <rdar:6985737> */
    if ((dst_if->if_extflags & IFEXTF_DHCPRA_MASK) != 0) {
        m = ip_xdhcpra_output(dst_if, m);
        if (!m) {
            ++sc->sc_sc.sc_ifp.if_xdhcpra;
            BRIDGE_UNLOCK(sc);
            return;
        }
    }
#endif /* HAS_DHCPRA_MASK */

    BRIDGE_UNLOCK(sc);

#if defined(PFIL_HOOKS)
    if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
        if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0) {
            return;
        }
        if (m == NULL) {
            return;
        }
    }
#endif /* PFIL_HOOKS */

    /*
     * This is an inbound packet where the checksum
     * (if applicable) is already present/valid. Since
     * we are just doing layer 2 forwarding (not IP
     * forwarding), there's no need to validate the checksum.
     * Clear the checksum offload flags and send it along.
     */
    (void) bridge_enqueue(sc, NULL, dst_if, m, kChecksumOperationClear);
    return;

drop:
    BRIDGE_UNLOCK(sc);
    m_freem(m);
}

#if BRIDGE_DEBUG

char *ether_ntop(char *, size_t, const u_char *);

__private_extern__ char *
ether_ntop(char *buf, size_t len, const u_char *ap)
{
    snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x",
        ap[0], ap[1], ap[2], ap[3], ap[4], ap[5]);

    return buf;
}

#endif /* BRIDGE_DEBUG */
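
/*
 * Example (illustrative sketch): formatting a MAC address for a debug log
 * with the helper above.
 */
#if 0 /* example only */
static void
bridge_example_log_dhost(const struct ether_header *eh)
{
    char buf[3 * ETHER_ADDR_LEN];   /* "xx:xx:xx:xx:xx:xx" plus NUL */

    ether_ntop(buf, sizeof(buf), eh->ether_dhost);
    printf("dst %s\n", buf);
}
#endif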
4453 * Filter input from a member interface. Queue the packet for
4454 * bridging if it is not for us.
4456 __private_extern__ errno_t
4457 bridge_input(struct ifnet
*ifp
, struct mbuf
*m
, void *frame_header
)
4459 struct bridge_softc
*sc
= ifp
->if_bridge
;
4460 struct bridge_iflist
*bif
, *bif2
;
4462 struct ether_header
*eh
;
4463 struct mbuf
*mc
, *mc2
;
4468 if (if_bridge_debug
& BR_DBGF_INPUT
) {
4469 printf("%s: %s from %s m 0x%llx data 0x%llx\n", __func__
,
4470 sc
->sc_ifp
->if_xname
, ifp
->if_xname
,
4471 (uint64_t)VM_KERNEL_ADDRPERM(m
),
4472 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m
)));
4474 #endif /* BRIDGE_DEBUG */
4476 if ((sc
->sc_ifp
->if_flags
& IFF_RUNNING
) == 0) {
4478 if (if_bridge_debug
& BR_DBGF_INPUT
) {
4479 printf("%s: %s not running passing along\n",
4480 __func__
, sc
->sc_ifp
->if_xname
);
4482 #endif /* BRIDGE_DEBUG */
4487 vlan
= VLANTAGOF(m
);
4491 * Implement support for bridge monitoring. If this flag has been
4492 * set on this interface, discard the packet once we push it through
4493 * the bpf(4) machinery, but before we do, increment the byte and
4494 * packet counters associated with this interface.
4496 if ((bifp
->if_flags
& IFF_MONITOR
) != 0) {
4497 m
->m_pkthdr
.rcvif
= bifp
;
4498 BRIDGE_BPF_MTAP_INPUT(sc
, m
);
4499 (void) ifnet_stat_increment_in(bifp
, 1, m
->m_pkthdr
.len
, 0);
4503 #endif /* IFF_MONITOR */
4506 * Need to clear the promiscous flags otherwise it will be
4507 * dropped by DLIL after processing filters
4509 if ((mbuf_flags(m
) & MBUF_PROMISC
)) {
4510 mbuf_setflags_mask(m
, 0, MBUF_PROMISC
);
4514 bif
= bridge_lookup_member_if(sc
, ifp
);
4518 if (if_bridge_debug
& BR_DBGF_INPUT
) {
4519 printf("%s: %s bridge_lookup_member_if failed\n",
4520 __func__
, sc
->sc_ifp
->if_xname
);
4522 #endif /* BRIDGE_DEBUG */
4526 if (bif
->bif_flags
& BIFF_HOST_FILTER
) {
4527 error
= bridge_host_filter(bif
, m
);
4529 if (if_bridge_debug
& BR_DBGF_INPUT
) {
4530 printf("%s: %s bridge_host_filter failed\n",
4531 __func__
, bif
->bif_ifp
->if_xname
);
4538 eh
= mtod(m
, struct ether_header
*);
4542 if (m
->m_flags
& (M_BCAST
| M_MCAST
)) {
4544 if (if_bridge_debug
& BR_DBGF_MCAST
) {
4545 if ((m
->m_flags
& M_MCAST
)) {
4546 printf("%s: multicast: "
4547 "%02x:%02x:%02x:%02x:%02x:%02x\n",
4549 eh
->ether_dhost
[0], eh
->ether_dhost
[1],
4550 eh
->ether_dhost
[2], eh
->ether_dhost
[3],
4551 eh
->ether_dhost
[4], eh
->ether_dhost
[5]);
4554 #endif /* BRIDGE_DEBUG */
4556 /* Tap off 802.1D packets; they do not get forwarded. */
4557 if (memcmp(eh
->ether_dhost
, bstp_etheraddr
,
4558 ETHER_ADDR_LEN
) == 0) {
4560 m
= bstp_input(&bif
->bif_stp
, ifp
, m
);
4561 #else /* !BRIDGESTP */
4564 #endif /* !BRIDGESTP */
4571 if ((bif
->bif_ifflags
& IFBIF_STP
) &&
4572 bif
->bif_stp
.bp_state
== BSTP_IFSTATE_DISCARDING
) {
4578 * Make a deep copy of the packet and enqueue the copy
4579 * for bridge processing; return the original packet for
4582 mc
= m_dup(m
, M_DONTWAIT
);
4589 * Perform the bridge forwarding function with the copy.
4591 * Note that bridge_forward calls BRIDGE_UNLOCK
4593 bridge_forward(sc
, bif
, mc
);
4596 * Reinject the mbuf as arriving on the bridge so we have a
4597 * chance at claiming multicast packets. We can not loop back
4598 * here from ether_input as a bridge is never a member of a
4601 VERIFY(bifp
->if_bridge
== NULL
);
4602 mc2
= m_dup(m
, M_DONTWAIT
);
4604 /* Keep the layer3 header aligned */
4605 int i
= min(mc2
->m_pkthdr
.len
, max_protohdr
);
4606 mc2
= m_copyup(mc2
, i
, ETHER_ALIGN
);
4609 /* mark packet as arriving on the bridge */
4610 mc2
->m_pkthdr
.rcvif
= bifp
;
4611 mc2
->m_pkthdr
.pkt_hdr
= mbuf_data(mc2
);
4614 if (sc
->sc_bpf_input
) {
4615 bridge_bpf_input(bifp
, mc2
);
4617 #endif /* NBPFILTER */
4618 (void) mbuf_setdata(mc2
,
4619 (char *)mbuf_data(mc2
) + ETHER_HDR_LEN
,
4620 mbuf_len(mc2
) - ETHER_HDR_LEN
);
4621 (void) mbuf_pkthdr_adjustlen(mc2
, -ETHER_HDR_LEN
);
4623 (void) ifnet_stat_increment_in(bifp
, 1,
4624 mbuf_pkthdr_len(mc2
), 0);
4627 if (if_bridge_debug
& BR_DBGF_MCAST
) {
4628 printf("%s: %s mcast for us\n", __func__
,
4629 sc
->sc_ifp
->if_xname
);
4631 #endif /* BRIDGE_DEBUG */
4633 dlil_input_packet_list(bifp
, mc2
);
		/* Return the original packet for local processing. */
		return 0;
	}

	if ((bif->bif_ifflags & IFBIF_STP) &&
	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
		BRIDGE_UNLOCK(sc);
		return 0;
	}

#ifdef DEV_CARP
#define CARP_CHECK_WE_ARE_DST(iface) \
	((iface)->if_carp &&\
	    carp_forus((iface)->if_carp, eh->ether_dhost))
#define CARP_CHECK_WE_ARE_SRC(iface) \
	((iface)->if_carp &&\
	    carp_forus((iface)->if_carp, eh->ether_shost))
#else
#define CARP_CHECK_WE_ARE_DST(iface) 0
#define CARP_CHECK_WE_ARE_SRC(iface) 0
#endif

#ifdef INET6
#define PFIL_HOOKED_INET6 PFIL_HOOKED(&inet6_pfil_hook)
#else
#define PFIL_HOOKED_INET6 0
#endif

#if defined(PFIL_HOOKS)
#define PFIL_PHYS(sc, ifp, m) do { \
	if (pfil_local_phys && \
	    (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) { \
		if (bridge_pfil(&m, NULL, ifp, \
		    PFIL_IN) != 0 || m == NULL) { \
			BRIDGE_UNLOCK(sc); \
			return (EJUSTRETURN); \
		} \
	} \
} while (0)
#else /* PFIL_HOOKS */
#define PFIL_PHYS(sc, ifp, m)
#endif /* PFIL_HOOKS */

#define GRAB_OUR_PACKETS(iface) \
	if ((iface)->if_type == IFT_GIF) \
		continue; \
	/* It is destined for us. */ \
	if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, \
	    ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST((iface))) { \
		if ((iface)->if_type == IFT_BRIDGE) { \
			BRIDGE_BPF_MTAP_INPUT(sc, m); \
			/* Filter on the physical interface. */ \
			PFIL_PHYS(sc, iface, m); \
		} else { \
			bpf_tap_in(iface, DLT_EN10MB, m, NULL, 0); \
		} \
		if (bif->bif_ifflags & IFBIF_LEARNING) { \
			error = bridge_rtupdate(sc, eh->ether_shost, \
			    vlan, bif, 0, IFBAF_DYNAMIC); \
			if (error && bif->bif_addrmax) { \
				BRIDGE_UNLOCK(sc); \
				m_freem(m); \
				return (EJUSTRETURN); \
			} \
		} \
		BRIDGE_UNLOCK(sc); \
		mbuf_pkthdr_setrcvif(m, iface); \
		mbuf_pkthdr_setheader(m, mbuf_data(m)); \
		mbuf_setdata(m, (char *)mbuf_data(m) + ETHER_HDR_LEN, \
		    mbuf_len(m) - ETHER_HDR_LEN); \
		mbuf_pkthdr_adjustlen(m, -ETHER_HDR_LEN); \
		m->m_flags |= M_PROTO1; /* set to avoid loops */ \
		dlil_input_packet_list(iface, m); \
		return (EJUSTRETURN); \
	} \
	\
	/* We just received a packet that we sent out. */ \
	if (memcmp(IF_LLADDR((iface)), eh->ether_shost, \
	    ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_SRC((iface))) { \
		BRIDGE_UNLOCK(sc); \
		m_freem(m); \
		return (EJUSTRETURN); \
	}
	/*
	 * If the packet is for us, set the packet's source as the
	 * bridge, and return the packet back to ether_input for
	 * local processing.
	 */
	if (memcmp(eh->ether_dhost, IF_LLADDR(bifp),
	    ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST(bifp)) {
		/* Mark the packet as arriving on the bridge interface */
		(void) mbuf_pkthdr_setrcvif(m, bifp);
		mbuf_pkthdr_setheader(m, frame_header);

		/*
		 * If the interface is learning, and the source
		 * address is valid and not multicast, record
		 * the address.
		 */
		if (bif->bif_ifflags & IFBIF_LEARNING) {
			(void) bridge_rtupdate(sc, eh->ether_shost,
			    vlan, bif, 0, IFBAF_DYNAMIC);
		}

		BRIDGE_BPF_MTAP_INPUT(sc, m);

		(void) mbuf_setdata(m, (char *)mbuf_data(m) + ETHER_HDR_LEN,
		    mbuf_len(m) - ETHER_HDR_LEN);
		(void) mbuf_pkthdr_adjustlen(m, -ETHER_HDR_LEN);

		(void) ifnet_stat_increment_in(bifp, 1, mbuf_pkthdr_len(m), 0);

		BRIDGE_UNLOCK(sc);

#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_INPUT) {
			printf("%s: %s packet for bridge\n", __func__,
			    sc->sc_ifp->if_xname);
		}
#endif /* BRIDGE_DEBUG */

		dlil_input_packet_list(bifp, m);

		return (EJUSTRETURN);
	}

	/*
	 * if the destination of the packet is for the MAC address of
	 * the member interface itself, then we don't need to forward
	 * it -- just pass it back.  Note that it'll likely just be
	 * dropped by the stack, but if something else is bound to
	 * the interface directly (for example, the wireless stats
	 * protocol -- although that actually uses BPF right now),
	 * then it will consume the packet
	 *
	 * ALSO, note that we do this check AFTER checking for the
	 * bridge's own MAC address, because the bridge may be
	 * using the SAME MAC address as one of its interfaces
	 */
	if (memcmp(eh->ether_dhost, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0) {
#ifdef VERY_VERY_VERY_DIAGNOSTIC
		printf("%s: not forwarding packet bound for member "
		    "interface\n", __func__);
#endif
		BRIDGE_UNLOCK(sc);
		return 0;
	}

	/* Now check the remaining bridge members. */
	TAILQ_FOREACH(bif2, &sc->sc_iflist, bif_next) {
		if (bif2->bif_ifp != ifp) {
			GRAB_OUR_PACKETS(bif2->bif_ifp);
		}
	}

#undef CARP_CHECK_WE_ARE_DST
#undef CARP_CHECK_WE_ARE_SRC
#undef GRAB_OUR_PACKETS

	/*
	 * Perform the bridge forwarding function.
	 *
	 * Note that bridge_forward calls BRIDGE_UNLOCK
	 */
	bridge_forward(sc, bif, m);

	return (EJUSTRETURN);
}
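
/*
 * Illustrative sketch (not compiled): bridge_input() only re-injects a frame
 * after aligning its layer-3 header.  m_copyup() pulls the first
 * min(len, max_protohdr) bytes into a buffer with ETHER_ALIGN (2) bytes of
 * leading pad, so the IP header that follows the 14-byte Ethernet header
 * ends up on a 4-byte boundary:
 *
 *	int i = min(m->m_pkthdr.len, max_protohdr);
 *	m = m_copyup(m, i, ETHER_ALIGN);
 *	if (m == NULL) {
 *		// m_copyup() freed the original chain; nothing to re-inject
 *		return;
 *	}
 *
 * Only the leading protocol headers need to be contiguous and aligned; the
 * rest of the mbuf chain is left as-is.
 */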
/*
 * bridge_broadcast:
 *
 *	Send a frame to all interfaces that are members of
 *	the bridge, except for the one on which the packet
 *	arrived.
 *
 *	NOTE: Releases the lock on return.
 */
static void
bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
    struct mbuf *m, int runfilt)
{
#ifndef PFIL_HOOKS
#pragma unused(runfilt)
#endif /* PFIL_HOOKS */
	struct bridge_iflist *dbif, *sbif;
	struct mbuf *mc;
	struct mbuf *mc_in;
	struct ifnet *dst_if;
	int error = 0, used = 0;
	boolean_t is_output;
	ChecksumOperation cksum_op;

	if (src_if != NULL) {
		is_output = FALSE;
		cksum_op = kChecksumOperationClear;
		sbif = bridge_lookup_member_if(sc, src_if);
	} else {
		/*
		 * src_if is NULL when the bridge interface calls
		 * bridge_broadcast().
		 */
		is_output = TRUE;
		cksum_op = kChecksumOperationFinalize;
		sbif = NULL;
	}

	BRIDGE_LOCK2REF(sc, error);
	if (error) {
		m_freem(m);
		return;
	}

#ifdef PFIL_HOOKS
	/* Filter on the bridge interface before broadcasting */
	if (runfilt && (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) {
		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0) {
			goto out;
		}
		if (m == NULL) {
			goto out;
		}
	}
#endif /* PFIL_HOOKS */

	TAILQ_FOREACH(dbif, &sc->sc_iflist, bif_next) {
		dst_if = dbif->bif_ifp;
		if (dst_if == src_if) {
			/* skip the interface that the packet came in on */
			continue;
		}

		/* Private segments cannot talk to each other */
		if (sbif != NULL &&
		    (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE)) {
			continue;
		}

		if ((dbif->bif_ifflags & IFBIF_STP) &&
		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
			continue;
		}

		if ((dbif->bif_ifflags & IFBIF_DISCOVER) == 0 &&
		    (m->m_flags & (M_BCAST | M_MCAST)) == 0) {
			continue;
		}

		if ((dst_if->if_flags & IFF_RUNNING) == 0) {
			continue;
		}

		if (!(dbif->bif_flags & BIFF_MEDIA_ACTIVE)) {
			continue;
		}

		if (TAILQ_NEXT(dbif, bif_next) == NULL) {
			used = 1;
			mc = m;
		} else {
			mc = m_dup(m, M_DONTWAIT);
			if (mc == NULL) {
				(void) ifnet_stat_increment_out(sc->sc_ifp,
				    0, 0, 1);
				continue;
			}
		}

		/*
		 * If broadcast input is enabled, do so only if this
		 * is an input packet.
		 */
		if (!is_output &&
		    (dbif->bif_flags & BIFF_INPUT_BROADCAST) != 0) {
			mc_in = m_dup(mc, M_DONTWAIT);
			/* this could fail, but we continue anyway */
		} else {
			mc_in = NULL;
		}

#ifdef PFIL_HOOKS
		/*
		 * Filter on the output interface. Pass a NULL bridge interface
		 * pointer so we do not redundantly filter on the bridge for
		 * each interface we broadcast on.
		 */
		if (runfilt &&
		    (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) {
			if (used == 0) {
				/* Keep the layer3 header aligned */
				int i = min(mc->m_pkthdr.len, max_protohdr);
				mc = m_copyup(mc, i, ETHER_ALIGN);
				if (mc == NULL) {
					(void) ifnet_stat_increment_out(
					    sc->sc_ifp, 0, 0, 1);
					if (mc_in != NULL) {
						m_freem(mc_in);
					}
					continue;
				}
			}
			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0) {
				if (mc_in != NULL) {
					m_freem(mc_in);
				}
				continue;
			}
			if (mc == NULL) {
				if (mc_in != NULL) {
					m_freem(mc_in);
				}
				continue;
			}
		}
#endif /* PFIL_HOOKS */

		(void) bridge_enqueue(sc, NULL, dst_if, mc, cksum_op);

		if (mc_in == NULL) {
			continue;
		}

		bpf_tap_in(dst_if, DLT_EN10MB, mc_in, NULL, 0);
		mbuf_pkthdr_setrcvif(mc_in, dst_if);
		mbuf_pkthdr_setheader(mc_in, mbuf_data(mc_in));
		mbuf_setdata(mc_in, (char *)mbuf_data(mc_in) + ETHER_HDR_LEN,
		    mbuf_len(mc_in) - ETHER_HDR_LEN);
		mbuf_pkthdr_adjustlen(mc_in, -ETHER_HDR_LEN);
		mc_in->m_flags |= M_PROTO1; /* set to avoid loops */
		dlil_input_packet_list(dst_if, mc_in);
	}

	if (used == 0) {
		m_freem(m);
	}

#ifdef PFIL_HOOKS
out:
#endif /* PFIL_HOOKS */

	BRIDGE_UNREF(sc);
}
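
/*
 * Illustrative sketch (not compiled): the checksum operation handed to
 * bridge_enqueue() above depends on where the broadcast originated.  A
 * hypothetical helper making the same decision would read:
 *
 *	static ChecksumOperation
 *	broadcast_cksum_op(struct ifnet *src_if)
 *	{
 *		if (src_if != NULL) {
 *			// forwarded frame: drop any offload metadata that
 *			// belonged to the inbound interface
 *			return kChecksumOperationClear;
 *		}
 *		// frame generated by the bridge interface itself: the
 *		// checksums still need to be computed in software
 *		return kChecksumOperationFinalize;
 *	}
 *
 * The helper name is made up for illustration; the logic mirrors the
 * src_if test at the top of bridge_broadcast().
 */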
/*
 * bridge_span:
 *
 *	Duplicate a packet out one or more interfaces that are in span mode,
 *	the original mbuf is unmodified.
 */
static void
bridge_span(struct bridge_softc *sc, struct mbuf *m)
{
	struct bridge_iflist *bif;
	struct ifnet *dst_if;
	struct mbuf *mc;

	if (TAILQ_EMPTY(&sc->sc_spanlist)) {
		return;
	}

	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) {
		dst_if = bif->bif_ifp;

		if ((dst_if->if_flags & IFF_RUNNING) == 0) {
			continue;
		}

		mc = m_copypacket(m, M_DONTWAIT);
		if (mc == NULL) {
			(void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
			continue;
		}

		(void) bridge_enqueue(sc, NULL, dst_if, mc,
		    kChecksumOperationNone);
	}
}
/*
 * bridge_rtupdate:
 *
 *	Add a bridge routing entry.
 */
static int
bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
    struct bridge_iflist *bif, int setflags, uint8_t flags)
{
	struct bridge_rtnode *brt;
	int error;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	/* Check the source address is valid and not multicast. */
	if (ETHER_IS_MULTICAST(dst) ||
	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
	    dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0) {
		return EINVAL;
	}

	/* 802.1p frames map to vlan 1 */
	if (vlan == 0) {
		vlan = 1;
	}

	/*
	 * A route for this destination might already exist.  If so,
	 * update it, otherwise create a new one.
	 */
	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
		if (sc->sc_brtcnt >= sc->sc_brtmax) {
			sc->sc_brtexceeded++;
			return ENOSPC;
		}
		/* Check per interface address limits (if enabled) */
		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
			bif->bif_addrexceeded++;
			return ENOSPC;
		}

		/*
		 * Allocate a new bridge forwarding node, and
		 * initialize the expiration time and Ethernet
		 * address.
		 */
		brt = zalloc_noblock(bridge_rtnode_pool);
		if (brt == NULL) {
			return ENOMEM;
		}
		bzero(brt, sizeof(struct bridge_rtnode));

		if (bif->bif_ifflags & IFBIF_STICKY) {
			brt->brt_flags = IFBAF_STICKY;
		} else {
			brt->brt_flags = IFBAF_DYNAMIC;
		}

		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
		brt->brt_vlan = vlan;

		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
			zfree(bridge_rtnode_pool, brt);
			return error;
		}
		brt->brt_dst = bif;
		bif->bif_addrcnt++;
#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_RT_TABLE) {
			printf("%s: added %02x:%02x:%02x:%02x:%02x:%02x "
			    "on %s count %u hashsize %u\n", __func__,
			    dst[0], dst[1], dst[2], dst[3], dst[4], dst[5],
			    sc->sc_ifp->if_xname, sc->sc_brtcnt,
			    sc->sc_rthash_size);
		}
#endif /* BRIDGE_DEBUG */
	}

	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
	    brt->brt_dst != bif) {
		brt->brt_dst->bif_addrcnt--;
		brt->brt_dst = bif;
		brt->brt_dst->bif_addrcnt++;
	}

	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
		unsigned long now;

		now = (unsigned long) net_uptime();
		brt->brt_expire = now + sc->sc_brttimeout;
	}
	if (setflags) {
		brt->brt_flags = flags;
	}

	return 0;
}
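
/*
 * Illustrative sketch (not compiled): for a dynamic entry, every frame seen
 * from the source address pushes the expiration forward, so the address only
 * ages out after sc_brttimeout seconds of silence:
 *
 *	unsigned long now = (unsigned long)net_uptime();
 *	brt->brt_expire = now + sc->sc_brttimeout;	// refreshed per frame
 *
 * Entries whose type is not IFBAF_DYNAMIC (for example sticky entries
 * created on IFBIF_STICKY members) keep their flags and are skipped by the
 * aging pass in bridge_rtage().
 */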
/*
 * bridge_rtlookup:
 *
 *	Lookup the destination interface for an address.
 */
static struct ifnet *
bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
{
	struct bridge_rtnode *brt;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL) {
		return NULL;
	}

	return brt->brt_ifp;
}
/*
 * bridge_rttrim:
 *
 *	Trim the routing table so that we have a number
 *	of routing entries less than or equal to the
 *	maximum number.
 */
static void
bridge_rttrim(struct bridge_softc *sc)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	/* Make sure we actually need to do this. */
	if (sc->sc_brtcnt <= sc->sc_brtmax) {
		return;
	}

	/* Force an aging cycle; this might trim enough addresses. */
	bridge_rtage(sc);
	if (sc->sc_brtcnt <= sc->sc_brtmax) {
		return;
	}

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
			bridge_rtnode_destroy(sc, brt);
			if (sc->sc_brtcnt <= sc->sc_brtmax) {
				return;
			}
		}
	}
}
/*
 * bridge_aging_timer:
 *
 *	Aging periodic timer for the bridge routing table.
 */
static void
bridge_aging_timer(struct bridge_softc *sc)
{
	BRIDGE_LOCK_ASSERT_HELD(sc);

	bridge_rtage(sc);

	if ((sc->sc_ifp->if_flags & IFF_RUNNING) &&
	    (sc->sc_flags & SCF_DETACHING) == 0) {
		sc->sc_aging_timer.bdc_sc = sc;
		sc->sc_aging_timer.bdc_func = bridge_aging_timer;
		sc->sc_aging_timer.bdc_ts.tv_sec = bridge_rtable_prune_period;
		bridge_schedule_delayed_call(&sc->sc_aging_timer);
	}
}
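
/*
 * Illustrative sketch (not compiled): the aging timer re-arms itself through
 * the bridge delayed-call mechanism, so the periodic flow is roughly:
 *
 *	bridge_aging_timer(sc)
 *	    -> bridge_rtage(sc)		// drop expired dynamic entries
 *	    -> sc->sc_aging_timer.bdc_func = bridge_aging_timer;
 *	       sc->sc_aging_timer.bdc_ts.tv_sec = bridge_rtable_prune_period;
 *	    -> bridge_schedule_delayed_call(&sc->sc_aging_timer)
 *
 * Rescheduling stops once the interface is no longer IFF_RUNNING or
 * SCF_DETACHING is set, which is how the periodic work is quiesced before
 * the softc is torn down.
 */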
/*
 * bridge_rtage:
 *
 *	Perform an aging cycle.
 */
static void
bridge_rtage(struct bridge_softc *sc)
{
	struct bridge_rtnode *brt, *nbrt;
	unsigned long now;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	now = (unsigned long) net_uptime();

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
			if (now >= brt->brt_expire) {
				bridge_rtnode_destroy(sc, brt);
			}
		}
	}
}
/*
 * bridge_rtflush:
 *
 *	Remove all dynamic addresses from the bridge.
 */
static void
bridge_rtflush(struct bridge_softc *sc, int full)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if (full ||
		    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
			bridge_rtnode_destroy(sc, brt);
		}
	}
}
/*
 * bridge_rtdaddr:
 *
 *	Remove an address from the table.
 */
static int
bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
{
	struct bridge_rtnode *brt;
	int found = 0;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	/*
	 * If vlan is zero then we want to delete for all vlans so the lookup
	 * may return more than one.
	 */
	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
		bridge_rtnode_destroy(sc, brt);
		found = 1;
	}

	return found ? 0 : ENOENT;
}
/*
 * bridge_rtdelete:
 *
 *	Delete routes to a specific member interface.
 */
static void
bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if (brt->brt_ifp == ifp && (full ||
		    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) {
			bridge_rtnode_destroy(sc, brt);
		}
	}
}
/*
 * bridge_rtable_init:
 *
 *	Initialize the route table for this bridge.
 */
static int
bridge_rtable_init(struct bridge_softc *sc)
{
	u_int32_t i;

	sc->sc_rthash = _MALLOC(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (sc->sc_rthash == NULL) {
		printf("%s: no memory\n", __func__);
		return ENOMEM;
	}
	sc->sc_rthash_size = BRIDGE_RTHASH_SIZE;

	for (i = 0; i < sc->sc_rthash_size; i++) {
		LIST_INIT(&sc->sc_rthash[i]);
	}

	sc->sc_rthash_key = RandomULong();

	LIST_INIT(&sc->sc_rtlist);

	return 0;
}
/*
 * bridge_rthash_delayed_resize:
 *
 *	Resize the routing table hash on a delayed thread call.
 */
static void
bridge_rthash_delayed_resize(struct bridge_softc *sc)
{
	u_int32_t new_rthash_size;
	struct _bridge_rtnode_list *new_rthash = NULL;
	struct _bridge_rtnode_list *old_rthash = NULL;
	u_int32_t i;
	struct bridge_rtnode *brt;
	int error = 0;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	/*
	 * Four entries per hash bucket is our ideal load factor
	 */
	if (sc->sc_brtcnt < sc->sc_rthash_size * 4) {
		goto out;
	}

	/*
	 * Doubling the number of hash buckets may be too simplistic
	 * especially when facing a spike of new entries
	 */
	new_rthash_size = sc->sc_rthash_size * 2;

	sc->sc_flags |= SCF_RESIZING;
	BRIDGE_UNLOCK(sc);

	new_rthash = _MALLOC(sizeof(*sc->sc_rthash) * new_rthash_size,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	BRIDGE_LOCK(sc);
	sc->sc_flags &= ~SCF_RESIZING;

	if (new_rthash == NULL) {
		error = ENOMEM;
		goto out;
	}
	if ((sc->sc_flags & SCF_DETACHING)) {
		error = ENODEV;
		goto out;
	}
	/*
	 * Fail safe from here on
	 */
	old_rthash = sc->sc_rthash;
	sc->sc_rthash = new_rthash;
	sc->sc_rthash_size = new_rthash_size;

	/*
	 * Get a new key to force entries to be shuffled around to reduce
	 * the likelihood they will land in the same buckets
	 */
	sc->sc_rthash_key = RandomULong();

	for (i = 0; i < sc->sc_rthash_size; i++) {
		LIST_INIT(&sc->sc_rthash[i]);
	}

	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
		LIST_REMOVE(brt, brt_hash);
		(void) bridge_rtnode_hash(sc, brt);
	}
out:
	if (error == 0) {
#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_RT_TABLE) {
			printf("%s: %s new size %u\n", __func__,
			    sc->sc_ifp->if_xname, sc->sc_rthash_size);
		}
#endif /* BRIDGE_DEBUG */
		_FREE(old_rthash, M_DEVBUF);
	} else {
#if BRIDGE_DEBUG
		printf("%s: %s failed %d\n", __func__,
		    sc->sc_ifp->if_xname, error);
#endif /* BRIDGE_DEBUG */
		if (new_rthash != NULL) {
			_FREE(new_rthash, M_DEVBUF);
		}
	}
}
/*
 * bridge_rthash_resize:
 *
 *	Resize the number of hash buckets based on the load factor
 *	Currently only grow
 *	Failing to resize the hash table is not fatal
 */
static void
bridge_rthash_resize(struct bridge_softc *sc)
{
	BRIDGE_LOCK_ASSERT_HELD(sc);

	if ((sc->sc_flags & SCF_DETACHING) || (sc->sc_flags & SCF_RESIZING)) {
		return;
	}

	/*
	 * Four entries per hash bucket is our ideal load factor
	 */
	if (sc->sc_brtcnt < sc->sc_rthash_size * 4) {
		return;
	}

	/*
	 * Hard limit on the size of the routing hash table
	 */
	if (sc->sc_rthash_size >= bridge_rtable_hash_size_max) {
		return;
	}

	sc->sc_resize_call.bdc_sc = sc;
	sc->sc_resize_call.bdc_func = bridge_rthash_delayed_resize;
	bridge_schedule_delayed_call(&sc->sc_resize_call);
}
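
/*
 * Illustrative sketch (not compiled): with the "four entries per bucket"
 * load factor used above, and assuming a hypothetical starting size of 16
 * buckets, growth would be requested at these points:
 *
 *	 16 buckets -> resize scheduled at >=  64 learned addresses
 *	 32 buckets -> resize scheduled at >= 128 learned addresses
 *	 64 buckets -> resize scheduled at >= 256 learned addresses
 *
 * Growth is capped at bridge_rtable_hash_size_max, and the actual work runs
 * in bridge_rthash_delayed_resize() on a delayed call so the blocking
 * M_WAITOK allocation stays out of the packet-processing path.
 */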
/*
 * bridge_rtable_fini:
 *
 *	Deconstruct the route table for this bridge.
 */
static void
bridge_rtable_fini(struct bridge_softc *sc)
{
	KASSERT(sc->sc_brtcnt == 0,
	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
	if (sc->sc_rthash) {
		_FREE(sc->sc_rthash, M_DEVBUF);
		sc->sc_rthash = NULL;
	}
}
/*
 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
 */
#define mix(a, b, c) \
do { \
	a -= b; a -= c; a ^= (c >> 13); \
	b -= c; b -= a; b ^= (a << 8); \
	c -= a; c -= b; c ^= (b >> 13); \
	a -= b; a -= c; a ^= (c >> 12); \
	b -= c; b -= a; b ^= (a << 16); \
	c -= a; c -= b; c ^= (b >> 5); \
	a -= b; a -= c; a ^= (c >> 3); \
	b -= c; b -= a; b ^= (a << 10); \
	c -= a; c -= b; c ^= (b >> 15); \
} while ( /*CONSTCOND*/ 0)

static __inline uint32_t
bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;

	b += addr[5] << 8;
	b += addr[4];
	a += addr[3] << 24;
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];

	mix(a, b, c);

	return c & BRIDGE_RTHASH_MASK(sc);
}
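
/*
 * Illustrative sketch (not compiled): the same mix()-based hash can be
 * exercised on its own.  A stand-alone approximation, with the per-bridge
 * key replaced by a caller-supplied value, would be:
 *
 *	static uint32_t
 *	mac_hash(const uint8_t addr[ETHER_ADDR_LEN], uint32_t key,
 *	    uint32_t mask)
 *	{
 *		uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key;
 *
 *		b += addr[5] << 8;
 *		b += addr[4];
 *		a += addr[3] << 24;
 *		a += addr[2] << 16;
 *		a += addr[1] << 8;
 *		a += addr[0];
 *		mix(a, b, c);
 *		return c & mask;	// mask is the bucket count minus one
 *	}
 *
 * Randomizing the key (sc_rthash_key here) makes bucket placement hard to
 * predict for an outside sender, which is why a fresh key is drawn whenever
 * the table is resized.
 */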
static int
bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
{
	int i, d;

	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
		d = ((int)a[i]) - ((int)b[i]);
	}

	return d;
}
/*
 * bridge_rtnode_lookup:
 *
 *	Look up a bridge route node for the specified destination. Compare the
 *	vlan id or if zero then just return the first match.
 */
static struct bridge_rtnode *
bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr,
    uint16_t vlan)
{
	struct bridge_rtnode *brt;
	uint32_t hash;
	int dir;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	hash = bridge_rthash(sc, addr);
	LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
		if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0)) {
			return brt;
		}
		if (dir > 0) {
			return NULL;
		}
	}

	return NULL;
}
/*
 * bridge_rtnode_hash:
 *
 *	Insert the specified bridge node into the route hash table.
 *	This is used when adding a new node or to rehash when resizing
 *	the hash table
 */
static int
bridge_rtnode_hash(struct bridge_softc *sc, struct bridge_rtnode *brt)
{
	struct bridge_rtnode *lbrt;
	uint32_t hash;
	int dir;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	hash = bridge_rthash(sc, brt->brt_addr);

	lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
	if (lbrt == NULL) {
		LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
		goto out;
	}

	do {
		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan) {
#if BRIDGE_DEBUG
			if (if_bridge_debug & BR_DBGF_RT_TABLE) {
				printf("%s: %s EEXIST "
				    "%02x:%02x:%02x:%02x:%02x:%02x\n",
				    __func__, sc->sc_ifp->if_xname,
				    brt->brt_addr[0], brt->brt_addr[1],
				    brt->brt_addr[2], brt->brt_addr[3],
				    brt->brt_addr[4], brt->brt_addr[5]);
			}
#endif /* BRIDGE_DEBUG */
			return EEXIST;
		}
		if (dir > 0) {
			LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
			goto out;
		}
		if (LIST_NEXT(lbrt, brt_hash) == NULL) {
			LIST_INSERT_AFTER(lbrt, brt, brt_hash);
			goto out;
		}
		lbrt = LIST_NEXT(lbrt, brt_hash);
	} while (lbrt != NULL);

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_RT_TABLE) {
		printf("%s: %s impossible %02x:%02x:%02x:%02x:%02x:%02x\n",
		    __func__, sc->sc_ifp->if_xname,
		    brt->brt_addr[0], brt->brt_addr[1], brt->brt_addr[2],
		    brt->brt_addr[3], brt->brt_addr[4], brt->brt_addr[5]);
	}
#endif /* BRIDGE_DEBUG */

out:
	return 0;
}
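
/*
 * Illustrative sketch (not compiled): each hash chain is kept ordered by
 * bridge_rtnode_addr_cmp(), with larger addresses toward the head, so
 * duplicate detection and insertion take a single walk.  For example,
 * inserting 00:11:22:33:44:55 into a chain holding ...:60 and ...:50:
 *
 *	cmp(new, 00:11:22:33:44:60) < 0  -> advance to the next node
 *	cmp(new, 00:11:22:33:44:50) > 0  -> LIST_INSERT_BEFORE() and stop
 *	cmp(new, node) == 0 with the same vlan -> EEXIST, nothing inserted
 *
 * bridge_rtnode_lookup() can rely on the same ordering to cut a probe short
 * once the probed address compares greater than the node under inspection.
 */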
/*
 * bridge_rtnode_insert:
 *
 *	Insert the specified bridge node into the route table.  We
 *	assume the entry is not already in the table.
 */
static int
bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
{
	int error;

	error = bridge_rtnode_hash(sc, brt);
	if (error != 0) {
		return error;
	}

	LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
	sc->sc_brtcnt++;

	bridge_rthash_resize(sc);

	return 0;
}
/*
 * bridge_rtnode_destroy:
 *
 *	Destroy a bridge rtnode.
 */
static void
bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
{
	BRIDGE_LOCK_ASSERT_HELD(sc);

	LIST_REMOVE(brt, brt_hash);

	LIST_REMOVE(brt, brt_list);
	sc->sc_brtcnt--;
	brt->brt_dst->bif_addrcnt--;
	zfree(bridge_rtnode_pool, brt);
}
/*
 * bridge_rtable_expire:
 *
 *	Set the expiry time for all routes on an interface.
 */
static void
bridge_rtable_expire(struct ifnet *ifp, int age)
{
	struct bridge_softc *sc = ifp->if_bridge;
	struct bridge_rtnode *brt;

	BRIDGE_LOCK(sc);

	/*
	 * If the age is zero then flush, otherwise set all the expiry times to
	 * age for the interface
	 */
	if (age == 0) {
		bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
	} else {
		unsigned long now;

		now = (unsigned long) net_uptime();

		LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
			/* Cap the expiry time to 'age' */
			if (brt->brt_ifp == ifp &&
			    brt->brt_expire > now + age &&
			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
				brt->brt_expire = now + age;
			}
		}
	}
	BRIDGE_UNLOCK(sc);
}
#if BRIDGESTP
/*
 * bridge_state_change:
 *
 *	Callback from the bridgestp code when a port changes states.
 */
static void
bridge_state_change(struct ifnet *ifp, int state)
{
	struct bridge_softc *sc = ifp->if_bridge;
	static const char *stpstates[] = {
		"disabled",
		"listening",
		"learning",
		"forwarding",
		"blocking",
		"discarding"
	};

	log(LOG_NOTICE, "%s: state changed to %s on %s\n",
	    sc->sc_ifp->if_xname,
	    stpstates[state], ifp->if_xname);
}
#endif /* BRIDGESTP */
#ifdef PFIL_HOOKS
/*
 * Send bridge packets through pfil if they are one of the types pfil can deal
 * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
 * question.) If *bifp or *ifp are NULL then packet filtering is skipped for
 * that interface.
 */
static int
bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
{
	int snap, error, i, hlen;
	struct ether_header *eh1, eh2;
	struct ip_fw_args args;
	struct ip *ip;
	struct llc llc1;
	u_int16_t ether_type;

	snap = 0;
	error = -1;     /* Default error if not error == 0 */

	/* we may return with the IP fields swapped, ensure it's not shared */
	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));

	if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0) {
		return 0; /* filtering is disabled */
	}
	i = min((*mp)->m_pkthdr.len, max_protohdr);
	if ((*mp)->m_len < i) {
		*mp = m_pullup(*mp, i);
		if (*mp == NULL) {
			printf("%s: m_pullup failed\n", __func__);
			return -1;
		}
	}

	eh1 = mtod(*mp, struct ether_header *);
	ether_type = ntohs(eh1->ether_type);

	/*
	 * Check for SNAP/LLC.
	 */
	if (ether_type < ETHERMTU) {
		struct llc *llc2 = (struct llc *)(eh1 + 1);

		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
		    llc2->llc_dsap == LLC_SNAP_LSAP &&
		    llc2->llc_ssap == LLC_SNAP_LSAP &&
		    llc2->llc_control == LLC_UI) {
			ether_type = htons(llc2->llc_un.type_snap.ether_type);
			snap = 1;
		}
	}

	/*
	 * If we're trying to filter bridge traffic, don't look at anything
	 * other than IP and ARP traffic.  If the filter doesn't understand
	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
	 * but of course we don't have an AppleTalk filter to begin with.
	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
	 * ARP traffic.)
	 */
	switch (ether_type) {
	case ETHERTYPE_ARP:
	case ETHERTYPE_REVARP:
		if (pfil_ipfw_arp == 0) {
			return 0; /* Automatically pass */
		}
		break;

	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		break;

	default:
		/*
		 * Check to see if the user wants to pass non-ip
		 * packets, these will not be checked by pfil(9) and
		 * passed unconditionally so the default is to drop.
		 */
		if (pfil_onlyip) {
			goto bad;
		}
	}

	/* Strip off the Ethernet header and keep a copy. */
	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t)&eh2);
	m_adj(*mp, ETHER_HDR_LEN);

	/* Strip off snap header, if present */
	if (snap) {
		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t)&llc1);
		m_adj(*mp, sizeof(struct llc));
	}

	/*
	 * Check the IP header for alignment and errors
	 */
	if (dir == PFIL_IN) {
		switch (ether_type) {
		case ETHERTYPE_IP:
			error = bridge_ip_checkbasic(mp);
			break;

		case ETHERTYPE_IPV6:
			error = bridge_ip6_checkbasic(mp);
			break;

		default:
			error = 0;
		}
		if (error) {
			goto bad;
		}
	}

	if (IPFW_LOADED && pfil_ipfw != 0 && dir == PFIL_OUT && ifp != NULL) {
		args.rule = ip_dn_claim_rule(*mp);
		if (args.rule != NULL && fw_one_pass) {
			goto ipfwpass; /* packet already partially processed */
		}

		args.next_hop = NULL;
		args.inp = NULL;        /* used by ipfw uid/gid/jail rules */
		i = ip_fw_chk_ptr(&args);

		if (DUMMYNET_LOADED && (i == IP_FW_DUMMYNET)) {
			/* put the Ethernet header back on */
			M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT, 0);
			if (*mp == NULL) {
				return error;
			}
			bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);

			/*
			 * Pass the pkt to dummynet, which consumes it. The
			 * packet will return to us via bridge_dummynet().
			 */
			ip_dn_io_ptr(mp, DN_TO_IFB_FWD, &args, DN_CLIENT_IPFW);
			return error;
		}

		if (i != IP_FW_PASS) { /* drop */
			goto bad;
		}
	}

ipfwpass:
	error = 0;

	/*
	 * Run the packet through pfil
	 */
	switch (ether_type) {
	case ETHERTYPE_IP:
		/*
		 * before calling the firewall, swap fields the same as
		 * IP does. here we assume the header is contiguous
		 */
		ip = mtod(*mp, struct ip *);

		ip->ip_len = ntohs(ip->ip_len);
		ip->ip_off = ntohs(ip->ip_off);

		/*
		 * Run pfil on the member interface and the bridge, both can
		 * be skipped by clearing pfil_member or pfil_bridge.
		 *
		 * Keep the order:
		 *   in_if -> bridge_if -> out_if
		 */
		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) {
			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
			    dir, NULL);
		}

		if (*mp == NULL || error != 0) { /* filter may consume */
			break;
		}

		if (pfil_member && ifp != NULL) {
			error = pfil_run_hooks(&inet_pfil_hook, mp, ifp,
			    dir, NULL);
		}

		if (*mp == NULL || error != 0) { /* filter may consume */
			break;
		}

		if (pfil_bridge && dir == PFIL_IN && bifp != NULL) {
			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
			    dir, NULL);
		}

		if (*mp == NULL || error != 0) { /* filter may consume */
			break;
		}

		/* check if we need to fragment the packet */
		if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
			i = (*mp)->m_pkthdr.len;
			if (i > ifp->if_mtu) {
				error = bridge_fragment(ifp, *mp, &eh2, snap,
				    &llc1);
				return error;
			}
		}

		/* Recalculate the ip checksum and restore byte ordering */
		ip = mtod(*mp, struct ip *);
		hlen = ip->ip_hl << 2;
		if (hlen < sizeof(struct ip)) {
			goto bad;
		}
		if (hlen > (*mp)->m_len) {
			if ((*mp = m_pullup(*mp, hlen)) == 0) {
				goto bad;
			}
			ip = mtod(*mp, struct ip *);
			if (ip == NULL) {
				goto bad;
			}
		}
		ip->ip_len = htons(ip->ip_len);
		ip->ip_off = htons(ip->ip_off);
		ip->ip_sum = 0;
		if (hlen == sizeof(struct ip)) {
			ip->ip_sum = in_cksum_hdr(ip);
		} else {
			ip->ip_sum = in_cksum(*mp, hlen);
		}
		break;

	case ETHERTYPE_IPV6:
		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) {
			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
			    dir, NULL);
		}

		if (*mp == NULL || error != 0) { /* filter may consume */
			break;
		}

		if (pfil_member && ifp != NULL) {
			error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
			    dir, NULL);
		}

		if (*mp == NULL || error != 0) { /* filter may consume */
			break;
		}

		if (pfil_bridge && dir == PFIL_IN && bifp != NULL) {
			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
			    dir, NULL);
		}
		break;

	default:
		error = 0;
		break;
	}

	if (*mp == NULL) {
		return error;
	}
	if (error != 0) {
		goto bad;
	}

	error = -1;

	/*
	 * Finally, put everything back the way it was and return
	 */
	if (snap) {
		M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT, 0);
		if (*mp == NULL) {
			return error;
		}
		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
	}

	M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT, 0);
	if (*mp == NULL) {
		return error;
	}
	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);

	return 0;

bad:
	m_freem(*mp);
	*mp = NULL;
	return error;
}
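
/*
 * Illustrative sketch (not compiled): the SNAP/LLC test in bridge_pfil()
 * keys off the fact that a type/length value below ETHERMTU marks an 802.3
 * length field rather than an Ethernet II type, so the real protocol has to
 * be read from the SNAP header that follows:
 *
 *	struct llc *llc2 = (struct llc *)(eh1 + 1);
 *
 *	if (ether_type < ETHERMTU &&
 *	    (*mp)->m_len >= ETHER_HDR_LEN + 8 &&
 *	    llc2->llc_dsap == LLC_SNAP_LSAP &&
 *	    llc2->llc_ssap == LLC_SNAP_LSAP &&
 *	    llc2->llc_control == LLC_UI) {
 *		// 802.3 + LLC/SNAP: the embedded ethertype is authoritative
 *		ether_type = htons(llc2->llc_un.type_snap.ether_type);
 *		snap = 1;	// strip the 8-byte header, restore it later
 *	}
 *
 * When snap is set, the LLC header is removed before the filters run and
 * prepended again, together with the saved Ethernet header, on the way out.
 */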
/*
 * Perform basic checks on header size since
 * pfil assumes ip_input has already processed
 * it for it.  Cut-and-pasted from ip_input.c.
 * Given how simple the IPv6 version is,
 * does the IPv4 version really need to be
 * this complicated?
 *
 * XXX Should we update ipstat here, or not?
 * XXX Right now we update ipstat but not
 * XXX csum_counter.
 */
static int
bridge_ip_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip *ip;
	int len, hlen;
	u_short sum;

	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		/* max_linkhdr is already rounded up to nearest 4-byte */
		if ((m = m_copyup(m, sizeof(struct ip),
		    max_linkhdr)) == NULL) {
			/* XXXJRT new stat, please */
			ipstat.ips_toosmall++;
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof(struct ip))) {
		if ((m = m_pullup(m, sizeof(struct ip))) == NULL) {
			ipstat.ips_toosmall++;
			goto bad;
		}
	}
	ip = mtod(m, struct ip *);
	if (ip == NULL) {
		goto bad;
	}

	if (ip->ip_v != IPVERSION) {
		ipstat.ips_badvers++;
		goto bad;
	}
	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) { /* minimum header length */
		ipstat.ips_badhlen++;
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == 0) {
			ipstat.ips_badhlen++;
			goto bad;
		}
		ip = mtod(m, struct ip *);
		if (ip == NULL) {
			goto bad;
		}
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		ipstat.ips_badsum++;
		goto bad;
	}

	/* Retrieve the packet length. */
	len = ntohs(ip->ip_len);

	/*
	 * Check for additional length bogosity
	 */
	if (len < hlen) {
		ipstat.ips_badlen++;
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < len) {
		ipstat.ips_tooshort++;
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return 0;

bad:
	*mp = m;
	return -1;
}
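
/*
 * Illustrative sketch (not compiled): the checksum branch above avoids
 * recomputing the IP header checksum when an earlier layer already verified
 * it.  The equivalent stand-alone test is:
 *
 *	u_short bad;
 *
 *	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
 *		// the driver (or a previous pass) already checked it
 *		bad = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
 *	} else if (hlen == sizeof(struct ip)) {
 *		bad = in_cksum_hdr(ip);		// header-only fast path
 *	} else {
 *		bad = in_cksum(m, hlen);	// header with IP options
 *	}
 *
 * A non-zero result bumps ips_badsum and drops the packet, mirroring what
 * ip_input() itself would do.
 */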
/*
 * Same as above, but for IPv6.
 * Cut-and-pasted from ip6_input.c.
 * XXX Should we update ip6stat, or not?
 */
static int
bridge_ip6_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip6_hdr *ip6;

	/*
	 * If the IPv6 header is not aligned, slurp it up into a new
	 * mbuf with space for link headers, in the event we forward
	 * it.  Otherwise, if it is aligned, make sure the entire base
	 * IPv6 header is in the first mbuf of the chain.
	 */
	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;
		/* max_linkhdr is already rounded up to nearest 4-byte */
		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
		    max_linkhdr)) == NULL) {
			/* XXXJRT new stat, please */
			ip6stat.ip6s_toosmall++;
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;
		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
			ip6stat.ip6s_toosmall++;
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	}

	ip6 = mtod(m, struct ip6_hdr *);

	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
		ip6stat.ip6s_badvers++;
		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return 0;

bad:
	*mp = m;
	return -1;
}
/*
 * bridge_fragment:
 *
 *	Return a fragmented mbuf chain.
 */
static int
bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
    int snap, struct llc *llc)
{
	struct mbuf *m0;
	struct ip *ip;
	int error = -1;

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL) {
		goto out;
	}
	ip = mtod(m, struct ip *);

	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
	    CSUM_DELAY_IP);
	if (error) {
		goto out;
	}

	/* walk the chain and re-add the Ethernet header */
	for (m0 = m; m0; m0 = m0->m_nextpkt) {
		if (error == 0) {
			if (snap) {
				M_PREPEND(m0, sizeof(struct llc),
				    M_DONTWAIT, 0);
				if (m0 == NULL) {
					error = ENOBUFS;
					continue;
				}
				bcopy(llc, mtod(m0, caddr_t),
				    sizeof(struct llc));
			}
			M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT, 0);
			if (m0 == NULL) {
				error = ENOBUFS;
				continue;
			}
			bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
		} else {
			m_freem(m0);
		}
	}

	if (error == 0) {
		ipstat.ips_fragmented++;
	}

	return error;

out:
	if (m != NULL) {
		m_freem(m);
	}
	return error;
}
#endif /* PFIL_HOOKS */
/*
 * bridge_set_bpf_tap:
 *
 *	Sets up the BPF callbacks.
 */
static errno_t
bridge_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, bpf_packet_func bpf_callback)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);

	if (sc == NULL || (sc->sc_flags & SCF_DETACHING)) {
		return ENODEV;
	}

	switch (mode) {
	case BPF_TAP_DISABLE:
		sc->sc_bpf_input = sc->sc_bpf_output = NULL;
		break;

	case BPF_TAP_INPUT:
		sc->sc_bpf_input = bpf_callback;
		break;

	case BPF_TAP_OUTPUT:
		sc->sc_bpf_output = bpf_callback;
		break;

	case BPF_TAP_INPUT_OUTPUT:
		sc->sc_bpf_input = sc->sc_bpf_output = bpf_callback;
		break;

	default:
		break;
	}

	return 0;
}
/*
 * bridge_detach:
 *
 *	Callback when interface has been detached.
 */
static void
bridge_detach(ifnet_t ifp)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);

#if BRIDGESTP
	bstp_detach(&sc->sc_stp);
#endif /* BRIDGESTP */

	/* Tear down the routing table. */
	bridge_rtable_fini(sc);

	lck_mtx_lock(&bridge_list_mtx);
	LIST_REMOVE(sc, sc_list);
	lck_mtx_unlock(&bridge_list_mtx);

	lck_mtx_destroy(&sc->sc_mtx, bridge_lock_grp);
	if_clone_softc_deallocate(&bridge_cloner, sc);
}
/*
 * bridge_bpf_input:
 *
 *	Invoke the input BPF callback if enabled
 */
__private_extern__ errno_t
bridge_bpf_input(ifnet_t ifp, struct mbuf *m)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);

	if (sc->sc_bpf_input) {
		if (mbuf_pkthdr_rcvif(m) != ifp) {
			printf("%s: rcvif: 0x%llx != ifp 0x%llx\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m)),
			    (uint64_t)VM_KERNEL_ADDRPERM(ifp));
		}
		(*sc->sc_bpf_input)(ifp, m);
	}
	return 0;
}
/*
 * bridge_bpf_output:
 *
 *	Invoke the output BPF callback if enabled
 */
__private_extern__ errno_t
bridge_bpf_output(ifnet_t ifp, struct mbuf *m)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);

	if (sc->sc_bpf_output) {
		(*sc->sc_bpf_output)(ifp, m);
	}
	return 0;
}
/*
 * bridge_link_event:
 *
 *	Report a data link event on an interface
 */
static void
bridge_link_event(struct ifnet *ifp, u_int32_t event_code)
{
	struct {
		struct kern_event_msg header;
		u_int32_t unit;
		char if_name[IFNAMSIZ];
	} event;

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
		printf("%s: %s event_code %u - %s\n", __func__, ifp->if_xname,
		    event_code, dlil_kev_dl_code_str(event_code));
	}
#endif /* BRIDGE_DEBUG */

	bzero(&event, sizeof(event));
	event.header.total_size = sizeof(event);
	event.header.vendor_code = KEV_VENDOR_APPLE;
	event.header.kev_class = KEV_NETWORK_CLASS;
	event.header.kev_subclass = KEV_DL_SUBCLASS;
	event.header.event_code = event_code;
	event.header.event_data[0] = ifnet_family(ifp);
	event.unit = (u_int32_t)ifnet_unit(ifp);
	strlcpy(event.if_name, ifnet_name(ifp), IFNAMSIZ);
	ifnet_event(ifp, &event.header);
}
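
/*
 * Illustrative sketch (not compiled): the event posted above travels over
 * the kernel event (KEV) channel as a kern_event_msg followed by the
 * interface payload, so a user-space listener on a PF_SYSTEM/SYSPROTO_EVENT
 * socket filtering on KEV_NETWORK_CLASS / KEV_DL_SUBCLASS would see roughly:
 *
 *	struct {
 *		struct kern_event_msg	header;	// event_code = KEV_DL_...
 *		u_int32_t		unit;	// ifnet_unit(ifp)
 *		char			if_name[IFNAMSIZ];
 *	} event;
 *
 * header.event_data[0] carries the ifnet family and header.total_size spans
 * the whole structure, which is how a receiver knows where if_name ends.
 */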
#define BRIDGE_HF_DROP(reason, func, line) { \
	bridge_hostfilter_stats.reason++; \
	if (if_bridge_debug & BR_DBGF_HOSTFILTER) \
		printf("%s.%d" #reason, func, line); \
	error = EINVAL; \
	goto done; \
}
/*
 * Make sure this is a DHCP or Bootp request that matches the host filter
 */
static int
bridge_dhcp_filter(struct bridge_iflist *bif, struct mbuf *m, size_t offset)
{
	int error = EINVAL;
	struct dhcp dhcp;

	/*
	 * Note: We use the dhcp structure because bootp structure definition
	 * is larger and some vendors do not pad the request
	 */
	error = mbuf_copydata(m, offset, sizeof(struct dhcp), &dhcp);
	if (error != 0) {
		BRIDGE_HF_DROP(brhf_dhcp_too_small, __func__, __LINE__);
	}
	if (dhcp.dp_op != BOOTREQUEST) {
		BRIDGE_HF_DROP(brhf_dhcp_bad_op, __func__, __LINE__);
	}
	/*
	 * The hardware address must be an exact match
	 */
	if (dhcp.dp_htype != ARPHRD_ETHER) {
		BRIDGE_HF_DROP(brhf_dhcp_bad_htype, __func__, __LINE__);
	}
	if (dhcp.dp_hlen != ETHER_ADDR_LEN) {
		BRIDGE_HF_DROP(brhf_dhcp_bad_hlen, __func__, __LINE__);
	}
	if (bcmp(dhcp.dp_chaddr, bif->bif_hf_hwsrc,
	    ETHER_ADDR_LEN) != 0) {
		BRIDGE_HF_DROP(brhf_dhcp_bad_chaddr, __func__, __LINE__);
	}
	/*
	 * Client address must match the host address or be not specified
	 */
	if (dhcp.dp_ciaddr.s_addr != bif->bif_hf_ipsrc.s_addr &&
	    dhcp.dp_ciaddr.s_addr != INADDR_ANY) {
		BRIDGE_HF_DROP(brhf_dhcp_bad_ciaddr, __func__, __LINE__);
	}
	error = 0;
done:
	return error;
}
static int
bridge_host_filter(struct bridge_iflist *bif, struct mbuf *m)
{
	int error = EINVAL;
	struct ether_header *eh;
	static struct in_addr inaddr_any = { .s_addr = INADDR_ANY };

	/*
	 * Check the Ethernet header is large enough
	 */
	if (mbuf_pkthdr_len(m) < sizeof(struct ether_header)) {
		BRIDGE_HF_DROP(brhf_ether_too_small, __func__, __LINE__);
	}
	if (mbuf_len(m) < sizeof(struct ether_header) &&
	    mbuf_pullup(&m, sizeof(struct ether_header)) != 0) {
		BRIDGE_HF_DROP(brhf_ether_pullup_failed, __func__, __LINE__);
	}
	eh = mtod(m, struct ether_header *);

	/*
	 * Restrict the source hardware address
	 */
	if ((bif->bif_flags & BIFF_HF_HWSRC) == 0 ||
	    bcmp(eh->ether_shost, bif->bif_hf_hwsrc,
	    ETHER_ADDR_LEN) != 0) {
		BRIDGE_HF_DROP(brhf_bad_ether_srchw_addr, __func__, __LINE__);
	}

	/*
	 * Restrict Ethernet protocols to ARP and IP
	 */
	if (eh->ether_type == htons(ETHERTYPE_ARP)) {
		struct ether_arp *ea;
		size_t minlen = sizeof(struct ether_header) +
		    sizeof(struct ether_arp);

		/*
		 * Make the Ethernet and ARP headers contiguous
		 */
		if (mbuf_pkthdr_len(m) < minlen) {
			BRIDGE_HF_DROP(brhf_arp_too_small, __func__, __LINE__);
		}
		if (mbuf_len(m) < minlen && mbuf_pullup(&m, minlen) != 0) {
			BRIDGE_HF_DROP(brhf_arp_pullup_failed,
			    __func__, __LINE__);
		}
		/*
		 * Verify this is an ethernet/ip arp
		 */
		eh = mtod(m, struct ether_header *);
		ea = (struct ether_arp *)(eh + 1);
		if (ea->arp_hrd != htons(ARPHRD_ETHER)) {
			BRIDGE_HF_DROP(brhf_arp_bad_hw_type,
			    __func__, __LINE__);
		}
		if (ea->arp_pro != htons(ETHERTYPE_IP)) {
			BRIDGE_HF_DROP(brhf_arp_bad_pro_type,
			    __func__, __LINE__);
		}
		/*
		 * Verify the address lengths are correct
		 */
		if (ea->arp_hln != ETHER_ADDR_LEN) {
			BRIDGE_HF_DROP(brhf_arp_bad_hw_len, __func__, __LINE__);
		}
		if (ea->arp_pln != sizeof(struct in_addr)) {
			BRIDGE_HF_DROP(brhf_arp_bad_pro_len,
			    __func__, __LINE__);
		}

		/*
		 * Allow only ARP request or ARP reply
		 */
		if (ea->arp_op != htons(ARPOP_REQUEST) &&
		    ea->arp_op != htons(ARPOP_REPLY)) {
			BRIDGE_HF_DROP(brhf_arp_bad_op, __func__, __LINE__);
		}
		/*
		 * Verify source hardware address matches
		 */
		if (bcmp(ea->arp_sha, bif->bif_hf_hwsrc,
		    ETHER_ADDR_LEN) != 0) {
			BRIDGE_HF_DROP(brhf_arp_bad_sha, __func__, __LINE__);
		}
		/*
		 * Verify source protocol address:
		 * May be null for an ARP probe
		 */
		if (bcmp(ea->arp_spa, &bif->bif_hf_ipsrc.s_addr,
		    sizeof(struct in_addr)) != 0 &&
		    bcmp(ea->arp_spa, &inaddr_any,
		    sizeof(struct in_addr)) != 0) {
			BRIDGE_HF_DROP(brhf_arp_bad_spa, __func__, __LINE__);
		}

		bridge_hostfilter_stats.brhf_arp_ok += 1;
		error = 0;
	} else if (eh->ether_type == htons(ETHERTYPE_IP)) {
		size_t minlen = sizeof(struct ether_header) + sizeof(struct ip);
		struct ip iphdr;
		size_t offset;

		/*
		 * Make the Ethernet and IP headers contiguous
		 */
		if (mbuf_pkthdr_len(m) < minlen) {
			BRIDGE_HF_DROP(brhf_ip_too_small, __func__, __LINE__);
		}
		offset = sizeof(struct ether_header);
		error = mbuf_copydata(m, offset, sizeof(struct ip), &iphdr);
		if (error != 0) {
			BRIDGE_HF_DROP(brhf_ip_too_small, __func__, __LINE__);
		}
		/*
		 * Verify the source IP address
		 */
		if (iphdr.ip_p == IPPROTO_UDP) {
			struct udphdr udp;

			minlen += sizeof(struct udphdr);
			if (mbuf_pkthdr_len(m) < minlen) {
				BRIDGE_HF_DROP(brhf_ip_too_small,
				    __func__, __LINE__);
			}

			/*
			 * Allow all zero addresses for DHCP requests
			 */
			if (iphdr.ip_src.s_addr != bif->bif_hf_ipsrc.s_addr &&
			    iphdr.ip_src.s_addr != INADDR_ANY) {
				BRIDGE_HF_DROP(brhf_ip_bad_srcaddr,
				    __func__, __LINE__);
			}
			offset = sizeof(struct ether_header) +
			    (IP_VHL_HL(iphdr.ip_vhl) << 2);
			error = mbuf_copydata(m, offset,
			    sizeof(struct udphdr), &udp);
			if (error != 0) {
				BRIDGE_HF_DROP(brhf_ip_too_small,
				    __func__, __LINE__);
			}
			/*
			 * Either it's a Bootp/DHCP packet that we like or
			 * it's a UDP packet from the host IP as source address
			 */
			if (udp.uh_sport == htons(IPPORT_BOOTPC) &&
			    udp.uh_dport == htons(IPPORT_BOOTPS)) {
				minlen += sizeof(struct dhcp);
				if (mbuf_pkthdr_len(m) < minlen) {
					BRIDGE_HF_DROP(brhf_ip_too_small,
					    __func__, __LINE__);
				}
				offset += sizeof(struct udphdr);
				error = bridge_dhcp_filter(bif, m, offset);
				if (error != 0) {
					goto done;
				}
			} else if (iphdr.ip_src.s_addr == INADDR_ANY) {
				BRIDGE_HF_DROP(brhf_ip_bad_srcaddr,
				    __func__, __LINE__);
			}
		} else if (iphdr.ip_src.s_addr != bif->bif_hf_ipsrc.s_addr ||
		    bif->bif_hf_ipsrc.s_addr == INADDR_ANY) {
			BRIDGE_HF_DROP(brhf_ip_bad_srcaddr, __func__, __LINE__);
		}
		/*
		 * Allow only boring IP protocols
		 */
		if (iphdr.ip_p != IPPROTO_TCP &&
		    iphdr.ip_p != IPPROTO_UDP &&
		    iphdr.ip_p != IPPROTO_ICMP &&
		    iphdr.ip_p != IPPROTO_ESP &&
		    iphdr.ip_p != IPPROTO_AH &&
		    iphdr.ip_p != IPPROTO_GRE) {
			BRIDGE_HF_DROP(brhf_ip_bad_proto, __func__, __LINE__);
		}
		bridge_hostfilter_stats.brhf_ip_ok += 1;
		error = 0;
	} else {
		BRIDGE_HF_DROP(brhf_bad_ether_type, __func__, __LINE__);
	}
done:
	if (error != 0) {
		if (if_bridge_debug & BR_DBGF_HOSTFILTER) {
			if (m) {
				printf_mbuf_data(m, 0,
				    sizeof(struct ether_header) +