1/* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */
2/*
3 * Copyright (c) 2004-2010 Apple Inc. All rights reserved.
4 *
5 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. The rights granted to you under the License
11 * may not be used to create, or enable the creation or redistribution of,
12 * unlawful or unlicensed copies of an Apple operating system, or to
13 * circumvent, violate, or enable the circumvention or violation of, any
14 * terms of an Apple operating system software license agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 *
19 * The Original Code and all software distributed under the License are
20 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
21 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
22 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
24 * Please see the License for the specific language governing rights and
25 * limitations under the License.
26 *
27 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 */
29
30/*
31 * Copyright 2001 Wasabi Systems, Inc.
32 * All rights reserved.
33 *
34 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed for the NetBSD Project by
47 * Wasabi Systems, Inc.
48 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
49 * or promote products derived from this software without specific prior
50 * written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
54 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
55 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
56 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
57 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
58 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
59 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
60 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
61 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
62 * POSSIBILITY OF SUCH DAMAGE.
63 */
64
65/*
66 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
67 * All rights reserved.
68 *
69 * Redistribution and use in source and binary forms, with or without
70 * modification, are permitted provided that the following conditions
71 * are met:
72 * 1. Redistributions of source code must retain the above copyright
73 * notice, this list of conditions and the following disclaimer.
74 * 2. Redistributions in binary form must reproduce the above copyright
75 * notice, this list of conditions and the following disclaimer in the
76 * documentation and/or other materials provided with the distribution.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
79 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
80 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
81 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
82 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
83 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
84 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
85 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
86 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
87 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
88 * POSSIBILITY OF SUCH DAMAGE.
89 *
90 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
91 */
92
93/*
94 * Network interface bridge support.
95 *
96 * TODO:
97 *
98 * - Currently only supports Ethernet-like interfaces (Ethernet,
99 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
100 * to bridge other types of interfaces (FDDI-FDDI, and maybe
101 * consider heterogeneous bridges).
102 */
103
104#include <sys/cdefs.h>
105//__FBSDID("$FreeBSD$");
106
107//#include "opt_inet.h"
108//#include "opt_inet6.h"
109//#include "opt_carp.h"
110
111#define BRIDGE_DEBUG 1
112#ifndef BRIDGE_DEBUG
113#define BRIDGE_DEBUG 0
114#endif /* BRIDGE_DEBUG */
115
116#include <sys/param.h>
117#include <sys/mbuf.h>
118#include <sys/malloc.h>
119#include <sys/protosw.h>
120#include <sys/systm.h>
121#include <sys/time.h>
122#include <sys/socket.h> /* for net/if.h */
123#include <sys/sockio.h>
124//#include <sys/ctype.h> /* string functions */
125#include <sys/kernel.h>
126#include <sys/random.h>
127#include <sys/syslog.h>
128#include <sys/sysctl.h>
129//#include <vm/uma.h>
130//#include <sys/module.h>
131//#include <sys/priv.h>
132#include <sys/proc.h>
133#include <sys/lock.h>
134//#include <sys/mutex.h>
135#include <sys/mcache.h>
136
137#include <sys/kauth.h>
138
139#include <libkern/libkern.h>
140
141#include <kern/zalloc.h>
142
143#if NBPFILTER > 0
144#include <net/bpf.h>
145#endif
146#include <net/if.h>
147//#include <net/if_clone.h>
148#include <net/if_dl.h>
149#include <net/if_types.h>
150#include <net/if_var.h>
151//#include <net/pfil.h>
152
153#include <netinet/in.h> /* for struct arpcom */
154#include <netinet/in_systm.h>
155#include <netinet/in_var.h>
156#include <netinet/ip.h>
157#include <netinet/ip_var.h>
158#ifdef INET6
159#include <netinet/ip6.h>
160#include <netinet6/ip6_var.h>
161#endif
162#ifdef DEV_CARP
163#include <netinet/ip_carp.h>
164#endif
165//#include <machine/in_cksum.h>
166#include <netinet/if_ether.h> /* for struct arpcom */
167#include <net/bridgestp.h>
168#include <net/if_bridgevar.h>
169#include <net/if_llc.h>
170#include <net/if_vlan_var.h>
171
172#include <net/if_ether.h>
173#include <net/dlil.h>
174#include <net/kpi_interfacefilter.h>
175
176#include <net/route.h>
177#ifdef PFIL_HOOKS
178#include <netinet/ip_fw2.h>
179#include <netinet/ip_dummynet.h>
180#endif /* PFIL_HOOKS */
181
182#if BRIDGE_DEBUG
183
184#define BR_LCKDBG_MAX 4
185
186#define BRIDGE_LOCK(_sc) bridge_lock(_sc)
187#define BRIDGE_UNLOCK(_sc) bridge_unlock(_sc)
188#define BRIDGE_LOCK_ASSERT(_sc) lck_mtx_assert((_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED)
189#define BRIDGE_LOCK2REF(_sc, _err) _err = bridge_lock2ref(_sc)
190#define BRIDGE_UNREF(_sc) bridge_unref(_sc)
191#define BRIDGE_XLOCK(_sc) bridge_xlock(_sc)
192#define BRIDGE_XDROP(_sc) bridge_xdrop(_sc)
193
194#else /* BRIDGE_DEBUG */
195
196#define BRIDGE_LOCK(_sc) lck_mtx_lock((_sc)->sc_mtx)
197#define BRIDGE_UNLOCK(_sc) lck_mtx_unlock((_sc)->sc_mtx)
198#define BRIDGE_LOCK_ASSERT(_sc) lck_mtx_assert((_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED)
199#define BRIDGE_LOCK2REF(_sc, _err) do { \
200 lck_mtx_assert((_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED); \
201 if ((_sc)->sc_iflist_xcnt > 0) \
202 (_err) = EBUSY; \
203 else \
204 (_sc)->sc_iflist_ref++; \
205 lck_mtx_unlock((_sc)->sc_mtx); \
206} while (0)
207#define BRIDGE_UNREF(_sc) do { \
208 lck_mtx_lock((_sc)->sc_mtx); \
209 (_sc)->sc_iflist_ref--; \
210 if (((_sc)->sc_iflist_xcnt > 0) && ((_sc)->sc_iflist_ref == 0)) { \
211 lck_mtx_unlock((_sc)->sc_mtx); \
212 wakeup(&(_sc)->sc_cv); \
213 } else \
214 lck_mtx_unlock((_sc)->sc_mtx); \
215} while (0)
216#define BRIDGE_XLOCK(_sc) do { \
217 lck_mtx_assert((_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED); \
218 (_sc)->sc_iflist_xcnt++; \
219 while ((_sc)->sc_iflist_ref > 0) \
220 msleep(&(_sc)->sc_cv, (_sc)->sc_mtx, PZERO, "BRIDGE_XLOCK", NULL); \
221} while (0)
222#define BRIDGE_XDROP(_sc) do { \
223 lck_mtx_assert((_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED); \
224 (_sc)->sc_iflist_xcnt--; \
225} while (0)
226
227#endif /* BRIDGE_DEBUG */
228
229#if NBPFILTER > 0
230#define BRIDGE_BPF_MTAP_INPUT(sc, m) \
231 if (sc->sc_bpf_input) \
232 bridge_bpf_input(sc->sc_ifp, m)
233#else /* NBPFILTER */
234#define BRIDGE_BPF_MTAP_INPUT(sc, m)
235#endif /* NBPFILTER */
236
237/*
238 * Size of the route hash table. Must be a power of two.
239 */
240/* APPLE MODIFICATION - per Wasabi performance improvement, change the hash table size */
241#if 0
242#ifndef BRIDGE_RTHASH_SIZE
243#define BRIDGE_RTHASH_SIZE 1024
244#endif
245#else
246#ifndef BRIDGE_RTHASH_SIZE
247#define BRIDGE_RTHASH_SIZE 256
248#endif
249#endif
250
251/* APPLE MODIFICATION - support for HW checksums */
252#if APPLE_BRIDGE_HWCKSUM_SUPPORT
253#include <netinet/udp.h>
254#include <netinet/tcp.h>
255#endif
256
257#define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
258
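/*
 * Illustrative sketch (not compiled): because BRIDGE_RTHASH_SIZE is a
 * power of two, a hash value can be folded into a bucket index of the
 * sc_rthash forwarding table with a single mask rather than a modulo.
 */
#if 0	/* example only */
static __inline uint32_t
bridge_rthash_index_example(uint32_t hash)
{
	/* equivalent to (hash % BRIDGE_RTHASH_SIZE), but cheaper */
	return (hash & BRIDGE_RTHASH_MASK);
}
#endif
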
259/*
260 * Maximum number of addresses to cache.
261 */
262#ifndef BRIDGE_RTABLE_MAX
263#define BRIDGE_RTABLE_MAX 100
264#endif
265
266
267/*
268 * Timeout (in seconds) for entries learned dynamically.
269 */
270#ifndef BRIDGE_RTABLE_TIMEOUT
271#define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
272#endif
273
274/*
275 * Number of seconds between walks of the route list.
276 */
277#ifndef BRIDGE_RTABLE_PRUNE_PERIOD
278#define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
279#endif
280
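/*
 * Illustrative sketch (not compiled): roughly every
 * BRIDGE_RTABLE_PRUNE_PERIOD seconds the aging timer walks the route
 * list and drops dynamically learned entries whose expiration time
 * (seeded from sc_brttimeout, default BRIDGE_RTABLE_TIMEOUT) has
 * passed.  The names below mirror the bridge_rtnode fields and
 * bridge_rtnode_destroy() declared later in this file.
 */
#if 0	/* example only */
	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
		    (unsigned long)now >= brt->brt_expire)
			bridge_rtnode_destroy(sc, brt);
	}
#endif
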
281/*
282 * List of capabilities to possibly mask on the member interface.
283 */
284#define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM)
285/*
286 * List of capabilities to disable on the member interface.
287 */
288#define BRIDGE_IFCAPS_STRIP IFCAP_LRO
289
290/*
291 * Bridge interface list entry.
292 */
293struct bridge_iflist {
294 TAILQ_ENTRY(bridge_iflist) bif_next;
295 struct ifnet *bif_ifp; /* member if */
296 struct bstp_port bif_stp; /* STP state */
297 uint32_t bif_flags; /* member if flags */
298 int bif_savedcaps; /* saved capabilities */
299 uint32_t bif_addrmax; /* max # of addresses */
300 uint32_t bif_addrcnt; /* cur. # of addresses */
301 uint32_t bif_addrexceeded;/* # of address violations */
302
303 interface_filter_t bif_iff_ref;
304 struct bridge_softc *bif_sc;
305 char bif_promisc; /* promiscuous mode set */
306 char bif_proto_attached; /* protocol attached */
307 char bif_filter_attached; /* interface filter attached */
308};
309
310/*
311 * Bridge route node.
312 */
313struct bridge_rtnode {
314 LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */
315 LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */
316 struct bridge_iflist *brt_dst; /* destination if */
317 unsigned long brt_expire; /* expiration time */
318 uint8_t brt_flags; /* address flags */
319 uint8_t brt_addr[ETHER_ADDR_LEN];
320 uint16_t brt_vlan; /* vlan id */
321
322};
323#define brt_ifp brt_dst->bif_ifp
324
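/*
 * Illustrative sketch (not compiled): a forwarding-table hit maps a
 * (destination MAC, vlan) pair to the member port that last sourced
 * the address; the brt_ifp shorthand above reaches the underlying
 * ifnet through the entry's brt_dst pointer.
 */
#if 0	/* example only */
	brt = bridge_rtnode_lookup(sc, eh->ether_dhost, vlan);
	if (brt != NULL)
		dst_if = brt->brt_ifp;	/* i.e. brt->brt_dst->bif_ifp */
#endif
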
325/*
326 * Software state for each bridge.
327 */
328struct bridge_softc {
329 struct ifnet *sc_ifp; /* make this an interface */
330 LIST_ENTRY(bridge_softc) sc_list;
331 lck_mtx_t *sc_mtx;
332 void *sc_cv;
333 uint32_t sc_brtmax; /* max # of addresses */
334 uint32_t sc_brtcnt; /* cur. # of addresses */
335 uint32_t sc_brttimeout; /* rt timeout in seconds */
336 uint32_t sc_iflist_ref; /* refcount for sc_iflist */
337 uint32_t sc_iflist_xcnt; /* refcount for sc_iflist */
338 TAILQ_HEAD(, bridge_iflist) sc_iflist; /* member interface list */
339 LIST_HEAD(, bridge_rtnode) *sc_rthash; /* our forwarding table */
340 LIST_HEAD(, bridge_rtnode) sc_rtlist; /* list version of above */
341 uint32_t sc_rthash_key; /* key for hash */
342 TAILQ_HEAD(, bridge_iflist) sc_spanlist; /* span ports list */
343 struct bstp_state sc_stp; /* STP state */
344 uint32_t sc_brtexceeded; /* # of cache drops */
345 uint32_t sc_filter_flags; /* ipf and flags */
346
347 char sc_if_xname[IFNAMSIZ];
348 bpf_packet_func sc_bpf_input;
349 bpf_packet_func sc_bpf_output;
350 u_int32_t sc_flags;
351
352#if BRIDGE_DEBUG
353 void *lock_lr[BR_LCKDBG_MAX]; /* locking calling history */
354 int next_lock_lr;
355 void *unlock_lr[BR_LCKDBG_MAX]; /* unlocking caller history */
356 int next_unlock_lr;
357#endif /* BRIDGE_DEBUG */
358};
359
360#define SCF_DETACHING 0x1
361
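/*
 * Illustrative sketch (not compiled): the intended pattern for the
 * BRIDGE_LOCK2REF()/BRIDGE_UNREF() macros defined above.  A reader
 * trades the bridge mutex for a reference on sc_iflist so the member
 * list can be walked without holding sc_mtx, while BRIDGE_XLOCK()
 * callers wait for sc_iflist_ref to drain before modifying the list.
 */
#if 0	/* example only */
	BRIDGE_LOCK(sc);
	BRIDGE_LOCK2REF(sc, error);	/* always drops sc_mtx */
	if (error) {
		m_freem(m);
		return;
	}
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
		/* ... forward or broadcast without holding sc_mtx ... */
	}
	BRIDGE_UNREF(sc);
#endif
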
362static lck_mtx_t *bridge_list_mtx;
363//eventhandler_tag bridge_detach_cookie = NULL;
364
365int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
366
367static zone_t bridge_rtnode_pool = NULL;
368
369static int bridge_clone_create(struct if_clone *, uint32_t, void *);
370static int bridge_clone_destroy(struct ifnet *);
371
372static errno_t bridge_ioctl(struct ifnet *, u_long, void *);
373#if HAS_IF_CAP
374static void bridge_mutecaps(struct bridge_softc *);
375static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
376 int);
377#endif
378__private_extern__ void bridge_ifdetach(struct bridge_iflist *, struct ifnet *);
379static int bridge_init(struct ifnet *);
380#if HAS_BRIDGE_DUMMYNET
381static void bridge_dummynet(struct mbuf *, struct ifnet *);
382#endif
383static void bridge_stop(struct ifnet *, int);
384static errno_t bridge_start(struct ifnet *, struct mbuf *);
385__private_extern__ errno_t bridge_input(struct ifnet *, struct mbuf *, void *);
386#if BRIDGE_MEMBER_OUT_FILTER
387static errno_t bridge_iff_output(void *, ifnet_t , protocol_family_t , mbuf_t *);
388static int bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
389 struct rtentry *);
390#endif
391static void bridge_enqueue(struct bridge_softc *, struct ifnet *,
392 struct mbuf *);
393static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
394
395static void bridge_forward(struct bridge_softc *, struct bridge_iflist *,
396 struct mbuf *m);
397
398static void bridge_timer(void *);
399
400static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
401 struct mbuf *, int);
402static void bridge_span(struct bridge_softc *, struct mbuf *);
403
404static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
405 uint16_t, struct bridge_iflist *, int, uint8_t);
406static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
407 uint16_t);
408static void bridge_rttrim(struct bridge_softc *);
409static void bridge_rtage(struct bridge_softc *);
410static void bridge_rtflush(struct bridge_softc *, int);
411static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
412 uint16_t);
413
414static int bridge_rtable_init(struct bridge_softc *);
415static void bridge_rtable_fini(struct bridge_softc *);
416
417static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
418static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
419 const uint8_t *, uint16_t);
420static int bridge_rtnode_insert(struct bridge_softc *,
421 struct bridge_rtnode *);
422static void bridge_rtnode_destroy(struct bridge_softc *,
423 struct bridge_rtnode *);
424static void bridge_rtable_expire(struct ifnet *, int);
425static void bridge_state_change(struct ifnet *, int);
426
427static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
428 const char *name);
429static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
430 struct ifnet *ifp);
431static void bridge_delete_member(struct bridge_softc *,
432 struct bridge_iflist *, int);
433static void bridge_delete_span(struct bridge_softc *,
434 struct bridge_iflist *);
435
436static int bridge_ioctl_add(struct bridge_softc *, void *);
437static int bridge_ioctl_del(struct bridge_softc *, void *);
438static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
439static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
440static int bridge_ioctl_scache(struct bridge_softc *, void *);
441static int bridge_ioctl_gcache(struct bridge_softc *, void *);
442static int bridge_ioctl_gifs32(struct bridge_softc *, void *);
443static int bridge_ioctl_gifs64(struct bridge_softc *, void *);
444static int bridge_ioctl_rts32(struct bridge_softc *, void *);
445static int bridge_ioctl_rts64(struct bridge_softc *, void *);
446static int bridge_ioctl_saddr32(struct bridge_softc *, void *);
447static int bridge_ioctl_saddr64(struct bridge_softc *, void *);
448static int bridge_ioctl_sto(struct bridge_softc *, void *);
449static int bridge_ioctl_gto(struct bridge_softc *, void *);
450static int bridge_ioctl_daddr32(struct bridge_softc *, void *);
451static int bridge_ioctl_daddr64(struct bridge_softc *, void *);
452static int bridge_ioctl_flush(struct bridge_softc *, void *);
453static int bridge_ioctl_gpri(struct bridge_softc *, void *);
454static int bridge_ioctl_spri(struct bridge_softc *, void *);
455static int bridge_ioctl_ght(struct bridge_softc *, void *);
456static int bridge_ioctl_sht(struct bridge_softc *, void *);
457static int bridge_ioctl_gfd(struct bridge_softc *, void *);
458static int bridge_ioctl_sfd(struct bridge_softc *, void *);
459static int bridge_ioctl_gma(struct bridge_softc *, void *);
460static int bridge_ioctl_sma(struct bridge_softc *, void *);
461static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
462static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
463static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
464static int bridge_ioctl_addspan(struct bridge_softc *, void *);
465static int bridge_ioctl_delspan(struct bridge_softc *, void *);
466static int bridge_ioctl_gbparam32(struct bridge_softc *, void *);
467static int bridge_ioctl_gbparam64(struct bridge_softc *, void *);
468static int bridge_ioctl_grte(struct bridge_softc *, void *);
469static int bridge_ioctl_gifsstp32(struct bridge_softc *, void *);
470static int bridge_ioctl_gifsstp64(struct bridge_softc *, void *);
471static int bridge_ioctl_sproto(struct bridge_softc *, void *);
472static int bridge_ioctl_stxhc(struct bridge_softc *, void *);
473static int bridge_ioctl_purge(struct bridge_softc *sc, void *arg);
474static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
475static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
476#ifdef PFIL_HOOKS
477static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
478 int);
479static int bridge_ip_checkbasic(struct mbuf **mp);
480#ifdef INET6
481static int bridge_ip6_checkbasic(struct mbuf **mp);
482#endif /* INET6 */
483static int bridge_fragment(struct ifnet *, struct mbuf *,
484 struct ether_header *, int, struct llc *);
485#endif /* PFIL_HOOKS */
486
487static errno_t bridge_set_bpf_tap(ifnet_t ifn, bpf_tap_mode mode, bpf_packet_func bpf_callback);
488__private_extern__ errno_t bridge_bpf_input(ifnet_t ifp, struct mbuf *m);
489__private_extern__ errno_t bridge_bpf_output(ifnet_t ifp, struct mbuf *m);
490
491static void bridge_detach(ifnet_t ifp);
492
493#define m_copypacket(m, how) m_copym(m, 0, M_COPYALL, how)
494
495/* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
496#define VLANTAGOF(_m) 0
497
498static struct bstp_cb_ops bridge_ops = {
499 .bcb_state = bridge_state_change,
500 .bcb_rtage = bridge_rtable_expire
501};
502
503SYSCTL_DECL(_net_link);
504SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");
505
506#if defined(PFIL_HOOKS)
507static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
508static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
509static int pfil_member = 1; /* run pfil hooks on the member interface */
510static int pfil_ipfw = 0; /* layer2 filter with ipfw */
511static int pfil_ipfw_arp = 0; /* layer2 filter with ipfw */
512static int pfil_local_phys = 0; /* run pfil hooks on the physical interface for
513 locally destined packets */
514SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW,
515 &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
516SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW,
517 &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2");
518SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW,
519 &pfil_bridge, 0, "Packet filter on the bridge interface");
520SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW,
521 &pfil_member, 0, "Packet filter on the member interface");
522SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys, CTLFLAG_RW,
523 &pfil_local_phys, 0,
524 "Packet filter on the physical interface for locally destined packets");
525#endif /* PFIL_HOOKS */
526
527static int log_stp = 0; /* log STP state changes */
528SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW,
529 &log_stp, 0, "Log STP state changes");
530
531struct bridge_control {
532 int (*bc_func)(struct bridge_softc *, void *);
533 unsigned int bc_argsize;
534 unsigned int bc_flags;
535};
536
537#define BC_F_COPYIN 0x01 /* copy arguments in */
538#define BC_F_COPYOUT 0x02 /* copy arguments out */
539#define BC_F_SUSER 0x04 /* do super-user check */
540
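/*
 * Illustrative sketch (not compiled, user-space view): each entry in
 * the bridge_control tables below is selected by the ifd_cmd index of
 * a SIOCSDRVSPEC/SIOCGDRVSPEC request, and the BC_F_* flags describe
 * how the argument buffer is copied and whether a super-user check is
 * performed.  BRDGADD and struct ifbreq come from net/if_bridgevar.h;
 * the rest is only an example of how a caller reaches this table.
 */
#if 0	/* example only */
	struct ifdrv ifd;
	struct ifbreq req;

	memset(&ifd, 0, sizeof(ifd));
	memset(&req, 0, sizeof(req));
	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
	strlcpy(req.ifbr_ifsname, "en0", sizeof(req.ifbr_ifsname));
	ifd.ifd_cmd = BRDGADD;		/* index into bridge_control_table */
	ifd.ifd_len = sizeof(req);	/* must match bc_argsize */
	ifd.ifd_data = &req;
	/* BC_F_COPYIN|BC_F_SUSER: argument copied in, root required */
	error = ioctl(s, SIOCSDRVSPEC, &ifd);
#endif
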
541static const struct bridge_control bridge_control_table32[] = {
542 { bridge_ioctl_add, sizeof(struct ifbreq),
543 BC_F_COPYIN|BC_F_SUSER },
544 { bridge_ioctl_del, sizeof(struct ifbreq),
545 BC_F_COPYIN|BC_F_SUSER },
546
547 { bridge_ioctl_gifflags, sizeof(struct ifbreq),
548 BC_F_COPYIN|BC_F_COPYOUT },
549 { bridge_ioctl_sifflags, sizeof(struct ifbreq),
550 BC_F_COPYIN|BC_F_SUSER },
551
552 { bridge_ioctl_scache, sizeof(struct ifbrparam),
553 BC_F_COPYIN|BC_F_SUSER },
554 { bridge_ioctl_gcache, sizeof(struct ifbrparam),
555 BC_F_COPYOUT },
556
557 { bridge_ioctl_gifs32, sizeof(struct ifbifconf32),
558 BC_F_COPYIN|BC_F_COPYOUT },
559 { bridge_ioctl_rts32, sizeof(struct ifbaconf32),
560 BC_F_COPYIN|BC_F_COPYOUT },
561
562 { bridge_ioctl_saddr32, sizeof(struct ifbareq32),
563 BC_F_COPYIN|BC_F_SUSER },
564
565 { bridge_ioctl_sto, sizeof(struct ifbrparam),
566 BC_F_COPYIN|BC_F_SUSER },
567 { bridge_ioctl_gto, sizeof(struct ifbrparam),
568 BC_F_COPYOUT },
569
570 { bridge_ioctl_daddr32, sizeof(struct ifbareq32),
571 BC_F_COPYIN|BC_F_SUSER },
572
573 { bridge_ioctl_flush, sizeof(struct ifbreq),
574 BC_F_COPYIN|BC_F_SUSER },
575
576 { bridge_ioctl_gpri, sizeof(struct ifbrparam),
577 BC_F_COPYOUT },
578 { bridge_ioctl_spri, sizeof(struct ifbrparam),
579 BC_F_COPYIN|BC_F_SUSER },
580
581 { bridge_ioctl_ght, sizeof(struct ifbrparam),
582 BC_F_COPYOUT },
583 { bridge_ioctl_sht, sizeof(struct ifbrparam),
584 BC_F_COPYIN|BC_F_SUSER },
585
586 { bridge_ioctl_gfd, sizeof(struct ifbrparam),
587 BC_F_COPYOUT },
588 { bridge_ioctl_sfd, sizeof(struct ifbrparam),
589 BC_F_COPYIN|BC_F_SUSER },
590
591 { bridge_ioctl_gma, sizeof(struct ifbrparam),
592 BC_F_COPYOUT },
593 { bridge_ioctl_sma, sizeof(struct ifbrparam),
594 BC_F_COPYIN|BC_F_SUSER },
595
596 { bridge_ioctl_sifprio, sizeof(struct ifbreq),
597 BC_F_COPYIN|BC_F_SUSER },
598
599 { bridge_ioctl_sifcost, sizeof(struct ifbreq),
600 BC_F_COPYIN|BC_F_SUSER },
601
602 { bridge_ioctl_gfilt, sizeof(struct ifbrparam),
603 BC_F_COPYOUT },
604 { bridge_ioctl_sfilt, sizeof(struct ifbrparam),
605 BC_F_COPYIN|BC_F_SUSER },
606
607 { bridge_ioctl_purge, sizeof(struct ifbreq),
608 BC_F_COPYIN|BC_F_SUSER },
609
610 { bridge_ioctl_addspan, sizeof(struct ifbreq),
611 BC_F_COPYIN|BC_F_SUSER },
612 { bridge_ioctl_delspan, sizeof(struct ifbreq),
613 BC_F_COPYIN|BC_F_SUSER },
614
615 { bridge_ioctl_gbparam32, sizeof(struct ifbropreq32),
616 BC_F_COPYOUT },
617
618 { bridge_ioctl_grte, sizeof(struct ifbrparam),
619 BC_F_COPYOUT },
620
621 { bridge_ioctl_gifsstp32, sizeof(struct ifbpstpconf32),
622 BC_F_COPYIN|BC_F_COPYOUT },
623
624 { bridge_ioctl_sproto, sizeof(struct ifbrparam),
625 BC_F_COPYIN|BC_F_SUSER },
626
627 { bridge_ioctl_stxhc, sizeof(struct ifbrparam),
628 BC_F_COPYIN|BC_F_SUSER },
629
630 { bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq),
631 BC_F_COPYIN|BC_F_SUSER },
632};
633
634static const struct bridge_control bridge_control_table64[] = {
635 { bridge_ioctl_add, sizeof(struct ifbreq),
636 BC_F_COPYIN|BC_F_SUSER },
637 { bridge_ioctl_del, sizeof(struct ifbreq),
638 BC_F_COPYIN|BC_F_SUSER },
639
640 { bridge_ioctl_gifflags, sizeof(struct ifbreq),
641 BC_F_COPYIN|BC_F_COPYOUT },
642 { bridge_ioctl_sifflags, sizeof(struct ifbreq),
643 BC_F_COPYIN|BC_F_SUSER },
644
645 { bridge_ioctl_scache, sizeof(struct ifbrparam),
646 BC_F_COPYIN|BC_F_SUSER },
647 { bridge_ioctl_gcache, sizeof(struct ifbrparam),
648 BC_F_COPYOUT },
649
650 { bridge_ioctl_gifs64, sizeof(struct ifbifconf64),
651 BC_F_COPYIN|BC_F_COPYOUT },
652 { bridge_ioctl_rts64, sizeof(struct ifbaconf64),
653 BC_F_COPYIN|BC_F_COPYOUT },
654
655 { bridge_ioctl_saddr64, sizeof(struct ifbareq64),
656 BC_F_COPYIN|BC_F_SUSER },
657
658 { bridge_ioctl_sto, sizeof(struct ifbrparam),
659 BC_F_COPYIN|BC_F_SUSER },
660 { bridge_ioctl_gto, sizeof(struct ifbrparam),
661 BC_F_COPYOUT },
662
663 { bridge_ioctl_daddr64, sizeof(struct ifbareq64),
664 BC_F_COPYIN|BC_F_SUSER },
665
666 { bridge_ioctl_flush, sizeof(struct ifbreq),
667 BC_F_COPYIN|BC_F_SUSER },
668
669 { bridge_ioctl_gpri, sizeof(struct ifbrparam),
670 BC_F_COPYOUT },
671 { bridge_ioctl_spri, sizeof(struct ifbrparam),
672 BC_F_COPYIN|BC_F_SUSER },
673
674 { bridge_ioctl_ght, sizeof(struct ifbrparam),
675 BC_F_COPYOUT },
676 { bridge_ioctl_sht, sizeof(struct ifbrparam),
677 BC_F_COPYIN|BC_F_SUSER },
678
679 { bridge_ioctl_gfd, sizeof(struct ifbrparam),
680 BC_F_COPYOUT },
681 { bridge_ioctl_sfd, sizeof(struct ifbrparam),
682 BC_F_COPYIN|BC_F_SUSER },
683
684 { bridge_ioctl_gma, sizeof(struct ifbrparam),
685 BC_F_COPYOUT },
686 { bridge_ioctl_sma, sizeof(struct ifbrparam),
687 BC_F_COPYIN|BC_F_SUSER },
688
689 { bridge_ioctl_sifprio, sizeof(struct ifbreq),
690 BC_F_COPYIN|BC_F_SUSER },
691
692 { bridge_ioctl_sifcost, sizeof(struct ifbreq),
693 BC_F_COPYIN|BC_F_SUSER },
694
695 { bridge_ioctl_gfilt, sizeof(struct ifbrparam),
696 BC_F_COPYOUT },
697 { bridge_ioctl_sfilt, sizeof(struct ifbrparam),
698 BC_F_COPYIN|BC_F_SUSER },
699
700 { bridge_ioctl_purge, sizeof(struct ifbreq),
701 BC_F_COPYIN|BC_F_SUSER },
702
703 { bridge_ioctl_addspan, sizeof(struct ifbreq),
704 BC_F_COPYIN|BC_F_SUSER },
705 { bridge_ioctl_delspan, sizeof(struct ifbreq),
706 BC_F_COPYIN|BC_F_SUSER },
707
708 { bridge_ioctl_gbparam64, sizeof(struct ifbropreq64),
709 BC_F_COPYOUT },
710
711 { bridge_ioctl_grte, sizeof(struct ifbrparam),
712 BC_F_COPYOUT },
713
714 { bridge_ioctl_gifsstp64, sizeof(struct ifbpstpconf64),
715 BC_F_COPYIN|BC_F_COPYOUT },
716
717 { bridge_ioctl_sproto, sizeof(struct ifbrparam),
718 BC_F_COPYIN|BC_F_SUSER },
719
720 { bridge_ioctl_stxhc, sizeof(struct ifbrparam),
721 BC_F_COPYIN|BC_F_SUSER },
722
723 { bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq),
724 BC_F_COPYIN|BC_F_SUSER },
725};
726
727static const unsigned int bridge_control_table_size =
728sizeof(bridge_control_table32) / sizeof(bridge_control_table32[0]);
729
730static LIST_HEAD(, bridge_softc) bridge_list = LIST_HEAD_INITIALIZER(bridge_list);
731
732static lck_grp_t *bridge_lock_grp = NULL;
733static lck_attr_t *bridge_lock_attr = NULL;
734
735static if_clone_t bridge_cloner = NULL;
736
737__private_extern__ int _if_brige_debug = 0;
738
739SYSCTL_INT(_net_link_bridge, OID_AUTO, debug, CTLFLAG_RW,
740 &_if_brige_debug, 0, "Bridge debug");
741
742#if BRIDGE_DEBUG
743
744static void printf_ether_header(struct ether_header *eh);
745static void printf_mbuf_data(mbuf_t m, size_t offset, size_t len);
746static void printf_mbuf_pkthdr(mbuf_t m, const char *prefix, const char *suffix);
747static void printf_mbuf(mbuf_t m, const char *prefix, const char *suffix);
748static void link_print(struct sockaddr_dl * dl_p);
749
750static void bridge_lock(struct bridge_softc *);
751static void bridge_unlock(struct bridge_softc *);
752static int bridge_lock2ref(struct bridge_softc *);
753static void bridge_unref(struct bridge_softc *);
754static void bridge_xlock(struct bridge_softc *);
755static void bridge_xdrop(struct bridge_softc *);
756
757static void bridge_lock(struct bridge_softc *sc)
758{
759 void *lr_saved = __builtin_return_address(0);
760
761 lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
762
763 lck_mtx_lock(sc->sc_mtx);
764
765 sc->lock_lr[sc->next_lock_lr] = lr_saved;
766 sc->next_lock_lr = (sc->next_lock_lr+1) % SO_LCKDBG_MAX;
767}
768
769static void bridge_unlock(struct bridge_softc *sc)
770{
771 void *lr_saved = __builtin_return_address(0);
772
773 lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
774
775 sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
776 sc->next_unlock_lr = (sc->next_unlock_lr+1) % SO_LCKDBG_MAX;
777
778 lck_mtx_unlock(sc->sc_mtx);
779}
780
781static int bridge_lock2ref(struct bridge_softc *sc)
782{
783 int error = 0;
784 void *lr_saved = __builtin_return_address(0);
785
786 lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
787
788 if (sc->sc_iflist_xcnt > 0)
789 error = EBUSY;
790 else
791 sc->sc_iflist_ref++;
792
793 sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
794 sc->next_unlock_lr = (sc->next_unlock_lr+1) % SO_LCKDBG_MAX;
795 lck_mtx_unlock(sc->sc_mtx);
796
797 return error;
798}
799
800static void bridge_unref(struct bridge_softc *sc)
801{
802 void *lr_saved = __builtin_return_address(0);
803
804 lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
805
806 lck_mtx_lock(sc->sc_mtx);
807 sc->lock_lr[sc->next_lock_lr] = lr_saved;
808 sc->next_lock_lr = (sc->next_lock_lr+1) % SO_LCKDBG_MAX;
809
810 sc->sc_iflist_ref--;
811
812 sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
813 sc->next_unlock_lr = (sc->next_unlock_lr+1) % SO_LCKDBG_MAX;
814 if ((sc->sc_iflist_xcnt > 0) && (sc->sc_iflist_ref == 0)) {
815 lck_mtx_unlock(sc->sc_mtx);
816 wakeup(&sc->sc_cv);
817 } else
818 lck_mtx_unlock(sc->sc_mtx);
819}
820
821static void bridge_xlock(struct bridge_softc *sc)
822{
823 void *lr_saved = __builtin_return_address(0);
824
825 lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
826
827 sc->sc_iflist_xcnt++;
828 while (sc->sc_iflist_ref > 0) {
829 sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
830 sc->next_unlock_lr = (sc->next_unlock_lr+1) % SO_LCKDBG_MAX;
831
832 msleep(&sc->sc_cv, sc->sc_mtx, PZERO, "BRIDGE_XLOCK", NULL);
833
834 sc->lock_lr[sc->next_lock_lr] = lr_saved;
835 sc->next_lock_lr = (sc->next_lock_lr+1) % SO_LCKDBG_MAX;
836 }
837}
838
839static void bridge_xdrop(struct bridge_softc *sc)
840{
841 lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
842
843 sc->sc_iflist_xcnt--;
844}
845
846void
847printf_mbuf_pkthdr(mbuf_t m, const char *prefix, const char *suffix)
848{
849 if (m)
850 printf("%spktlen: %u rcvif: %p header: %p nextpkt: %p%s",
851 prefix ? prefix : "",
852 (unsigned int)mbuf_pkthdr_len(m), mbuf_pkthdr_rcvif(m), mbuf_pkthdr_header(m), mbuf_nextpkt(m),
853 suffix ? suffix : "");
854 else
855 printf("%s<NULL>%s\n", prefix, suffix);
856}
857
858void
859printf_mbuf(mbuf_t m, const char *prefix, const char *suffix)
860{
861 if (m) {
862 printf("%s%p type: %u flags: 0x%x len: %u data: %p maxlen: %u datastart: %p next: %p%s",
863 prefix ? prefix : "",
864 m, mbuf_type(m), mbuf_flags(m), (unsigned int)mbuf_len(m), mbuf_data(m),
865 (unsigned int)mbuf_maxlen(m), mbuf_datastart(m), mbuf_next(m),
866 !suffix || (mbuf_flags(m) & MBUF_PKTHDR) ? "" : suffix);
867 if ((mbuf_flags(m) & MBUF_PKTHDR))
868 printf_mbuf_pkthdr(m, " ", suffix);
869 } else
870 printf("%s<NULL>%s\n", prefix, suffix);
871}
872
873void
874printf_mbuf_data(mbuf_t m, size_t offset, size_t len)
875{
876 mbuf_t n;
877 size_t i, j;
878 size_t pktlen, mlen, maxlen;
879 unsigned char *ptr;
880
881 pktlen = mbuf_pkthdr_len(m);
882
883 if (offset > pktlen)
884 return;
885
886 maxlen = (pktlen - offset > len) ? (offset + len) : pktlen;
887 n = m;
888 mlen = mbuf_len(n);
889 ptr = mbuf_data(n);
890 for (i = 0, j = 0; i < maxlen; i++, j++) {
891 if (j >= mlen) {
892 n = mbuf_next(n);
893 if (n == 0)
894 break;
895 ptr = mbuf_data(n);
896 mlen = mbuf_len(n);
897 j = 0;
898 }
899 if (i >= offset) {
900 printf("%02x%s", ptr[j], i % 2 ? " " : "");
901 }
902 }
903 return;
904}
905
906static void
907printf_ether_header(struct ether_header *eh)
908{
909 printf("%02x:%02x:%02x:%02x:%02x:%02x > %02x:%02x:%02x:%02x:%02x:%02x 0x%04x ",
910 eh->ether_shost[0], eh->ether_shost[1], eh->ether_shost[2],
911 eh->ether_shost[3], eh->ether_shost[4], eh->ether_shost[5],
912 eh->ether_dhost[0], eh->ether_dhost[1], eh->ether_dhost[2],
913 eh->ether_dhost[3], eh->ether_dhost[4], eh->ether_dhost[5],
914 eh->ether_type);
915}
916
917static void
918link_print(struct sockaddr_dl * dl_p)
919{
920 int i;
921
922#if 1
923 printf("sdl len %d index %d family %d type 0x%x nlen %d alen %d"
924 " slen %d addr ", dl_p->sdl_len,
925 dl_p->sdl_index, dl_p->sdl_family, dl_p->sdl_type,
926 dl_p->sdl_nlen, dl_p->sdl_alen, dl_p->sdl_slen);
927#endif
928 for (i = 0; i < dl_p->sdl_alen; i++)
929 printf("%s%x", i ? ":" : "",
930 (CONST_LLADDR(dl_p))[i]);
931 printf("\n");
932 return;
933}
934
935#endif /* BRIDGE_DEBUG */
936
937/*
938 * bridgeattach:
939 *
940 * Pseudo-device attach routine.
941 */
942__private_extern__ int
943bridgeattach(__unused int n)
944{
945 int error;
946 lck_grp_attr_t *lck_grp_attr = NULL;
947 struct ifnet_clone_params ifnet_clone_params;
948
949 bridge_rtnode_pool = zinit(sizeof(struct bridge_rtnode), 1024 * sizeof(struct bridge_rtnode),
950 0, "bridge_rtnode");
951 zone_change(bridge_rtnode_pool, Z_CALLERACCT, FALSE);
952
953 lck_grp_attr = lck_grp_attr_alloc_init();
954
955 bridge_lock_grp = lck_grp_alloc_init("if_bridge", lck_grp_attr);
956
957 bridge_lock_attr = lck_attr_alloc_init();
958
959#if BRIDGE_DEBUG
960 lck_attr_setdebug(bridge_lock_attr);
961#endif
962
963 bridge_list_mtx = lck_mtx_alloc_init(bridge_lock_grp, bridge_lock_attr);
964
965 // can free the attributes once we've allocated the group lock
966 lck_grp_attr_free(lck_grp_attr);
967
968 LIST_INIT(&bridge_list);
969
970 bstp_sys_init();
971
972 ifnet_clone_params.ifc_name = "bridge";
973 ifnet_clone_params.ifc_create = bridge_clone_create;
974 ifnet_clone_params.ifc_destroy = bridge_clone_destroy;
975
976 error = ifnet_clone_attach(&ifnet_clone_params, &bridge_cloner);
977 if (error != 0)
978 printf("bridgeattach: ifnet_clone_attach failed %d\n", error);
979
980 return error;
981}
982
983#if defined(PFIL_HOOKS)
984/*
985 * handler for net.link.bridge.pfil_ipfw
986 */
987static int
988sysctl_pfil_ipfw SYSCTL_HANDLER_ARGS
989{
990#pragma unused(arg1,arg2)
991 int enable = pfil_ipfw;
992 int error;
993
994 error = sysctl_handle_int(oidp, &enable, 0, req);
995 enable = (enable) ? 1 : 0;
996
997 if (enable != pfil_ipfw) {
998 pfil_ipfw = enable;
999
1000 /*
1001 * Disable pfil so that ipfw doesn't run twice; if the user
1002 * really wants both, they can re-enable pfil_bridge and/or
1003 * pfil_member. Also allow non-ip packets as ipfw can filter by
1004 * layer2 type.
1005 */
1006 if (pfil_ipfw) {
1007 pfil_onlyip = 0;
1008 pfil_bridge = 0;
1009 pfil_member = 0;
1010 }
1011 }
1012
1013 return (error);
1014}
1015SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW,
1016 &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW");
1017#endif /* PFIL_HOOKS */
1018
1019/*
1020 * bridge_clone_create:
1021 *
1022 * Create a new bridge instance.
1023 */
1024static int
1025bridge_clone_create(struct if_clone *ifc, uint32_t unit, __unused void *params)
1026{
1027 struct ifnet *ifp = NULL;
1028 struct bridge_softc *sc;
1029 u_char eaddr[6];
1030 struct ifnet_init_params init_params;
1031 errno_t error = 0;
1032 uint32_t sdl_buffer[offsetof(struct sockaddr_dl, sdl_data) + IFNAMSIZ + ETHER_ADDR_LEN];
1033 struct sockaddr_dl *sdl = (struct sockaddr_dl *)sdl_buffer;
1034
1035 sc = _MALLOC(sizeof(*sc), M_DEVBUF, M_WAITOK);
1036 memset(sc, 0, sizeof(*sc));
1037
1038 sc->sc_mtx = lck_mtx_alloc_init(bridge_lock_grp, bridge_lock_attr);
1039 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
1040 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
1041 sc->sc_filter_flags = IFBF_FILT_DEFAULT;
1042#ifndef BRIDGE_IPF
1043 /*
1044 * For backwards compatibility with previous behaviour...
1045 * Switch off filtering on the bridge itself if BRIDGE_IPF is
1046 * not defined.
1047 */
1048 sc->sc_filter_flags &= ~IFBF_FILT_USEIPF;
1049#endif
1050
1051 /* Initialize our routing table. */
1052 error = bridge_rtable_init(sc);
1053 if (error != 0) {
1054 printf("bridge_clone_create: bridge_rtable_init failed %d\n", error);
1055 goto done;
1056 }
1057
1058 TAILQ_INIT(&sc->sc_iflist);
1059 TAILQ_INIT(&sc->sc_spanlist);
1060
1061 /* use the interface name as the unique id for ifp recycle */
1062 snprintf(sc->sc_if_xname, sizeof(sc->sc_if_xname), "%s%d",
1063 ifc->ifc_name, unit);
1064 memset(&init_params, 0, sizeof(struct ifnet_init_params));
1065 init_params.uniqueid = sc->sc_if_xname;
1066 init_params.uniqueid_len = strlen(sc->sc_if_xname);
1067 init_params.name = ifc->ifc_name;
1068 init_params.unit = unit;
1069 init_params.family = IFNET_FAMILY_ETHERNET;
1070 init_params.type = IFT_BRIDGE;
1071 init_params.output = bridge_start;
1072 init_params.demux = ether_demux;
1073 init_params.add_proto = ether_add_proto;
1074 init_params.del_proto = ether_del_proto;
1075 init_params.check_multi = ether_check_multi;
1076 init_params.framer = ether_frameout;
1077 init_params.softc = sc;
1078 init_params.ioctl = bridge_ioctl;
1079 init_params.set_bpf_tap = bridge_set_bpf_tap;
1080 init_params.detach = bridge_detach;
1081 init_params.broadcast_addr = etherbroadcastaddr;
1082 init_params.broadcast_len = ETHER_ADDR_LEN;
1083 error = ifnet_allocate(&init_params, &ifp);
1084 if (error != 0) {
1085 printf("bridge_clone_create: ifnet_allocate failed %d\n", error);
1086 goto done;
1087 }
1088 sc->sc_ifp = ifp;
1089
1090 error = ifnet_set_mtu(ifp, ETHERMTU);
1091 if (error != 0) {
1092 printf("bridge_clone_create: ifnet_set_mtu failed %d\n", error);
1093 goto done;
1094 }
1095 error = ifnet_set_addrlen(ifp, ETHER_ADDR_LEN);
1096 if (error != 0) {
1097 printf("bridge_clone_create: ifnet_set_addrlen failed %d\n", error);
1098 goto done;
1099 }
1100 error = ifnet_set_baudrate(ifp, 10000000); // XXX: this is what IONetworking does
1101 if (error != 0) {
1102 printf("bridge_clone_create: ifnet_set_baudrate failed %d\n", error);
1103 goto done;
1104 }
1105 error = ifnet_set_hdrlen(ifp, ETHER_HDR_LEN);
1106 if (error != 0) {
1107 printf("bridge_clone_create: ifnet_set_hdrlen failed %d\n", error);
1108 goto done;
1109 }
1110 error = ifnet_set_flags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST,
1111 0xffff);
1112 if (error != 0) {
1113 printf("bridge_clone_create: ifnet_set_flags failed %d\n", error);
1114 goto done;
1115 }
1116
1117#if 0
1118 /*
1119 * Generate a random ethernet address with a locally administered
1120 * address.
1121 *
1122 * Since we are using random ethernet addresses for the bridge, it is
1123 * possible that we might have address collisions, so make sure that
1124 * this hardware address isn't already in use on another bridge.
1125 */
1126 {
1127 int retry;
1128
1129 for (retry = 1; retry != 0;) {
1130 struct ifnet *bifp;
1131 struct bridge_softc *sc2;
1132
1133 read_random(eaddr, ETHER_ADDR_LEN);
1134 eaddr[0] &= ~1; /* clear multicast bit */
1135 eaddr[0] |= 2; /* set the LAA bit */
1136 retry = 0;
1137 lck_mtx_lock(bridge_list_mtx);
1138 LIST_FOREACH(sc2, &bridge_list, sc_list) {
1139 bifp = sc2->sc_ifp;
1140 if (memcmp(eaddr, ifnet_lladdr(bifp), ETHER_ADDR_LEN) == 0)
1141 retry = 1;
1142 }
1143 lck_mtx_unlock(bridge_list_mtx);
1144 }
1145 }
1146#else
1147 /*
1148 * Generate a random ethernet address and use the private AC:DE:48
1149 * OUI code.
1150 */
1151 {
1152 uint32_t r;
1153
1154 read_random(&r, sizeof(r));
1155 eaddr[0] = 0xAC;
1156 eaddr[1] = 0xDE;
1157 eaddr[2] = 0x48;
1158 eaddr[3] = (r >> 0) & 0xffu;
1159 eaddr[4] = (r >> 8) & 0xffu;
1160 eaddr[5] = (r >> 16) & 0xffu;
1161 }
1162#endif
1163
1164 memset(sdl, 0, sizeof(sdl_buffer));
1165 sdl->sdl_family = AF_LINK;
1166 sdl->sdl_nlen = strlen(sc->sc_if_xname);
1167 sdl->sdl_alen = ETHER_ADDR_LEN;
1168 sdl->sdl_len = offsetof(struct sockaddr_dl, sdl_data);
1169 memcpy(sdl->sdl_data, sc->sc_if_xname, sdl->sdl_nlen);
1170 memcpy(LLADDR(sdl), eaddr, ETHER_ADDR_LEN);
1171
1172#if BRIDGE_DEBUG
1173 link_print(sdl);
1174#endif
1175
1176 error = ifnet_attach(ifp, NULL);
1177 if (error != 0) {
1178 printf("bridge_clone_create: ifnet_attach failed %d\n", error);
1179 goto done;
1180 }
1181
1182 error = ifnet_set_lladdr_and_type(ifp, eaddr, ETHER_ADDR_LEN, IFT_ETHER);
1183 if (error != 0) {
1184 printf("bridge_clone_create: ifnet_set_lladdr_and_type failed %d\n", error);
1185 goto done;
1186 }
1187
1188#if APPLE_BRIDGE_HWCKSUM_SUPPORT
1189 /*
1190 * APPLE MODIFICATION - our bridge can support HW checksums
1191 * (useful if underlying interfaces support them) on TX,
1192 * RX is not that interesting, since the stack just looks to
1193 * see if the packet has been checksummed already (I think)
1194 * but we might as well indicate we support it
1195 */
1196 ifp->if_capabilities =
1197 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx |
1198 IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx ;
1199#endif
1200
1201 bstp_attach(&sc->sc_stp, &bridge_ops);
1202
1203 lck_mtx_lock(bridge_list_mtx);
1204 LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
1205 lck_mtx_unlock(bridge_list_mtx);
1206
1207 /* attach as ethernet */
1208 error = bpf_attach(ifp, DLT_EN10MB, sizeof(struct ether_header), NULL, NULL);
1209
1210done:
1211 if (error != 0) {
1212 printf("bridge_clone_create failed error %d\n", error);
1213 /* Cleanup TBD */
1214 }
1215
1216 return error;
1217}
1218
1219/*
1220 * bridge_clone_destroy:
1221 *
1222 * Destroy a bridge instance.
1223 */
1224static int
1225bridge_clone_destroy(struct ifnet *ifp)
1226{
1227 struct bridge_softc *sc = ifp->if_softc;
1228 struct bridge_iflist *bif;
1229 errno_t error;
1230
1231 BRIDGE_LOCK(sc);
1232 if ((sc->sc_flags & SCF_DETACHING)) {
1233 BRIDGE_UNLOCK(sc);
1234 return 0;
1235 }
1236 sc->sc_flags |= SCF_DETACHING;
1237
1238 bridge_stop(ifp, 1);
1239
1240 error = ifnet_set_flags(ifp, 0, IFF_UP);
1241 if (error != 0) {
1242 printf("bridge_clone_destroy: ifnet_set_flags failed %d\n", error);
1243 }
1244
1245 while ((bif = TAILQ_FIRST(&sc->sc_iflist)) != NULL)
1246 bridge_delete_member(sc, bif, 0);
1247
1248 while ((bif = TAILQ_FIRST(&sc->sc_spanlist)) != NULL) {
1249 bridge_delete_span(sc, bif);
1250 }
1251
1252 BRIDGE_UNLOCK(sc);
1253
1254 error = ifnet_detach(ifp);
1255 if (error != 0) {
1256 panic("bridge_clone_destroy: ifnet_detach(%p) failed %d\n", ifp, error);
1257 if ((sc = (struct bridge_softc *)ifnet_softc(ifp)) != NULL) {
1258 BRIDGE_LOCK(sc);
1259 sc->sc_flags &= ~SCF_DETACHING;
1260 BRIDGE_UNLOCK(sc);
1261 }
1262 return 0;
1263 }
1264
1265 return 0;
1266}
1267
1268#define DRVSPEC do { \
1269 if (ifd->ifd_cmd >= bridge_control_table_size) { \
1270 error = EINVAL; \
1271 break; \
1272 } \
1273 bc = &bridge_control_table[ifd->ifd_cmd]; \
1274 \
1275 if (cmd == SIOCGDRVSPEC && \
1276 (bc->bc_flags & BC_F_COPYOUT) == 0) { \
1277 error = EINVAL; \
1278 break; \
1279 } \
1280 else if (cmd == SIOCSDRVSPEC && \
1281 (bc->bc_flags & BC_F_COPYOUT) != 0) { \
1282 error = EINVAL; \
1283 break; \
1284 } \
1285 \
1286 if (bc->bc_flags & BC_F_SUSER) { \
1287 error = kauth_authorize_generic(kauth_cred_get(), KAUTH_GENERIC_ISSUSER); \
1288 if (error) \
1289 break; \
1290 } \
1291 \
1292 if (ifd->ifd_len != bc->bc_argsize || \
1293 ifd->ifd_len > sizeof(args)) { \
1294 error = EINVAL; \
1295 break; \
1296 } \
1297 \
1298 bzero(&args, sizeof(args)); \
1299 if (bc->bc_flags & BC_F_COPYIN) { \
1300 error = copyin(ifd->ifd_data, &args, ifd->ifd_len); \
1301 if (error) \
1302 break; \
1303 } \
1304 \
1305 BRIDGE_LOCK(sc); \
1306 error = (*bc->bc_func)(sc, &args); \
1307 BRIDGE_UNLOCK(sc); \
1308 if (error) \
1309 break; \
1310 \
1311 if (bc->bc_flags & BC_F_COPYOUT) \
1312 error = copyout(&args, ifd->ifd_data, ifd->ifd_len); \
1313} while (0)
1314
1315
1316/*
1317 * bridge_ioctl:
1318 *
1319 * Handle a control request from the operator.
1320 */
1321static errno_t
1322bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1323{
1324 struct bridge_softc *sc = ifp->if_softc;
1325 struct ifreq *ifr = (struct ifreq *) data;
1326 int error = 0;
1327
1328 lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
1329
1330#if BRIDGE_DEBUG
1331 if (_if_brige_debug)
1332 printf("bridge_ioctl: ifp %p cmd 0x%08lx (%c%c [%lu] %c %lu)\n",
1333 ifp,
1334 cmd,
1335 (cmd & IOC_IN) ? 'I' : ' ',
1336 (cmd & IOC_OUT) ? 'O' : ' ',
1337 IOCPARM_LEN(cmd),
1338 (char)IOCGROUP(cmd),
1339 cmd & 0xff);
1340#endif
1341
1342 switch (cmd) {
1343
1344 case SIOCSIFADDR:
1345 case SIOCAIFADDR:
1346 ifnet_set_flags(ifp, IFF_UP, IFF_UP);
1347 break;
1348
1349 case SIOCGIFMEDIA32:
1350 case SIOCGIFMEDIA64:
1351 error = EINVAL;
1352 break;
1353
1354 case SIOCADDMULTI:
1355 case SIOCDELMULTI:
1356 break;
1357
1358 case SIOCSDRVSPEC32:
1359 case SIOCGDRVSPEC32: {
1360 union {
1361 struct ifbreq ifbreq;
1362 struct ifbifconf32 ifbifconf;
1363 struct ifbareq32 ifbareq;
1364 struct ifbaconf32 ifbaconf;
1365 struct ifbrparam ifbrparam;
1366 struct ifbropreq32 ifbropreq;
1367 } args;
1368 struct ifdrv32 *ifd = (struct ifdrv32 *) data;
1369 const struct bridge_control *bridge_control_table = bridge_control_table32, *bc;
1370
1371 DRVSPEC;
1372
1373 break;
1374 }
1375 case SIOCSDRVSPEC64:
1376 case SIOCGDRVSPEC64: {
1377 union {
1378 struct ifbreq ifbreq;
1379 struct ifbifconf64 ifbifconf;
1380 struct ifbareq64 ifbareq;
1381 struct ifbaconf64 ifbaconf;
1382 struct ifbrparam ifbrparam;
1383 struct ifbropreq64 ifbropreq;
1384 } args;
1385 struct ifdrv64 *ifd = (struct ifdrv64 *) data;
1386 const struct bridge_control *bridge_control_table = bridge_control_table64, *bc;
1387
1388 DRVSPEC;
1389
1390 break;
1391 }
1392
1393 case SIOCSIFFLAGS:
1394 if (!(ifp->if_flags & IFF_UP) &&
1395 (ifp->if_flags & IFF_RUNNING)) {
1396 /*
1397 * If interface is marked down and it is running,
1398 * then stop and disable it.
1399 */
1400 BRIDGE_LOCK(sc);
1401 bridge_stop(ifp, 1);
1402 BRIDGE_UNLOCK(sc);
1403 } else if ((ifp->if_flags & IFF_UP) &&
1404 !(ifp->if_flags & IFF_RUNNING)) {
1405 /*
1406 * If interface is marked up and it is stopped, then
1407 * start it.
1408 */
1409 BRIDGE_LOCK(sc);
1410 error = bridge_init(ifp);
1411 BRIDGE_UNLOCK(sc);
1412 }
1413 break;
1414
1415 case SIOCSIFLLADDR:
1416 error = ifnet_set_lladdr(ifp, ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len);
1417 if (error != 0)
1418 printf("bridge_ioctl: ifnet_set_lladdr failed %d\n", error);
1419 break;
1420
1421 case SIOCSIFMTU:
1422 /* Do not allow the MTU to be changed on the bridge */
1423 error = EINVAL;
1424 break;
1425
1426 default:
1427 /*
1428 * drop the lock as ether_ioctl() will call bridge_start() and
1429 * cause the lock to be recursed.
1430 */
1431 error = ether_ioctl(ifp, cmd, data);
1432#if BRIDGE_DEBUG
1433 if (error != 0)
1434 printf("bridge_ioctl: ether_ioctl ifp %p cmd 0x%08lx (%c%c [%lu] %c %lu) failed error: %d\n",
1435 ifp,
1436 cmd,
1437 (cmd & IOC_IN) ? 'I' : ' ',
1438 (cmd & IOC_OUT) ? 'O' : ' ',
1439 IOCPARM_LEN(cmd),
1440 (char) IOCGROUP(cmd),
1441 cmd & 0xff,
1442 error);
1443#endif /* BRIDGE_DEBUG */
1444 break;
1445 }
1446 lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
1447
1448 return (error);
1449}
1450
1451#if HAS_IF_CAP
1452/*
1453 * bridge_mutecaps:
1454 *
1455 * Clear or restore unwanted capabilities on the member interface
1456 */
1457static void
1458bridge_mutecaps(struct bridge_softc *sc)
1459{
1460 struct bridge_iflist *bif;
1461 int enabled, mask;
1462
1463 /* Initial bitmask of capabilities to test */
1464 mask = BRIDGE_IFCAPS_MASK;
1465
1466 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1467 /* Every member must support it or it's disabled */
1468 mask &= bif->bif_savedcaps;
1469 }
1470
1471 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1472 enabled = bif->bif_ifp->if_capenable;
1473 enabled &= ~BRIDGE_IFCAPS_STRIP;
1474 /* strip off mask bits and enable them again if allowed */
1475 enabled &= ~BRIDGE_IFCAPS_MASK;
1476 enabled |= mask;
1477
1478 bridge_set_ifcap(sc, bif, enabled);
1479 }
1480
1481}
1482
1483static void
1484bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
1485{
1486 struct ifnet *ifp = bif->bif_ifp;
1487 struct ifreq ifr;
1488 int error;
1489
1490 bzero(&ifr, sizeof(ifr));
1491 ifr.ifr_reqcap = set;
1492
1493 if (ifp->if_capenable != set) {
1494 IFF_LOCKGIANT(ifp);
1495 error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
1496 IFF_UNLOCKGIANT(ifp);
1497 if (error)
1498 printf("%s%d: error setting interface capabilities on %s%d\n",
1499     ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp),
1500     ifnet_name(ifp), ifnet_unit(ifp));
1501 }
1502}
1503#endif /* HAS_IF_CAP */
1504
1505/*
1506 * bridge_lookup_member:
1507 *
1508 * Lookup a bridge member interface.
1509 */
1510static struct bridge_iflist *
1511bridge_lookup_member(struct bridge_softc *sc, const char *name)
1512{
1513 struct bridge_iflist *bif;
1514 struct ifnet *ifp;
1515 char if_xname[IFNAMSIZ];
1516
1517 BRIDGE_LOCK_ASSERT(sc);
1518
1519 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1520 ifp = bif->bif_ifp;
1521 snprintf(if_xname, sizeof(if_xname), "%s%d",
1522 ifnet_name(ifp), ifnet_unit(ifp));
1523 if (strncmp(if_xname, name, sizeof(if_xname)) == 0)
1524 return (bif);
1525 }
1526
1527 return (NULL);
1528}
1529
1530/*
1531 * bridge_lookup_member_if:
1532 *
1533 * Lookup a bridge member interface by ifnet*.
1534 */
1535static struct bridge_iflist *
1536bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
1537{
1538 struct bridge_iflist *bif;
1539
1540 BRIDGE_LOCK_ASSERT(sc);
1541
1542 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1543 if (bif->bif_ifp == member_ifp)
1544 return (bif);
1545 }
1546
1547 return (NULL);
1548}
1549
1550static errno_t
1551bridge_iff_input(void* cookie, ifnet_t ifp, __unused protocol_family_t protocol,
1552 mbuf_t *data, char **frame_ptr)
1553{
1554 errno_t error = 0;
1555 struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
1556 struct bridge_softc *sc = bif->bif_sc;
1557 int included = 0;
1558 size_t frmlen = 0;
1559 mbuf_t m = *data;
1560
1561 if ((m->m_flags & M_PROTO1))
1562 goto out;
1563
1564 if (*frame_ptr >= (char *)mbuf_datastart(m) && *frame_ptr <= (char *)mbuf_data(m)) {
1565 included = 1;
1566 frmlen = (char *)mbuf_data(m) - *frame_ptr;
1567 }
1568#if BRIDGE_DEBUG
1569 if (_if_brige_debug) {
1570 printf("bridge_iff_input %s%d from %s%d m %p data %p frame %p %s frmlen %lu\n",
1571 ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp),
1572 ifnet_name(ifp), ifnet_unit(ifp),
1573 m, mbuf_data(m), *frame_ptr, included ? "inside" : "outside", frmlen);
1574
1575 if (_if_brige_debug > 1) {
1576 printf_mbuf(m, "bridge_iff_input[", "\n");
1577 printf_ether_header((struct ether_header *)*frame_ptr);
1578 printf_mbuf_data(m, 0, 20);
1579 printf("\n");
1580 }
1581 }
1582#endif /* BRIDGE_DEBUG */
1583
1584 /* Move the data pointer back to the start of the frame (the link-layer header) */
1585 if (included) {
1586 (void) mbuf_setdata(m, (char *)mbuf_data(m) - frmlen, mbuf_len(m) + frmlen);
1587 (void) mbuf_pkthdr_adjustlen(m, frmlen);
1588 } else {
1589 printf("bridge_iff_input: frame_ptr outside mbuf\n");
1590 goto out;
1591 }
1592
1593 error = bridge_input(ifp, m, *frame_ptr);
1594
1595 /* Adjust packet back to original */
1596 if (error == 0) {
1597 (void) mbuf_setdata(m, (char *)mbuf_data(m) + frmlen, mbuf_len(m) - frmlen);
1598 (void) mbuf_pkthdr_adjustlen(m, -frmlen);
1599 }
1600#if BRIDGE_DEBUG
1601 if (_if_brige_debug > 1) {
1602 printf("\n");
1603 printf_mbuf(m, "bridge_iff_input]", "\n");
1604 }
1605#endif /* BRIDGE_DEBUG */
1606
1607out:
1608 lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
1609
1610 return error;
1611}
1612
1613
1614#if BRIDGE_MEMBER_OUT_FILTER
1615static errno_t
1616bridge_iff_output(void *cookie, ifnet_t ifp, __unused protocol_family_t protocol, mbuf_t *data)
1617{
1618 errno_t error = 0;
1619 struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
1620 struct bridge_softc *sc = bif->bif_sc;
1621 mbuf_t m = *data;
1622
1623 if ((m->m_flags & M_PROTO1))
1624 goto out;
1625
1626#if BRIDGE_DEBUG
1627 if (_if_brige_debug) {
1628 printf("bridge_iff_output %s%d from %s%d m %p data %p\n",
1629 ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp),
1630 ifnet_name(ifp), ifnet_unit(ifp),
1631 m, mbuf_data(m));
1632 }
1633#endif /* BRIDGE_DEBUG */
1634
1635 error = bridge_output(sc, ifp, m);
1636 if (error != 0) {
1637 printf("bridge_iff_output: bridge_output failed error %d\n", error);
1638 }
1639
1640out:
1641 lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
1642
1643 return error;
1644}
1645#endif /* BRIDGE_MEMBER_OUT_FILTER */
1646
1647
1648static void
1649bridge_iff_event(void* cookie, ifnet_t ifp, __unused protocol_family_t protocol,
1650 const struct kev_msg *event_msg)
1651{
1652 struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
1653
1654 if (event_msg->vendor_code == KEV_VENDOR_APPLE &&
1655 event_msg->kev_class == KEV_NETWORK_CLASS &&
1656 event_msg->kev_subclass == KEV_DL_SUBCLASS) {
1657 switch (event_msg->event_code) {
1658 case KEV_DL_IF_DETACHING:
1659 case KEV_DL_IF_DETACHED:
1660 bridge_ifdetach(bif, ifp);
1661 break;
1662
1663 case KEV_DL_LINK_OFF:
1664 case KEV_DL_LINK_ON: {
1665 bstp_linkstate(ifp, event_msg->event_code);
1666 break;
1667 }
1668
1669 case KEV_DL_SIFFLAGS: {
1670 if (bif->bif_promisc == 0 && (ifp->if_flags & IFF_UP)) {
1671 errno_t error = ifnet_set_promiscuous(ifp, 1);
1672 if (error != 0) {
1673 printf("bridge_iff_event: ifnet_set_promiscuous(%s%d) failed %d\n",
1674 ifnet_name(ifp), ifnet_unit(ifp), error);
1675 } else {
1676 bif->bif_promisc = 1;
1677 }
1678 }
1679 break;
1680 }
1681
1682 default:
1683 break;
1684 }
1685 }
1686}
1687
1688/*
1689 * bridge_iff_detached:
1690 *
1691 * Detach an interface from a bridge. Called when a member
1692 * interface is detaching.
1693 */
1694static void
1695bridge_iff_detached(void* cookie, __unused ifnet_t ifp)
1696{
1697 struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
1698
1699#if BRIDGE_DEBUG
1700 printf("bridge_iff_detached: %s%d\n",
1701 ifnet_name(ifp), ifnet_unit(ifp));
1702#endif
1703
1704 bridge_ifdetach(bif, ifp);
1705
1706 _FREE(bif, M_DEVBUF);
1707
1708 return;
1709}
1710
1711static errno_t
1712bridge_proto_input(ifnet_t ifp, __unused protocol_family_t protocol,
1713 __unused mbuf_t packet, __unused char *header)
1714{
1715 printf("bridge_proto_input: unexpected packet from %s%d\n",
1716 ifnet_name(ifp), ifnet_unit(ifp));
1717 return 0;
1718}
1719
1720static int
1721bridge_attach_protocol(struct ifnet *ifp)
1722{
1723 int error;
1724 struct ifnet_attach_proto_param reg;
1725
1726 printf("bridge_attach_protocol: %s%d\n",
1727 ifnet_name(ifp), ifnet_unit(ifp));
1728
1729 bzero(&reg, sizeof(reg));
1730 reg.input = bridge_proto_input;
1731
1732 error = ifnet_attach_protocol(ifp, PF_BRIDGE, &reg);
1733 if (error)
1734 printf("bridge_attach_protocol: ifnet_attach_protocol(%s%d) failed, %d\n",
1735 ifnet_name(ifp), ifnet_unit(ifp), error);
1736
1737 return (error);
1738}
1739
1740static int
1741bridge_detach_protocol(struct ifnet *ifp)
1742{
1743 int error;
1744
1745 printf("bridge_detach_protocol: %s%d\n",
1746 ifnet_name(ifp), ifnet_unit(ifp));
1747
1748 error = ifnet_detach_protocol(ifp, PF_BRIDGE);
1749 if (error)
1750 printf("bridge_detach_protocol: ifnet_detach_protocol(%s%d) failed, %d\n",
1751 ifnet_name(ifp), ifnet_unit(ifp), error);
1752
1753 return (error);
1754}
1755
1756/*
1757 * bridge_delete_member:
1758 *
1759 * Delete the specified member interface.
1760 */
1761static void
1762bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
1763 int gone)
1764{
1765 struct ifnet *ifs = bif->bif_ifp;
1766
1767 BRIDGE_LOCK_ASSERT(sc);
1768
1769 if (!gone) {
1770 switch (ifs->if_type) {
1771 case IFT_ETHER:
1772 case IFT_L2VLAN:
1773 /*
1774 * Take the interface out of promiscuous mode.
1775 */
1776 if (bif->bif_promisc)
1777 (void) ifnet_set_promiscuous(ifs, 0);
1778 break;
1779
1780 case IFT_GIF:
1781 break;
1782
1783 default:
1784#ifdef DIAGNOSTIC
1785 panic("bridge_delete_member: impossible");
1786#endif
1787 break;
1788 }
1789
1790#if HAS_IF_CAP
1791 /* re-enable any interface capabilities */
1792 bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
1793#endif
1794 }
1795
1796 if (bif->bif_proto_attached) {
1797 /* Respect lock ordering with DLIL lock */
1798 BRIDGE_UNLOCK(sc);
1799 (void) bridge_detach_protocol(ifs);
1800 BRIDGE_LOCK(sc);
1801 }
1802 if (bif->bif_flags & IFBIF_STP)
1803 bstp_disable(&bif->bif_stp);
1804
1805 ifs->if_bridge = NULL;
1806 BRIDGE_XLOCK(sc);
1807 TAILQ_REMOVE(&sc->sc_iflist, bif, bif_next);
1808 BRIDGE_XDROP(sc);
1809
1810 ifnet_release(ifs);
1811
1812#if HAS_IF_CAP
1813	bridge_mutecaps(sc);	/* recalculate now that this interface is removed */
1814#endif /* HAS_IF_CAP */
1815 bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
1816 KASSERT(bif->bif_addrcnt == 0,
1817 ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
1818
1819 BRIDGE_UNLOCK(sc);
1820 bstp_destroy(&bif->bif_stp); /* prepare to free */
1821 BRIDGE_LOCK(sc);
1822
1823 if (bif->bif_filter_attached) {
1824 /* Respect lock ordering with DLIL lock */
1825 BRIDGE_UNLOCK(sc);
1826 iflt_detach(bif->bif_iff_ref);
1827 BRIDGE_LOCK(sc);
1828 } else {
1829 _FREE(bif, M_DEVBUF);
1830 }
1831}
1832
1833/*
1834 * bridge_delete_span:
1835 *
1836 * Delete the specified span interface.
1837 */
1838static void
1839bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1840{
1841 BRIDGE_LOCK_ASSERT(sc);
1842
1843 KASSERT(bif->bif_ifp->if_bridge == NULL,
1844 ("%s: not a span interface", __func__));
1845
1846 ifnet_release(bif->bif_ifp);
1847
1848 TAILQ_REMOVE(&sc->sc_spanlist, bif, bif_next);
1849 _FREE(bif, M_DEVBUF);
1850}
1851
1852static int
1853bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1854{
1855 struct ifbreq *req = arg;
1856 struct bridge_iflist *bif = NULL;
1857 struct ifnet *ifs;
1858 int error = 0;
1859 struct iff_filter iff;
1860
1861 ifs = ifunit(req->ifbr_ifsname);
1862 if (ifs == NULL)
1863 return (ENOENT);
1864 if (ifs->if_ioctl == NULL) /* must be supported */
1865 return (EINVAL);
1866
1867 /* If it's in the span list, it can't be a member. */
1868 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
1869 if (ifs == bif->bif_ifp)
1870 return (EBUSY);
1871
1872 /* Allow the first Ethernet member to define the MTU */
1873 if (ifs->if_type != IFT_GIF) {
1874 if (TAILQ_EMPTY(&sc->sc_iflist))
1875 sc->sc_ifp->if_mtu = ifs->if_mtu;
1876 else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
1877			printf("%s%d: invalid MTU for %s%d\n",
1878 ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp),
1879 ifnet_name(ifs), ifnet_unit(ifs));
1880 return (EINVAL);
1881 }
1882 }
1883
1884 if (ifs->if_bridge == sc)
1885 return (EEXIST);
1886
1887 if (ifs->if_bridge != NULL)
1888 return (EBUSY);
1889
1890 bif = _MALLOC(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1891 if (bif == NULL)
1892 return (ENOMEM);
1893
1894 bif->bif_ifp = ifs;
1895 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
1896#if HAS_IF_CAP
1897 bif->bif_savedcaps = ifs->if_capenable;
1898#endif /* HAS_IF_CAP */
1899 bif->bif_sc = sc;
1900
1901 ifnet_reference(ifs);
1902
1903 ifs->if_bridge = sc;
1904 bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
1905 /*
1906 * XXX: XLOCK HERE!?!
1907 */
1908 TAILQ_INSERT_TAIL(&sc->sc_iflist, bif, bif_next);
1909
1910#if HAS_IF_CAP
1911 /* Set interface capabilities to the intersection set of all members */
1912 bridge_mutecaps(sc);
1913#endif /* HAS_IF_CAP */
1914
1915
1916 switch (ifs->if_type) {
1917 case IFT_ETHER:
1918 case IFT_L2VLAN:
1919 /*
1920 * Place the interface into promiscuous mode.
1921 */
1922 error = ifnet_set_promiscuous(ifs, 1);
1923 if (error) {
1924 /* Ignore error when device is not up */
1925 if (error != ENETDOWN)
1926 goto out;
1927 error = 0;
1928 } else {
1929 bif->bif_promisc = 1;
1930 }
1931 break;
1932
1933 case IFT_GIF:
1934 break;
1935
1936 default:
1937 error = EINVAL;
1938 goto out;
1939 }
1940
1941 /*
1942 * Respect lock ordering with DLIL lock for the following operations
1943 */
1944 BRIDGE_UNLOCK(sc);
1945
1946 /*
1947 * install an interface filter
1948 */
1949 memset(&iff, 0, sizeof(struct iff_filter));
1950 iff.iff_cookie = bif;
1951 iff.iff_name = "com.apple.kernel.bsd.net.if_bridge";
1952 iff.iff_input = bridge_iff_input;
1953#if BRIDGE_MEMBER_OUT_FILTER
1954 iff.iff_output = bridge_iff_output;
1955#endif /* BRIDGE_MEMBER_OUT_FILTER */
1956 iff.iff_event = bridge_iff_event;
1957 iff.iff_detached = bridge_iff_detached;
1958 error = iflt_attach(ifs, &iff, &bif->bif_iff_ref);
1959 if (error != 0) {
1960 printf("bridge_ioctl_add: iflt_attach failed %d\n", error);
1961 BRIDGE_LOCK(sc);
1962 goto out;
1963 }
1964 bif->bif_filter_attached = 1;
1965
1966 /*
1967	 * install a dummy "bridge" protocol
1968 */
1969	if ((error = bridge_attach_protocol(ifs)) != 0) {
1970		printf("bridge_ioctl_add: bridge_attach_protocol failed %d\n",
1971		    error);
1972		BRIDGE_LOCK(sc);
1973		goto out;
1974	}
1976 bif->bif_proto_attached = 1;
1977
1978 BRIDGE_LOCK(sc);
1979
1980out:
1981 if (error && bif != NULL)
1982 bridge_delete_member(sc, bif, 1);
1983
1984 return (error);
1985}
1986
1987static int
1988bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1989{
1990 struct ifbreq *req = arg;
1991 struct bridge_iflist *bif;
1992
1993 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1994 if (bif == NULL)
1995 return (ENOENT);
1996
1997 bridge_delete_member(sc, bif, 0);
1998
1999 return (0);
2000}
2001
2002static int
2003bridge_ioctl_purge(__unused struct bridge_softc *sc, __unused void *arg)
2004{
2005 return (0);
2006}
2007
2008static int
2009bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
2010{
2011 struct ifbreq *req = arg;
2012 struct bridge_iflist *bif;
2013 struct bstp_port *bp;
2014
2015 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2016 if (bif == NULL)
2017 return (ENOENT);
2018
2019 bp = &bif->bif_stp;
2020 req->ifbr_ifsflags = bif->bif_flags;
2021 req->ifbr_state = bp->bp_state;
2022 req->ifbr_priority = bp->bp_priority;
2023 req->ifbr_path_cost = bp->bp_path_cost;
2024 req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
2025 req->ifbr_proto = bp->bp_protover;
2026 req->ifbr_role = bp->bp_role;
2027 req->ifbr_stpflags = bp->bp_flags;
2028 req->ifbr_addrcnt = bif->bif_addrcnt;
2029 req->ifbr_addrmax = bif->bif_addrmax;
2030 req->ifbr_addrexceeded = bif->bif_addrexceeded;
2031
2032 /* Copy STP state options as flags */
2033 if (bp->bp_operedge)
2034 req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
2035 if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
2036 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
2037 if (bp->bp_ptp_link)
2038 req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
2039 if (bp->bp_flags & BSTP_PORT_AUTOPTP)
2040 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
2041 if (bp->bp_flags & BSTP_PORT_ADMEDGE)
2042 req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
2043 if (bp->bp_flags & BSTP_PORT_ADMCOST)
2044 req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
2045 return (0);
2046}
2047
2048static int
2049bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
2050{
2051 struct ifbreq *req = arg;
2052 struct bridge_iflist *bif;
2053 struct bstp_port *bp;
2054 int error;
2055
2056 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2057 if (bif == NULL)
2058 return (ENOENT);
2059 bp = &bif->bif_stp;
2060
2061 if (req->ifbr_ifsflags & IFBIF_SPAN)
2062 /* SPAN is readonly */
2063 return (EINVAL);
2064
2065
2066 if (req->ifbr_ifsflags & IFBIF_STP) {
2067 if ((bif->bif_flags & IFBIF_STP) == 0) {
2068 error = bstp_enable(&bif->bif_stp);
2069 if (error)
2070 return (error);
2071 }
2072 } else {
2073 if ((bif->bif_flags & IFBIF_STP) != 0)
2074 bstp_disable(&bif->bif_stp);
2075 }
2076
2077 /* Pass on STP flags */
2078 bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
2079 bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
2080 bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
2081 bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
2082
2083 /* Save the bits relating to the bridge */
2084 bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
2085
2086
2087 return (0);
2088}
2089
2090static int
2091bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
2092{
2093 struct ifbrparam *param = arg;
2094
2095 sc->sc_brtmax = param->ifbrp_csize;
2096 bridge_rttrim(sc);
2097
2098 return (0);
2099}
2100
2101static int
2102bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
2103{
2104 struct ifbrparam *param = arg;
2105
2106 param->ifbrp_csize = sc->sc_brtmax;
2107
2108 return (0);
2109}
2110
2111
2112#define BRIDGE_IOCTL_GIFS do { \
2113 struct bridge_iflist *bif; \
2114 struct ifbreq breq; \
2115 char *buf, *outbuf; \
2116 unsigned int count, buflen, len; \
2117 \
2118 count = 0; \
2119 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) \
2120 count++; \
2121 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) \
2122 count++; \
2123 \
2124 buflen = sizeof(breq) * count; \
2125 if (bifc->ifbic_len == 0) { \
2126 bifc->ifbic_len = buflen; \
2127 return (0); \
2128 } \
2129 BRIDGE_UNLOCK(sc); \
2130 outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
2131 BRIDGE_LOCK(sc); \
2132 \
2133 count = 0; \
2134 buf = outbuf; \
2135 len = min(bifc->ifbic_len, buflen); \
2136 bzero(&breq, sizeof(breq)); \
2137 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
2138 if (len < sizeof(breq)) \
2139 break; \
2140 \
2141 snprintf(breq.ifbr_ifsname, sizeof(breq.ifbr_ifsname), "%s%d", \
2142 ifnet_name(bif->bif_ifp), ifnet_unit(bif->bif_ifp)); \
2143 /* Fill in the ifbreq structure */ \
2144 error = bridge_ioctl_gifflags(sc, &breq); \
2145 if (error) \
2146 break; \
2147 memcpy(buf, &breq, sizeof(breq)); \
2148 count++; \
2149 buf += sizeof(breq); \
2150 len -= sizeof(breq); \
2151 } \
2152 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) { \
2153 if (len < sizeof(breq)) \
2154 break; \
2155 \
2156 snprintf(breq.ifbr_ifsname, sizeof(breq.ifbr_ifsname), "%s%d", \
2157 ifnet_name(bif->bif_ifp), ifnet_unit(bif->bif_ifp)); \
2158 breq.ifbr_ifsflags = bif->bif_flags; \
2159 breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff; \
2160 memcpy(buf, &breq, sizeof(breq)); \
2161 count++; \
2162 buf += sizeof(breq); \
2163 len -= sizeof(breq); \
2164 } \
2165 \
2166 BRIDGE_UNLOCK(sc); \
2167 bifc->ifbic_len = sizeof(breq) * count; \
2168 error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); \
2169 BRIDGE_LOCK(sc); \
2170 _FREE(outbuf, M_TEMP); \
2171} while (0)
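
/*
 * Note: BRIDGE_IOCTL_GIFS is written as a statement macro rather than a
 * function so the same body can serve the 32-bit and 64-bit flavors of the
 * ioctl; it expects `sc', `bifc' and `error' to be defined in the calling
 * scope.  A minimal sketch of a caller (mirroring bridge_ioctl_gifs32/64
 * below):
 *
 *	struct ifbifconf32 *bifc = arg;
 *	int error = 0;
 *
 *	BRIDGE_IOCTL_GIFS;
 *	return (error);
 */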
2172
2173static int
2174bridge_ioctl_gifs64(struct bridge_softc *sc, void *arg)
2175{
2176 struct ifbifconf64 *bifc = arg;
2177 int error = 0;
2178
2179 BRIDGE_IOCTL_GIFS;
2180
2181 return (error);
2182}
2183
2184static int
2185bridge_ioctl_gifs32(struct bridge_softc *sc, void *arg)
2186{
2187 struct ifbifconf32 *bifc = arg;
2188 int error = 0;
2189
2190 BRIDGE_IOCTL_GIFS;
2191
2192 return (error);
2193}
2194
2195
2196#define BRIDGE_IOCTL_RTS do { \
2197 struct bridge_rtnode *brt; \
2198 char *buf, *outbuf; \
2199 unsigned int count, buflen, len; \
2200 struct timespec now; \
2201 \
2202 if (bac->ifbac_len == 0) \
2203 return (0); \
2204 \
2205 count = 0; \
2206 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) \
2207 count++; \
2208 buflen = sizeof(bareq) * count; \
2209 \
2210 BRIDGE_UNLOCK(sc); \
2211 outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
2212 BRIDGE_LOCK(sc); \
2213 \
2214 count = 0; \
2215 buf = outbuf; \
2216 len = min(bac->ifbac_len, buflen); \
2217 bzero(&bareq, sizeof(bareq)); \
2218 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { \
2219 if (len < sizeof(bareq)) \
2220 goto out; \
2221 snprintf(bareq.ifba_ifsname, sizeof(bareq.ifba_ifsname), "%s%d", \
2222 ifnet_name(brt->brt_ifp), ifnet_unit(brt->brt_ifp)); \
2223 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr)); \
2224 bareq.ifba_vlan = brt->brt_vlan; \
2225 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { \
2226 nanouptime(&now); \
2227 if ((unsigned long)now.tv_sec < brt->brt_expire) \
2228 bareq.ifba_expire = brt->brt_expire - now.tv_sec; \
2229 } else \
2230 bareq.ifba_expire = 0; \
2231 bareq.ifba_flags = brt->brt_flags; \
2232 \
2233 memcpy(buf, &bareq, sizeof(bareq)); \
2234 count++; \
2235 buf += sizeof(bareq); \
2236 len -= sizeof(bareq); \
2237 } \
2238out: \
2239 BRIDGE_UNLOCK(sc); \
2240 bac->ifbac_len = sizeof(bareq) * count; \
2241 error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); \
2242 BRIDGE_LOCK(sc); \
2243 _FREE(outbuf, M_TEMP); \
2244 return (error); \
2245} while (0)
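
/*
 * Like BRIDGE_IOCTL_GIFS above, BRIDGE_IOCTL_RTS expects its variables from
 * the caller: `sc', `bac', `error' and, in particular, a `bareq' of the
 * matching width (struct ifbareq32 or struct ifbareq64), which sets the
 * record size copied out to user space.  Unlike BRIDGE_IOCTL_GIFS, this
 * macro returns on its own.  Sketch of a caller (see bridge_ioctl_rts64
 * below):
 *
 *	struct ifbaconf64 *bac = arg;
 *	struct ifbareq64 bareq;
 *	int error = 0;
 *
 *	BRIDGE_IOCTL_RTS;
 */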
2246
2247static int
2248bridge_ioctl_rts64(struct bridge_softc *sc, void *arg)
2249{
2250 struct ifbaconf64 *bac = arg;
2251 struct ifbareq64 bareq;
2252 int error = 0;
2253
2254 BRIDGE_IOCTL_RTS;
2255
2256 return (error);
2257}
2258
2259static int
2260bridge_ioctl_rts32(struct bridge_softc *sc, void *arg)
2261{
2262 struct ifbaconf32 *bac = arg;
2263 struct ifbareq32 bareq;
2264 int error = 0;
2265
2266 BRIDGE_IOCTL_RTS;
2267
2268 return (error);
2269}
2270
2271static int
2272bridge_ioctl_saddr32(struct bridge_softc *sc, void *arg)
2273{
2274 struct ifbareq32 *req = arg;
2275 struct bridge_iflist *bif;
2276 int error;
2277
2278 bif = bridge_lookup_member(sc, req->ifba_ifsname);
2279 if (bif == NULL)
2280 return (ENOENT);
2281
2282 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
2283 req->ifba_flags);
2284
2285 return (error);
2286}
2287
2288static int
2289bridge_ioctl_saddr64(struct bridge_softc *sc, void *arg)
2290{
2291 struct ifbareq64 *req = arg;
2292 struct bridge_iflist *bif;
2293 int error;
2294
2295 bif = bridge_lookup_member(sc, req->ifba_ifsname);
2296 if (bif == NULL)
2297 return (ENOENT);
2298
2299 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
2300 req->ifba_flags);
2301
2302 return (error);
2303}
2304
2305static int
2306bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
2307{
2308 struct ifbrparam *param = arg;
2309
2310 sc->sc_brttimeout = param->ifbrp_ctime;
2311 return (0);
2312}
2313
2314static int
2315bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
2316{
2317 struct ifbrparam *param = arg;
2318
2319 param->ifbrp_ctime = sc->sc_brttimeout;
2320 return (0);
2321}
2322
2323static int
2324bridge_ioctl_daddr32(struct bridge_softc *sc, void *arg)
2325{
2326 struct ifbareq32 *req = arg;
2327
2328 return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
2329}
2330
2331static int
2332bridge_ioctl_daddr64(struct bridge_softc *sc, void *arg)
2333{
2334 struct ifbareq64 *req = arg;
2335
2336 return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
2337}
2338
2339static int
2340bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
2341{
2342 struct ifbreq *req = arg;
2343
2344 bridge_rtflush(sc, req->ifbr_ifsflags);
2345 return (0);
2346}
2347
2348static int
2349bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
2350{
2351 struct ifbrparam *param = arg;
2352 struct bstp_state *bs = &sc->sc_stp;
2353
2354 param->ifbrp_prio = bs->bs_bridge_priority;
2355 return (0);
2356}
2357
2358static int
2359bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
2360{
2361 struct ifbrparam *param = arg;
2362
2363 return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
2364}
2365
2366static int
2367bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
2368{
2369 struct ifbrparam *param = arg;
2370 struct bstp_state *bs = &sc->sc_stp;
2371
2372 param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
2373 return (0);
2374}
2375
2376static int
2377bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
2378{
2379 struct ifbrparam *param = arg;
2380
2381 return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
2382}
2383
2384static int
2385bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
2386{
2387 struct ifbrparam *param = arg;
2388 struct bstp_state *bs = &sc->sc_stp;
2389
2390 param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
2391 return (0);
2392}
2393
2394static int
2395bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
2396{
2397 struct ifbrparam *param = arg;
2398
2399 return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
2400}
2401
2402static int
2403bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
2404{
2405 struct ifbrparam *param = arg;
2406 struct bstp_state *bs = &sc->sc_stp;
2407
2408 param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
2409 return (0);
2410}
2411
2412static int
2413bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
2414{
2415 struct ifbrparam *param = arg;
2416
2417 return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
2418}
2419
2420static int
2421bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
2422{
2423 struct ifbreq *req = arg;
2424 struct bridge_iflist *bif;
2425
2426 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2427 if (bif == NULL)
2428 return (ENOENT);
2429
2430 return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
2431}
2432
2433static int
2434bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
2435{
2436 struct ifbreq *req = arg;
2437 struct bridge_iflist *bif;
2438
2439 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2440 if (bif == NULL)
2441 return (ENOENT);
2442
2443 return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
2444}
2445
2446static int
2447bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
2448{
2449 struct ifbrparam *param = arg;
2450
2451 param->ifbrp_filter = sc->sc_filter_flags;
2452
2453 return (0);
2454}
2455
2456static int
2457bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
2458{
2459 struct ifbrparam *param = arg;
2460
2461 if (param->ifbrp_filter & ~IFBF_FILT_MASK)
2462 return (EINVAL);
2463
2464#ifndef BRIDGE_IPF
2465 if (param->ifbrp_filter & IFBF_FILT_USEIPF)
2466 return (EINVAL);
2467#endif
2468
2469 sc->sc_filter_flags = param->ifbrp_filter;
2470
2471 return (0);
2472}
2473
2474static int
2475bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
2476{
2477 struct ifbreq *req = arg;
2478 struct bridge_iflist *bif;
2479
2480 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2481 if (bif == NULL)
2482 return (ENOENT);
2483
2484 bif->bif_addrmax = req->ifbr_addrmax;
2485 return (0);
2486}
2487
2488static int
2489bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
2490{
2491 struct ifbreq *req = arg;
2492 struct bridge_iflist *bif = NULL;
2493 struct ifnet *ifs;
2494
2495 ifs = ifunit(req->ifbr_ifsname);
2496 if (ifs == NULL)
2497 return (ENOENT);
2498
2499 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
2500 if (ifs == bif->bif_ifp)
2501 return (EBUSY);
2502
2503 if (ifs->if_bridge != NULL)
2504 return (EBUSY);
2505
2506 switch (ifs->if_type) {
2507 case IFT_ETHER:
2508 case IFT_GIF:
2509 case IFT_L2VLAN:
2510 break;
2511 default:
2512 return (EINVAL);
2513 }
2514
2515 bif = _MALLOC(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
2516 if (bif == NULL)
2517 return (ENOMEM);
2518
2519 bif->bif_ifp = ifs;
2520 bif->bif_flags = IFBIF_SPAN;
2521
2522 ifnet_reference(bif->bif_ifp);
2523
2524 TAILQ_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
2525
2526 return (0);
2527}
2528
2529static int
2530bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
2531{
2532 struct ifbreq *req = arg;
2533 struct bridge_iflist *bif;
2534 struct ifnet *ifs;
2535
2536 ifs = ifunit(req->ifbr_ifsname);
2537 if (ifs == NULL)
2538 return (ENOENT);
2539
2540 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
2541 if (ifs == bif->bif_ifp)
2542 break;
2543
2544 if (bif == NULL)
2545 return (ENOENT);
2546
2547 bridge_delete_span(sc, bif);
2548
2549 return (0);
2550}
2551
2552#define BRIDGE_IOCTL_GBPARAM do { \
2553 struct bstp_state *bs = &sc->sc_stp; \
2554 struct bstp_port *root_port; \
2555 \
2556 req->ifbop_maxage = bs->bs_bridge_max_age >> 8; \
2557 req->ifbop_hellotime = bs->bs_bridge_htime >> 8; \
2558 req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8; \
2559 \
2560 root_port = bs->bs_root_port; \
2561 if (root_port == NULL) \
2562 req->ifbop_root_port = 0; \
2563 else \
2564 req->ifbop_root_port = root_port->bp_ifp->if_index; \
2565 \
2566 req->ifbop_holdcount = bs->bs_txholdcount; \
2567 req->ifbop_priority = bs->bs_bridge_priority; \
2568 req->ifbop_protocol = bs->bs_protover; \
2569 req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; \
2570 req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; \
2571 req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; \
2572 req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; \
2573 req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; \
2574 req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; \
2575} while (0)
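
/*
 * BRIDGE_IOCTL_GBPARAM only reads bridge-wide STP parameters, so the 32-bit
 * and 64-bit wrappers below simply point `req' at the appropriately sized
 * ifbropreq structure before expanding the macro.
 */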
2576
2577static int
2578bridge_ioctl_gbparam32(struct bridge_softc *sc, void *arg)
2579{
2580 struct ifbropreq32 *req = arg;
2581
2582 BRIDGE_IOCTL_GBPARAM;
2583
2584 return (0);
2585}
2586
2587static int
2588bridge_ioctl_gbparam64(struct bridge_softc *sc, void *arg)
2589{
2590 struct ifbropreq64 *req = arg;
2591
2592 BRIDGE_IOCTL_GBPARAM;
2593
2594 return (0);
2595}
2596
2597
2598static int
2599bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
2600{
2601 struct ifbrparam *param = arg;
2602
2603 param->ifbrp_cexceeded = sc->sc_brtexceeded;
2604 return (0);
2605}
2606
2607#define BRIDGE_IOCTL_GIFSSTP do { \
2608 struct bridge_iflist *bif; \
2609 struct bstp_port *bp; \
2610 struct ifbpstpreq bpreq; \
2611 char *buf, *outbuf; \
2612 unsigned int count, buflen, len; \
2613 \
2614 count = 0; \
2615 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
2616 if ((bif->bif_flags & IFBIF_STP) != 0) \
2617 count++; \
2618 } \
2619 \
2620 buflen = sizeof(bpreq) * count; \
2621 if (bifstp->ifbpstp_len == 0) { \
2622 bifstp->ifbpstp_len = buflen; \
2623 return (0); \
2624 } \
2625 \
2626 BRIDGE_UNLOCK(sc); \
2627 outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
2628 BRIDGE_LOCK(sc); \
2629 \
2630 count = 0; \
2631 buf = outbuf; \
2632 len = min(bifstp->ifbpstp_len, buflen); \
2633 bzero(&bpreq, sizeof(bpreq)); \
2634 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
2635 if (len < sizeof(bpreq)) \
2636 break; \
2637 \
2638 if ((bif->bif_flags & IFBIF_STP) == 0) \
2639 continue; \
2640 \
2641 bp = &bif->bif_stp; \
2642 bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff; \
2643 bpreq.ifbp_fwd_trans = bp->bp_forward_transitions; \
2644 bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost; \
2645 bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id; \
2646 bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; \
2647 bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id; \
2648 \
2649 memcpy(buf, &bpreq, sizeof(bpreq)); \
2650 count++; \
2651 buf += sizeof(bpreq); \
2652 len -= sizeof(bpreq); \
2653 } \
2654 \
2655 BRIDGE_UNLOCK(sc); \
2656 bifstp->ifbpstp_len = sizeof(bpreq) * count; \
2657 error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); \
2658 BRIDGE_LOCK(sc); \
2659 _FREE(outbuf, M_TEMP); \
2660 return (error); \
2661} while (0)
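
/*
 * BRIDGE_IOCTL_GIFSSTP follows the same copy-out pattern as
 * BRIDGE_IOCTL_GIFS, but only reports member interfaces with IFBIF_STP set;
 * `sc', `bifstp' and `error' must be supplied by the caller (see
 * bridge_ioctl_gifsstp32/64 below).
 */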
2662
2663static int
2664bridge_ioctl_gifsstp32(struct bridge_softc *sc, void *arg)
2665{
2666 struct ifbpstpconf32 *bifstp = arg;
2667 int error = 0;
2668
2669 BRIDGE_IOCTL_GIFSSTP;
2670
2671 return (error);
2672}
2673
2674static int
2675bridge_ioctl_gifsstp64(struct bridge_softc *sc, void *arg)
2676{
2677 struct ifbpstpconf64 *bifstp = arg;
2678 int error = 0;
2679
2680 BRIDGE_IOCTL_GIFSSTP;
2681
2682 return (error);
2683}
2684
2685static int
2686bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
2687{
2688 struct ifbrparam *param = arg;
2689
2690 return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
2691}
2692
2693static int
2694bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
2695{
2696 struct ifbrparam *param = arg;
2697
2698 return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
2699}
2700
2701/*
2702 * bridge_ifdetach:
2703 *
2704 * Detach an interface from a bridge. Called when a member
2705 * interface is detaching.
2706 */
2707__private_extern__ void
2708bridge_ifdetach(struct bridge_iflist *bif, struct ifnet *ifp)
2709{
2710 struct bridge_softc *sc = ifp->if_bridge;
2711
2712#if BRIDGE_DEBUG
2713 printf("bridge_ifdetach %s%d\n", ifnet_name(ifp), ifnet_unit(ifp));
2714#endif
2715
2716 /* Check if the interface is a bridge member */
2717 if (sc != NULL) {
2718 BRIDGE_LOCK(sc);
2719
2720 bif = bridge_lookup_member_if(sc, ifp);
2721 if (bif != NULL)
2722 bridge_delete_member(sc, bif, 1);
2723
2724 BRIDGE_UNLOCK(sc);
2725 return;
2726 }
2727
2728 /* Check if the interface is a span port */
2729 lck_mtx_lock(bridge_list_mtx);
2730 LIST_FOREACH(sc, &bridge_list, sc_list) {
2731 BRIDGE_LOCK(sc);
2732 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
2733 if (ifp == bif->bif_ifp) {
2734 bridge_delete_span(sc, bif);
2735 break;
2736 }
2737
2738 BRIDGE_UNLOCK(sc);
2739 }
2740 lck_mtx_unlock(bridge_list_mtx);
2741}
2742
2743/*
2744 * bridge_init:
2745 *
2746 * Initialize a bridge interface.
2747 */
2748static int
2749bridge_init(struct ifnet *ifp)
2750{
2751 struct bridge_softc *sc = (struct bridge_softc *)ifp->if_softc;
2752 struct timespec ts;
2753 errno_t error;
2754
2755 BRIDGE_LOCK_ASSERT(sc);
2756
2757 if ((ifnet_flags(ifp) & IFF_RUNNING))
2758 return 0;
2759
2760 ts.tv_sec = bridge_rtable_prune_period;
2761 ts.tv_nsec = 0;
2762 bsd_timeout(bridge_timer, sc, &ts);
2763
2764 error = ifnet_set_flags(ifp, IFF_RUNNING, IFF_RUNNING);
2765 if (error == 0)
2766 bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */
2767
2768 return error;
2769}
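
/*
 * Note: the route-cache prune timer armed above fires after
 * bridge_rtable_prune_period seconds; bridge_stop() below cancels it with
 * bsd_untimeout() before tearing down the STP state.
 */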
2770
2771/*
2772 * bridge_stop:
2773 *
2774 * Stop the bridge interface.
2775 */
2776static void
2777bridge_stop(struct ifnet *ifp, __unused int disable)
2778{
2779 struct bridge_softc *sc = ifp->if_softc;
2780
2781 BRIDGE_LOCK_ASSERT(sc);
2782
2783 if ((ifnet_flags(ifp) & IFF_RUNNING) == 0)
2784 return;
2785
2786 bsd_untimeout(bridge_timer, sc);
2787 bstp_stop(&sc->sc_stp);
2788
2789 bridge_rtflush(sc, IFBF_FLUSHDYN);
2790
2791 (void) ifnet_set_flags(ifp, 0, IFF_RUNNING);
2792}
2793
2794/*
2795 * bridge_enqueue:
2796 *
2797 * Enqueue a packet on a bridge member interface.
2798 *
2799 */
2800static void
2801bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
2802{
2803 int len, error = 0;
2804 short mflags;
2805 struct mbuf *m0;
2806
2807 /* We may be sending a fragment so traverse the mbuf */
2808 for (; m; m = m0) {
2809 m0 = m->m_nextpkt;
2810 m->m_nextpkt = NULL;
2811
2812 len = m->m_pkthdr.len;
2813 mflags = m->m_flags;
2814 m->m_flags |= M_PROTO1; //set to avoid loops
2815
2816#if HAS_IF_CAP
2817 /*
2818 * If underlying interface can not do VLAN tag insertion itself
2819 * then attach a packet tag that holds it.
2820 */
2821 if ((m->m_flags & M_VLANTAG) &&
2822 (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
2823 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
2824 if (m == NULL) {
2825 printf("%s%d: unable to prepend VLAN header\n",
2826 ifnet_name(dst_ifp), ifnet_unit(dst_ifp));
2827 (void) ifnet_stat_increment_out(dst_ifp, 0, 0, 1);
2828 continue;
2829 }
2830 m->m_flags &= ~M_VLANTAG;
2831 }
2832#endif /* HAS_IF_CAP */
2833
2834 error = ifnet_output_raw(dst_ifp, 0, m);
2835 if (error == 0) {
2836 (void) ifnet_stat_increment_out(sc->sc_ifp, 1, len, 0);
2837 } else {
2838 (void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
2839 }
2840 }
2841
2842 return;
2843}
2844
2845#if HAS_BRIDGE_DUMMYNET
2846/*
2847 * bridge_dummynet:
2848 *
2849 * Receive a queued packet from dummynet and pass it on to the output
2850 * interface.
2851 *
2852 * The mbuf has the Ethernet header already attached.
2853 */
2854static void
2855bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
2856{
2857 struct bridge_softc *sc;
2858
2859 sc = ifp->if_bridge;
2860
2861 /*
2862	 * The packet didn't originate from a member interface. This should only
2863 * ever happen if a member interface is removed while packets are
2864 * queued for it.
2865 */
2866 if (sc == NULL) {
2867 m_freem(m);
2868 return;
2869 }
2870
2871 if (PFIL_HOOKED(&inet_pfil_hook)
2872#ifdef INET6
2873 || PFIL_HOOKED(&inet6_pfil_hook)
2874#endif
2875 ) {
2876 if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
2877 return;
2878 if (m == NULL)
2879 return;
2880 }
2881
2882 bridge_enqueue(sc, ifp, m);
2883}
2884#endif /* HAS_BRIDGE_DUMMYNET */
2885
2886#if BRIDGE_MEMBER_OUT_FILTER
2887/*
2888 * bridge_output:
2889 *
2890 * Send output from a bridge member interface. This
2891 * performs the bridging function for locally originated
2892 * packets.
2893 *
2894 * The mbuf has the Ethernet header already attached. We must
2895 * enqueue or free the mbuf before returning.
2896 */
2897static int
2898bridge_output(struct ifnet *ifp, struct mbuf *m, __unused struct sockaddr *sa,
2899 __unused struct rtentry *rt)
2900{
2901 struct ether_header *eh;
2902 struct ifnet *dst_if;
2903 struct bridge_softc *sc;
2904 uint16_t vlan;
2905
2906#if BRIDGE_DEBUG
2907 if (_if_brige_debug)
2908 printf("bridge_output ifp %p %s%d\n", ifp, ifnet_name(ifp), ifnet_unit(ifp));
2909#endif /* BRIDGE_DEBUG */
2910
2911 if (m->m_len < ETHER_HDR_LEN) {
2912 m = m_pullup(m, ETHER_HDR_LEN);
2913 if (m == NULL)
2914 return (0);
2915 }
2916
2917 eh = mtod(m, struct ether_header *);
2918 sc = ifp->if_bridge;
2919 vlan = VLANTAGOF(m);
2920
2921 BRIDGE_LOCK(sc);
2922
2923 /* APPLE MODIFICATION
2924 * If the packet is an 802.1X ethertype, then only send on the
2925 * original output interface.
2926 */
2927 if (eh->ether_type == htons(ETHERTYPE_PAE)) {
2928 dst_if = ifp;
2929 goto sendunicast;
2930 }
2931
2932 /*
2933 * If bridge is down, but the original output interface is up,
2934 * go ahead and send out that interface. Otherwise, the packet
2935 * is dropped below.
2936 */
2937 if ((sc->sc_ifp->if_flags & IFF_RUNNING) == 0) {
2938 dst_if = ifp;
2939 goto sendunicast;
2940 }
2941
2942 /*
2943 * If the packet is a multicast, or we don't know a better way to
2944 * get there, send to all interfaces.
2945 */
2946 if (ETHER_IS_MULTICAST(eh->ether_dhost))
2947 dst_if = NULL;
2948 else
2949 dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
2950 if (dst_if == NULL) {
2951 struct bridge_iflist *bif;
2952 struct mbuf *mc;
2953 int error = 0, used = 0;
2954
2955 bridge_span(sc, m);
2956
2957 BRIDGE_LOCK2REF(sc, error);
2958 if (error) {
2959 m_freem(m);
2960 return (0);
2961 }
2962
2963 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
2964 dst_if = bif->bif_ifp;
2965
2966 if (dst_if->if_type == IFT_GIF)
2967 continue;
2968 if ((dst_if->if_flags & IFF_RUNNING) == 0)
2969 continue;
2970
2971 /*
2972 * If this is not the original output interface,
2973 * and the interface is participating in spanning
2974 * tree, make sure the port is in a state that
2975 * allows forwarding.
2976 */
2977 if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
2978 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2979 continue;
2980
2981			if (TAILQ_NEXT(bif, bif_next) == NULL) {
2982 used = 1;
2983 mc = m;
2984 } else {
2985 mc = m_copypacket(m, M_DONTWAIT);
2986 if (mc == NULL) {
2987 (void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
2988 continue;
2989 }
2990 }
2991
2992 bridge_enqueue(sc, dst_if, mc);
2993 }
2994 if (used == 0)
2995 m_freem(m);
2996 BRIDGE_UNREF(sc);
2997 return (0);
2998 }
2999
3000sendunicast:
3001 /*
3002 * XXX Spanning tree consideration here?
3003 */
3004
3005 bridge_span(sc, m);
3006 if ((dst_if->if_flags & IFF_RUNNING) == 0) {
3007 m_freem(m);
3008 BRIDGE_UNLOCK(sc);
3009 return (0);
3010 }
3011
3012 BRIDGE_UNLOCK(sc);
3013 bridge_enqueue(sc, dst_if, m);
3014 return (0);
3015}
3016#endif /* BRIDGE_MEMBER_OUT_FILTER */
3017
3018#if APPLE_BRIDGE_HWCKSUM_SUPPORT
3019static struct mbuf* bridge_fix_txcsum( struct mbuf *m )
3020{
3021 // basic tests indicate that the vast majority of packets being processed
3022 // here have an Ethernet header mbuf pre-pended to them (the first case below)
3023 // the second highest are those where the Ethernet and IP/TCP/UDP headers are
3024 // all in one mbuf (second case below)
3025 // the third case has, in fact, never hit for me -- although if I comment out
3026 // the first two cases, that code works for them, so I consider it a
3027 // decent general solution
3028
3029 int amt = ETHER_HDR_LEN;
3030 int hlen = M_CSUM_DATA_IPv4_IPHL( m->m_pkthdr.csum_data );
3031 int off = M_CSUM_DATA_IPv4_OFFSET( m->m_pkthdr.csum_data );
3032
3033 /*
3034 * NOTE we should never get vlan-attached packets here;
3035 * support for those COULD be added, but we don't use them
3036 * and it really kinda slows things down to worry about them
3037 */
3038
3039#ifdef DIAGNOSTIC
3040 if ( m_tag_find( m, PACKET_TAG_VLAN, NULL ) != NULL )
3041 {
3042 printf( "bridge: transmitting packet tagged with VLAN?\n" );
3043		KASSERT(0, ("bridge_fix_txcsum: unexpected VLAN tag"));
3044 m_freem( m );
3045 return NULL;
3046 }
3047#endif
3048
3049 if ( m->m_pkthdr.csum_flags & M_CSUM_IPv4 )
3050 {
3051 amt += hlen;
3052 }
3053 if ( m->m_pkthdr.csum_flags & M_CSUM_TCPv4 )
3054 {
3055 amt += off + sizeof( uint16_t );
3056 }
3057
3058 if ( m->m_pkthdr.csum_flags & M_CSUM_UDPv4 )
3059 {
3060 amt += off + sizeof( uint16_t );
3061 }
3062
3063 if ( m->m_len == ETHER_HDR_LEN )
3064 {
3065 // this is the case where there's an Ethernet header in an mbuf
3066
3067 // the first mbuf is the Ethernet header -- just strip it off and do the checksum
3068 struct mbuf *m_ip = m->m_next;
3069
3070 // set up m_ip so the cksum operations work
3071 /* APPLE MODIFICATION 22 Apr 2008 <mvega@apple.com>
3072 * <rdar://5817385> Clear the m_tag list before setting
3073 * M_PKTHDR.
3074 *
3075 * If this m_buf chain was extended via M_PREPEND(), then
3076 * m_ip->m_pkthdr is identical to m->m_pkthdr (see
3077 * M_MOVE_PKTHDR()). The only thing preventing access to this
3078 * invalid packet header data is the fact that the M_PKTHDR
3079 * flag is clear, i.e., m_ip->m_flag & M_PKTHDR == 0, but we're
3080 * about to set the M_PKTHDR flag, so to be safe we initialize,
3081 * more accurately, we clear, m_ip->m_pkthdr.tags via
3082 * m_tag_init().
3083 *
3084 * Suppose that we do not do this; if m_pullup(), below, fails,
3085 * then m_ip will be freed along with m_ip->m_pkthdr.tags, but
3086 * we will also free m soon after, via m_freem(), and
3087 * consequently attempt to free m->m_pkthdr.tags in the
3088 * process. The problem is that m->m_pkthdr.tags will have
3089 * already been freed by virtue of being equal to
3090 * m_ip->m_pkthdr.tags. Attempts to dereference
3091 * m->m_pkthdr.tags in m_tag_delete_chain() will result in a
3092 * panic.
3093 */
3094 m_tag_init(m_ip);
3095 /* END MODIFICATION */
3096 m_ip->m_flags |= M_PKTHDR;
3097 m_ip->m_pkthdr.csum_flags = m->m_pkthdr.csum_flags;
3098 m_ip->m_pkthdr.csum_data = m->m_pkthdr.csum_data;
3099 m_ip->m_pkthdr.len = m->m_pkthdr.len - ETHER_HDR_LEN;
3100
3101 // set up the header mbuf so we can prepend it back on again later
3102 m->m_pkthdr.csum_flags = 0;
3103 m->m_pkthdr.csum_data = 0;
3104 m->m_pkthdr.len = ETHER_HDR_LEN;
3105 m->m_next = NULL;
3106
3107
3108 // now do the checksums we need -- first IP
3109 if ( m_ip->m_pkthdr.csum_flags & M_CSUM_IPv4 )
3110 {
3111 // make sure the IP header (or at least the part with the cksum) is there
3112 m_ip = m_pullup( m_ip, sizeof( struct ip ) );
3113 if ( m_ip == NULL )
3114 {
3115				printf( "bridge: failed to flatten header\n");
3116 m_freem( m );
3117 return NULL;
3118 }
3119
3120 // now do the checksum
3121 {
3122 struct ip *ip = mtod( m_ip, struct ip* );
3123 ip->ip_sum = in_cksum( m_ip, hlen );
3124
3125#ifdef VERY_VERY_VERY_DIAGNOSTIC
3126 printf( "bridge: performed IPv4 checksum\n" );
3127#endif
3128 }
3129 }
3130
3131 // now do a TCP or UDP delayed checksum
3132 if ( m_ip->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4) )
3133 {
3134 in_delayed_cksum( m_ip );
3135
3136#ifdef VERY_VERY_VERY_DIAGNOSTIC
3137 printf( "bridge: performed TCPv4/UDPv4 checksum\n" );
3138#endif
3139 }
3140
3141 // now attach the ethernet header back onto the IP packet
3142 m->m_next = m_ip;
3143 m->m_pkthdr.len += m_length( m_ip );
3144
3145 // clear the M_PKTHDR flags on the ip packet (again, we re-attach later)
3146 m_ip->m_flags &= ~M_PKTHDR;
3147
3148 // and clear any csum flags
3149 m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_IPv4);
3150 }
3151 else if ( m->m_len >= amt )
3152 {
3153 // everything fits in the first mbuf, so futz with m->m_data, m->m_len and m->m_pkthdr.len to
3154 // make it work
3155 m->m_len -= ETHER_HDR_LEN;
3156 m->m_data += ETHER_HDR_LEN;
3157 m->m_pkthdr.len -= ETHER_HDR_LEN;
3158
3159 // now do the checksums we need -- first IP
3160 if ( m->m_pkthdr.csum_flags & M_CSUM_IPv4 )
3161 {
3162 struct ip *ip = mtod( m, struct ip* );
3163 ip->ip_sum = in_cksum( m, hlen );
3164
3165#ifdef VERY_VERY_VERY_DIAGNOSTIC
3166 printf( "bridge: performed IPv4 checksum\n" );
3167#endif
3168 }
3169
3170 // now do a TCP or UDP delayed checksum
3171 if ( m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4) )
3172 {
3173 in_delayed_cksum( m );
3174
3175#ifdef VERY_VERY_VERY_DIAGNOSTIC
3176 printf( "bridge: performed TCPv4/UDPv4 checksum\n" );
3177#endif
3178 }
3179
3180 // now stick the ethernet header back on
3181 m->m_len += ETHER_HDR_LEN;
3182 m->m_data -= ETHER_HDR_LEN;
3183 m->m_pkthdr.len += ETHER_HDR_LEN;
3184
3185 // and clear any csum flags
3186 m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_IPv4);
3187 }
3188 else
3189 {
3190 struct mbuf *m_ip;
3191
3192 // general case -- need to simply split it off and deal
3193
3194 // first, calculate how much needs to be made writable (we may have a read-only mbuf here)
3195 hlen = M_CSUM_DATA_IPv4_IPHL( m->m_pkthdr.csum_data );
3196#if PARANOID
3197 off = M_CSUM_DATA_IPv4_OFFSET( m->m_pkthdr.csum_data );
3198
3199 if ( m->m_pkthdr.csum_flags & M_CSUM_IPv4 )
3200 {
3201 amt += hlen;
3202 }
3203
3204 if ( m->m_pkthdr.csum_flags & M_CSUM_TCPv4 )
3205 {
3206			amt += sizeof( struct tcphdr );
3207 amt += off;
3208 }
3209
3210 if ( m->m_pkthdr.csum_flags & M_CSUM_UDPv4 )
3211 {
3212			amt += sizeof( struct udphdr );
3213 amt += off;
3214 }
3215#endif
3216
3217 // now split the ethernet header off of the IP packet (we'll re-attach later)
3218 m_ip = m_split( m, ETHER_HDR_LEN, M_NOWAIT );
3219 if ( m_ip == NULL )
3220 {
3221 printf( "bridge_fix_txcsum: could not split ether header\n" );
3222
3223 m_freem( m );
3224 return NULL;
3225 }
3226
3227#if PARANOID
3228 // make sure that the IP packet is writable for the portion we need
3229 if ( m_makewritable( &m_ip, 0, amt, M_DONTWAIT ) != 0 )
3230 {
3231 printf( "bridge_fix_txcsum: could not make %d bytes writable\n", amt );
3232
3233 m_freem( m );
3234 m_freem( m_ip );
3235 return NULL;
3236 }
3237#endif
3238
3239 m_ip->m_pkthdr.csum_flags = m->m_pkthdr.csum_flags;
3240 m_ip->m_pkthdr.csum_data = m->m_pkthdr.csum_data;
3241
3242 m->m_pkthdr.csum_flags = 0;
3243 m->m_pkthdr.csum_data = 0;
3244
3245 // now do the checksums we need -- first IP
3246 if ( m_ip->m_pkthdr.csum_flags & M_CSUM_IPv4 )
3247 {
3248 // make sure the IP header (or at least the part with the cksum) is there
3249 m_ip = m_pullup( m_ip, sizeof( struct ip ) );
3250 if ( m_ip == NULL )
3251 {
3252				printf( "bridge: failed to flatten header\n");
3253 m_freem( m );
3254 return NULL;
3255 }
3256
3257 // now do the checksum
3258 {
3259 struct ip *ip = mtod( m_ip, struct ip* );
3260 ip->ip_sum = in_cksum( m_ip, hlen );
3261
3262#ifdef VERY_VERY_VERY_DIAGNOSTIC
3263 printf( "bridge: performed IPv4 checksum\n" );
3264#endif
3265 }
3266 }
3267
3268 // now do a TCP or UDP delayed checksum
3269 if ( m_ip->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4) )
3270 {
3271 in_delayed_cksum( m_ip );
3272
3273#ifdef VERY_VERY_VERY_DIAGNOSTIC
3274 printf( "bridge: performed TCPv4/UDPv4 checksum\n" );
3275#endif
3276 }
3277
3278 // now attach the ethernet header back onto the IP packet
3279 m->m_next = m_ip;
3280 m->m_pkthdr.len += m_length( m_ip );
3281
3282 // clear the M_PKTHDR flags on the ip packet (again, we re-attach later)
3283 m_ip->m_flags &= ~M_PKTHDR;
3284
3285 // and clear any csum flags
3286 m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_IPv4);
3287 }
3288
3289 return m;
3290}
3291#endif
3292
3293/*
3294 * bridge_start:
3295 *
3296 * Start output on a bridge.
3297 *
3298 */
3299static errno_t
3300bridge_start(struct ifnet *ifp, struct mbuf *m)
3301{
3302 struct bridge_softc *sc = ifnet_softc(ifp);
3303 struct ether_header *eh;
3304 struct ifnet *dst_if;
3305
3306 lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_NOTOWNED);
3307
3308 eh = mtod(m, struct ether_header *);
3309
3310 BRIDGE_LOCK(sc);
3311
3312 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0 &&
3313 (dst_if = bridge_rtlookup(sc, eh->ether_dhost, 0)) != NULL) {
3314
3315 {
3316#if APPLE_BRIDGE_HWCKSUM_SUPPORT
3317 /*
3318 * APPLE MODIFICATION - if the packet needs a checksum (i.e.,
3319 * checksum has been deferred for HW support) AND the destination
3320 * interface doesn't support HW checksums, then we
3321 * need to fix-up the checksum here
3322 */
3323 if (
3324 ( (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_IPv4) ) != 0 ) &&
3325 ( (dst_if->if_csum_flags_tx & m->m_pkthdr.csum_flags ) != m->m_pkthdr.csum_flags )
3326 )
3327 {
3328 m = bridge_fix_txcsum( m );
3329 if ( m == NULL )
3330 {
3331 goto done;
3332 }
3333 }
3334
3335#else
3336 if (eh->ether_type == htons(ETHERTYPE_IP))
3337 mbuf_outbound_finalize(m, PF_INET, sizeof(struct ether_header));
3338 else
3339 m->m_pkthdr.csum_flags = 0;
3340#endif
3341 #if NBPFILTER > 0
3342 if (sc->sc_bpf_output)
3343 bridge_bpf_output(ifp, m);
3344 #endif
3345 BRIDGE_UNLOCK(sc);
3346 bridge_enqueue(sc, dst_if, m);
3347 }
3348 } else
3349 {
3350#if APPLE_BRIDGE_HWCKSUM_SUPPORT
3351
3352 /*
3353 * APPLE MODIFICATION - if the MULTICAST packet needs a checksum (i.e.,
3354 * checksum has been deferred for HW support) AND at least one destination
3355 * interface doesn't support HW checksums, then we go ahead and fix it up
3356 * here, since it doesn't make sense to do it more than once
3357 */
3358
3359 if (
3360 (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_IPv4)) &&
3361 /*
3362 * XXX FIX ME: keep track of whether or not we have any interfaces that
3363 * do not support checksums (for now, assume we do)
3364 */
3365 ( 1 )
3366 )
3367 {
3368 m = bridge_fix_txcsum( m );
3369 if ( m == NULL )
3370 {
3371 goto done;
3372 }
3373 }
3374#else
3375 if (eh->ether_type == htons(ETHERTYPE_IP))
3376 mbuf_outbound_finalize(m, PF_INET, sizeof(struct ether_header));
3377 else
3378 m->m_pkthdr.csum_flags = 0;
3379#endif
3380
3381 #if NBPFILTER > 0
3382 if (sc->sc_bpf_output)
3383 bridge_bpf_output(ifp, m);
3384 #endif
3385 bridge_broadcast(sc, ifp, m, 0);
3386 }
3387#if APPLE_BRIDGE_HWCKSUM_SUPPORT
3388done:
3389#endif
3390
3391 return 0;
3392}
3393
3394/*
3395 * bridge_forward:
3396 *
3397 * The forwarding function of the bridge.
3398 *
3399 * NOTE: Releases the lock on return.
3400 */
3401static void
3402bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
3403 struct mbuf *m)
3404{
3405 struct bridge_iflist *dbif;
3406 struct ifnet *src_if, *dst_if, *ifp;
3407 struct ether_header *eh;
3408 uint16_t vlan;
3409 uint8_t *dst;
3410 int error;
3411
3412 lck_mtx_assert(sc->sc_mtx, LCK_MTX_ASSERT_OWNED);
3413
3414#if BRIDGE_DEBUG
3415 if (_if_brige_debug)
3416 printf("bridge_forward %s%d m%p\n", ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp), m);
3417#endif /* BRIDGE_DEBUG */
3418
3419 src_if = m->m_pkthdr.rcvif;
3420 ifp = sc->sc_ifp;
3421
3422 (void) ifnet_stat_increment_in(ifp, 1, m->m_pkthdr.len, 0);
3423 vlan = VLANTAGOF(m);
3424
3425
3426 if ((sbif->bif_flags & IFBIF_STP) &&
3427 sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
3428 goto drop;
3429
3430 eh = mtod(m, struct ether_header *);
3431 dst = eh->ether_dhost;
3432
3433 /* If the interface is learning, record the address. */
3434 if (sbif->bif_flags & IFBIF_LEARNING) {
3435 error = bridge_rtupdate(sc, eh->ether_shost, vlan,
3436 sbif, 0, IFBAF_DYNAMIC);
3437 /*
3438 * If the interface has addresses limits then deny any source
3439 * that is not in the cache.
3440 */
3441 if (error && sbif->bif_addrmax)
3442 goto drop;
3443 }
3444
3445 if ((sbif->bif_flags & IFBIF_STP) != 0 &&
3446 sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
3447 goto drop;
3448
3449 /*
3450 * At this point, the port either doesn't participate
3451 * in spanning tree or it is in the forwarding state.
3452 */
3453
3454 /*
3455 * If the packet is unicast, destined for someone on
3456 * "this" side of the bridge, drop it.
3457 */
3458 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
3459 dst_if = bridge_rtlookup(sc, dst, vlan);
3460 if (src_if == dst_if)
3461 goto drop;
3462 } else {
3463 /*
3464		 * Check if it's a reserved multicast address; any address
3465 * listed in 802.1D section 7.12.6 may not be forwarded by the
3466 * bridge.
3467 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
3468 */
3469 if (dst[0] == 0x01 && dst[1] == 0x80 &&
3470 dst[2] == 0xc2 && dst[3] == 0x00 &&
3471 dst[4] == 0x00 && dst[5] <= 0x0f)
3472 goto drop;
3473
3474
3475 /* ...forward it to all interfaces. */
3476 atomic_add_64(&ifp->if_imcasts, 1);
3477 dst_if = NULL;
3478 }
3479
3480 /*
3481 * If we have a destination interface which is a member of our bridge,
3482 * OR this is a unicast packet, push it through the bpf(4) machinery.
3483 * For broadcast or multicast packets, don't bother because it will
3484 * be reinjected into ether_input. We do this before we pass the packets
3485 * through the pfil(9) framework, as it is possible that pfil(9) will
3486 * drop the packet, or possibly modify it, making it difficult to debug
3487 * firewall issues on the bridge.
3488 */
3489#if NBPFILTER > 0
3490 if (eh->ether_type == htons(ETHERTYPE_RSN_PREAUTH) ||
3491 dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0) {
3492 m->m_pkthdr.rcvif = ifp;
3493 if (sc->sc_bpf_input)
3494 bridge_bpf_input(ifp, m);
3495 }
3496#endif /* NBPFILTER */
3497
3498#if defined(PFIL_HOOKS)
3499 /* run the packet filter */
3500 if (PFIL_HOOKED(&inet_pfil_hook)
3501#ifdef INET6
3502 || PFIL_HOOKED(&inet6_pfil_hook)
3503#endif /* INET6 */
3504 ) {
3505 BRIDGE_UNLOCK(sc);
3506 if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
3507 return;
3508 if (m == NULL)
3509 return;
3510 BRIDGE_LOCK(sc);
3511 }
3512#endif /* PFIL_HOOKS */
3513
3514 if (dst_if == NULL) {
3515 /*
3516 * Clear any in-bound checksum flags for this packet.
3517 */
3518 mbuf_inbound_modified(m);
3519
3520 bridge_broadcast(sc, src_if, m, 1);
3521
3522 return;
3523 }
3524
3525 /*
3526 * At this point, we're dealing with a unicast frame
3527 * going to a different interface.
3528 */
3529 if ((dst_if->if_flags & IFF_RUNNING) == 0)
3530 goto drop;
3531
3532 dbif = bridge_lookup_member_if(sc, dst_if);
3533 if (dbif == NULL)
3534 /* Not a member of the bridge (anymore?) */
3535 goto drop;
3536
3537 /* Private segments can not talk to each other */
3538 if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
3539 goto drop;
3540
3541 if ((dbif->bif_flags & IFBIF_STP) &&
3542 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
3543 goto drop;
3544
3545#if HAS_DHCPRA_MASK
3546 /* APPLE MODIFICATION <rdar://6985737> */
3547 if ((dst_if->if_extflags & IFEXTF_DHCPRA_MASK) != 0) {
3548 m = ip_xdhcpra_output(dst_if, m);
3549 if (!m) {
3550 ++sc->sc_sc.sc_ifp.if_xdhcpra;
3551 return;
3552 }
3553 }
3554#endif /* HAS_DHCPRA_MASK */
3555
3556 BRIDGE_UNLOCK(sc);
3557
3558#if defined(PFIL_HOOKS)
3559 if (PFIL_HOOKED(&inet_pfil_hook)
3560#ifdef INET6
3561 || PFIL_HOOKED(&inet6_pfil_hook)
3562#endif
3563 ) {
3564 if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
3565 return;
3566 if (m == NULL)
3567 return;
3568 }
3569#endif /* PFIL_HOOKS */
3570
3571 /*
3572 * Clear any in-bound checksum flags for this packet.
3573 */
3574 mbuf_inbound_modified(m);
3575
3576 bridge_enqueue(sc, dst_if, m);
3577 return;
3578
3579drop:
3580 BRIDGE_UNLOCK(sc);
3581 m_freem(m);
3582}
3583
3584#if BRIDGE_DEBUG
3585
3586char *ether_ntop(char *, size_t, const u_char *);
3587
3588__private_extern__ char *
3589ether_ntop(char *buf, size_t len, const u_char *ap)
3590{
3591 snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x",
3592 ap[0], ap[1], ap[2], ap[3], ap[4], ap[5]);
3593
3594 return buf;
3595}
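
/*
 * Example usage (debug builds only), assuming `eh' points at the frame's
 * struct ether_header; the buffer must hold at least 18 bytes,
 * "xx:xx:xx:xx:xx:xx" plus the terminating NUL:
 *
 *	char addrbuf[3 * ETHER_ADDR_LEN];
 *
 *	printf("dst %s\n",
 *	    ether_ntop(addrbuf, sizeof (addrbuf), eh->ether_dhost));
 */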
3596
3597#endif /* BRIDGE_DEBUG */
3598
3599/*
3600 * bridge_input:
3601 *
3602 * Filter input from a member interface. Queue the packet for
3603 * bridging if it is not for us.
3604 */
3605__private_extern__ errno_t
3606bridge_input(struct ifnet *ifp, struct mbuf *m, __unused void *frame_header)
3607{
3608 struct bridge_softc *sc = ifp->if_bridge;
3609 struct bridge_iflist *bif, *bif2;
3610 struct ifnet *bifp;
3611 struct ether_header *eh;
3612 struct mbuf *mc, *mc2;
3613 uint16_t vlan;
3614 int error;
3615
3616#if BRIDGE_DEBUG
3617 if (_if_brige_debug)
3618 printf("bridge_input: %s%d from %s%d m %p data %p\n",
3619 ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp),
3620 ifnet_name(ifp), ifnet_unit(ifp),
3621 m, mbuf_data(m));
3622#endif /* BRIDGE_DEBUG */
3623
3624 if ((sc->sc_ifp->if_flags & IFF_RUNNING) == 0) {
3625#if BRIDGE_DEBUG
3626 if (_if_brige_debug)
3627 printf( "bridge_input: %s%d not running passing along\n",
3628 ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp));
3629#endif /* BRIDGE_DEBUG */
3630 return 0;
3631 }
3632
3633 bifp = sc->sc_ifp;
3634 vlan = VLANTAGOF(m);
3635
3636#ifdef IFF_MONITOR
3637 /*
3638 * Implement support for bridge monitoring. If this flag has been
3639 * set on this interface, discard the packet once we push it through
3640 * the bpf(4) machinery, but before we do, increment the byte and
3641 * packet counters associated with this interface.
3642 */
3643 if ((bifp->if_flags & IFF_MONITOR) != 0) {
3644 m->m_pkthdr.rcvif = bifp;
3645 BRIDGE_BPF_MTAP_INPUT(sc, m);
3646 (void) ifnet_stat_increment_in(bifp, 1, m->m_pkthdr.len, 0);
3647 m_freem(m);
3648 return EJUSTRETURN;
3649 }
3650#endif /* IFF_MONITOR */
3651
3652 /*
3653	 * Need to clear the promiscuous flag, otherwise the packet will be
3654 * dropped by DLIL after processing filters
3655 */
3656 if ((mbuf_flags(m) & MBUF_PROMISC))
3657 mbuf_setflags_mask(m, 0, MBUF_PROMISC);
3658
3659 BRIDGE_LOCK(sc);
3660 bif = bridge_lookup_member_if(sc, ifp);
3661 if (bif == NULL) {
3662 BRIDGE_UNLOCK(sc);
3663#if BRIDGE_DEBUG
3664 if (_if_brige_debug)
3665 printf( "bridge_input: %s%d bridge_lookup_member_if failed\n",
3666 ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp));
3667#endif /* BRIDGE_DEBUG */
3668 return 0;
3669 }
3670
3671 eh = mtod(m, struct ether_header *);
3672
3673 bridge_span(sc, m);
3674
3675 if (m->m_flags & (M_BCAST|M_MCAST)) {
3676
3677#if BRIDGE_DEBUG
3678 if (_if_brige_debug)
3679 if ((m->m_flags & M_MCAST))
3680			printf("multicast: %02x:%02x:%02x:%02x:%02x:%02x\n",
3681 eh->ether_dhost[0], eh->ether_dhost[1], eh->ether_dhost[2],
3682 eh->ether_dhost[3], eh->ether_dhost[4], eh->ether_dhost[5]);
3683
3684#endif /* BRIDGE_DEBUG */
3685
3686 /* Tap off 802.1D packets; they do not get forwarded. */
3687 if (memcmp(eh->ether_dhost, bstp_etheraddr,
3688 ETHER_ADDR_LEN) == 0) {
3689 m = bstp_input(&bif->bif_stp, ifp, m);
3690 if (m == NULL) {
3691 BRIDGE_UNLOCK(sc);
3692 return EJUSTRETURN;
3693 }
3694 }
3695
3696 if ((bif->bif_flags & IFBIF_STP) &&
3697 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
3698 BRIDGE_UNLOCK(sc);
3699 return 0;
3700 }
3701
3702 /*
3703 * Make a deep copy of the packet and enqueue the copy
3704 * for bridge processing; return the original packet for
3705 * local processing.
3706 */
3707 mc = m_dup(m, M_DONTWAIT);
3708 if (mc == NULL) {
3709 BRIDGE_UNLOCK(sc);
3710 return 0;
3711 }
3712
3713 /*
3714 * Perform the bridge forwarding function with the copy.
3715 *
3716 * Note that bridge_forward calls BRIDGE_UNLOCK
3717 */
3718 bridge_forward(sc, bif, mc);
3719
3720 /*
3721 * Reinject the mbuf as arriving on the bridge so we have a
3722 * chance at claiming multicast packets. We can not loop back
3723 * here from ether_input as a bridge is never a member of a
3724 * bridge.
3725 */
3726 KASSERT(bifp->if_bridge == NULL,
3727 ("loop created in bridge_input"));
3728 mc2 = m_dup(m, M_DONTWAIT);
3729 if (mc2 != NULL) {
3730 /* Keep the layer3 header aligned */
3731 int i = min(mc2->m_pkthdr.len, max_protohdr);
3732 mc2 = m_copyup(mc2, i, ETHER_ALIGN);
3733 }
3734 if (mc2 != NULL) {
3735 // mark packet as arriving on the bridge
3736 mc2->m_pkthdr.rcvif = bifp;
3737 mc2->m_pkthdr.header = mbuf_data(mc2);
3738
3739#if NBPFILTER > 0
3740 if (sc->sc_bpf_input)
3741 bridge_bpf_input(bifp, mc2);
3742#endif /* NBPFILTER */
3743 (void) mbuf_setdata(mc2, (char *)mbuf_data(mc2) + ETHER_HDR_LEN, mbuf_len(mc2) - ETHER_HDR_LEN);
3744 (void) mbuf_pkthdr_adjustlen(mc2, - ETHER_HDR_LEN);
3745
3746 (void) ifnet_stat_increment_in(bifp, 1, mbuf_pkthdr_len(mc2), 0);
3747
3748#if BRIDGE_DEBUG
3749 if (_if_brige_debug)
3750 printf( "bridge_input: %s%d mcast for us\n",
3751 ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp));
3752#endif /* BRIDGE_DEBUG */
3753
3754 dlil_input_packet_list(bifp, mc2);
3755 }
3756
3757 /* Return the original packet for local processing. */
3758 return 0;
3759 }
3760
3761 if ((bif->bif_flags & IFBIF_STP) &&
3762 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
3763 BRIDGE_UNLOCK(sc);
3764 return 0;
3765 }
3766
3767#ifdef DEV_CARP
3768# define OR_CARP_CHECK_WE_ARE_DST(iface) \
3769 || ((iface)->if_carp \
3770 && carp_forus((iface)->if_carp, eh->ether_dhost))
3771# define OR_CARP_CHECK_WE_ARE_SRC(iface) \
3772 || ((iface)->if_carp \
3773 && carp_forus((iface)->if_carp, eh->ether_shost))
3774#else
3775# define OR_CARP_CHECK_WE_ARE_DST(iface)
3776# define OR_CARP_CHECK_WE_ARE_SRC(iface)
3777#endif
3778
3779#ifdef INET6
3780# define OR_PFIL_HOOKED_INET6 \
3781 || PFIL_HOOKED(&inet6_pfil_hook)
3782#else
3783# define OR_PFIL_HOOKED_INET6
3784#endif
3785
3786#if defined(PFIL_HOOKS)
3787#define PFIL_PHYS(sc, ifp, m) do { \
3788 if (pfil_local_phys && \
3789 (PFIL_HOOKED(&inet_pfil_hook) \
3790 OR_PFIL_HOOKED_INET6)) { \
3791 if (bridge_pfil(&m, NULL, ifp, \
3792 PFIL_IN) != 0 || m == NULL) { \
3793 BRIDGE_UNLOCK(sc); \
3794				return EJUSTRETURN;	\
3795 } \
3796 } \
3797 } while (0)
3798#else /* PFIL_HOOKS */
3799#define PFIL_PHYS(sc, ifp, m)
3800#endif /* PFIL_HOOKS */
3801
3802#define GRAB_OUR_PACKETS(iface) \
3803 if ((iface)->if_type == IFT_GIF) \
3804 continue; \
3805 /* It is destined for us. */ \
3806 if (memcmp(ifnet_lladdr((iface)), eh->ether_dhost, ETHER_ADDR_LEN) == 0 \
3807 OR_CARP_CHECK_WE_ARE_DST((iface)) \
3808 ) { \
3809 if ((iface)->if_type == IFT_BRIDGE) { \
3810 BRIDGE_BPF_MTAP_INPUT(sc, m); \
3811 /* Filter on the physical interface. */ \
3812 PFIL_PHYS(sc, iface, m); \
3813 } \
3814 if (bif->bif_flags & IFBIF_LEARNING) { \
3815 error = bridge_rtupdate(sc, eh->ether_shost, \
3816 vlan, bif, 0, IFBAF_DYNAMIC); \
3817 if (error && bif->bif_addrmax) { \
3818 BRIDGE_UNLOCK(sc); \
3819 return EJUSTRETURN; \
3820 } \
3821 } \
3822 m->m_pkthdr.rcvif = iface; \
3823 BRIDGE_UNLOCK(sc); \
3824 return 0; \
3825 } \
3826 \
3827 /* We just received a packet that we sent out. */ \
3828 if (memcmp(ifnet_lladdr((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \
3829 OR_CARP_CHECK_WE_ARE_SRC((iface)) \
3830 ) { \
3831 BRIDGE_UNLOCK(sc); \
3832 return EJUSTRETURN; \
3833 }
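
/*
 * GRAB_OUR_PACKETS expands inside the member loop below and relies on `sc',
 * `bif', `eh', `vlan', `error' and `m' from bridge_input's scope: it either
 * claims the packet for the stack (returning 0 after re-pointing rcvif at
 * the member, or EJUSTRETURN when the address limit is hit or we see our
 * own transmission) or falls through so the loop can try the next member.
 */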
3834
3835 /*
3836 * Unicast.
3837 */
3838 /*
3839 * If the packet is for us, set the packets source as the
3840 * bridge, and return the packet back to ether_input for
3841 * local processing.
3842 */
3843 if (memcmp(eh->ether_dhost, ifnet_lladdr(bifp),
3844 ETHER_ADDR_LEN) == 0
3845 OR_CARP_CHECK_WE_ARE_DST(bifp)) {
3846
3847 /* Mark the packet as arriving on the bridge interface */
3848 (void) mbuf_pkthdr_setrcvif(m, bifp);
3849 mbuf_pkthdr_setheader(m, frame_header);
3850
3851 /*
3852 * If the interface is learning, and the source
3853 * address is valid and not multicast, record
3854 * the address.
3855 */
3856 if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
3857 ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
3858 (eh->ether_shost[0] | eh->ether_shost[1] |
3859 eh->ether_shost[2] | eh->ether_shost[3] |
3860 eh->ether_shost[4] | eh->ether_shost[5]) != 0) {
3861 (void) bridge_rtupdate(sc, eh->ether_shost,
3862 vlan, bif, 0, IFBAF_DYNAMIC);
3863 }
3864
3865 BRIDGE_BPF_MTAP_INPUT(sc, m);
3866
3867 (void) mbuf_setdata(m, (char *)mbuf_data(m) + ETHER_HDR_LEN, mbuf_len(m) - ETHER_HDR_LEN);
3868 (void) mbuf_pkthdr_adjustlen(m, - ETHER_HDR_LEN);
3869
3870 (void) ifnet_stat_increment_in(bifp, 1, mbuf_pkthdr_len(m), 0);
3871
3872 BRIDGE_UNLOCK(sc);
3873
3874#if BRIDGE_DEBUG
3875 if (_if_brige_debug)
3876 printf( "bridge_input: %s%d packet for bridge\n",
3877 ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp));
3878#endif /* BRIDGE_DEBUG */
3879
3880 dlil_input_packet_list(bifp, m);
3881
3882 return EJUSTRETURN;
3883 }
3884
3885 /*
3886 * if the destination of the packet is for the MAC address of
3887 * the member interface itself, then we don't need to forward
3888 * it -- just pass it back. Note that it'll likely just be
3889 * dropped by the stack, but if something else is bound to
3890 * the interface directly (for example, the wireless stats
3891 * protocol -- although that actually uses BPF right now),
3892 * then it will consume the packet
3893 *
3894 * ALSO, note that we do this check AFTER checking for the
3895 * bridge's own MAC address, because the bridge may be
3896 * using the SAME MAC address as one of its interfaces
3897 */
3898 if (memcmp(eh->ether_dhost, ifnet_lladdr(ifp),
3899 ETHER_ADDR_LEN) == 0) {
3900
3901#ifdef VERY_VERY_VERY_DIAGNOSTIC
3902 printf("bridge_input: not forwarding packet bound for member interface\n" );
3903#endif
3904 BRIDGE_UNLOCK(sc);
3905 return 0;
3906 }
3907
3908	/* Now check all the bridge members. */
3909 TAILQ_FOREACH(bif2, &sc->sc_iflist, bif_next) {
3910 GRAB_OUR_PACKETS(bif2->bif_ifp)
3911 }
3912
3913#undef OR_CARP_CHECK_WE_ARE_DST
3914#undef OR_CARP_CHECK_WE_ARE_SRC
3915#undef OR_PFIL_HOOKED_INET6
3916#undef GRAB_OUR_PACKETS
3917
3918 /*
3919 * Perform the bridge forwarding function.
3920 *
3921 * Note that bridge_forward calls BRIDGE_UNLOCK
3922 */
3923 bridge_forward(sc, bif, m);
3924
3925 return EJUSTRETURN;
3926}
3927
3928/*
3929 * bridge_broadcast:
3930 *
3931 * Send a frame to all interfaces that are members of
3932 * the bridge, except for the one on which the packet
3933 * arrived.
3934 *
3935 * NOTE: Releases the lock on return.
3936 */
3937static void
3938bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
3939 struct mbuf *m, int runfilt)
3940{
3941#ifndef PFIL_HOOKS
3942#pragma unused(runfilt)
3943#endif
3944 struct bridge_iflist *dbif, *sbif;
3945 struct mbuf *mc;
3946 struct ifnet *dst_if;
3947 int error = 0, used = 0;
3948
3949 sbif = bridge_lookup_member_if(sc, src_if);
3950
3951 BRIDGE_LOCK2REF(sc, error);
3952 if (error) {
3953 m_freem(m);
3954 return;
3955 }
3956
3957#ifdef PFIL_HOOKS
3958 /* Filter on the bridge interface before broadcasting */
3959 if (runfilt && (PFIL_HOOKED(&inet_pfil_hook)
3960#ifdef INET6
3961 || PFIL_HOOKED(&inet6_pfil_hook)
3962#endif /* INET6 */
3963 )) {
3964 if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
3965 goto out;
3966 if (m == NULL)
3967 goto out;
3968 }
3969#endif /* PFIL_HOOKS */
3970
3971 TAILQ_FOREACH(dbif, &sc->sc_iflist, bif_next) {
3972 dst_if = dbif->bif_ifp;
3973 if (dst_if == src_if)
3974 continue;
3975
3976 /* Private segments can not talk to each other */
3977 if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
3978 continue;
3979
3980 if ((dbif->bif_flags & IFBIF_STP) &&
3981 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
3982 continue;
3983
3984 if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
3985 (m->m_flags & (M_BCAST|M_MCAST)) == 0)
3986 continue;
3987
3988 if ((dst_if->if_flags & IFF_RUNNING) == 0)
3989 continue;
3990
3991 if (TAILQ_NEXT(dbif, bif_next) == NULL) {
3992 mc = m;
3993 used = 1;
3994 } else {
3995 mc = m_dup(m, M_DONTWAIT);
3996 if (mc == NULL) {
3997 (void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
3998 continue;
3999 }
4000 }
4001
4002#ifdef PFIL_HOOKS
4003 /*
4004 * Filter on the output interface. Pass a NULL bridge interface
4005 * pointer so we do not redundantly filter on the bridge for
4006 * each interface we broadcast on.
4007 */
4008 if (runfilt && (PFIL_HOOKED(&inet_pfil_hook)
4009#ifdef INET6
4010 || PFIL_HOOKED(&inet6_pfil_hook)
4011#endif
4012 )) {
4013 if (used == 0) {
4014 /* Keep the layer3 header aligned */
4015 int i = min(mc->m_pkthdr.len, max_protohdr);
4016 mc = m_copyup(mc, i, ETHER_ALIGN);
4017 if (mc == NULL) {
4018 (void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
4019 continue;
4020 }
4021 }
4022 if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
4023 continue;
4024 if (mc == NULL)
4025 continue;
4026 }
4027#endif /* PFIL_HOOKS */
4028
4029 bridge_enqueue(sc, dst_if, mc);
4030 }
4031 if (used == 0)
4032 m_freem(m);
4033
4034#ifdef PFIL_HOOKS
4035out:
4036#endif /* PFIL_HOOKS */
4037
4038 BRIDGE_UNREF(sc);
4039}
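/*
 * The loop above gives m_dup() copies to all but the last member and lets
 * the last one consume the original mbuf (tracked by 'used'), freeing the
 * original only if no member took it.  The fragment below is a minimal
 * userland sketch of that "copy for all but the last consumer" pattern;
 * the buffer type, buf_dup() and the deliver() callback are hypothetical
 * stand-ins, and it intentionally ignores the eligibility checks above.
 */
#if 0	/* Illustrative sketch only -- not kernel code. */
#include <stdlib.h>
#include <string.h>

struct buf {				/* hypothetical stand-in for an mbuf */
	char	data[64];
};

static struct buf *
buf_dup(const struct buf *b)
{
	struct buf *c = malloc(sizeof (*c));

	if (c != NULL)
		memcpy(c, b, sizeof (*c));
	return (c);
}

/*
 * Deliver 'b' to ndst destinations; only the last destination consumes
 * the original, mirroring the 'used' logic in bridge_broadcast().
 */
static void
deliver_all(struct buf *b, void (*deliver)(int, struct buf *), int ndst)
{
	struct buf *c;
	int i, used = 0;

	for (i = 0; i < ndst; i++) {
		if (i == ndst - 1) {
			c = b;			/* last one takes the original */
			used = 1;
		} else {
			c = buf_dup(b);		/* everyone else gets a copy */
			if (c == NULL)
				continue;	/* counted as an output error above */
		}
		deliver(i, c);
	}
	if (used == 0)
		free(b);	/* nobody took the original (ndst == 0) */
}
#endif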
4040
4041/*
4042 * bridge_span:
4043 *
 4044 * Duplicate a packet out one or more interfaces that are in span mode;
 4045 * the original mbuf is unmodified.
4046 */
4047static void
4048bridge_span(struct bridge_softc *sc, struct mbuf *m)
4049{
4050 struct bridge_iflist *bif;
4051 struct ifnet *dst_if;
4052 struct mbuf *mc;
4053
4054 if (TAILQ_EMPTY(&sc->sc_spanlist))
4055 return;
4056
4057 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) {
4058 dst_if = bif->bif_ifp;
4059
4060 if ((dst_if->if_flags & IFF_RUNNING) == 0)
4061 continue;
4062
4063 mc = m_copypacket(m, M_DONTWAIT);
4064 if (mc == NULL) {
4065 (void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
4066 continue;
4067 }
4068
4069 bridge_enqueue(sc, dst_if, mc);
4070 }
4071}
4072
4073
4074
4075/*
4076 * bridge_rtupdate:
4077 *
4078 * Add a bridge routing entry.
4079 */
4080static int
4081bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
4082 struct bridge_iflist *bif, int setflags, uint8_t flags)
4083{
4084 struct bridge_rtnode *brt;
4085 int error;
4086
4087 BRIDGE_LOCK_ASSERT(sc);
4088
4089 /* Check the source address is valid and not multicast. */
4090 if (ETHER_IS_MULTICAST(dst) ||
4091 (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
 4092 dst[3] == 0 && dst[4] == 0 && dst[5] == 0))
4093 return (EINVAL);
4094
4095
4096 /* 802.1p frames map to vlan 1 */
4097 if (vlan == 0)
4098 vlan = 1;
4099
4100 /*
4101 * A route for this destination might already exist. If so,
4102 * update it, otherwise create a new one.
4103 */
4104 if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
4105 if (sc->sc_brtcnt >= sc->sc_brtmax) {
4106 sc->sc_brtexceeded++;
4107 return (ENOSPC);
4108 }
4109 /* Check per interface address limits (if enabled) */
4110 if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
4111 bif->bif_addrexceeded++;
4112 return (ENOSPC);
4113 }
4114
4115 /*
4116 * Allocate a new bridge forwarding node, and
4117 * initialize the expiration time and Ethernet
4118 * address.
4119 */
4120 brt = zalloc_noblock(bridge_rtnode_pool);
4121 if (brt == NULL)
4122 return (ENOMEM);
4123
4124 if (bif->bif_flags & IFBIF_STICKY)
4125 brt->brt_flags = IFBAF_STICKY;
4126 else
4127 brt->brt_flags = IFBAF_DYNAMIC;
4128
4129 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
4130 brt->brt_vlan = vlan;
4131
4132
4133 if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
4134 zfree(bridge_rtnode_pool, brt);
4135 return (error);
4136 }
4137 brt->brt_dst = bif;
4138 bif->bif_addrcnt++;
4139 }
4140
4141 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
4142 brt->brt_dst != bif) {
4143 brt->brt_dst->bif_addrcnt--;
4144 brt->brt_dst = bif;
4145 brt->brt_dst->bif_addrcnt++;
4146 }
4147
4148 if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
4149 struct timespec now;
4150
4151 nanouptime(&now);
4152 brt->brt_expire = now.tv_sec + sc->sc_brttimeout;
4153 }
4154 if (setflags)
4155 brt->brt_flags = flags;
4156
4157
4158 return (0);
4159}
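/*
 * A learned dynamic entry is refreshed to expire sc_brttimeout seconds
 * after the address was last seen, and bridge_rtage() below destroys it
 * once the current uptime passes that mark.  A minimal sketch of that
 * refresh/expiry relationship, using plain time_t arithmetic and
 * hypothetical names, is shown here; it is illustrative only.
 */
#if 0	/* Illustrative sketch only -- not kernel code. */
#include <time.h>

struct sketch_entry {		/* hypothetical learned-address entry */
	time_t	expire;		/* absolute expiry, like brt_expire */
};

#define SKETCH_TIMEOUT	1200	/* stand-in for sc->sc_brttimeout (seconds) */

/* Refresh on every sighting of the source address (cf. bridge_rtupdate). */
static void
sketch_entry_refresh(struct sketch_entry *e, time_t now)
{
	e->expire = now + SKETCH_TIMEOUT;
}

/* Test in the periodic aging pass (cf. bridge_rtage). */
static int
sketch_entry_expired(const struct sketch_entry *e, time_t now)
{
	return (now >= e->expire);
}
#endif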
4160
4161/*
4162 * bridge_rtlookup:
4163 *
4164 * Lookup the destination interface for an address.
4165 */
4166static struct ifnet *
4167bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
4168{
4169 struct bridge_rtnode *brt;
4170
4171 BRIDGE_LOCK_ASSERT(sc);
4172
4173 if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
4174 return (NULL);
4175
4176 return (brt->brt_ifp);
4177}
4178
4179/*
4180 * bridge_rttrim:
4181 *
 4182 * Trim the routing table so that we have a number
4183 * of routing entries less than or equal to the
4184 * maximum number.
4185 */
4186static void
4187bridge_rttrim(struct bridge_softc *sc)
4188{
4189 struct bridge_rtnode *brt, *nbrt;
4190
4191 BRIDGE_LOCK_ASSERT(sc);
4192
4193 /* Make sure we actually need to do this. */
4194 if (sc->sc_brtcnt <= sc->sc_brtmax)
4195 return;
4196
4197 /* Force an aging cycle; this might trim enough addresses. */
4198 bridge_rtage(sc);
4199 if (sc->sc_brtcnt <= sc->sc_brtmax)
4200 return;
4201
4202 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
4203 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
4204 bridge_rtnode_destroy(sc, brt);
4205 if (sc->sc_brtcnt <= sc->sc_brtmax)
4206 return;
4207 }
4208 }
4209}
4210
4211/*
4212 * bridge_timer:
4213 *
4214 * Aging timer for the bridge.
4215 */
4216static void
4217bridge_timer(void *arg)
4218{
4219 struct bridge_softc *sc = arg;
4220
4221 BRIDGE_LOCK(sc);
4222
4223 bridge_rtage(sc);
4224
4225 BRIDGE_UNLOCK(sc);
4226
4227 if (sc->sc_ifp->if_flags & IFF_RUNNING) {
4228 struct timespec ts;
4229
4230 ts.tv_sec = bridge_rtable_prune_period;
4231 ts.tv_nsec = 0;
4232 bsd_timeout(bridge_timer, sc, &ts);
4233 }
4234}
4235
4236/*
4237 * bridge_rtage:
4238 *
4239 * Perform an aging cycle.
4240 */
4241static void
4242bridge_rtage(struct bridge_softc *sc)
4243{
4244 struct bridge_rtnode *brt, *nbrt;
4245
4246 BRIDGE_LOCK_ASSERT(sc);
4247
4248 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
4249 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
4250 struct timespec now;
4251
4252 nanouptime(&now);
4253 if ((unsigned long)now.tv_sec >= brt->brt_expire)
4254 bridge_rtnode_destroy(sc, brt);
4255 }
4256 }
4257}
4258
4259/*
4260 * bridge_rtflush:
4261 *
4262 * Remove all dynamic addresses from the bridge.
4263 */
4264static void
4265bridge_rtflush(struct bridge_softc *sc, int full)
4266{
4267 struct bridge_rtnode *brt, *nbrt;
4268
4269 BRIDGE_LOCK_ASSERT(sc);
4270
4271 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
4272 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
4273 bridge_rtnode_destroy(sc, brt);
4274 }
4275}
4276
4277/*
4278 * bridge_rtdaddr:
4279 *
4280 * Remove an address from the table.
4281 */
4282static int
4283bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
4284{
4285 struct bridge_rtnode *brt;
4286 int found = 0;
4287
4288 BRIDGE_LOCK_ASSERT(sc);
4289
4290 /*
 4291 * If vlan is zero then we want to delete for all vlans, so the lookup
 4292 * may return more than one entry.
4293 */
4294 while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
4295 bridge_rtnode_destroy(sc, brt);
4296 found = 1;
4297 }
4298
4299 return (found ? 0 : ENOENT);
4300}
4301
4302/*
4303 * bridge_rtdelete:
4304 *
 4305 * Delete routes to a specific member interface.
4306 */
4307static void
4308bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
4309{
4310 struct bridge_rtnode *brt, *nbrt;
4311
4312 BRIDGE_LOCK_ASSERT(sc);
4313
4314 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
4315 if (brt->brt_ifp == ifp && (full ||
4316 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
4317 bridge_rtnode_destroy(sc, brt);
4318 }
4319}
4320
4321/*
4322 * bridge_rtable_init:
4323 *
4324 * Initialize the route table for this bridge.
4325 */
4326static int
4327bridge_rtable_init(struct bridge_softc *sc)
4328{
4329 int i;
4330
4331 sc->sc_rthash = _MALLOC(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
4332 M_DEVBUF, M_NOWAIT);
4333 if (sc->sc_rthash == NULL)
4334 return (ENOMEM);
4335
4336 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
4337 LIST_INIT(&sc->sc_rthash[i]);
4338
4339 sc->sc_rthash_key = random();
4340
4341 LIST_INIT(&sc->sc_rtlist);
4342
4343 return (0);
4344}
4345
4346/*
4347 * bridge_rtable_fini:
4348 *
4349 * Deconstruct the route table for this bridge.
4350 */
4351static void
4352bridge_rtable_fini(struct bridge_softc *sc)
4353{
4354
4355 KASSERT(sc->sc_brtcnt == 0,
4356 ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
4357 _FREE(sc->sc_rthash, M_DEVBUF);
4358}
4359
4360/*
4361 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
4362 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
4363 */
4364#define mix(a, b, c) \
4365do { \
4366 a -= b; a -= c; a ^= (c >> 13); \
4367 b -= c; b -= a; b ^= (a << 8); \
4368 c -= a; c -= b; c ^= (b >> 13); \
4369 a -= b; a -= c; a ^= (c >> 12); \
4370 b -= c; b -= a; b ^= (a << 16); \
4371 c -= a; c -= b; c ^= (b >> 5); \
4372 a -= b; a -= c; a ^= (c >> 3); \
4373 b -= c; b -= a; b ^= (a << 10); \
4374 c -= a; c -= b; c ^= (b >> 15); \
4375} while (/*CONSTCOND*/0)
4376
4377static __inline uint32_t
4378bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
4379{
4380 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
4381
4382 b += addr[5] << 8;
4383 b += addr[4];
4384 a += addr[3] << 24;
4385 a += addr[2] << 16;
4386 a += addr[1] << 8;
4387 a += addr[0];
4388
4389 mix(a, b, c);
4390
4391 return (c & BRIDGE_RTHASH_MASK);
4392}
4393
4394#undef mix
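/*
 * bridge_rthash() above folds the six address bytes into two of the three
 * 32-bit words, seeds the third with the per-bridge random key, runs one
 * round of the Jenkins mix and masks the result down to a bucket index
 * (which assumes BRIDGE_RTHASH_SIZE is a power of two).  The standalone
 * userland sketch below reproduces the same computation for
 * experimentation; the table size and key value are illustrative.
 */
#if 0	/* Standalone userland sketch of the hash; illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_RTHASH_SIZE	1024	/* assumed power of two */
#define SKETCH_RTHASH_MASK	(SKETCH_RTHASH_SIZE - 1)

#define sketch_mix(a, b, c) do {					\
	a -= b; a -= c; a ^= (c >> 13);					\
	b -= c; b -= a; b ^= (a << 8);					\
	c -= a; c -= b; c ^= (b >> 13);					\
	a -= b; a -= c; a ^= (c >> 12);					\
	b -= c; b -= a; b ^= (a << 16);					\
	c -= a; c -= b; c ^= (b >> 5);					\
	a -= b; a -= c; a ^= (c >> 3);					\
	b -= c; b -= a; b ^= (a << 10);					\
	c -= a; c -= b; c ^= (b >> 15);					\
} while (0)

static uint32_t
sketch_rthash(const uint8_t addr[6], uint32_t key)
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key;

	b += (uint32_t)addr[5] << 8;
	b += addr[4];
	a += (uint32_t)addr[3] << 24;
	a += (uint32_t)addr[2] << 16;
	a += (uint32_t)addr[1] << 8;
	a += addr[0];

	sketch_mix(a, b, c);

	return (c & SKETCH_RTHASH_MASK);
}

int
main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* 0x12345678 stands in for the random sc_rthash_key. */
	printf("bucket %u of %u\n", sketch_rthash(mac, 0x12345678),
	    SKETCH_RTHASH_SIZE);
	return (0);
}
#endif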
4395
4396static int
4397bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
4398{
4399 int i, d;
4400
4401 for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
4402 d = ((int)a[i]) - ((int)b[i]);
4403 }
4404
4405 return (d);
4406}
4407
4408/*
4409 * bridge_rtnode_lookup:
4410 *
 4411 * Look up a bridge route node for the specified destination. Compare the
 4412 * vlan id, or if it is zero, just return the first match.
4413 */
4414static struct bridge_rtnode *
4415bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
4416{
4417 struct bridge_rtnode *brt;
4418 uint32_t hash;
4419 int dir;
4420
4421 BRIDGE_LOCK_ASSERT(sc);
4422
4423 hash = bridge_rthash(sc, addr);
4424 LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
4425 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
4426 if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
4427 return (brt);
4428 if (dir > 0)
4429 return (NULL);
4430 }
4431
4432 return (NULL);
4433}
4434
4435/*
4436 * bridge_rtnode_insert:
4437 *
4438 * Insert the specified bridge node into the route table. We
4439 * assume the entry is not already in the table.
4440 */
4441static int
4442bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
4443{
4444 struct bridge_rtnode *lbrt;
4445 uint32_t hash;
4446 int dir;
4447
4448 BRIDGE_LOCK_ASSERT(sc);
4449
4450 hash = bridge_rthash(sc, brt->brt_addr);
4451
4452 lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
4453 if (lbrt == NULL) {
4454 LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
4455 goto out;
4456 }
4457
4458 do {
4459 dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
4460 if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
4461 return (EEXIST);
4462 if (dir > 0) {
4463 LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
4464 goto out;
4465 }
4466 if (LIST_NEXT(lbrt, brt_hash) == NULL) {
4467 LIST_INSERT_AFTER(lbrt, brt, brt_hash);
4468 goto out;
4469 }
4470 lbrt = LIST_NEXT(lbrt, brt_hash);
4471 } while (lbrt != NULL);
4472
4473#ifdef DIAGNOSTIC
4474 panic("bridge_rtnode_insert: impossible");
4475#endif
4476
4477out:
4478 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
4479 sc->sc_brtcnt++;
4480
4481 return (0);
4482}
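/*
 * bridge_rtnode_insert() keeps each hash chain ordered by address (as
 * defined by bridge_rtnode_addr_cmp), which is what allows
 * bridge_rtnode_lookup() to stop early: once the target compares greater
 * than the entry being examined, it cannot appear later in the chain.
 * A minimal model of that early-exit search over an array kept in the
 * same order is sketched below; memcmp() stands in for
 * bridge_rtnode_addr_cmp() and the code is illustrative only.
 */
#if 0	/* Illustrative sketch only -- not kernel code. */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define SKETCH_ADDR_LEN	6

/* table[] is ordered so that entries that compare greater come first. */
static int
sketch_lookup(const uint8_t (*table)[SKETCH_ADDR_LEN], size_t n,
    const uint8_t *addr)
{
	size_t i;
	int dir;

	for (i = 0; i < n; i++) {
		dir = memcmp(addr, table[i], SKETCH_ADDR_LEN);
		if (dir == 0)
			return ((int)i);	/* found */
		if (dir > 0)
			return (-1);		/* cannot appear later: stop */
	}
	return (-1);
}
#endif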
4483
4484/*
4485 * bridge_rtnode_destroy:
4486 *
4487 * Destroy a bridge rtnode.
4488 */
4489static void
4490bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
4491{
4492 BRIDGE_LOCK_ASSERT(sc);
4493
4494 LIST_REMOVE(brt, brt_hash);
4495
4496 LIST_REMOVE(brt, brt_list);
4497 sc->sc_brtcnt--;
4498 brt->brt_dst->bif_addrcnt--;
4499 zfree(bridge_rtnode_pool, brt);
4500}
4501
4502/*
4503 * bridge_rtable_expire:
4504 *
4505 * Set the expiry time for all routes on an interface.
4506 */
4507static void
4508bridge_rtable_expire(struct ifnet *ifp, int age)
4509{
4510 struct bridge_softc *sc = ifp->if_bridge;
4511 struct bridge_rtnode *brt;
4512
4513 BRIDGE_LOCK(sc);
4514
4515 /*
 4516 * If the age is zero then flush, otherwise cap the expiry time of the
 4517 * interface's dynamic entries at 'age' seconds from now.
4518 */
4519 if (age == 0)
4520 bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
4521 else {
4522 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
4523 struct timespec now;
4524
4525 nanouptime(&now);
4526 /* Cap the expiry time to 'age' */
4527 if (brt->brt_ifp == ifp &&
4528 brt->brt_expire > (unsigned long)now.tv_sec + age &&
4529 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
4530 brt->brt_expire = (unsigned long)now.tv_sec + age;
4531 }
4532 }
4533 BRIDGE_UNLOCK(sc);
4534}
4535
4536/*
4537 * bridge_state_change:
4538 *
4539 * Callback from the bridgestp code when a port changes states.
4540 */
4541static void
4542bridge_state_change(struct ifnet *ifp, int state)
4543{
4544 struct bridge_softc *sc = ifp->if_bridge;
4545 static const char *stpstates[] = {
4546 "disabled",
4547 "listening",
4548 "learning",
4549 "forwarding",
4550 "blocking",
4551 "discarding"
4552 };
4553
4554 if (log_stp)
4555 log(LOG_NOTICE, "%s%d: state changed to %s on %s%d\n",
4556 ifnet_name(sc->sc_ifp), ifnet_unit(sc->sc_ifp),
4557 stpstates[state],
4558 ifnet_name(ifp), ifnet_unit(ifp));
4559}
4560
4561#ifdef PFIL_HOOKS
4562/*
4563 * Send bridge packets through pfil if they are one of the types pfil can deal
4564 * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without
 4565 * question.) If bifp or ifp is NULL then packet filtering is skipped for
4566 * that interface.
4567 */
4568static int
4569bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
4570{
4571 int snap, error, i, hlen;
4572 struct ether_header *eh1, eh2;
4573 struct ip_fw_args args;
4574 struct ip *ip;
4575 struct llc llc1;
4576 u_int16_t ether_type;
4577
4578 snap = 0;
 4579 error = -1; /* Default to an error unless explicitly cleared below */
4580
4581#if 0
 4582 /* we may return with the IP fields swapped, ensure it's not shared */
4583 KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
4584#endif
4585
4586 if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0)
4587 return (0); /* filtering is disabled */
4588
4589 i = min((*mp)->m_pkthdr.len, max_protohdr);
4590 if ((*mp)->m_len < i) {
4591 *mp = m_pullup(*mp, i);
4592 if (*mp == NULL) {
4593 printf("%s: m_pullup failed\n", __func__);
4594 return (-1);
4595 }
4596 }
4597
4598 eh1 = mtod(*mp, struct ether_header *);
4599 ether_type = ntohs(eh1->ether_type);
4600
4601 /*
4602 * Check for SNAP/LLC.
4603 */
4604 if (ether_type < ETHERMTU) {
4605 struct llc *llc2 = (struct llc *)(eh1 + 1);
4606
4607 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
4608 llc2->llc_dsap == LLC_SNAP_LSAP &&
4609 llc2->llc_ssap == LLC_SNAP_LSAP &&
4610 llc2->llc_control == LLC_UI) {
4611 ether_type = htons(llc2->llc_un.type_snap.ether_type);
4612 snap = 1;
4613 }
4614 }
4615
4616 /*
4617 * If we're trying to filter bridge traffic, don't look at anything
4618 * other than IP and ARP traffic. If the filter doesn't understand
4619 * IPv6, don't allow IPv6 through the bridge either. This is lame
4620 * since if we really wanted, say, an AppleTalk filter, we are hosed,
4621 * but of course we don't have an AppleTalk filter to begin with.
4622 * (Note that since pfil doesn't understand ARP it will pass *ALL*
4623 * ARP traffic.)
4624 */
4625 switch (ether_type) {
4626 case ETHERTYPE_ARP:
4627 case ETHERTYPE_REVARP:
4628 if (pfil_ipfw_arp == 0)
4629 return (0); /* Automatically pass */
4630 break;
4631
4632 case ETHERTYPE_IP:
4633#ifdef INET6
4634 case ETHERTYPE_IPV6:
4635#endif /* INET6 */
4636 break;
4637 default:
4638 /*
 4639 * Check to see if the user wants to pass non-ip
 4640 * packets; these will not be checked by pfil(9) and
 4641 * would be passed unconditionally, so the default is to drop.
4642 */
4643 if (pfil_onlyip)
4644 goto bad;
4645 }
4646
4647 /* Strip off the Ethernet header and keep a copy. */
4648 m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
4649 m_adj(*mp, ETHER_HDR_LEN);
4650
4651 /* Strip off snap header, if present */
4652 if (snap) {
4653 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
4654 m_adj(*mp, sizeof(struct llc));
4655 }
4656
4657 /*
4658 * Check the IP header for alignment and errors
4659 */
4660 if (dir == PFIL_IN) {
4661 switch (ether_type) {
4662 case ETHERTYPE_IP:
4663 error = bridge_ip_checkbasic(mp);
4664 break;
4665#ifdef INET6
4666 case ETHERTYPE_IPV6:
4667 error = bridge_ip6_checkbasic(mp);
4668 break;
4669#endif /* INET6 */
4670 default:
4671 error = 0;
4672 }
4673 if (error)
4674 goto bad;
4675 }
4676
4677 if (IPFW_LOADED && pfil_ipfw != 0 && dir == PFIL_OUT && ifp != NULL) {
4678 error = -1;
4679 args.rule = ip_dn_claim_rule(*mp);
4680 if (args.rule != NULL && fw_one_pass)
4681 goto ipfwpass; /* packet already partially processed */
4682
4683 args.m = *mp;
4684 args.oif = ifp;
4685 args.next_hop = NULL;
4686 args.eh = &eh2;
4687 args.inp = NULL; /* used by ipfw uid/gid/jail rules */
4688 i = ip_fw_chk_ptr(&args);
4689 *mp = args.m;
4690
4691 if (*mp == NULL)
4692 return (error);
4693
4694 if (DUMMYNET_LOADED && (i == IP_FW_DUMMYNET)) {
4695
4696 /* put the Ethernet header back on */
4697 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
4698 if (*mp == NULL)
4699 return (error);
4700 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
4701
4702 /*
4703 * Pass the pkt to dummynet, which consumes it. The
4704 * packet will return to us via bridge_dummynet().
4705 */
4706 args.oif = ifp;
4707 ip_dn_io_ptr(mp, DN_TO_IFB_FWD, &args);
4708 return (error);
4709 }
4710
4711 if (i != IP_FW_PASS) /* drop */
4712 goto bad;
4713 }
4714
4715ipfwpass:
4716 error = 0;
4717
4718 /*
4719 * Run the packet through pfil
4720 */
4721 switch (ether_type) {
4722 case ETHERTYPE_IP:
4723 /*
 4724 * Before calling the firewall, swap fields the same as
 4725 * IP does. Here we assume the header is contiguous.
4726 */
4727 ip = mtod(*mp, struct ip *);
4728
4729 ip->ip_len = ntohs(ip->ip_len);
4730 ip->ip_off = ntohs(ip->ip_off);
4731
4732 /*
4733 * Run pfil on the member interface and the bridge, both can
4734 * be skipped by clearing pfil_member or pfil_bridge.
4735 *
4736 * Keep the order:
4737 * in_if -> bridge_if -> out_if
4738 */
4739 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
4740 error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
4741 dir, NULL);
4742
4743 if (*mp == NULL || error != 0) /* filter may consume */
4744 break;
4745
4746 if (pfil_member && ifp != NULL)
4747 error = pfil_run_hooks(&inet_pfil_hook, mp, ifp,
4748 dir, NULL);
4749
4750 if (*mp == NULL || error != 0) /* filter may consume */
4751 break;
4752
4753 if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
4754 error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
4755 dir, NULL);
4756
4757 if (*mp == NULL || error != 0) /* filter may consume */
4758 break;
4759
4760 /* check if we need to fragment the packet */
4761 if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
4762 i = (*mp)->m_pkthdr.len;
4763 if (i > ifp->if_mtu) {
4764 error = bridge_fragment(ifp, *mp, &eh2, snap,
4765 &llc1);
4766 return (error);
4767 }
4768 }
4769
4770 /* Recalculate the ip checksum and restore byte ordering */
4771 ip = mtod(*mp, struct ip *);
4772 hlen = ip->ip_hl << 2;
4773 if (hlen < sizeof(struct ip))
4774 goto bad;
4775 if (hlen > (*mp)->m_len) {
4776 if ((*mp = m_pullup(*mp, hlen)) == 0)
4777 goto bad;
4778 ip = mtod(*mp, struct ip *);
4779 if (ip == NULL)
4780 goto bad;
4781 }
4782 ip->ip_len = htons(ip->ip_len);
4783 ip->ip_off = htons(ip->ip_off);
4784 ip->ip_sum = 0;
4785 if (hlen == sizeof(struct ip))
4786 ip->ip_sum = in_cksum_hdr(ip);
4787 else
4788 ip->ip_sum = in_cksum(*mp, hlen);
4789
4790 break;
4791#ifdef INET6
4792 case ETHERTYPE_IPV6:
4793 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
4794 error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
4795 dir, NULL);
4796
4797 if (*mp == NULL || error != 0) /* filter may consume */
4798 break;
4799
4800 if (pfil_member && ifp != NULL)
4801 error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
4802 dir, NULL);
4803
4804 if (*mp == NULL || error != 0) /* filter may consume */
4805 break;
4806
4807 if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
4808 error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
4809 dir, NULL);
4810 break;
4811#endif
4812 default:
4813 error = 0;
4814 break;
4815 }
4816
4817 if (*mp == NULL)
4818 return (error);
4819 if (error != 0)
4820 goto bad;
4821
4822 error = -1;
4823
4824 /*
4825 * Finally, put everything back the way it was and return
4826 */
4827 if (snap) {
4828 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
4829 if (*mp == NULL)
4830 return (error);
4831 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
4832 }
4833
4834 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
4835 if (*mp == NULL)
4836 return (error);
4837 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
4838
4839 return (0);
4840
4841bad:
4842 m_freem(*mp);
4843 *mp = NULL;
4844 return (error);
4845}
4846
4847
4848/*
 4849 * Perform basic checks on header size since
 4850 * pfil assumes ip_input has already
 4851 * processed it. Cut-and-pasted from ip_input.c.
4852 * Given how simple the IPv6 version is,
4853 * does the IPv4 version really need to be
4854 * this complicated?
4855 *
4856 * XXX Should we update ipstat here, or not?
4857 * XXX Right now we update ipstat but not
4858 * XXX csum_counter.
4859 */
4860static int
4861bridge_ip_checkbasic(struct mbuf **mp)
4862{
4863 struct mbuf *m = *mp;
4864 struct ip *ip;
4865 int len, hlen;
4866 u_short sum;
4867
4868 if (*mp == NULL)
4869 return (-1);
4870
4871 if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
4872 if ((m = m_copyup(m, sizeof(struct ip),
4873 (max_linkhdr + 3) & ~3)) == NULL) {
4874 /* XXXJRT new stat, please */
4875 ipstat.ips_toosmall++;
4876 goto bad;
4877 }
4878 } else if (__predict_false(m->m_len < sizeof (struct ip))) {
4879 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
4880 ipstat.ips_toosmall++;
4881 goto bad;
4882 }
4883 }
4884 ip = mtod(m, struct ip *);
4885 if (ip == NULL) goto bad;
4886
4887 if (ip->ip_v != IPVERSION) {
4888 ipstat.ips_badvers++;
4889 goto bad;
4890 }
4891 hlen = ip->ip_hl << 2;
4892 if (hlen < sizeof(struct ip)) { /* minimum header length */
4893 ipstat.ips_badhlen++;
4894 goto bad;
4895 }
4896 if (hlen > m->m_len) {
4897 if ((m = m_pullup(m, hlen)) == 0) {
4898 ipstat.ips_badhlen++;
4899 goto bad;
4900 }
4901 ip = mtod(m, struct ip *);
4902 if (ip == NULL) goto bad;
4903 }
4904
4905 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
4906 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
4907 } else {
4908 if (hlen == sizeof(struct ip)) {
4909 sum = in_cksum_hdr(ip);
4910 } else {
4911 sum = in_cksum(m, hlen);
4912 }
4913 }
4914 if (sum) {
4915 ipstat.ips_badsum++;
4916 goto bad;
4917 }
4918
4919 /* Retrieve the packet length. */
4920 len = ntohs(ip->ip_len);
4921
4922 /*
4923 * Check for additional length bogosity
4924 */
4925 if (len < hlen) {
4926 ipstat.ips_badlen++;
4927 goto bad;
4928 }
4929
4930 /*
4931 * Check that the amount of data in the buffers
 4932 * is at least as much as the IP header would have us expect.
4933 * Drop packet if shorter than we expect.
4934 */
4935 if (m->m_pkthdr.len < len) {
4936 ipstat.ips_tooshort++;
4937 goto bad;
4938 }
4939
4940 /* Checks out, proceed */
4941 *mp = m;
4942 return (0);
4943
4944bad:
4945 *mp = m;
4946 return (-1);
4947}
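/*
 * The checksum test above relies on the usual IPv4 property: summing the
 * header in ones' complement over 16-bit words, including the checksum
 * field itself, yields 0xffff for an intact header, so in_cksum_hdr() /
 * in_cksum() return 0.  A minimal standalone rendition of that header
 * checksum is sketched below for reference; it is illustrative only and
 * is not the kernel's in_cksum() implementation.
 */
#if 0	/* Illustrative sketch only -- not kernel code. */
#include <stdint.h>
#include <stddef.h>

/*
 * Ones' complement sum over 'hlen' bytes of the header (hlen is a
 * multiple of 4 for IPv4).  Returns 0 when the header, including its
 * checksum field, verifies -- the condition bridge_ip_checkbasic()
 * tests for.
 */
static uint16_t
sketch_ip_cksum(const void *hdr, size_t hlen)
{
	const uint8_t *p = hdr;
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < hlen; i += 2)
		sum += ((uint32_t)p[i] << 8) | p[i + 1];

	/* Fold the carries back into the low 16 bits. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	return ((uint16_t)~sum);
}
#endif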
4948
4949#ifdef INET6
4950/*
4951 * Same as above, but for IPv6.
4952 * Cut-and-pasted from ip6_input.c.
4953 * XXX Should we update ip6stat, or not?
4954 */
4955static int
4956bridge_ip6_checkbasic(struct mbuf **mp)
4957{
4958 struct mbuf *m = *mp;
4959 struct ip6_hdr *ip6;
4960
4961 /*
4962 * If the IPv6 header is not aligned, slurp it up into a new
4963 * mbuf with space for link headers, in the event we forward
4964 * it. Otherwise, if it is aligned, make sure the entire base
4965 * IPv6 header is in the first mbuf of the chain.
4966 */
4967 if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
4968 struct ifnet *inifp = m->m_pkthdr.rcvif;
4969 if ((m = m_copyup(m, sizeof(struct ip6_hdr),
4970 (max_linkhdr + 3) & ~3)) == NULL) {
4971 /* XXXJRT new stat, please */
4972 ip6stat.ip6s_toosmall++;
4973 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
4974 goto bad;
4975 }
4976 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
4977 struct ifnet *inifp = m->m_pkthdr.rcvif;
4978 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
4979 ip6stat.ip6s_toosmall++;
4980 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
4981 goto bad;
4982 }
4983 }
4984
4985 ip6 = mtod(m, struct ip6_hdr *);
4986
4987 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
4988 ip6stat.ip6s_badvers++;
4989 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
4990 goto bad;
4991 }
4992
4993 /* Checks out, proceed */
4994 *mp = m;
4995 return (0);
4996
4997bad:
4998 *mp = m;
4999 return (-1);
5000}
5001#endif /* INET6 */
5002
5003/*
5004 * bridge_fragment:
5005 *
5006 * Return a fragmented mbuf chain.
5007 */
5008static int
5009bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
5010 int snap, struct llc *llc)
5011{
5012 struct mbuf *m0;
5013 struct ip *ip;
5014 int error = -1;
5015
5016 if (m->m_len < sizeof(struct ip) &&
5017 (m = m_pullup(m, sizeof(struct ip))) == NULL)
5018 goto out;
5019 ip = mtod(m, struct ip *);
5020
5021 error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
5022 CSUM_DELAY_IP);
5023 if (error)
5024 goto out;
5025
5026 /* walk the chain and re-add the Ethernet header */
5027 for (m0 = m; m0; m0 = m0->m_nextpkt) {
5028 if (error == 0) {
5029 if (snap) {
5030 M_PREPEND(m0, sizeof(struct llc), M_DONTWAIT);
5031 if (m0 == NULL) {
5032 error = ENOBUFS;
5033 continue;
5034 }
5035 bcopy(llc, mtod(m0, caddr_t),
5036 sizeof(struct llc));
5037 }
5038 M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT);
5039 if (m0 == NULL) {
5040 error = ENOBUFS;
5041 continue;
5042 }
5043 bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
5044 } else
5045 m_freem(m);
5046 }
5047
5048 if (error == 0)
5049 ipstat.ips_fragmented++;
5050
5051 return (error);
5052
5053out:
5054 if (m != NULL)
5055 m_freem(m);
5056 return (error);
5057}
5058#endif /* PFIL_HOOKS */
5059
5060static errno_t
5061bridge_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, bpf_packet_func bpf_callback)
5062{
5063 struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
5064
5065 //printf("bridge_set_bpf_tap ifp %p mode %d\n", ifp, mode);
5066
5067 /* TBD locking */
5068 if (sc == NULL || (sc->sc_flags & SCF_DETACHING)) {
5069 return ENODEV;
5070 }
5071
5072 switch (mode) {
5073 case BPF_TAP_DISABLE:
5074 sc->sc_bpf_input = sc->sc_bpf_output = NULL;
5075 break;
5076
5077 case BPF_TAP_INPUT:
5078 sc->sc_bpf_input = bpf_callback;
5079 break;
5080
5081 case BPF_TAP_OUTPUT:
5082 sc->sc_bpf_output = bpf_callback;
5083 break;
5084
5085 case BPF_TAP_INPUT_OUTPUT:
5086 sc->sc_bpf_input = sc->sc_bpf_output = bpf_callback;
5087 break;
5088
5089 default:
5090 break;
5091 }
5092
5093 return 0;
5094}
5095
5096static void
5097bridge_detach(ifnet_t ifp)
5098{
5099 struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
5100
5101 bstp_detach(&sc->sc_stp);
5102
5103 /* Tear down the routing table. */
5104 bridge_rtable_fini(sc);
5105
5106 lck_mtx_lock(bridge_list_mtx);
5107 LIST_REMOVE(sc, sc_list);
5108 lck_mtx_unlock(bridge_list_mtx);
5109
5110 ifnet_release(ifp);
5111
5112 lck_mtx_free(sc->sc_mtx, bridge_lock_grp);
5113
5114 _FREE(sc, M_DEVBUF);
5115 return;
5116}
5117
5118__private_extern__ errno_t bridge_bpf_input(ifnet_t ifp, struct mbuf *m)
5119{
5120 struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
5121
5122 if (sc->sc_bpf_input) {
5123 if (mbuf_pkthdr_rcvif(m) != ifp)
5124 printf("bridge_bpf_input rcvif: %p != ifp %p\n", mbuf_pkthdr_rcvif(m), ifp);
5125 (*sc->sc_bpf_input)(ifp, m);
5126 }
5127 return 0;
5128}
5129
5130__private_extern__ errno_t bridge_bpf_output(ifnet_t ifp, struct mbuf *m)
5131{
5132 struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
5133
5134 if (sc->sc_bpf_output) {
5135 (*sc->sc_bpf_output)(ifp, m);
5136 }
5137 return 0;
5138}