]> git.saurik.com Git - apple/xnu.git/blob - bsd/net/if_bridge.c
xnu-4570.71.2.tar.gz
[apple/xnu.git] / bsd / net / if_bridge.c
1 /*
2 * Copyright (c) 2004-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */
30 /*
31 * Copyright 2001 Wasabi Systems, Inc.
32 * All rights reserved.
33 *
34 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed for the NetBSD Project by
47 * Wasabi Systems, Inc.
48 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
49 * or promote products derived from this software without specific prior
50 * written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
54 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
55 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
56 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
57 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
58 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
59 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
60 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
61 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
62 * POSSIBILITY OF SUCH DAMAGE.
63 */
64
65 /*
66 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
67 * All rights reserved.
68 *
69 * Redistribution and use in source and binary forms, with or without
70 * modification, are permitted provided that the following conditions
71 * are met:
72 * 1. Redistributions of source code must retain the above copyright
73 * notice, this list of conditions and the following disclaimer.
74 * 2. Redistributions in binary form must reproduce the above copyright
75 * notice, this list of conditions and the following disclaimer in the
76 * documentation and/or other materials provided with the distribution.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
79 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
80 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
81 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
82 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
83 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
84 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
85 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
86 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
87 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
88 * POSSIBILITY OF SUCH DAMAGE.
89 *
90 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
91 */
92
93 /*
94 * Network interface bridge support.
95 *
96 * TODO:
97 *
98 * - Currently only supports Ethernet-like interfaces (Ethernet,
99 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
100 * to bridge other types of interfaces (FDDI-FDDI, and maybe
101 * consider heterogenous bridges).
102 *
103 * - GIF isn't handled due to the lack of IPPROTO_ETHERIP support.
104 */
105
106 #include <sys/cdefs.h>
107
108 #define BRIDGE_DEBUG 1
109
110 #include <sys/param.h>
111 #include <sys/mbuf.h>
112 #include <sys/malloc.h>
113 #include <sys/protosw.h>
114 #include <sys/systm.h>
115 #include <sys/time.h>
116 #include <sys/socket.h> /* for net/if.h */
117 #include <sys/sockio.h>
118 #include <sys/kernel.h>
119 #include <sys/random.h>
120 #include <sys/syslog.h>
121 #include <sys/sysctl.h>
122 #include <sys/proc.h>
123 #include <sys/lock.h>
124 #include <sys/mcache.h>
125
126 #include <sys/kauth.h>
127
128 #include <kern/thread_call.h>
129
130 #include <libkern/libkern.h>
131
132 #include <kern/zalloc.h>
133
134 #if NBPFILTER > 0
135 #include <net/bpf.h>
136 #endif
137 #include <net/if.h>
138 #include <net/if_dl.h>
139 #include <net/if_types.h>
140 #include <net/if_var.h>
141 #include <net/if_media.h>
142 #include <net/net_api_stats.h>
143
144 #include <netinet/in.h> /* for struct arpcom */
145 #include <netinet/in_systm.h>
146 #include <netinet/in_var.h>
147 #define _IP_VHL
148 #include <netinet/ip.h>
149 #include <netinet/ip_var.h>
150 #if INET6
151 #include <netinet/ip6.h>
152 #include <netinet6/ip6_var.h>
153 #endif
154 #ifdef DEV_CARP
155 #include <netinet/ip_carp.h>
156 #endif
157 #include <netinet/if_ether.h> /* for struct arpcom */
158 #include <net/bridgestp.h>
159 #include <net/if_bridgevar.h>
160 #include <net/if_llc.h>
161 #if NVLAN > 0
162 #include <net/if_vlan_var.h>
163 #endif /* NVLAN > 0 */
164
165 #include <net/if_ether.h>
166 #include <net/dlil.h>
167 #include <net/kpi_interfacefilter.h>
168
169 #include <net/route.h>
170 #ifdef PFIL_HOOKS
171 #include <netinet/ip_fw2.h>
172 #include <netinet/ip_dummynet.h>
173 #endif /* PFIL_HOOKS */
174 #include <dev/random/randomdev.h>
175
176 #include <netinet/bootp.h>
177 #include <netinet/dhcp.h>
178
179
180 #if BRIDGE_DEBUG
181 #define BR_DBGF_LIFECYCLE 0x0001
182 #define BR_DBGF_INPUT 0x0002
183 #define BR_DBGF_OUTPUT 0x0004
184 #define BR_DBGF_RT_TABLE 0x0008
185 #define BR_DBGF_DELAYED_CALL 0x0010
186 #define BR_DBGF_IOCTL 0x0020
187 #define BR_DBGF_MBUF 0x0040
188 #define BR_DBGF_MCAST 0x0080
189 #define BR_DBGF_HOSTFILTER 0x0100
190 #endif /* BRIDGE_DEBUG */
191
192 #define _BRIDGE_LOCK(_sc) lck_mtx_lock(&(_sc)->sc_mtx)
193 #define _BRIDGE_UNLOCK(_sc) lck_mtx_unlock(&(_sc)->sc_mtx)
194 #define BRIDGE_LOCK_ASSERT_HELD(_sc) \
195 LCK_MTX_ASSERT(&(_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED)
196 #define BRIDGE_LOCK_ASSERT_NOTHELD(_sc) \
197 LCK_MTX_ASSERT(&(_sc)->sc_mtx, LCK_MTX_ASSERT_NOTOWNED)
198
199 #if BRIDGE_DEBUG
200
201 #define BR_LCKDBG_MAX 4
202
203 #define BRIDGE_LOCK(_sc) bridge_lock(_sc)
204 #define BRIDGE_UNLOCK(_sc) bridge_unlock(_sc)
205 #define BRIDGE_LOCK2REF(_sc, _err) _err = bridge_lock2ref(_sc)
206 #define BRIDGE_UNREF(_sc) bridge_unref(_sc)
207 #define BRIDGE_XLOCK(_sc) bridge_xlock(_sc)
208 #define BRIDGE_XDROP(_sc) bridge_xdrop(_sc)
209
210 #else /* !BRIDGE_DEBUG */
211
212 #define BRIDGE_LOCK(_sc) _BRIDGE_LOCK(_sc)
213 #define BRIDGE_UNLOCK(_sc) _BRIDGE_UNLOCK(_sc)
214 #define BRIDGE_LOCK2REF(_sc, _err) do { \
215 BRIDGE_LOCK_ASSERT_HELD(_sc); \
216 if ((_sc)->sc_iflist_xcnt > 0) \
217 (_err) = EBUSY; \
218 else \
219 (_sc)->sc_iflist_ref++; \
220 _BRIDGE_UNLOCK(_sc); \
221 } while (0)
222 #define BRIDGE_UNREF(_sc) do { \
223 _BRIDGE_LOCK(_sc); \
224 (_sc)->sc_iflist_ref--; \
225 if (((_sc)->sc_iflist_xcnt > 0) && ((_sc)->sc_iflist_ref == 0)) { \
226 _BRIDGE_UNLOCK(_sc); \
227 wakeup(&(_sc)->sc_cv); \
228 } else \
229 _BRIDGE_UNLOCK(_sc); \
230 } while (0)
231 #define BRIDGE_XLOCK(_sc) do { \
232 BRIDGE_LOCK_ASSERT_HELD(_sc); \
233 (_sc)->sc_iflist_xcnt++; \
234 while ((_sc)->sc_iflist_ref > 0) \
235 msleep(&(_sc)->sc_cv, &(_sc)->sc_mtx, PZERO, \
236 "BRIDGE_XLOCK", NULL); \
237 } while (0)
238 #define BRIDGE_XDROP(_sc) do { \
239 BRIDGE_LOCK_ASSERT_HELD(_sc); \
240 (_sc)->sc_iflist_xcnt--; \
241 } while (0)
242
243 #endif /* BRIDGE_DEBUG */
244
245 #if NBPFILTER > 0
246 #define BRIDGE_BPF_MTAP_INPUT(sc, m) \
247 if (sc->sc_bpf_input) \
248 bridge_bpf_input(sc->sc_ifp, m)
249 #else /* NBPFILTER */
250 #define BRIDGE_BPF_MTAP_INPUT(ifp, m)
251 #endif /* NBPFILTER */
252
253 /*
254 * Initial size of the route hash table. Must be a power of two.
255 */
256 #ifndef BRIDGE_RTHASH_SIZE
257 #define BRIDGE_RTHASH_SIZE 16
258 #endif
259
260 /*
261 * Maximum size of the routing hash table
262 */
263 #define BRIDGE_RTHASH_SIZE_MAX 2048
264
265 #define BRIDGE_RTHASH_MASK(sc) ((sc)->sc_rthash_size - 1)
266
267 /*
268 * Maximum number of addresses to cache.
269 */
270 #ifndef BRIDGE_RTABLE_MAX
271 #define BRIDGE_RTABLE_MAX 100
272 #endif
273
274
275 /*
276 * Timeout (in seconds) for entries learned dynamically.
277 */
278 #ifndef BRIDGE_RTABLE_TIMEOUT
279 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
280 #endif
281
282 /*
283 * Number of seconds between walks of the route list.
284 */
285 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
286 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
287 #endif
288
289 /*
290 * List of capabilities to possibly mask on the member interface.
291 */
292 #define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM)
293 /*
294 * List of capabilities to disable on the member interface.
295 */
296 #define BRIDGE_IFCAPS_STRIP IFCAP_LRO
297
298 /*
299 * Bridge interface list entry.
300 */
301 struct bridge_iflist {
302 	TAILQ_ENTRY(bridge_iflist) bif_next; /* linkage on sc_iflist */
303 	struct ifnet *bif_ifp; /* member if */
304 	struct bstp_port bif_stp; /* STP state */
305 	uint32_t bif_ifflags; /* member if flags */
306 	int bif_savedcaps; /* saved capabilities */
307 	uint32_t bif_addrmax; /* max # of addresses */
308 	uint32_t bif_addrcnt; /* cur. # of addresses */
309 	uint32_t bif_addrexceeded; /* # of address violations */
310 
311 	interface_filter_t bif_iff_ref; /* interface filter ref (BIFF_FILTER_ATTACHED) */
312 	struct bridge_softc *bif_sc; /* back-pointer to owning bridge */
313 	uint32_t bif_flags; /* BIFF_* flags, see below */
314 
315 	struct in_addr bif_hf_ipsrc; /* host filter: source IP (BIFF_HF_IPSRC) */
316 	uint8_t bif_hf_hwsrc[ETHER_ADDR_LEN]; /* host filter: source MAC (BIFF_HF_HWSRC) */
317 };
318
319 #define BIFF_PROMISC 0x01 /* promiscuous mode set */
320 #define BIFF_PROTO_ATTACHED 0x02 /* protocol attached */
321 #define BIFF_FILTER_ATTACHED 0x04 /* interface filter attached */
322 #define BIFF_MEDIA_ACTIVE 0x08 /* interface media active */
323 #define BIFF_HOST_FILTER 0x10 /* host filter enabled */
324 #define BIFF_HF_HWSRC 0x20 /* host filter source MAC is set */
325 #define BIFF_HF_IPSRC 0x40 /* host filter source IP is set */
326
327 /*
328 * Bridge route node.
329 */
330 struct bridge_rtnode {
331 	LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */
332 	LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */
333 	struct bridge_iflist *brt_dst; /* destination if */
334 	unsigned long brt_expire; /* expiration time */
335 	uint8_t brt_flags; /* address flags */
336 	uint8_t brt_addr[ETHER_ADDR_LEN]; /* learned MAC address */
337 	uint16_t brt_vlan; /* vlan id */
338 
339 };
340 #define brt_ifp brt_dst->bif_ifp
341
342 /*
343 * Bridge delayed function call context
344 */
345 typedef void (*bridge_delayed_func_t)(struct bridge_softc *);
346
347 struct bridge_delayed_call {
348 	struct bridge_softc *bdc_sc; /* bridge passed to bdc_func */
349 	bridge_delayed_func_t bdc_func; /* Function to call */
350 	struct timespec bdc_ts; /* Time to call */
351 	u_int32_t bdc_flags; /* BDCF_* flags, see below */
352 	thread_call_t bdc_thread_call; /* underlying kernel thread call */
353 };
354
355 #define BDCF_OUTSTANDING 0x01 /* Delayed call has been scheduled */
356 #define BDCF_CANCELLING 0x02 /* May be waiting for call completion */
357
358
359 /*
360 * Software state for each bridge.
361 */
362 LIST_HEAD(_bridge_rtnode_list, bridge_rtnode);
363
364 typedef struct {
365 	struct _bridge_rtnode_list *bb_rthash; /* our forwarding table */
366 	struct _bridge_rtnode_list bb_rtlist; /* list version of above */
367 	uint32_t bb_rthash_key; /* key for hash */
368 	uint32_t bb_rthash_size; /* size of the hash table */
369 	struct bridge_delayed_call bb_aging_timer; /* delayed call: bridge_aging_timer */
370 	struct bridge_delayed_call bb_resize_call; /* delayed call: rt hash resize */
371 	TAILQ_HEAD(, bridge_iflist) bb_spanlist; /* span ports list */
372 	struct bstp_state bb_stp; /* STP state */
373 	bpf_packet_func bb_bpf_input; /* BPF input tap; NULL when no tap set */
374 	bpf_packet_func bb_bpf_output; /* BPF output tap; NULL when no tap set */
375 } bridge_bsd, *bridge_bsd_t;
376
377 #define sc_rthash sc_u.scu_bsd.bb_rthash
378 #define sc_rtlist sc_u.scu_bsd.bb_rtlist
379 #define sc_rthash_key sc_u.scu_bsd.bb_rthash_key
380 #define sc_rthash_size sc_u.scu_bsd.bb_rthash_size
381 #define sc_aging_timer sc_u.scu_bsd.bb_aging_timer
382 #define sc_resize_call sc_u.scu_bsd.bb_resize_call
383 #define sc_spanlist sc_u.scu_bsd.bb_spanlist
384 #define sc_stp sc_u.scu_bsd.bb_stp
385 #define sc_bpf_input sc_u.scu_bsd.bb_bpf_input
386 #define sc_bpf_output sc_u.scu_bsd.bb_bpf_output
387
388 struct bridge_softc {
389 	struct ifnet *sc_ifp; /* make this an interface */
390 	u_int32_t sc_flags; /* SCF_* flags, see below */
391 	union {
392 		bridge_bsd scu_bsd; /* state used when SCF_BSD_MODE is set */
393 	} sc_u;
394 	LIST_ENTRY(bridge_softc) sc_list; /* linkage on global bridge_list */
395 	decl_lck_mtx_data(, sc_mtx); /* softc mutex (BRIDGE_LOCK et al.) */
396 	void *sc_cv; /* sleep/wakeup channel for BRIDGE_XLOCK/BRIDGE_UNREF */
397 	uint32_t sc_brtmax; /* max # of addresses */
398 	uint32_t sc_brtcnt; /* cur. # of addresses */
399 	uint32_t sc_brttimeout; /* rt timeout in seconds */
400 	uint32_t sc_iflist_ref; /* refcount for sc_iflist */
401 	uint32_t sc_iflist_xcnt; /* refcount for sc_iflist */
402 	TAILQ_HEAD(, bridge_iflist) sc_iflist; /* member interface list */
403 	uint32_t sc_brtexceeded; /* # of cache drops */
404 	uint32_t sc_filter_flags; /* ipf and flags */
405 	struct ifnet *sc_ifaddr; /* member mac copied from */
406 	u_char sc_defaddr[6]; /* Default MAC address */
407 	char sc_if_xname[IFNAMSIZ]; /* interface name */
408 
409 #if BRIDGE_DEBUG
410 	/*
411 	 * Locking and unlocking calling history
412 	 */
413 	void *lock_lr[BR_LCKDBG_MAX]; /* ring of recent lock callers */
414 	int next_lock_lr; /* next slot in lock_lr */
415 	void *unlock_lr[BR_LCKDBG_MAX]; /* ring of recent unlock callers */
416 	int next_unlock_lr; /* next slot in unlock_lr */
417 #endif /* BRIDGE_DEBUG */
418 };
419
420 #define SCF_DETACHING 0x01
421 #define SCF_RESIZING 0x02
422 #define SCF_MEDIA_ACTIVE 0x04
423 #define SCF_BSD_MODE 0x08
424
425 static inline void
426 bridge_set_bsd_mode(struct bridge_softc * sc)
427 {
428 sc->sc_flags |= SCF_BSD_MODE;
429 }
430
431 static inline boolean_t
432 bridge_in_bsd_mode(const struct bridge_softc * sc)
433 {
434 return ((sc->sc_flags & SCF_BSD_MODE) != 0);
435 }
436
437 struct bridge_hostfilter_stats bridge_hostfilter_stats;
438
439 decl_lck_mtx_data(static, bridge_list_mtx);
440
441 static int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
442
443 static zone_t bridge_rtnode_pool = NULL;
444
445 static int bridge_clone_create(struct if_clone *, uint32_t, void *);
446 static int bridge_clone_destroy(struct ifnet *);
447
448 static errno_t bridge_ioctl(struct ifnet *, u_long, void *);
449 #if HAS_IF_CAP
450 static void bridge_mutecaps(struct bridge_softc *);
451 static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
452 int);
453 #endif
454 static errno_t bridge_set_tso(struct bridge_softc *);
455 __private_extern__ void bridge_ifdetach(struct bridge_iflist *, struct ifnet *);
456 static int bridge_init(struct ifnet *);
457 #if HAS_BRIDGE_DUMMYNET
458 static void bridge_dummynet(struct mbuf *, struct ifnet *);
459 #endif
460 static void bridge_ifstop(struct ifnet *, int);
461 static int bridge_output(struct ifnet *, struct mbuf *);
462 static void bridge_finalize_cksum(struct ifnet *, struct mbuf *);
463 static void bridge_start(struct ifnet *);
464 __private_extern__ errno_t bridge_input(struct ifnet *, struct mbuf *, void *);
465 #if BRIDGE_MEMBER_OUT_FILTER
466 static errno_t bridge_iff_output(void *, ifnet_t, protocol_family_t,
467 mbuf_t *);
468 static int bridge_member_output(struct ifnet *, struct mbuf *,
469 struct sockaddr *, struct rtentry *);
470 #endif
471 static int bridge_enqueue(struct bridge_softc *, struct ifnet *,
472 struct mbuf *);
473 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
474
475 static void bridge_forward(struct bridge_softc *, struct bridge_iflist *,
476 struct mbuf *);
477
478 static void bridge_aging_timer(struct bridge_softc *sc);
479
480 static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
481 struct mbuf *, int);
482 static void bridge_span(struct bridge_softc *, struct mbuf *);
483
484 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
485 uint16_t, struct bridge_iflist *, int, uint8_t);
486 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
487 uint16_t);
488 static void bridge_rttrim(struct bridge_softc *);
489 static void bridge_rtage(struct bridge_softc *);
490 static void bridge_rtflush(struct bridge_softc *, int);
491 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
492 uint16_t);
493
494 static int bridge_rtable_init(struct bridge_softc *);
495 static void bridge_rtable_fini(struct bridge_softc *);
496
497 static void bridge_rthash_resize(struct bridge_softc *);
498
499 static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
500 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
501 const uint8_t *, uint16_t);
502 static int bridge_rtnode_hash(struct bridge_softc *,
503 struct bridge_rtnode *);
504 static int bridge_rtnode_insert(struct bridge_softc *,
505 struct bridge_rtnode *);
506 static void bridge_rtnode_destroy(struct bridge_softc *,
507 struct bridge_rtnode *);
508 #if BRIDGESTP
509 static void bridge_rtable_expire(struct ifnet *, int);
510 static void bridge_state_change(struct ifnet *, int);
511 #endif /* BRIDGESTP */
512
513 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
514 const char *name);
515 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
516 struct ifnet *ifp);
517 static void bridge_delete_member(struct bridge_softc *,
518 struct bridge_iflist *, int);
519 static void bridge_delete_span(struct bridge_softc *,
520 struct bridge_iflist *);
521
522 static int bridge_ioctl_add(struct bridge_softc *, void *);
523 static int bridge_ioctl_del(struct bridge_softc *, void *);
524 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
525 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
526 static int bridge_ioctl_scache(struct bridge_softc *, void *);
527 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
528 static int bridge_ioctl_gifs32(struct bridge_softc *, void *);
529 static int bridge_ioctl_gifs64(struct bridge_softc *, void *);
530 static int bridge_ioctl_rts32(struct bridge_softc *, void *);
531 static int bridge_ioctl_rts64(struct bridge_softc *, void *);
532 static int bridge_ioctl_saddr32(struct bridge_softc *, void *);
533 static int bridge_ioctl_saddr64(struct bridge_softc *, void *);
534 static int bridge_ioctl_sto(struct bridge_softc *, void *);
535 static int bridge_ioctl_gto(struct bridge_softc *, void *);
536 static int bridge_ioctl_daddr32(struct bridge_softc *, void *);
537 static int bridge_ioctl_daddr64(struct bridge_softc *, void *);
538 static int bridge_ioctl_flush(struct bridge_softc *, void *);
539 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
540 static int bridge_ioctl_spri(struct bridge_softc *, void *);
541 static int bridge_ioctl_ght(struct bridge_softc *, void *);
542 static int bridge_ioctl_sht(struct bridge_softc *, void *);
543 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
544 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
545 static int bridge_ioctl_gma(struct bridge_softc *, void *);
546 static int bridge_ioctl_sma(struct bridge_softc *, void *);
547 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
548 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
549 static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
550 static int bridge_ioctl_addspan(struct bridge_softc *, void *);
551 static int bridge_ioctl_delspan(struct bridge_softc *, void *);
552 static int bridge_ioctl_gbparam32(struct bridge_softc *, void *);
553 static int bridge_ioctl_gbparam64(struct bridge_softc *, void *);
554 static int bridge_ioctl_grte(struct bridge_softc *, void *);
555 static int bridge_ioctl_gifsstp32(struct bridge_softc *, void *);
556 static int bridge_ioctl_gifsstp64(struct bridge_softc *, void *);
557 static int bridge_ioctl_sproto(struct bridge_softc *, void *);
558 static int bridge_ioctl_stxhc(struct bridge_softc *, void *);
559 static int bridge_ioctl_purge(struct bridge_softc *sc, void *);
560 static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
561 static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
562 static int bridge_ioctl_ghostfilter(struct bridge_softc *, void *);
563 static int bridge_ioctl_shostfilter(struct bridge_softc *, void *);
564 #ifdef PFIL_HOOKS
565 static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
566 int);
567 static int bridge_ip_checkbasic(struct mbuf **);
568 #ifdef INET6
569 static int bridge_ip6_checkbasic(struct mbuf **);
570 #endif /* INET6 */
571 static int bridge_fragment(struct ifnet *, struct mbuf *,
572 struct ether_header *, int, struct llc *);
573 #endif /* PFIL_HOOKS */
574
575 static errno_t bridge_set_bpf_tap(ifnet_t, bpf_tap_mode, bpf_packet_func);
576 __private_extern__ errno_t bridge_bpf_input(ifnet_t, struct mbuf *);
577 __private_extern__ errno_t bridge_bpf_output(ifnet_t, struct mbuf *);
578
579 static void bridge_detach(ifnet_t);
580 static void bridge_link_event(struct ifnet *, u_int32_t);
581 static void bridge_iflinkevent(struct ifnet *);
582 static u_int32_t bridge_updatelinkstatus(struct bridge_softc *);
583 static int interface_media_active(struct ifnet *);
584 static void bridge_schedule_delayed_call(struct bridge_delayed_call *);
585 static void bridge_cancel_delayed_call(struct bridge_delayed_call *);
586 static void bridge_cleanup_delayed_call(struct bridge_delayed_call *);
587 static int bridge_host_filter(struct bridge_iflist *, struct mbuf *);
588
589
590 #define m_copypacket(m, how) m_copym(m, 0, M_COPYALL, how)
591
592 /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
593 #define VLANTAGOF(_m) 0
594
595 u_int8_t bstp_etheraddr[ETHER_ADDR_LEN] =
596 { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
597
598 static u_int8_t ethernulladdr[ETHER_ADDR_LEN] =
599 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
600
601 #if BRIDGESTP
602 static struct bstp_cb_ops bridge_ops = {
603 .bcb_state = bridge_state_change,
604 .bcb_rtage = bridge_rtable_expire
605 };
606 #endif /* BRIDGESTP */
607
608 SYSCTL_DECL(_net_link);
609 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
610 "Bridge");
611
612 static int bridge_inherit_mac = 0; /* share MAC with first bridge member */
613 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
614 CTLFLAG_RW|CTLFLAG_LOCKED,
615 &bridge_inherit_mac, 0,
616 "Inherit MAC address from the first bridge member");
617
618 SYSCTL_INT(_net_link_bridge, OID_AUTO, rtable_prune_period,
619 CTLFLAG_RW|CTLFLAG_LOCKED,
620 &bridge_rtable_prune_period, 0,
621 "Interval between pruning of routing table");
622
623 static unsigned int bridge_rtable_hash_size_max = BRIDGE_RTHASH_SIZE_MAX;
624 SYSCTL_UINT(_net_link_bridge, OID_AUTO, rtable_hash_size_max,
625 CTLFLAG_RW|CTLFLAG_LOCKED,
626 &bridge_rtable_hash_size_max, 0,
627 "Maximum size of the routing hash table");
628
629 #if BRIDGE_DEBUG_DELAYED_CALLBACK
630 static int bridge_delayed_callback_delay = 0;
631 SYSCTL_INT(_net_link_bridge, OID_AUTO, delayed_callback_delay,
632 CTLFLAG_RW|CTLFLAG_LOCKED,
633 &bridge_delayed_callback_delay, 0,
634 "Delay before calling delayed function");
635 #endif
636
637 static int bridge_bsd_mode = 1;
638 #if (DEVELOPMENT || DEBUG)
639 SYSCTL_INT(_net_link_bridge, OID_AUTO, bsd_mode,
640 CTLFLAG_RW|CTLFLAG_LOCKED,
641 &bridge_bsd_mode, 0,
642 "Bridge using bsd mode");
643 #endif /* (DEVELOPMENT || DEBUG) */
644
645 SYSCTL_STRUCT(_net_link_bridge, OID_AUTO,
646 hostfilterstats, CTLFLAG_RD | CTLFLAG_LOCKED,
647 &bridge_hostfilter_stats, bridge_hostfilter_stats, "");
648
649 #if defined(PFIL_HOOKS)
650 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
651 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
652 static int pfil_member = 1; /* run pfil hooks on the member interface */
653 static int pfil_ipfw = 0; /* layer2 filter with ipfw */
654 static int pfil_ipfw_arp = 0; /* layer2 filter with ipfw */
655 static int pfil_local_phys = 0; /* run pfil hooks on the physical interface */
656 /* for locally destined packets */
657 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW|CTLFLAG_LOCKED,
658 &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
659 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW|CTLFLAG_LOCKED,
660 &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2");
661 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW|CTLFLAG_LOCKED,
662 &pfil_bridge, 0, "Packet filter on the bridge interface");
663 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW|CTLFLAG_LOCKED,
664 &pfil_member, 0, "Packet filter on the member interface");
665 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
666 CTLFLAG_RW|CTLFLAG_LOCKED, &pfil_local_phys, 0,
667 "Packet filter on the physical interface for locally destined packets");
668 #endif /* PFIL_HOOKS */
669
670 #if BRIDGESTP
671 static int log_stp = 0; /* log STP state changes */
672 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW,
673 &log_stp, 0, "Log STP state changes");
674 #endif /* BRIDGESTP */
675
/*
 * Descriptor for one bridge ioctl command: the handler, the size of
 * its user-supplied argument structure, and BC_F_* copy/privilege flags.
 */
676 struct bridge_control {
677 	int (*bc_func)(struct bridge_softc *, void *); /* command handler */
678 	unsigned int bc_argsize; /* size of argument structure */
679 	unsigned int bc_flags; /* BC_F_* flags, see below */
680 };
681
682 #define BC_F_COPYIN 0x01 /* copy arguments in */
683 #define BC_F_COPYOUT 0x02 /* copy arguments out */
684 #define BC_F_SUSER 0x04 /* do super-user check */
685
/*
 * Bridge ioctl dispatch table for 32-bit user processes, indexed by
 * command number. NOTE(review): entry order is the ABI — it must stay
 * in lockstep with bridge_control_table64 (same command at the same
 * index), differing only in the 32-bit argument structure variants.
 */
686 static const struct bridge_control bridge_control_table32[] = {
687 { bridge_ioctl_add, sizeof (struct ifbreq), /* 0 */
688 BC_F_COPYIN|BC_F_SUSER },
689 { bridge_ioctl_del, sizeof (struct ifbreq),
690 BC_F_COPYIN|BC_F_SUSER },
691 
692 { bridge_ioctl_gifflags, sizeof (struct ifbreq),
693 BC_F_COPYIN|BC_F_COPYOUT },
694 { bridge_ioctl_sifflags, sizeof (struct ifbreq),
695 BC_F_COPYIN|BC_F_SUSER },
696 
697 { bridge_ioctl_scache, sizeof (struct ifbrparam),
698 BC_F_COPYIN|BC_F_SUSER },
699 { bridge_ioctl_gcache, sizeof (struct ifbrparam),
700 BC_F_COPYOUT },
701 
702 { bridge_ioctl_gifs32, sizeof (struct ifbifconf32),
703 BC_F_COPYIN|BC_F_COPYOUT },
704 { bridge_ioctl_rts32, sizeof (struct ifbaconf32),
705 BC_F_COPYIN|BC_F_COPYOUT },
706 
707 { bridge_ioctl_saddr32, sizeof (struct ifbareq32),
708 BC_F_COPYIN|BC_F_SUSER },
709 
710 { bridge_ioctl_sto, sizeof (struct ifbrparam),
711 BC_F_COPYIN|BC_F_SUSER },
712 { bridge_ioctl_gto, sizeof (struct ifbrparam), /* 10 */
713 BC_F_COPYOUT },
714 
715 { bridge_ioctl_daddr32, sizeof (struct ifbareq32),
716 BC_F_COPYIN|BC_F_SUSER },
717 
718 { bridge_ioctl_flush, sizeof (struct ifbreq),
719 BC_F_COPYIN|BC_F_SUSER },
720 
721 { bridge_ioctl_gpri, sizeof (struct ifbrparam),
722 BC_F_COPYOUT },
723 { bridge_ioctl_spri, sizeof (struct ifbrparam),
724 BC_F_COPYIN|BC_F_SUSER },
725 
726 { bridge_ioctl_ght, sizeof (struct ifbrparam),
727 BC_F_COPYOUT },
728 { bridge_ioctl_sht, sizeof (struct ifbrparam),
729 BC_F_COPYIN|BC_F_SUSER },
730 
731 { bridge_ioctl_gfd, sizeof (struct ifbrparam),
732 BC_F_COPYOUT },
733 { bridge_ioctl_sfd, sizeof (struct ifbrparam),
734 BC_F_COPYIN|BC_F_SUSER },
735 
736 { bridge_ioctl_gma, sizeof (struct ifbrparam),
737 BC_F_COPYOUT },
738 { bridge_ioctl_sma, sizeof (struct ifbrparam), /* 20 */
739 BC_F_COPYIN|BC_F_SUSER },
740 
741 { bridge_ioctl_sifprio, sizeof (struct ifbreq),
742 BC_F_COPYIN|BC_F_SUSER },
743 
744 { bridge_ioctl_sifcost, sizeof (struct ifbreq),
745 BC_F_COPYIN|BC_F_SUSER },
746 
747 { bridge_ioctl_gfilt, sizeof (struct ifbrparam),
748 BC_F_COPYOUT },
749 { bridge_ioctl_sfilt, sizeof (struct ifbrparam),
750 BC_F_COPYIN|BC_F_SUSER },
751 
752 { bridge_ioctl_purge, sizeof (struct ifbreq),
753 BC_F_COPYIN|BC_F_SUSER },
754 
755 { bridge_ioctl_addspan, sizeof (struct ifbreq),
756 BC_F_COPYIN|BC_F_SUSER },
757 { bridge_ioctl_delspan, sizeof (struct ifbreq),
758 BC_F_COPYIN|BC_F_SUSER },
759 
760 { bridge_ioctl_gbparam32, sizeof (struct ifbropreq32),
761 BC_F_COPYOUT },
762 
763 { bridge_ioctl_grte, sizeof (struct ifbrparam),
764 BC_F_COPYOUT },
765 
766 { bridge_ioctl_gifsstp32, sizeof (struct ifbpstpconf32), /* 30 */
767 BC_F_COPYIN|BC_F_COPYOUT },
768 
769 { bridge_ioctl_sproto, sizeof (struct ifbrparam),
770 BC_F_COPYIN|BC_F_SUSER },
771 
772 { bridge_ioctl_stxhc, sizeof (struct ifbrparam),
773 BC_F_COPYIN|BC_F_SUSER },
774 
775 { bridge_ioctl_sifmaxaddr, sizeof (struct ifbreq),
776 BC_F_COPYIN|BC_F_SUSER },
777 
778 { bridge_ioctl_ghostfilter, sizeof (struct ifbrhostfilter),
779 BC_F_COPYIN|BC_F_COPYOUT },
780 { bridge_ioctl_shostfilter, sizeof (struct ifbrhostfilter),
781 BC_F_COPYIN|BC_F_SUSER },
782 };
783
/*
 * Bridge ioctl dispatch table for 64-bit user processes, indexed by
 * command number. NOTE(review): entry order is the ABI — it must stay
 * in lockstep with bridge_control_table32 (same command at the same
 * index), differing only in the 64-bit argument structure variants.
 */
784 static const struct bridge_control bridge_control_table64[] = {
785 { bridge_ioctl_add, sizeof (struct ifbreq), /* 0 */
786 BC_F_COPYIN|BC_F_SUSER },
787 { bridge_ioctl_del, sizeof (struct ifbreq),
788 BC_F_COPYIN|BC_F_SUSER },
789 
790 { bridge_ioctl_gifflags, sizeof (struct ifbreq),
791 BC_F_COPYIN|BC_F_COPYOUT },
792 { bridge_ioctl_sifflags, sizeof (struct ifbreq),
793 BC_F_COPYIN|BC_F_SUSER },
794 
795 { bridge_ioctl_scache, sizeof (struct ifbrparam),
796 BC_F_COPYIN|BC_F_SUSER },
797 { bridge_ioctl_gcache, sizeof (struct ifbrparam),
798 BC_F_COPYOUT },
799 
800 { bridge_ioctl_gifs64, sizeof (struct ifbifconf64),
801 BC_F_COPYIN|BC_F_COPYOUT },
802 { bridge_ioctl_rts64, sizeof (struct ifbaconf64),
803 BC_F_COPYIN|BC_F_COPYOUT },
804 
805 { bridge_ioctl_saddr64, sizeof (struct ifbareq64),
806 BC_F_COPYIN|BC_F_SUSER },
807 
808 { bridge_ioctl_sto, sizeof (struct ifbrparam),
809 BC_F_COPYIN|BC_F_SUSER },
810 { bridge_ioctl_gto, sizeof (struct ifbrparam), /* 10 */
811 BC_F_COPYOUT },
812 
813 { bridge_ioctl_daddr64, sizeof (struct ifbareq64),
814 BC_F_COPYIN|BC_F_SUSER },
815 
816 { bridge_ioctl_flush, sizeof (struct ifbreq),
817 BC_F_COPYIN|BC_F_SUSER },
818 
819 { bridge_ioctl_gpri, sizeof (struct ifbrparam),
820 BC_F_COPYOUT },
821 { bridge_ioctl_spri, sizeof (struct ifbrparam),
822 BC_F_COPYIN|BC_F_SUSER },
823 
824 { bridge_ioctl_ght, sizeof (struct ifbrparam),
825 BC_F_COPYOUT },
826 { bridge_ioctl_sht, sizeof (struct ifbrparam),
827 BC_F_COPYIN|BC_F_SUSER },
828 
829 { bridge_ioctl_gfd, sizeof (struct ifbrparam),
830 BC_F_COPYOUT },
831 { bridge_ioctl_sfd, sizeof (struct ifbrparam),
832 BC_F_COPYIN|BC_F_SUSER },
833 
834 { bridge_ioctl_gma, sizeof (struct ifbrparam),
835 BC_F_COPYOUT },
836 { bridge_ioctl_sma, sizeof (struct ifbrparam), /* 20 */
837 BC_F_COPYIN|BC_F_SUSER },
838 
839 { bridge_ioctl_sifprio, sizeof (struct ifbreq),
840 BC_F_COPYIN|BC_F_SUSER },
841 
842 { bridge_ioctl_sifcost, sizeof (struct ifbreq),
843 BC_F_COPYIN|BC_F_SUSER },
844 
845 { bridge_ioctl_gfilt, sizeof (struct ifbrparam),
846 BC_F_COPYOUT },
847 { bridge_ioctl_sfilt, sizeof (struct ifbrparam),
848 BC_F_COPYIN|BC_F_SUSER },
849 
850 { bridge_ioctl_purge, sizeof (struct ifbreq),
851 BC_F_COPYIN|BC_F_SUSER },
852 
853 { bridge_ioctl_addspan, sizeof (struct ifbreq),
854 BC_F_COPYIN|BC_F_SUSER },
855 { bridge_ioctl_delspan, sizeof (struct ifbreq),
856 BC_F_COPYIN|BC_F_SUSER },
857 
858 { bridge_ioctl_gbparam64, sizeof (struct ifbropreq64),
859 BC_F_COPYOUT },
860 
861 { bridge_ioctl_grte, sizeof (struct ifbrparam),
862 BC_F_COPYOUT },
863 
864 { bridge_ioctl_gifsstp64, sizeof (struct ifbpstpconf64), /* 30 */
865 BC_F_COPYIN|BC_F_COPYOUT },
866 
867 { bridge_ioctl_sproto, sizeof (struct ifbrparam),
868 BC_F_COPYIN|BC_F_SUSER },
869 
870 { bridge_ioctl_stxhc, sizeof (struct ifbrparam),
871 BC_F_COPYIN|BC_F_SUSER },
872 
873 { bridge_ioctl_sifmaxaddr, sizeof (struct ifbreq),
874 BC_F_COPYIN|BC_F_SUSER },
875 
876 { bridge_ioctl_ghostfilter, sizeof (struct ifbrhostfilter),
877 BC_F_COPYIN|BC_F_COPYOUT },
878 { bridge_ioctl_shostfilter, sizeof (struct ifbrhostfilter),
879 BC_F_COPYIN|BC_F_SUSER },
880 };
881
/* Number of entries in the ioctl dispatch tables (32- and 64-bit are parallel). */
static const unsigned int bridge_control_table_size =
	sizeof (bridge_control_table32) / sizeof (bridge_control_table32[0]);

/* Global list of all bridge instances; protected by bridge_list_mtx. */
static LIST_HEAD(, bridge_softc) bridge_list =
	LIST_HEAD_INITIALIZER(bridge_list);

/* Lock group/attributes shared by all bridge locks; initialized in bridgeattach(). */
static lck_grp_t *bridge_lock_grp = NULL;
static lck_attr_t *bridge_lock_attr = NULL;

/* Cloner handle returned by ifnet_clone_attach() in bridgeattach(). */
static if_clone_t bridge_cloner = NULL;

/*
 * When non-zero, newly created bridge interfaces use the TXSTART
 * (start callback) model instead of the legacy output model; consulted
 * at creation time in bridge_clone_create().
 */
static int if_bridge_txstart = 0;
SYSCTL_INT(_net_link_bridge, OID_AUTO, txstart, CTLFLAG_RW | CTLFLAG_LOCKED,
	&if_bridge_txstart, 0, "Bridge interface uses TXSTART model");

#if BRIDGE_DEBUG
/* Bitmask of BR_DBGF_* debug flags; settable via net.link.bridge.debug. */
static int if_bridge_debug = 0;
SYSCTL_INT(_net_link_bridge, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
	&if_bridge_debug, 0, "Bridge debug");

/* Debug-only print helpers, defined below. */
static void printf_ether_header(struct ether_header *);
static void printf_mbuf_data(mbuf_t, size_t, size_t);
static void printf_mbuf_pkthdr(mbuf_t, const char *, const char *);
static void printf_mbuf(mbuf_t, const char *, const char *);
static void link_print(struct bridge_softc * sc);

/* Debug wrappers that record lock/unlock call sites in the softc. */
static void bridge_lock(struct bridge_softc *);
static void bridge_unlock(struct bridge_softc *);
static int bridge_lock2ref(struct bridge_softc *);
static void bridge_unref(struct bridge_softc *);
static void bridge_xlock(struct bridge_softc *);
static void bridge_xdrop(struct bridge_softc *);
914
/*
 * bridge_lock:
 *
 * Acquire the bridge mutex and record the caller's return address in
 * the softc's lock-debugging ring buffer for post-mortem analysis.
 */
static void
bridge_lock(struct bridge_softc *sc)
{
	/* caller's PC, kept for lock debugging */
	void *lr_saved = __builtin_return_address(0);

	BRIDGE_LOCK_ASSERT_NOTHELD(sc);

	_BRIDGE_LOCK(sc);

	/* ring buffer of the last SO_LCKDBG_MAX lock call sites */
	sc->lock_lr[sc->next_lock_lr] = lr_saved;
	sc->next_lock_lr = (sc->next_lock_lr+1) % SO_LCKDBG_MAX;
}
927
/*
 * bridge_unlock:
 *
 * Record the caller's return address in the unlock ring buffer, then
 * release the bridge mutex.
 */
static void
bridge_unlock(struct bridge_softc *sc)
{
	/* caller's PC, kept for lock debugging */
	void *lr_saved = __builtin_return_address(0);

	BRIDGE_LOCK_ASSERT_HELD(sc);

	/* ring buffer of the last SO_LCKDBG_MAX unlock call sites */
	sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
	sc->next_unlock_lr = (sc->next_unlock_lr+1) % SO_LCKDBG_MAX;

	_BRIDGE_UNLOCK(sc);
}
940
/*
 * bridge_lock2ref:
 *
 * Convert the held bridge mutex into a reader reference
 * (sc_iflist_ref) and drop the mutex.  Fails with EBUSY when an
 * exclusive waiter is pending (sc_iflist_xcnt > 0) so that
 * bridge_xlock() can drain readers.  The mutex is released in either
 * case; on success the caller must later call bridge_unref().
 */
static int
bridge_lock2ref(struct bridge_softc *sc)
{
	int error = 0;
	void *lr_saved = __builtin_return_address(0);

	BRIDGE_LOCK_ASSERT_HELD(sc);

	if (sc->sc_iflist_xcnt > 0)
		error = EBUSY;
	else
		sc->sc_iflist_ref++;

	/* record the unlock call site, as in bridge_unlock() */
	sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
	sc->next_unlock_lr = (sc->next_unlock_lr+1) % SO_LCKDBG_MAX;

	_BRIDGE_UNLOCK(sc);

	return (error);
}
961
/*
 * bridge_unref:
 *
 * Drop a reader reference taken by bridge_lock2ref().  Briefly takes
 * the mutex to decrement sc_iflist_ref; if this was the last reference
 * and an exclusive waiter is pending, wake it (after dropping the
 * mutex, to avoid waking a thread that immediately blocks on it).
 */
static void
bridge_unref(struct bridge_softc *sc)
{
	void *lr_saved = __builtin_return_address(0);

	BRIDGE_LOCK_ASSERT_NOTHELD(sc);

	_BRIDGE_LOCK(sc);
	sc->lock_lr[sc->next_lock_lr] = lr_saved;
	sc->next_lock_lr = (sc->next_lock_lr+1) % SO_LCKDBG_MAX;

	sc->sc_iflist_ref--;

	sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
	sc->next_unlock_lr = (sc->next_unlock_lr+1) % SO_LCKDBG_MAX;
	if ((sc->sc_iflist_xcnt > 0) && (sc->sc_iflist_ref == 0)) {
		_BRIDGE_UNLOCK(sc);
		/* last reader gone: unblock bridge_xlock() */
		wakeup(&sc->sc_cv);
	} else
		_BRIDGE_UNLOCK(sc);
}
983
/*
 * bridge_xlock:
 *
 * Acquire exclusive access to the member list.  With the mutex held,
 * raise sc_iflist_xcnt (which makes bridge_lock2ref() fail with EBUSY
 * so no new readers can enter), then sleep until all outstanding
 * reader references drain.  msleep() drops and reacquires sc_mtx, so
 * both transitions are recorded in the lock-debug ring buffers.
 * Balanced by bridge_xdrop().
 */
static void
bridge_xlock(struct bridge_softc *sc)
{
	void *lr_saved = __builtin_return_address(0);

	BRIDGE_LOCK_ASSERT_HELD(sc);

	sc->sc_iflist_xcnt++;
	while (sc->sc_iflist_ref > 0) {
		sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
		sc->next_unlock_lr = (sc->next_unlock_lr+1) % SO_LCKDBG_MAX;

		msleep(&sc->sc_cv, &sc->sc_mtx, PZERO, "BRIDGE_XLOCK", NULL);

		sc->lock_lr[sc->next_lock_lr] = lr_saved;
		sc->next_lock_lr = (sc->next_lock_lr+1) % SO_LCKDBG_MAX;
	}
}
1002
/*
 * bridge_xdrop:
 *
 * Release the exclusive claim taken by bridge_xlock(); once the count
 * reaches zero, bridge_lock2ref() can hand out reader references again.
 */
static void
bridge_xdrop(struct bridge_softc *sc)
{
	BRIDGE_LOCK_ASSERT_HELD(sc);

	sc->sc_iflist_xcnt--;
}
1010
1011 void
1012 printf_mbuf_pkthdr(mbuf_t m, const char *prefix, const char *suffix)
1013 {
1014 if (m)
1015 printf("%spktlen: %u rcvif: 0x%llx header: 0x%llx "
1016 "nextpkt: 0x%llx%s",
1017 prefix ? prefix : "", (unsigned int)mbuf_pkthdr_len(m),
1018 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m)),
1019 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_header(m)),
1020 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_nextpkt(m)),
1021 suffix ? suffix : "");
1022 else
1023 printf("%s<NULL>%s\n", prefix, suffix);
1024 }
1025
1026 void
1027 printf_mbuf(mbuf_t m, const char *prefix, const char *suffix)
1028 {
1029 if (m) {
1030 printf("%s0x%llx type: %u flags: 0x%x len: %u data: 0x%llx "
1031 "maxlen: %u datastart: 0x%llx next: 0x%llx%s",
1032 prefix ? prefix : "", (uint64_t)VM_KERNEL_ADDRPERM(m),
1033 mbuf_type(m), mbuf_flags(m), (unsigned int)mbuf_len(m),
1034 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)),
1035 (unsigned int)mbuf_maxlen(m),
1036 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_datastart(m)),
1037 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_next(m)),
1038 !suffix || (mbuf_flags(m) & MBUF_PKTHDR) ? "" : suffix);
1039 if ((mbuf_flags(m) & MBUF_PKTHDR))
1040 printf_mbuf_pkthdr(m, " ", suffix);
1041 } else
1042 printf("%s<NULL>%s\n", prefix, suffix);
1043 }
1044
1045 void
1046 printf_mbuf_data(mbuf_t m, size_t offset, size_t len)
1047 {
1048 mbuf_t n;
1049 size_t i, j;
1050 size_t pktlen, mlen, maxlen;
1051 unsigned char *ptr;
1052
1053 pktlen = mbuf_pkthdr_len(m);
1054
1055 if (offset > pktlen)
1056 return;
1057
1058 maxlen = (pktlen - offset > len) ? len : pktlen - offset;
1059 n = m;
1060 mlen = mbuf_len(n);
1061 ptr = mbuf_data(n);
1062 for (i = 0, j = 0; i < maxlen; i++, j++) {
1063 if (j >= mlen) {
1064 n = mbuf_next(n);
1065 if (n == 0)
1066 break;
1067 ptr = mbuf_data(n);
1068 mlen = mbuf_len(n);
1069 j = 0;
1070 }
1071 if (i >= offset) {
1072 printf("%02x%s", ptr[j], i % 2 ? " " : "");
1073 }
1074 }
1075 }
1076
/*
 * printf_ether_header:
 *
 * Debug helper: print an Ethernet header as
 * "src-mac > dst-mac ethertype " (tcpdump-like ordering).
 */
static void
printf_ether_header(struct ether_header *eh)
{
	const unsigned char *src = eh->ether_shost;
	const unsigned char *dst = eh->ether_dhost;

	printf("%02x:%02x:%02x:%02x:%02x:%02x > "
	    "%02x:%02x:%02x:%02x:%02x:%02x 0x%04x ",
	    src[0], src[1], src[2],
	    src[3], src[4], src[5],
	    dst[0], dst[1], dst[2],
	    dst[3], dst[4], dst[5],
	    ntohs(eh->ether_type));
}
1088
/*
 * link_print:
 *
 * Debug helper: build a sockaddr_dl for the bridge's name and default
 * MAC address and print its fields plus the link-layer address.
 *
 * NOTE(review): sdl_buffer is declared as an array of uint32_t but is
 * sized in bytes (offsetof + IFNAMSIZ + ETHER_ADDR_LEN), making it
 * roughly 4x larger than needed — harmless but worth confirming the
 * intent.  Also, sdl_len is set to offsetof(..., sdl_data) and so does
 * not cover the name/address bytes copied below; verify that is
 * acceptable for this debug-only consumer.
 */
static void
link_print(struct bridge_softc * sc)
{
	int i;
	uint32_t sdl_buffer[offsetof(struct sockaddr_dl, sdl_data) +
	    IFNAMSIZ + ETHER_ADDR_LEN];
	struct sockaddr_dl *sdl = (struct sockaddr_dl *)sdl_buffer;

	memset(sdl, 0, sizeof (sdl_buffer));
	sdl->sdl_family = AF_LINK;
	sdl->sdl_nlen = strlen(sc->sc_if_xname);
	sdl->sdl_alen = ETHER_ADDR_LEN;
	sdl->sdl_len = offsetof(struct sockaddr_dl, sdl_data);
	/* interface name followed by the link-layer address */
	memcpy(sdl->sdl_data, sc->sc_if_xname, sdl->sdl_nlen);
	memcpy(LLADDR(sdl), sc->sc_defaddr, ETHER_ADDR_LEN);

#if 1
	printf("sdl len %d index %d family %d type 0x%x nlen %d alen %d"
	    " slen %d addr ", sdl->sdl_len, sdl->sdl_index,
	    sdl->sdl_family, sdl->sdl_type, sdl->sdl_nlen,
	    sdl->sdl_alen, sdl->sdl_slen);
#endif
	/* print the MAC address colon-separated */
	for (i = 0; i < sdl->sdl_alen; i++)
		printf("%s%x", i ? ":" : "", (CONST_LLADDR(sdl))[i]);
	printf("\n");
}
1115
1116 #endif /* BRIDGE_DEBUG */
1117
1118 /*
1119 * bridgeattach:
1120 *
1121 * Pseudo-device attach routine.
1122 */
__private_extern__ int
bridgeattach(int n)
{
#pragma unused(n)
	int error;
	lck_grp_attr_t *lck_grp_attr = NULL;
	struct ifnet_clone_params ifnet_clone_params;

	/* zone for bridge routing (learned address) table entries */
	bridge_rtnode_pool = zinit(sizeof (struct bridge_rtnode),
	    1024 * sizeof (struct bridge_rtnode), 0, "bridge_rtnode");
	zone_change(bridge_rtnode_pool, Z_CALLERACCT, FALSE);

	/* set up the lock group/attributes shared by all bridge locks */
	lck_grp_attr = lck_grp_attr_alloc_init();

	bridge_lock_grp = lck_grp_alloc_init("if_bridge", lck_grp_attr);

	bridge_lock_attr = lck_attr_alloc_init();

#if BRIDGE_DEBUG
	lck_attr_setdebug(bridge_lock_attr);
#endif

	lck_mtx_init(&bridge_list_mtx, bridge_lock_grp, bridge_lock_attr);

	/* can free the attributes once we've allocated the group lock */
	lck_grp_attr_free(lck_grp_attr);

	LIST_INIT(&bridge_list);

#if BRIDGESTP
	bstp_sys_init();
#endif /* BRIDGESTP */

	/* register the "bridgeN" cloner so ifconfig can create instances */
	ifnet_clone_params.ifc_name = "bridge";
	ifnet_clone_params.ifc_create = bridge_clone_create;
	ifnet_clone_params.ifc_destroy = bridge_clone_destroy;

	error = ifnet_clone_attach(&ifnet_clone_params, &bridge_cloner);
	if (error != 0)
		printf("%s: ifnet_clone_attach failed %d\n", __func__, error);

	return (error);
}
1166
1167 #if defined(PFIL_HOOKS)
1168 /*
1169 * handler for net.link.bridge.pfil_ipfw
1170 */
1171 static int
1172 sysctl_pfil_ipfw SYSCTL_HANDLER_ARGS
1173 {
1174 #pragma unused(arg1, arg2)
1175 int enable = pfil_ipfw;
1176 int error;
1177
1178 error = sysctl_handle_int(oidp, &enable, 0, req);
1179 enable = (enable) ? 1 : 0;
1180
1181 if (enable != pfil_ipfw) {
1182 pfil_ipfw = enable;
1183
1184 /*
1185 * Disable pfil so that ipfw doesnt run twice, if the user
1186 * really wants both then they can re-enable pfil_bridge and/or
1187 * pfil_member. Also allow non-ip packets as ipfw can filter by
1188 * layer2 type.
1189 */
1190 if (pfil_ipfw) {
1191 pfil_onlyip = 0;
1192 pfil_bridge = 0;
1193 pfil_member = 0;
1194 }
1195 }
1196
1197 return (error);
1198 }
1199
1200 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW,
1201 &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW");
1202 #endif /* PFIL_HOOKS */
1203
1204 static errno_t
1205 bridge_ifnet_set_attrs(struct ifnet * ifp)
1206 {
1207 errno_t error;
1208
1209 error = ifnet_set_mtu(ifp, ETHERMTU);
1210 if (error != 0) {
1211 printf("%s: ifnet_set_mtu failed %d\n", __func__, error);
1212 goto done;
1213 }
1214 error = ifnet_set_addrlen(ifp, ETHER_ADDR_LEN);
1215 if (error != 0) {
1216 printf("%s: ifnet_set_addrlen failed %d\n", __func__, error);
1217 goto done;
1218 }
1219 error = ifnet_set_hdrlen(ifp, ETHER_HDR_LEN);
1220 if (error != 0) {
1221 printf("%s: ifnet_set_hdrlen failed %d\n", __func__, error);
1222 goto done;
1223 }
1224 error = ifnet_set_flags(ifp,
1225 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST,
1226 0xffff);
1227
1228 if (error != 0) {
1229 printf("%s: ifnet_set_flags failed %d\n", __func__, error);
1230 goto done;
1231 }
1232 done:
1233 return (error);
1234 }
1235
1236 /*
1237 * bridge_clone_create:
1238 *
1239 * Create a new bridge instance.
1240 */
static int
bridge_clone_create(struct if_clone *ifc, uint32_t unit, void *params)
{
#pragma unused(params)
	struct ifnet *ifp = NULL;
	struct bridge_softc *sc, *sc2;
	struct ifnet_init_eparams init_params;
	errno_t error = 0;
	uint8_t eth_hostid[ETHER_ADDR_LEN];
	int fb, retry, has_hostid;

	/* M_WAITOK: cannot fail; M_ZERO: softc starts zeroed */
	sc = _MALLOC(sizeof (*sc), M_DEVBUF, M_WAITOK | M_ZERO);

	lck_mtx_init(&sc->sc_mtx, bridge_lock_grp, bridge_lock_attr);
	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
	sc->sc_filter_flags = IFBF_FILT_DEFAULT;
#ifndef BRIDGE_IPF
	/*
	 * For backwards compatibility with previous behaviour...
	 * Switch off filtering on the bridge itself if BRIDGE_IPF is
	 * not defined.
	 */
	sc->sc_filter_flags &= ~IFBF_FILT_USEIPF;
#endif

	if (bridge_bsd_mode != 0) {
		bridge_set_bsd_mode(sc);
	}

	TAILQ_INIT(&sc->sc_iflist);

	/* use the interface name as the unique id for ifp recycle */
	snprintf(sc->sc_if_xname, sizeof (sc->sc_if_xname), "%s%d",
	    ifc->ifc_name, unit);
	bzero(&init_params, sizeof (init_params));
	init_params.ver = IFNET_INIT_CURRENT_VERSION;
	init_params.len = sizeof (init_params);
	if (bridge_in_bsd_mode(sc)) {
		/* Initialize our routing table. */
		error = bridge_rtable_init(sc);
		if (error != 0) {
			printf("%s: bridge_rtable_init failed %d\n",
			    __func__, error);
			goto done;
		}
		TAILQ_INIT(&sc->sc_spanlist);
		/* pick the TX model based on the if_bridge_txstart sysctl */
		if (if_bridge_txstart) {
			init_params.start = bridge_start;
		} else {
			init_params.flags = IFNET_INIT_LEGACY;
			init_params.output = bridge_output;
		}
		init_params.set_bpf_tap = bridge_set_bpf_tap;
	}
	init_params.uniqueid = sc->sc_if_xname;
	init_params.uniqueid_len = strlen(sc->sc_if_xname);
	init_params.sndq_maxlen = IFQ_MAXLEN;
	init_params.name = ifc->ifc_name;
	init_params.unit = unit;
	init_params.family = IFNET_FAMILY_ETHERNET;
	init_params.type = IFT_BRIDGE;
	/* reuse the standard Ethernet protocol hooks */
	init_params.demux = ether_demux;
	init_params.add_proto = ether_add_proto;
	init_params.del_proto = ether_del_proto;
	init_params.check_multi = ether_check_multi;
	init_params.framer_extended = ether_frameout_extended;
	init_params.softc = sc;
	init_params.ioctl = bridge_ioctl;
	init_params.detach = bridge_detach;
	init_params.broadcast_addr = etherbroadcastaddr;
	init_params.broadcast_len = ETHER_ADDR_LEN;

	if (bridge_in_bsd_mode(sc)) {
		error = ifnet_allocate_extended(&init_params, &ifp);
		if (error != 0) {
			printf("%s: ifnet_allocate failed %d\n",
			    __func__, error);
			goto done;
		}
		sc->sc_ifp = ifp;
		error = bridge_ifnet_set_attrs(ifp);
		if (error != 0) {
			printf("%s: bridge_ifnet_set_attrs failed %d\n",
			    __func__, error);
			goto done;
		}
	}

	/*
	 * Generate an ethernet address with a locally administered address.
	 *
	 * Since we are using random ethernet addresses for the bridge, it is
	 * possible that we might have address collisions, so make sure that
	 * this hardware address isn't already in use on another bridge.
	 * The first try uses the "hostid" and falls back to read_frandom();
	 * for "hostid", we use the MAC address of the first-encountered
	 * Ethernet-type interface that is currently configured.
	 *
	 * NOTE(review): when !bridge_in_bsd_mode(sc), ifp is never
	 * allocated above, yet the hostid branch below dereferences
	 * ifp->if_unit (and ifnet_set_lladdr_and_type(ifp, ...) is
	 * called later) — confirm the non-BSD mode path cannot reach
	 * this function, or that ifp is set elsewhere.
	 */
	fb = 0;
	has_hostid = (uuid_get_ethernet(&eth_hostid[0]) == 0);
	for (retry = 1; retry != 0; ) {
		if (fb || has_hostid == 0) {
			/* random address; force unicast + locally administered */
			read_frandom(&sc->sc_defaddr, ETHER_ADDR_LEN);
			sc->sc_defaddr[0] &= ~1; /* clear multicast bit */
			sc->sc_defaddr[0] |= 2;	 /* set the LAA bit */
		} else {
			/* derive from the host's Ethernet MAC, perturbed per unit */
			bcopy(&eth_hostid[0], &sc->sc_defaddr,
			    ETHER_ADDR_LEN);
			sc->sc_defaddr[0] &= ~1; /* clear multicast bit */
			sc->sc_defaddr[0] |= 2;	 /* set the LAA bit */
			sc->sc_defaddr[3] =	 /* stir it up a bit */
			    ((sc->sc_defaddr[3] & 0x0f) << 4) |
			    ((sc->sc_defaddr[3] & 0xf0) >> 4);
			/*
			 * Mix in the LSB as it's actually pretty significant,
			 * see rdar://14076061
			 */
			sc->sc_defaddr[4] =
			    (((sc->sc_defaddr[4] & 0x0f) << 4) |
			    ((sc->sc_defaddr[4] & 0xf0) >> 4)) ^
			    sc->sc_defaddr[5];
			sc->sc_defaddr[5] = ifp->if_unit & 0xff;
		}

		/* if this address collides with an existing bridge, retry
		 * (subsequent attempts always use the random path) */
		fb = 1;
		retry = 0;
		lck_mtx_lock(&bridge_list_mtx);
		LIST_FOREACH(sc2, &bridge_list, sc_list) {
			if (memcmp(sc->sc_defaddr,
			    IF_LLADDR(sc2->sc_ifp), ETHER_ADDR_LEN) == 0)
				retry = 1;
		}
		lck_mtx_unlock(&bridge_list_mtx);
	}

	sc->sc_flags &= ~SCF_MEDIA_ACTIVE;

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
		link_print(sc);
#endif
	if (bridge_in_bsd_mode(sc)) {
		error = ifnet_attach(ifp, NULL);
		if (error != 0) {
			printf("%s: ifnet_attach failed %d\n", __func__, error);
			goto done;
		}
	}

	error = ifnet_set_lladdr_and_type(ifp, sc->sc_defaddr, ETHER_ADDR_LEN,
	    IFT_ETHER);
	if (error != 0) {
		printf("%s: ifnet_set_lladdr_and_type failed %d\n", __func__,
		    error);
		goto done;
	}

	if (bridge_in_bsd_mode(sc)) {
		/* advertise checksum offload; TSO is derived from members */
		ifnet_set_offload(ifp,
		    IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP |
		    IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 | IFNET_MULTIPAGES);
		error = bridge_set_tso(sc);
		if (error != 0) {
			printf("%s: bridge_set_tso failed %d\n",
			    __func__, error);
			goto done;
		}
#if BRIDGESTP
		bstp_attach(&sc->sc_stp, &bridge_ops);
#endif /* BRIDGESTP */
	}

	lck_mtx_lock(&bridge_list_mtx);
	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
	lck_mtx_unlock(&bridge_list_mtx);

	/* attach as ethernet */
	error = bpf_attach(ifp, DLT_EN10MB, sizeof (struct ether_header),
	    NULL, NULL);

done:
	if (error != 0) {
		printf("%s failed error %d\n", __func__, error);
		/*
		 * Cleanup TBD
		 * NOTE(review): the softc (and possibly the rtable/ifnet)
		 * is leaked on the error paths above.
		 */
	}

	return (error);
}
1430
1431 /*
1432 * bridge_clone_destroy:
1433 *
1434 * Destroy a bridge instance.
1435 */
static int
bridge_clone_destroy(struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct bridge_iflist *bif;
	errno_t error;

	BRIDGE_LOCK(sc);
	/* already being torn down by another thread: nothing to do */
	if ((sc->sc_flags & SCF_DETACHING)) {
		BRIDGE_UNLOCK(sc);
		return (0);
	}
	sc->sc_flags |= SCF_DETACHING;

	bridge_ifstop(ifp, 1);

	if (bridge_in_bsd_mode(sc)) {
		/* stop and drain the resize/aging timers before teardown */
		bridge_cancel_delayed_call(&sc->sc_resize_call);

		bridge_cleanup_delayed_call(&sc->sc_resize_call);
		bridge_cleanup_delayed_call(&sc->sc_aging_timer);
	}

	error = ifnet_set_flags(ifp, 0, IFF_UP);
	if (error != 0) {
		printf("%s: ifnet_set_flags failed %d\n", __func__, error);
	}

	/* detach every member interface */
	while ((bif = TAILQ_FIRST(&sc->sc_iflist)) != NULL)
		bridge_delete_member(sc, bif, 0);

	if (bridge_in_bsd_mode(sc)) {
		while ((bif = TAILQ_FIRST(&sc->sc_spanlist)) != NULL) {
			bridge_delete_span(sc, bif);
		}
		/*
		 * NOTE(review): the bridge lock is released only on this
		 * (BSD-mode) path; if a non-BSD-mode bridge can reach
		 * here, the lock would still be held at ifnet_detach —
		 * confirm non-BSD bridges cannot be destroyed this way.
		 */
		BRIDGE_UNLOCK(sc);
	}

	/* final teardown continues in bridge_detach() via the detach callback */
	error = ifnet_detach(ifp);
	if (error != 0) {
		panic("%s: ifnet_detach(%p) failed %d\n",
		    __func__, ifp, error);
	}
	return (0);
}
1481
/*
 * DRVSPEC:
 *
 * Shared body for the SIOC[SG]DRVSPEC{32,64} ioctl cases in
 * bridge_ioctl().  Expects the following names in the enclosing scope:
 *   ifd     - the (32- or 64-bit) ifdrv request
 *   cmd     - the ioctl command
 *   args    - a union large enough for any argument structure
 *   bridge_control_table / bc - the matching dispatch table and cursor
 *   sc, error - the bridge softc and result variable
 *
 * Validates the sub-command index, direction (get requires COPYOUT,
 * set forbids it), superuser privilege when BC_F_SUSER is set, and the
 * argument size; then copies in, dispatches under the bridge lock, and
 * copies out as flagged.  `break` exits the enclosing switch case.
 */
#define DRVSPEC do { \
	if (ifd->ifd_cmd >= bridge_control_table_size) { \
		error = EINVAL; \
		break; \
	} \
	bc = &bridge_control_table[ifd->ifd_cmd]; \
 \
	if (cmd == SIOCGDRVSPEC && \
	    (bc->bc_flags & BC_F_COPYOUT) == 0) { \
		error = EINVAL; \
		break; \
	} else if (cmd == SIOCSDRVSPEC && \
	    (bc->bc_flags & BC_F_COPYOUT) != 0) { \
		error = EINVAL; \
		break; \
	} \
 \
	if (bc->bc_flags & BC_F_SUSER) { \
		error = kauth_authorize_generic(kauth_cred_get(), \
		    KAUTH_GENERIC_ISSUSER); \
		if (error) \
			break; \
	} \
 \
	if (ifd->ifd_len != bc->bc_argsize || \
	    ifd->ifd_len > sizeof (args)) { \
		error = EINVAL; \
		break; \
	} \
 \
	bzero(&args, sizeof (args)); \
	if (bc->bc_flags & BC_F_COPYIN) { \
		error = copyin(ifd->ifd_data, &args, ifd->ifd_len); \
		if (error) \
			break; \
	} \
 \
	BRIDGE_LOCK(sc); \
	error = (*bc->bc_func)(sc, &args); \
	BRIDGE_UNLOCK(sc); \
	if (error) \
		break; \
 \
	if (bc->bc_flags & BC_F_COPYOUT) \
		error = copyout(&args, ifd->ifd_data, ifd->ifd_len); \
} while (0)
1528
1529 /*
1530 * bridge_ioctl:
1531 *
1532 * Handle a control request from the operator.
1533 */
static errno_t
bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct bridge_iflist *bif;
	int error = 0;

	BRIDGE_LOCK_ASSERT_NOTHELD(sc);

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_IOCTL)
		printf("%s: ifp %s cmd 0x%08lx (%c%c [%lu] %c %lu)\n",
		    __func__, ifp->if_xname, cmd, (cmd & IOC_IN) ? 'I' : ' ',
		    (cmd & IOC_OUT) ? 'O' : ' ', IOCPARM_LEN(cmd),
		    (char)IOCGROUP(cmd), cmd & 0xff);
#endif /* BRIDGE_DEBUG */

	switch (cmd) {

	case SIOCSIFADDR:
	case SIOCAIFADDR:
		/* assigning an address implicitly brings the bridge up */
		ifnet_set_flags(ifp, IFF_UP, IFF_UP);
		break;

	case SIOCGIFMEDIA32:
	case SIOCGIFMEDIA64: {
		struct ifmediareq *ifmr = (struct ifmediareq *)data;
		user_addr_t user_addr;

		/* the media-list pointer lives at different spots in the
		 * 32- and 64-bit request layouts */
		user_addr = (cmd == SIOCGIFMEDIA64) ?
		    ((struct ifmediareq64 *)ifmr)->ifmu_ulist :
		    CAST_USER_ADDR_T(((struct ifmediareq32 *)ifmr)->ifmu_ulist);

		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_mask = 0;
		ifmr->ifm_count = 1;

		/* report "active" only while some member link is up */
		BRIDGE_LOCK(sc);
		if (!(sc->sc_flags & SCF_DETACHING) &&
		    (sc->sc_flags & SCF_MEDIA_ACTIVE)) {
			ifmr->ifm_status |= IFM_ACTIVE;
			ifmr->ifm_active = ifmr->ifm_current =
			    IFM_ETHER | IFM_AUTO;
		} else {
			ifmr->ifm_active = ifmr->ifm_current = IFM_NONE;
		}
		BRIDGE_UNLOCK(sc);

		if (user_addr != USER_ADDR_NULL) {
			error = copyout(&ifmr->ifm_current, user_addr,
			    sizeof (int));
		}
		break;
	}

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;

	case SIOCSDRVSPEC32:
	case SIOCGDRVSPEC32: {
		/* scratch space big enough for any 32-bit sub-command arg */
		union {
			struct ifbreq ifbreq;
			struct ifbifconf32 ifbifconf;
			struct ifbareq32 ifbareq;
			struct ifbaconf32 ifbaconf;
			struct ifbrparam ifbrparam;
			struct ifbropreq32 ifbropreq;
		} args;
		struct ifdrv32 *ifd = (struct ifdrv32 *)data;
		const struct bridge_control *bridge_control_table =
		    bridge_control_table32, *bc;

		/* validate, copyin, dispatch, copyout -- see macro above */
		DRVSPEC;

		break;
	}
	case SIOCSDRVSPEC64:
	case SIOCGDRVSPEC64: {
		/* scratch space big enough for any 64-bit sub-command arg */
		union {
			struct ifbreq ifbreq;
			struct ifbifconf64 ifbifconf;
			struct ifbareq64 ifbareq;
			struct ifbaconf64 ifbaconf;
			struct ifbrparam ifbrparam;
			struct ifbropreq64 ifbropreq;
		} args;
		struct ifdrv64 *ifd = (struct ifdrv64 *)data;
		const struct bridge_control *bridge_control_table =
		    bridge_control_table64, *bc;

		/* validate, copyin, dispatch, copyout -- see macro above */
		DRVSPEC;

		break;
	}

	case SIOCSIFFLAGS:
		if (!(ifp->if_flags & IFF_UP) &&
		    (ifp->if_flags & IFF_RUNNING)) {
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			BRIDGE_LOCK(sc);
			bridge_ifstop(ifp, 1);
			BRIDGE_UNLOCK(sc);
		} else if ((ifp->if_flags & IFF_UP) &&
		    !(ifp->if_flags & IFF_RUNNING)) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			BRIDGE_LOCK(sc);
			error = bridge_init(ifp);
			BRIDGE_UNLOCK(sc);
		}
		break;

	case SIOCSIFLLADDR:
		error = ifnet_set_lladdr(ifp, ifr->ifr_addr.sa_data,
		    ifr->ifr_addr.sa_len);
		if (error != 0)
			printf("%s: SIOCSIFLLADDR error %d\n", ifp->if_xname,
			    error);
		break;

	case SIOCSIFMTU:
		/* 576 is the historical minimum IPv4 MTU */
		if (ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		BRIDGE_LOCK(sc);
		if (TAILQ_EMPTY(&sc->sc_iflist)) {
			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
			BRIDGE_UNLOCK(sc);
			break;
		}
		/* the new MTU must match every member's MTU exactly */
		TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
			if (bif->bif_ifp->if_mtu != (unsigned)ifr->ifr_mtu) {
				printf("%s: invalid MTU: %u(%s) != %d\n",
				    sc->sc_ifp->if_xname,
				    bif->bif_ifp->if_mtu,
				    bif->bif_ifp->if_xname, ifr->ifr_mtu);
				error = EINVAL;
				break;
			}
		}
		if (!error)
			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
		BRIDGE_UNLOCK(sc);
		break;

	default:
		/* everything else falls through to generic Ethernet handling */
		error = ether_ioctl(ifp, cmd, data);
#if BRIDGE_DEBUG
		if (error != 0 && error != EOPNOTSUPP)
			printf("%s: ifp %s cmd 0x%08lx "
			    "(%c%c [%lu] %c %lu) failed error: %d\n",
			    __func__, ifp->if_xname, cmd,
			    (cmd & IOC_IN) ? 'I' : ' ',
			    (cmd & IOC_OUT) ? 'O' : ' ',
			    IOCPARM_LEN(cmd), (char)IOCGROUP(cmd),
			    cmd & 0xff, error);
#endif /* BRIDGE_DEBUG */
		break;
	}
	BRIDGE_LOCK_ASSERT_NOTHELD(sc);

	return (error);
}
1705
1706 #if HAS_IF_CAP
1707 /*
1708 * bridge_mutecaps:
1709 *
1710 * Clear or restore unwanted capabilities on the member interface
1711 */
1712 static void
1713 bridge_mutecaps(struct bridge_softc *sc)
1714 {
1715 struct bridge_iflist *bif;
1716 int enabled, mask;
1717
1718 /* Initial bitmask of capabilities to test */
1719 mask = BRIDGE_IFCAPS_MASK;
1720
1721 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1722 /* Every member must support it or its disabled */
1723 mask &= bif->bif_savedcaps;
1724 }
1725
1726 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1727 enabled = bif->bif_ifp->if_capenable;
1728 enabled &= ~BRIDGE_IFCAPS_STRIP;
1729 /* strip off mask bits and enable them again if allowed */
1730 enabled &= ~BRIDGE_IFCAPS_MASK;
1731 enabled |= mask;
1732
1733 bridge_set_ifcap(sc, bif, enabled);
1734 }
1735
1736 }
1737
1738 static void
1739 bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
1740 {
1741 struct ifnet *ifp = bif->bif_ifp;
1742 struct ifreq ifr;
1743 int error;
1744
1745 bzero(&ifr, sizeof (ifr));
1746 ifr.ifr_reqcap = set;
1747
1748 if (ifp->if_capenable != set) {
1749 IFF_LOCKGIANT(ifp);
1750 error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
1751 IFF_UNLOCKGIANT(ifp);
1752 if (error)
1753 printf("%s: %s error setting interface capabilities "
1754 "on %s\n", __func__, sc->sc_ifp->if_xname,
1755 ifp->if_xname);
1756 }
1757 }
1758 #endif /* HAS_IF_CAP */
1759
/*
 * bridge_set_tso:
 *
 * Derive the bridge's TSO capability and TSO MTUs from the lowest
 * common denominator of its member interfaces, and apply them to the
 * bridge ifnet when they differ from the current settings.
 */
static errno_t
bridge_set_tso(struct bridge_softc *sc)
{
	struct bridge_iflist *bif;
	u_int32_t tso_v4_mtu;
	u_int32_t tso_v6_mtu;
	ifnet_offload_t offload;
	errno_t error = 0;

	/* By default, support TSO */
	offload = sc->sc_ifp->if_hwassist | IFNET_TSO_IPV4 | IFNET_TSO_IPV6;
	tso_v4_mtu = IP_MAXPACKET;
	tso_v6_mtu = IP_MAXPACKET;

	/* Use the lowest common denominator of the members */
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
		ifnet_t ifp = bif->bif_ifp;

		if (ifp == NULL)
			continue;

		/* any member lacking TSO disables it bridge-wide;
		 * otherwise shrink the MTU to the smallest member's */
		if (offload & IFNET_TSO_IPV4) {
			if (ifp->if_hwassist & IFNET_TSO_IPV4) {
				if (tso_v4_mtu > ifp->if_tso_v4_mtu)
					tso_v4_mtu = ifp->if_tso_v4_mtu;
			} else {
				offload &= ~IFNET_TSO_IPV4;
				tso_v4_mtu = 0;
			}
		}
		if (offload & IFNET_TSO_IPV6) {
			if (ifp->if_hwassist & IFNET_TSO_IPV6) {
				if (tso_v6_mtu > ifp->if_tso_v6_mtu)
					tso_v6_mtu = ifp->if_tso_v6_mtu;
			} else {
				offload &= ~IFNET_TSO_IPV6;
				tso_v6_mtu = 0;
			}
		}
	}

	/* only touch the ifnet when something actually changed */
	if (offload != sc->sc_ifp->if_hwassist) {
		error = ifnet_set_offload(sc->sc_ifp, offload);
		if (error != 0) {
#if BRIDGE_DEBUG
			if (if_bridge_debug & BR_DBGF_LIFECYCLE)
				printf("%s: ifnet_set_offload(%s, 0x%x) "
				    "failed %d\n", __func__,
				    sc->sc_ifp->if_xname, offload, error);
#endif /* BRIDGE_DEBUG */
			goto done;
		}
		/*
		 * For ifnet_set_tso_mtu() sake, the TSO MTU must be at least
		 * as large as the interface MTU
		 */
		if (sc->sc_ifp->if_hwassist & IFNET_TSO_IPV4) {
			if (tso_v4_mtu < sc->sc_ifp->if_mtu)
				tso_v4_mtu = sc->sc_ifp->if_mtu;
			error = ifnet_set_tso_mtu(sc->sc_ifp, AF_INET,
			    tso_v4_mtu);
			if (error != 0) {
#if BRIDGE_DEBUG
				if (if_bridge_debug & BR_DBGF_LIFECYCLE)
					printf("%s: ifnet_set_tso_mtu(%s, "
					    "AF_INET, %u) failed %d\n",
					    __func__, sc->sc_ifp->if_xname,
					    tso_v4_mtu, error);
#endif /* BRIDGE_DEBUG */
				goto done;
			}
		}
		if (sc->sc_ifp->if_hwassist & IFNET_TSO_IPV6) {
			if (tso_v6_mtu < sc->sc_ifp->if_mtu)
				tso_v6_mtu = sc->sc_ifp->if_mtu;
			error = ifnet_set_tso_mtu(sc->sc_ifp, AF_INET6,
			    tso_v6_mtu);
			if (error != 0) {
#if BRIDGE_DEBUG
				if (if_bridge_debug & BR_DBGF_LIFECYCLE)
					printf("%s: ifnet_set_tso_mtu(%s, "
					    "AF_INET6, %u) failed %d\n",
					    __func__, sc->sc_ifp->if_xname,
					    tso_v6_mtu, error);
#endif /* BRIDGE_DEBUG */
				goto done;
			}
		}
	}
done:
	return (error);
}
1852
1853 /*
1854 * bridge_lookup_member:
1855 *
1856 * Lookup a bridge member interface.
1857 */
1858 static struct bridge_iflist *
1859 bridge_lookup_member(struct bridge_softc *sc, const char *name)
1860 {
1861 struct bridge_iflist *bif;
1862 struct ifnet *ifp;
1863
1864 BRIDGE_LOCK_ASSERT_HELD(sc);
1865
1866 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1867 ifp = bif->bif_ifp;
1868 if (strcmp(ifp->if_xname, name) == 0)
1869 return (bif);
1870 }
1871
1872 return (NULL);
1873 }
1874
1875 /*
1876 * bridge_lookup_member_if:
1877 *
1878 * Lookup a bridge member interface by ifnet*.
1879 */
1880 static struct bridge_iflist *
1881 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
1882 {
1883 struct bridge_iflist *bif;
1884
1885 BRIDGE_LOCK_ASSERT_HELD(sc);
1886
1887 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1888 if (bif->bif_ifp == member_ifp)
1889 return (bif);
1890 }
1891
1892 return (NULL);
1893 }
1894
/*
 * bridge_iff_input:
 *
 * Interface filter input callback for a bridge member.  Rewinds the
 * mbuf so the Ethernet header is back in the data area, hands the
 * packet to bridge_input(), and restores the original data pointer if
 * the bridge did not consume it.  Packets tagged M_PROTO1 (already
 * processed by the bridge) pass through untouched.
 */
static errno_t
bridge_iff_input(void *cookie, ifnet_t ifp, protocol_family_t protocol,
    mbuf_t *data, char **frame_ptr)
{
#pragma unused(protocol)
	errno_t error = 0;
	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
	struct bridge_softc *sc = bif->bif_sc;
	int included = 0;
	size_t frmlen = 0;
	mbuf_t m = *data;

	/* M_PROTO1 marks packets the bridge itself re-injected */
	if ((m->m_flags & M_PROTO1))
		goto out;

	/* determine whether the frame header sits inside this mbuf's
	 * buffer, and if so how many bytes precede the current data */
	if (*frame_ptr >= (char *)mbuf_datastart(m) &&
	    *frame_ptr <= (char *)mbuf_data(m)) {
		included = 1;
		frmlen = (char *)mbuf_data(m) - *frame_ptr;
	}
#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_INPUT) {
		printf("%s: %s from %s m 0x%llx data 0x%llx frame 0x%llx %s "
		    "frmlen %lu\n", __func__, sc->sc_ifp->if_xname,
		    ifp->if_xname, (uint64_t)VM_KERNEL_ADDRPERM(m),
		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)),
		    (uint64_t)VM_KERNEL_ADDRPERM(*frame_ptr),
		    included ? "inside" : "outside", frmlen);

		if (if_bridge_debug & BR_DBGF_MBUF) {
			printf_mbuf(m, "bridge_iff_input[", "\n");
			printf_ether_header((struct ether_header *)
			    (void *)*frame_ptr);
			printf_mbuf_data(m, 0, 20);
			printf("\n");
		}
	}
#endif /* BRIDGE_DEBUG */

	/* Move data pointer to start of frame to the link layer header */
	if (included) {
		(void) mbuf_setdata(m, (char *)mbuf_data(m) - frmlen,
		    mbuf_len(m) + frmlen);
		(void) mbuf_pkthdr_adjustlen(m, frmlen);
	} else {
		printf("%s: frame_ptr outside mbuf\n", __func__);
		goto out;
	}

	error = bridge_input(ifp, m, *frame_ptr);

	/* Adjust packet back to original */
	if (error == 0) {
		/*
		 * NOTE(review): frmlen is size_t (unsigned); -frmlen relies
		 * on conversion to the (presumably signed int) length
		 * parameter of mbuf_pkthdr_adjustlen — confirm frmlen can
		 * never exceed INT_MAX here.
		 */
		(void) mbuf_setdata(m, (char *)mbuf_data(m) + frmlen,
		    mbuf_len(m) - frmlen);
		(void) mbuf_pkthdr_adjustlen(m, -frmlen);
	}
#if BRIDGE_DEBUG
	if ((if_bridge_debug & BR_DBGF_INPUT) &&
	    (if_bridge_debug & BR_DBGF_MBUF)) {
		printf("\n");
		printf_mbuf(m, "bridge_iff_input]", "\n");
	}
#endif /* BRIDGE_DEBUG */

out:
	BRIDGE_LOCK_ASSERT_NOTHELD(sc);

	return (error);
}
1965
1966 #if BRIDGE_MEMBER_OUT_FILTER
/*
 * bridge_iff_output:
 *
 * Interface filter output callback for a bridge member (only built
 * when BRIDGE_MEMBER_OUT_FILTER is enabled).  Forwards locally
 * originated packets leaving a member interface through
 * bridge_member_output(); packets tagged M_PROTO1 (re-injected by the
 * bridge itself) pass through untouched.
 */
static errno_t
bridge_iff_output(void *cookie, ifnet_t ifp, protocol_family_t protocol,
    mbuf_t *data)
{
#pragma unused(protocol)
	errno_t error = 0;
	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
	struct bridge_softc *sc = bif->bif_sc;
	mbuf_t m = *data;

	/* M_PROTO1 marks packets the bridge itself re-injected */
	if ((m->m_flags & M_PROTO1))
		goto out;

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_OUTPUT) {
		printf("%s: %s from %s m 0x%llx data 0x%llx\n", __func__,
		    sc->sc_ifp->if_xname, ifp->if_xname,
		    (uint64_t)VM_KERNEL_ADDRPERM(m),
		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)));
	}
#endif /* BRIDGE_DEBUG */

	error = bridge_member_output(sc, ifp, m);
	if (error != 0) {
		printf("%s: bridge_member_output failed error %d\n", __func__,
		    error);
	}

out:
	BRIDGE_LOCK_ASSERT_NOTHELD(sc);

	return (error);
}
2000 #endif /* BRIDGE_MEMBER_OUT_FILTER */
2001
/*
 * bridge_iff_event:
 *
 *	Interface-filter event callback for a bridge member.  Reacts to
 *	Apple DLIL kernel events on the member interface: detach, link
 *	state change, flag change and capability change.
 */
static void
bridge_iff_event(void *cookie, ifnet_t ifp, protocol_family_t protocol,
    const struct kev_msg *event_msg)
{
#pragma unused(protocol)
	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
	struct bridge_softc *sc = bif->bif_sc;

	/* Only Apple network/data-link events are of interest here */
	if (event_msg->vendor_code == KEV_VENDOR_APPLE &&
	    event_msg->kev_class == KEV_NETWORK_CLASS &&
	    event_msg->kev_subclass == KEV_DL_SUBCLASS) {
#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_LIFECYCLE)
			printf("%s: %s event_code %u - %s\n", __func__,
			    ifp->if_xname, event_msg->event_code,
			    dlil_kev_dl_code_str(event_msg->event_code));
#endif /* BRIDGE_DEBUG */

		switch (event_msg->event_code) {
		case KEV_DL_IF_DETACHING:
		case KEV_DL_IF_DETACHED: {
			/* Member is going away: remove it from the bridge */
			bridge_ifdetach(bif, ifp);
			break;
		}
		case KEV_DL_LINK_OFF:
		case KEV_DL_LINK_ON: {
			/* Propagate link state to the bridge (and STP) */
			bridge_iflinkevent(ifp);
#if BRIDGESTP
			bstp_linkstate(ifp, event_msg->event_code);
#endif /* BRIDGESTP */
			break;
		}
		case KEV_DL_SIFFLAGS: {
			/*
			 * If the member is up but we never managed to put
			 * it into promiscuous mode, retry now.
			 */
			if ((bif->bif_flags & BIFF_PROMISC) == 0 &&
			    (ifp->if_flags & IFF_UP)) {
				errno_t error;

				error = ifnet_set_promiscuous(ifp, 1);
				if (error != 0) {
					printf("%s: "
					    "ifnet_set_promiscuous (%s)"
					    " failed %d\n",
					    __func__, ifp->if_xname,
					    error);
				} else {
					bif->bif_flags |= BIFF_PROMISC;
				}
			}
			break;
		}
		case KEV_DL_IFCAP_CHANGED: {
			/* Recompute the bridge TSO setting under the lock */
			BRIDGE_LOCK(sc);
			bridge_set_tso(sc);
			BRIDGE_UNLOCK(sc);
			break;
		}
		default:
			break;
		}
	}
}
2063
2064 /*
2065 * bridge_iff_detached:
2066 *
2067 * Detach an interface from a bridge. Called when a member
2068 * interface is detaching.
2069 */
2070 static void
2071 bridge_iff_detached(void *cookie, ifnet_t ifp)
2072 {
2073 struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
2074
2075 #if BRIDGE_DEBUG
2076 if (if_bridge_debug & BR_DBGF_LIFECYCLE)
2077 printf("%s: %s\n", __func__, ifp->if_xname);
2078 #endif /* BRIDGE_DEBUG */
2079
2080 bridge_ifdetach(bif, ifp);
2081
2082 _FREE(bif, M_DEVBUF);
2083 }
2084
/*
 * bridge_proto_input:
 *
 *	Input handler for the dummy PF_BRIDGE protocol attached to each
 *	member interface.  No packets are expected to arrive here; the
 *	packet is dropped by returning 0 without consuming it further.
 */
static errno_t
bridge_proto_input(ifnet_t ifp, protocol_family_t protocol, mbuf_t packet,
    char *header)
{
#pragma unused(protocol, packet, header)
#if BRIDGE_DEBUG
	printf("%s: unexpected packet from %s\n", __func__,
	    ifp->if_xname);
#endif /* BRIDGE_DEBUG */
	return (0);
}
2096
2097 static int
2098 bridge_attach_protocol(struct ifnet *ifp)
2099 {
2100 int error;
2101 struct ifnet_attach_proto_param reg;
2102
2103 #if BRIDGE_DEBUG
2104 if (if_bridge_debug & BR_DBGF_LIFECYCLE)
2105 printf("%s: %s\n", __func__, ifp->if_xname);
2106 #endif /* BRIDGE_DEBUG */
2107
2108 bzero(&reg, sizeof (reg));
2109 reg.input = bridge_proto_input;
2110
2111 error = ifnet_attach_protocol(ifp, PF_BRIDGE, &reg);
2112 if (error)
2113 printf("%s: ifnet_attach_protocol(%s) failed, %d\n",
2114 __func__, ifp->if_xname, error);
2115
2116 return (error);
2117 }
2118
2119 static int
2120 bridge_detach_protocol(struct ifnet *ifp)
2121 {
2122 int error;
2123
2124 #if BRIDGE_DEBUG
2125 if (if_bridge_debug & BR_DBGF_LIFECYCLE)
2126 printf("%s: %s\n", __func__, ifp->if_xname);
2127 #endif /* BRIDGE_DEBUG */
2128 error = ifnet_detach_protocol(ifp, PF_BRIDGE);
2129 if (error)
2130 printf("%s: ifnet_detach_protocol(%s) failed, %d\n",
2131 __func__, ifp->if_xname, error);
2132
2133 return (error);
2134 }
2135
2136 /*
2137 * bridge_delete_member:
2138 *
2139 * Delete the specified member interface.
2140 */
static void
bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
    int gone)
{
	struct ifnet *ifs = bif->bif_ifp, *bifp = sc->sc_ifp;
	int lladdr_changed = 0, error, filt_attached;
	uint8_t eaddr[ETHER_ADDR_LEN];
	u_int32_t event_code = 0;
	boolean_t bsd_mode;

	BRIDGE_LOCK_ASSERT_HELD(sc);
	VERIFY(ifs != NULL);

	bsd_mode = bridge_in_bsd_mode(sc);

	/*
	 * First, remove the member from the list first so it cannot be found anymore
	 * when we release the bridge lock below
	 */
	BRIDGE_XLOCK(sc);
	TAILQ_REMOVE(&sc->sc_iflist, bif, bif_next);
	BRIDGE_XDROP(sc);

	/* `gone' means the interface is already detaching on its own */
	if (!gone) {
		switch (ifs->if_type) {
		case IFT_ETHER:
		case IFT_L2VLAN:
			/*
			 * Take the interface out of promiscuous mode.
			 */
			if (bif->bif_flags & BIFF_PROMISC) {
				/*
				 * Unlock to prevent deadlock with bridge_iff_event() in
				 * case the driver generates an interface event
				 */
				BRIDGE_UNLOCK(sc);
				(void) ifnet_set_promiscuous(ifs, 0);
				BRIDGE_LOCK(sc);
			}
			break;

		case IFT_GIF:
			/* currently not supported */
		/* FALLTHRU */
		default:
			VERIFY(0);
			/* NOTREACHED */
		}

#if HAS_IF_CAP
		/* reneable any interface capabilities */
		bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
#endif
	}

	if (bif->bif_flags & BIFF_PROTO_ATTACHED) {
		/* Respect lock ordering with DLIL lock */
		BRIDGE_UNLOCK(sc);
		(void) bridge_detach_protocol(ifs);
		BRIDGE_LOCK(sc);
	}
#if BRIDGESTP
	if (bsd_mode && (bif->bif_ifflags & IFBIF_STP) != 0) {
		bstp_disable(&bif->bif_stp);
	}
#endif /* BRIDGESTP */

	/*
	 * If removing the interface that gave the bridge its mac address, set
	 * the mac address of the bridge to the address of the next member, or
	 * to its default address if no members are left.
	 */
	if (bridge_inherit_mac && sc->sc_ifaddr == ifs) {
		ifnet_release(sc->sc_ifaddr);
		if (TAILQ_EMPTY(&sc->sc_iflist)) {
			bcopy(sc->sc_defaddr, eaddr, ETHER_ADDR_LEN);
			sc->sc_ifaddr = NULL;
		} else {
			struct ifnet *fif =
			    TAILQ_FIRST(&sc->sc_iflist)->bif_ifp;
			bcopy(IF_LLADDR(fif), eaddr, ETHER_ADDR_LEN);
			sc->sc_ifaddr = fif;
			ifnet_reference(fif);	/* for sc_ifaddr */
		}
		lladdr_changed = 1;
	}

#if HAS_IF_CAP
	bridge_mutecaps(sc);	/* recalculate now this interface is removed */
#endif /* HAS_IF_CAP */

	error = bridge_set_tso(sc);
	if (error != 0) {
		printf("%s: bridge_set_tso failed %d\n", __func__, error);
	}

	if (bsd_mode) {
		/* Drop all forwarding entries learned on this member */
		bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
	}

	KASSERT(bif->bif_addrcnt == 0,
	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));

	filt_attached = bif->bif_flags & BIFF_FILTER_ATTACHED;

	/*
	 * Update link status of the bridge based on its remaining members
	 */
	event_code = bridge_updatelinkstatus(sc);

	/*
	 * NOTE(review): the lock is only dropped here in BSD mode, yet it
	 * is re-taken unconditionally at the end of this function — the
	 * non-BSD-mode path looks unbalanced; verify against callers.
	 */
	if (bsd_mode) {
		BRIDGE_UNLOCK(sc);
	}

	if (lladdr_changed &&
	    (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0)
		printf("%s: ifnet_set_lladdr failed %d\n", __func__, error);

	if (event_code != 0)
		bridge_link_event(bifp, event_code);

#if BRIDGESTP
	if (bsd_mode) {
		bstp_destroy(&bif->bif_stp);	/* prepare to free */
	}
#endif /* BRIDGESTP */

	/*
	 * If the DLIL filter is attached, bridge_iff_detached() will free
	 * the record once the filter detach completes; otherwise free now.
	 */
	if (filt_attached)
		iflt_detach(bif->bif_iff_ref);
	else
		_FREE(bif, M_DEVBUF);

	ifs->if_bridge = NULL;
	ifnet_release(ifs);

	BRIDGE_LOCK(sc);
}
2278
2279 /*
2280 * bridge_delete_span:
2281 *
2282 * Delete the specified span interface.
2283 */
2284 static void
2285 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
2286 {
2287 BRIDGE_LOCK_ASSERT_HELD(sc);
2288
2289 KASSERT(bif->bif_ifp->if_bridge == NULL,
2290 ("%s: not a span interface", __func__));
2291
2292 ifnet_release(bif->bif_ifp);
2293
2294 TAILQ_REMOVE(&sc->sc_spanlist, bif, bif_next);
2295 _FREE(bif, M_DEVBUF);
2296 }
2297
2298 static int
2299 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
2300 {
2301 struct ifbreq *req = arg;
2302 struct bridge_iflist *bif = NULL;
2303 struct ifnet *ifs, *bifp = sc->sc_ifp;
2304 int error = 0, lladdr_changed = 0;
2305 uint8_t eaddr[ETHER_ADDR_LEN];
2306 struct iff_filter iff;
2307 u_int32_t event_code = 0;
2308 boolean_t bsd_mode = bridge_in_bsd_mode(sc);
2309
2310 ifs = ifunit(req->ifbr_ifsname);
2311 if (ifs == NULL)
2312 return (ENOENT);
2313 if (ifs->if_ioctl == NULL) /* must be supported */
2314 return (EINVAL);
2315
2316 if (IFNET_IS_INTCOPROC(ifs)) {
2317 return (EINVAL);
2318 }
2319
2320 if (bsd_mode) {
2321 /* If it's in the span list, it can't be a member. */
2322 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
2323 if (ifs == bif->bif_ifp)
2324 return (EBUSY);
2325 }
2326
2327 if (ifs->if_bridge == sc)
2328 return (EEXIST);
2329
2330 if (ifs->if_bridge != NULL)
2331 return (EBUSY);
2332
2333 switch (ifs->if_type) {
2334 case IFT_ETHER:
2335 case IFT_L2VLAN:
2336 /* permitted interface types */
2337 break;
2338 case IFT_GIF:
2339 /* currently not supported */
2340 /* FALLTHRU */
2341 default:
2342 return (EINVAL);
2343 }
2344
2345 bif = _MALLOC(sizeof (*bif), M_DEVBUF, M_WAITOK | M_ZERO);
2346 if (bif == NULL)
2347 return (ENOMEM);
2348
2349 bif->bif_ifp = ifs;
2350 ifnet_reference(ifs);
2351 bif->bif_ifflags = IFBIF_LEARNING | IFBIF_DISCOVER;
2352 #if HAS_IF_CAP
2353 bif->bif_savedcaps = ifs->if_capenable;
2354 #endif /* HAS_IF_CAP */
2355 bif->bif_sc = sc;
2356
2357 /* Allow the first Ethernet member to define the MTU */
2358 if (TAILQ_EMPTY(&sc->sc_iflist))
2359 sc->sc_ifp->if_mtu = ifs->if_mtu;
2360 else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
2361 printf("%s: %s: invalid MTU for %s", __func__,
2362 sc->sc_ifp->if_xname,
2363 ifs->if_xname);
2364 return (EINVAL);
2365 }
2366
2367 /*
2368 * Assign the interface's MAC address to the bridge if it's the first
2369 * member and the MAC address of the bridge has not been changed from
2370 * the default (randomly) generated one.
2371 */
2372 if (bridge_inherit_mac && TAILQ_EMPTY(&sc->sc_iflist) &&
2373 !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN)) {
2374 bcopy(IF_LLADDR(ifs), eaddr, ETHER_ADDR_LEN);
2375 sc->sc_ifaddr = ifs;
2376 ifnet_reference(ifs); /* for sc_ifaddr */
2377 lladdr_changed = 1;
2378 }
2379
2380 ifs->if_bridge = sc;
2381 #if BRIDGESTP
2382 if (bsd_mode) {
2383 bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
2384 }
2385 #endif /* BRIDGESTP */
2386
2387 /*
2388 * XXX: XLOCK HERE!?!
2389 */
2390 TAILQ_INSERT_TAIL(&sc->sc_iflist, bif, bif_next);
2391
2392 #if HAS_IF_CAP
2393 /* Set interface capabilities to the intersection set of all members */
2394 bridge_mutecaps(sc);
2395 #endif /* HAS_IF_CAP */
2396
2397 bridge_set_tso(sc);
2398
2399
2400 /*
2401 * Place the interface into promiscuous mode.
2402 */
2403 switch (ifs->if_type) {
2404 case IFT_ETHER:
2405 case IFT_L2VLAN:
2406 error = ifnet_set_promiscuous(ifs, 1);
2407 if (error) {
2408 /* Ignore error when device is not up */
2409 if (error != ENETDOWN)
2410 goto out;
2411 error = 0;
2412 } else {
2413 bif->bif_flags |= BIFF_PROMISC;
2414 }
2415 break;
2416
2417 default:
2418 break;
2419 }
2420
2421 /*
2422 * The new member may change the link status of the bridge interface
2423 */
2424 if (interface_media_active(ifs))
2425 bif->bif_flags |= BIFF_MEDIA_ACTIVE;
2426 else
2427 bif->bif_flags &= ~BIFF_MEDIA_ACTIVE;
2428
2429 event_code = bridge_updatelinkstatus(sc);
2430
2431 /*
2432 * Respect lock ordering with DLIL lock for the following operations
2433 */
2434 if (bsd_mode) {
2435 BRIDGE_UNLOCK(sc);
2436 }
2437
2438 /*
2439 * install an interface filter
2440 */
2441 memset(&iff, 0, sizeof (struct iff_filter));
2442 iff.iff_cookie = bif;
2443 iff.iff_name = "com.apple.kernel.bsd.net.if_bridge";
2444 if (bsd_mode) {
2445 iff.iff_input = bridge_iff_input;
2446 #if BRIDGE_MEMBER_OUT_FILTER
2447 iff.iff_output = bridge_iff_output;
2448 #endif /* BRIDGE_MEMBER_OUT_FILTER */
2449 }
2450 iff.iff_event = bridge_iff_event;
2451 iff.iff_detached = bridge_iff_detached;
2452 error = dlil_attach_filter(ifs, &iff, &bif->bif_iff_ref,
2453 DLIL_IFF_TSO | DLIL_IFF_INTERNAL);
2454 if (error != 0) {
2455 printf("%s: iflt_attach failed %d\n", __func__, error);
2456 BRIDGE_LOCK(sc);
2457 goto out;
2458 }
2459 bif->bif_flags |= BIFF_FILTER_ATTACHED;
2460
2461 /*
2462 * install an dummy "bridge" protocol
2463 */
2464 if ((error = bridge_attach_protocol(ifs)) != 0) {
2465 if (error != 0) {
2466 printf("%s: bridge_attach_protocol failed %d\n",
2467 __func__, error);
2468 BRIDGE_LOCK(sc);
2469 goto out;
2470 }
2471 }
2472 bif->bif_flags |= BIFF_PROTO_ATTACHED;
2473
2474 if (lladdr_changed &&
2475 (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0)
2476 printf("%s: ifnet_set_lladdr failed %d\n", __func__, error);
2477
2478 if (event_code != 0)
2479 bridge_link_event(bifp, event_code);
2480
2481 BRIDGE_LOCK(sc);
2482
2483 out:
2484 if (error && bif != NULL)
2485 bridge_delete_member(sc, bif, 1);
2486
2487 return (error);
2488 }
2489
2490 static int
2491 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
2492 {
2493 struct ifbreq *req = arg;
2494 struct bridge_iflist *bif;
2495
2496 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2497 if (bif == NULL)
2498 return (ENOENT);
2499
2500 bridge_delete_member(sc, bif, 0);
2501
2502 return (0);
2503 }
2504
/* Purge request: accepted but intentionally a no-op. */
static int
bridge_ioctl_purge(struct bridge_softc *sc, void *arg)
{
#pragma unused(sc, arg)
	return (0);
}
2511
2512 static int
2513 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
2514 {
2515 struct ifbreq *req = arg;
2516 struct bridge_iflist *bif;
2517
2518 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2519 if (bif == NULL)
2520 return (ENOENT);
2521
2522 if (bridge_in_bsd_mode(sc)) {
2523 struct bstp_port *bp;
2524
2525 bp = &bif->bif_stp;
2526 req->ifbr_state = bp->bp_state;
2527 req->ifbr_priority = bp->bp_priority;
2528 req->ifbr_path_cost = bp->bp_path_cost;
2529 req->ifbr_proto = bp->bp_protover;
2530 req->ifbr_role = bp->bp_role;
2531 req->ifbr_stpflags = bp->bp_flags;
2532 /* Copy STP state options as flags */
2533 if (bp->bp_operedge)
2534 req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
2535 if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
2536 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
2537 if (bp->bp_ptp_link)
2538 req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
2539 if (bp->bp_flags & BSTP_PORT_AUTOPTP)
2540 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
2541 if (bp->bp_flags & BSTP_PORT_ADMEDGE)
2542 req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
2543 if (bp->bp_flags & BSTP_PORT_ADMCOST)
2544 req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
2545 }
2546 req->ifbr_ifsflags = bif->bif_ifflags;
2547 req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
2548 req->ifbr_addrcnt = bif->bif_addrcnt;
2549 req->ifbr_addrmax = bif->bif_addrmax;
2550 req->ifbr_addrexceeded = bif->bif_addrexceeded;
2551
2552 return (0);
2553 }
2554
/*
 * bridge_ioctl_sifflags:
 *
 *	Set the bridge-related flags of a member interface.  STP-related
 *	bits are forwarded to the spanning-tree code when BRIDGESTP is
 *	compiled in; otherwise requesting IFBIF_STP is rejected.
 */
static int
bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;
#if BRIDGESTP
	struct bstp_port *bp;
	int error;
#endif /* BRIDGESTP */

	/* Member flags can only be changed in BSD (forwarding) mode */
	if (!bridge_in_bsd_mode(sc)) {
		return (EINVAL);
	}

	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
	if (bif == NULL)
		return (ENOENT);

	if (req->ifbr_ifsflags & IFBIF_SPAN)
		/* SPAN is readonly */
		return (EINVAL);


#if BRIDGESTP
	/* Enable or disable STP on the port to match the requested flag */
	if (req->ifbr_ifsflags & IFBIF_STP) {
		if ((bif->bif_ifflags & IFBIF_STP) == 0) {
			error = bstp_enable(&bif->bif_stp);
			if (error)
				return (error);
		}
	} else {
		if ((bif->bif_ifflags & IFBIF_STP) != 0)
			bstp_disable(&bif->bif_stp);
	}

	/* Pass on STP flags */
	bp = &bif->bif_stp;
	bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
	bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
	bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
	bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
#else /* !BRIDGESTP */
	if (req->ifbr_ifsflags & IFBIF_STP)
		return (EOPNOTSUPP);
#endif /* !BRIDGESTP */

	/* Save the bits relating to the bridge */
	bif->bif_ifflags = req->ifbr_ifsflags & IFBIFMASK;


	return (0);
}
2607
2608 static int
2609 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
2610 {
2611 struct ifbrparam *param = arg;
2612
2613 sc->sc_brtmax = param->ifbrp_csize;
2614 if (bridge_in_bsd_mode(sc)) {
2615 bridge_rttrim(sc);
2616 }
2617 return (0);
2618 }
2619
2620 static int
2621 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
2622 {
2623 struct ifbrparam *param = arg;
2624
2625 param->ifbrp_csize = sc->sc_brtmax;
2626
2627 return (0);
2628 }
2629
/*
 * BRIDGE_IOCTL_GIFS:
 *
 *	Shared body for bridge_ioctl_gifs32/64: copies the list of member
 *	(and, in BSD mode, span) interfaces out to user space.  Expects
 *	`sc', `bifc' and `error' to be in scope in the expanding function.
 *	A zero ifbic_len request returns only the required buffer size.
 */
#define BRIDGE_IOCTL_GIFS do { \
	struct bridge_iflist *bif; \
	struct ifbreq breq; \
	char *buf, *outbuf; \
	unsigned int count, buflen, len; \
	\
	count = 0; \
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) \
		count++; \
	if (bridge_in_bsd_mode(sc)) { \
		TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) \
			count++; \
	} \
	\
	buflen = sizeof (breq) * count; \
	if (bifc->ifbic_len == 0) { \
		bifc->ifbic_len = buflen; \
		return (0); \
	} \
	BRIDGE_UNLOCK(sc); \
	outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
	BRIDGE_LOCK(sc); \
	\
	count = 0; \
	buf = outbuf; \
	len = min(bifc->ifbic_len, buflen); \
	bzero(&breq, sizeof (breq)); \
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
		if (len < sizeof (breq)) \
			break; \
		\
		snprintf(breq.ifbr_ifsname, sizeof (breq.ifbr_ifsname), \
		    "%s", bif->bif_ifp->if_xname); \
		/* Fill in the ifbreq structure */ \
		error = bridge_ioctl_gifflags(sc, &breq); \
		if (error) \
			break; \
		memcpy(buf, &breq, sizeof (breq)); \
		count++; \
		buf += sizeof (breq); \
		len -= sizeof (breq); \
	} \
	if (bridge_in_bsd_mode(sc)) { \
		TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) { \
			if (len < sizeof (breq)) \
				break; \
			\
			snprintf(breq.ifbr_ifsname, \
			    sizeof (breq.ifbr_ifsname), \
			    "%s", bif->bif_ifp->if_xname); \
			breq.ifbr_ifsflags = bif->bif_ifflags; \
			breq.ifbr_portno \
			    = bif->bif_ifp->if_index & 0xfff; \
			memcpy(buf, &breq, sizeof (breq)); \
			count++; \
			buf += sizeof (breq); \
			len -= sizeof (breq); \
		} \
	} \
	\
	BRIDGE_UNLOCK(sc); \
	bifc->ifbic_len = sizeof (breq) * count; \
	error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); \
	BRIDGE_LOCK(sc); \
	_FREE(outbuf, M_TEMP); \
} while (0)
2696
/* Copy the member interface list out for a 64-bit user request. */
static int
bridge_ioctl_gifs64(struct bridge_softc *sc, void *arg)
{
	struct ifbifconf64 *bifc = arg;
	int error = 0;

	BRIDGE_IOCTL_GIFS;

	return (error);
}
2707
/* Copy the member interface list out for a 32-bit user request. */
static int
bridge_ioctl_gifs32(struct bridge_softc *sc, void *arg)
{
	struct ifbifconf32 *bifc = arg;
	int error = 0;

	BRIDGE_IOCTL_GIFS;

	return (error);
}
2718
/*
 * BRIDGE_IOCTL_RTS:
 *
 *	Shared body for bridge_ioctl_rts32/64: copies the learned
 *	forwarding table out to user space.  Expects `sc', `bac',
 *	`bareq' and `error' to be in scope in the expanding function.
 *	For dynamic entries the remaining lifetime (relative to
 *	net_uptime()) is reported in ifba_expire.
 */
#define BRIDGE_IOCTL_RTS do { \
	struct bridge_rtnode *brt; \
	char *buf; \
	char *outbuf = NULL; \
	unsigned int count, buflen, len; \
	unsigned long now; \
	\
	if (bac->ifbac_len == 0) \
		return (0); \
	\
	bzero(&bareq, sizeof (bareq)); \
	count = 0; \
	if (!bridge_in_bsd_mode(sc)) { \
		goto out; \
	} \
	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) \
		count++; \
	buflen = sizeof (bareq) * count; \
	\
	BRIDGE_UNLOCK(sc); \
	outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
	BRIDGE_LOCK(sc); \
	\
	count = 0; \
	buf = outbuf; \
	len = min(bac->ifbac_len, buflen); \
	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { \
		if (len < sizeof (bareq)) \
			goto out; \
		snprintf(bareq.ifba_ifsname, sizeof (bareq.ifba_ifsname), \
		    "%s", brt->brt_ifp->if_xname); \
		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof (brt->brt_addr)); \
		bareq.ifba_vlan = brt->brt_vlan; \
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { \
			now = (unsigned long) net_uptime(); \
			if (now < brt->brt_expire) \
				bareq.ifba_expire = \
				    brt->brt_expire - now; \
		} else \
			bareq.ifba_expire = 0; \
		bareq.ifba_flags = brt->brt_flags; \
		\
		memcpy(buf, &bareq, sizeof (bareq)); \
		count++; \
		buf += sizeof (bareq); \
		len -= sizeof (bareq); \
	} \
out: \
	bac->ifbac_len = sizeof (bareq) * count; \
	if (outbuf != NULL) { \
		BRIDGE_UNLOCK(sc); \
		error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); \
		_FREE(outbuf, M_TEMP); \
		BRIDGE_LOCK(sc); \
	} \
	return (error); \
} while (0)
2776
/* Copy the forwarding table out for a 64-bit user request. */
static int
bridge_ioctl_rts64(struct bridge_softc *sc, void *arg)
{
	struct ifbaconf64 *bac = arg;
	struct ifbareq64 bareq;
	int error = 0;

	BRIDGE_IOCTL_RTS;
	return (error);
}
2787
/* Copy the forwarding table out for a 32-bit user request. */
static int
bridge_ioctl_rts32(struct bridge_softc *sc, void *arg)
{
	struct ifbaconf32 *bac = arg;
	struct ifbareq32 bareq;
	int error = 0;

	BRIDGE_IOCTL_RTS;
	return (error);
}
2798
2799 static int
2800 bridge_ioctl_saddr32(struct bridge_softc *sc, void *arg)
2801 {
2802 struct ifbareq32 *req = arg;
2803 struct bridge_iflist *bif;
2804 int error;
2805
2806 if (!bridge_in_bsd_mode(sc)) {
2807 return (0);
2808 }
2809
2810 bif = bridge_lookup_member(sc, req->ifba_ifsname);
2811 if (bif == NULL)
2812 return (ENOENT);
2813
2814 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
2815 req->ifba_flags);
2816
2817 return (error);
2818 }
2819
2820 static int
2821 bridge_ioctl_saddr64(struct bridge_softc *sc, void *arg)
2822 {
2823 struct ifbareq64 *req = arg;
2824 struct bridge_iflist *bif;
2825 int error;
2826
2827 if (!bridge_in_bsd_mode(sc)) {
2828 return (0);
2829 }
2830
2831 bif = bridge_lookup_member(sc, req->ifba_ifsname);
2832 if (bif == NULL)
2833 return (ENOENT);
2834
2835 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
2836 req->ifba_flags);
2837
2838 return (error);
2839 }
2840
2841 static int
2842 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
2843 {
2844 struct ifbrparam *param = arg;
2845
2846 sc->sc_brttimeout = param->ifbrp_ctime;
2847 return (0);
2848 }
2849
2850 static int
2851 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
2852 {
2853 struct ifbrparam *param = arg;
2854
2855 param->ifbrp_ctime = sc->sc_brttimeout;
2856 return (0);
2857 }
2858
2859 static int
2860 bridge_ioctl_daddr32(struct bridge_softc *sc, void *arg)
2861 {
2862 struct ifbareq32 *req = arg;
2863
2864 if (!bridge_in_bsd_mode(sc)) {
2865 return (0);
2866 }
2867 return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
2868 }
2869
2870 static int
2871 bridge_ioctl_daddr64(struct bridge_softc *sc, void *arg)
2872 {
2873 struct ifbareq64 *req = arg;
2874
2875 if (!bridge_in_bsd_mode(sc)) {
2876 return (0);
2877 }
2878 return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
2879 }
2880
2881 static int
2882 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
2883 {
2884 struct ifbreq *req = arg;
2885
2886 if (!bridge_in_bsd_mode(sc)) {
2887 return (0);
2888 }
2889 bridge_rtflush(sc, req->ifbr_ifsflags);
2890 return (0);
2891 }
2892
2893 static int
2894 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
2895 {
2896 struct ifbrparam *param = arg;
2897 struct bstp_state *bs = &sc->sc_stp;
2898
2899 if (!bridge_in_bsd_mode(sc)) {
2900 return (0);
2901 }
2902 param->ifbrp_prio = bs->bs_bridge_priority;
2903 return (0);
2904 }
2905
2906 static int
2907 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
2908 {
2909 #if BRIDGESTP
2910 struct ifbrparam *param = arg;
2911
2912 if (!bridge_in_bsd_mode(sc)) {
2913 return (EOPNOTSUPP);
2914 }
2915 return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
2916 #else /* !BRIDGESTP */
2917 #pragma unused(sc, arg)
2918 return (EOPNOTSUPP);
2919 #endif /* !BRIDGESTP */
2920 }
2921
2922 static int
2923 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
2924 {
2925 struct ifbrparam *param = arg;
2926 struct bstp_state *bs = &sc->sc_stp;
2927
2928 if (!bridge_in_bsd_mode(sc)) {
2929 return (0);
2930 }
2931 param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
2932 return (0);
2933 }
2934
2935 static int
2936 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
2937 {
2938 #if BRIDGESTP
2939 struct ifbrparam *param = arg;
2940
2941 if (!bridge_in_bsd_mode(sc)) {
2942 return (EOPNOTSUPP);
2943 }
2944 return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
2945 #else /* !BRIDGESTP */
2946 #pragma unused(sc, arg)
2947 return (EOPNOTSUPP);
2948 #endif /* !BRIDGESTP */
2949 }
2950
2951 static int
2952 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
2953 {
2954 struct ifbrparam *param;
2955 struct bstp_state *bs;
2956
2957 if (!bridge_in_bsd_mode(sc)) {
2958 return (0);
2959 }
2960 param = arg;
2961 bs = &sc->sc_stp;
2962 param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
2963 return (0);
2964 }
2965
2966 static int
2967 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
2968 {
2969 #if BRIDGESTP
2970 struct ifbrparam *param = arg;
2971
2972 if (!bridge_in_bsd_mode(sc)) {
2973 return (EOPNOTSUPP);
2974 }
2975 return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
2976 #else /* !BRIDGESTP */
2977 #pragma unused(sc, arg)
2978 return (EOPNOTSUPP);
2979 #endif /* !BRIDGESTP */
2980 }
2981
2982 static int
2983 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
2984 {
2985 struct ifbrparam *param;
2986 struct bstp_state *bs;
2987
2988 if (!bridge_in_bsd_mode(sc)) {
2989 return (EOPNOTSUPP);
2990 }
2991 param = arg;
2992 bs = &sc->sc_stp;
2993 param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
2994 return (0);
2995 }
2996
2997 static int
2998 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
2999 {
3000 #if BRIDGESTP
3001 struct ifbrparam *param = arg;
3002
3003 if (!bridge_in_bsd_mode(sc)) {
3004 return (EOPNOTSUPP);
3005 }
3006 return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
3007 #else /* !BRIDGESTP */
3008 #pragma unused(sc, arg)
3009 return (EOPNOTSUPP);
3010 #endif /* !BRIDGESTP */
3011 }
3012
3013 static int
3014 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
3015 {
3016 #if BRIDGESTP
3017 struct ifbreq *req = arg;
3018 struct bridge_iflist *bif;
3019
3020 if (!bridge_in_bsd_mode(sc)) {
3021 return (EOPNOTSUPP);
3022 }
3023 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
3024 if (bif == NULL)
3025 return (ENOENT);
3026
3027 return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
3028 #else /* !BRIDGESTP */
3029 #pragma unused(sc, arg)
3030 return (EOPNOTSUPP);
3031 #endif /* !BRIDGESTP */
3032 }
3033
3034 static int
3035 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
3036 {
3037 #if BRIDGESTP
3038 struct ifbreq *req = arg;
3039 struct bridge_iflist *bif;
3040
3041 if (!bridge_in_bsd_mode(sc)) {
3042 return (EOPNOTSUPP);
3043 }
3044 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
3045 if (bif == NULL)
3046 return (ENOENT);
3047
3048 return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
3049 #else /* !BRIDGESTP */
3050 #pragma unused(sc, arg)
3051 return (EOPNOTSUPP);
3052 #endif /* !BRIDGESTP */
3053 }
3054
3055 static int
3056 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
3057 {
3058 struct ifbrparam *param = arg;
3059
3060 param->ifbrp_filter = sc->sc_filter_flags;
3061
3062 return (0);
3063 }
3064
3065 static int
3066 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
3067 {
3068 struct ifbrparam *param = arg;
3069
3070 if (param->ifbrp_filter & ~IFBF_FILT_MASK)
3071 return (EINVAL);
3072
3073 #ifndef BRIDGE_IPF
3074 if (param->ifbrp_filter & IFBF_FILT_USEIPF)
3075 return (EINVAL);
3076 #endif
3077
3078 sc->sc_filter_flags = param->ifbrp_filter;
3079
3080 return (0);
3081 }
3082
3083 static int
3084 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
3085 {
3086 struct ifbreq *req = arg;
3087 struct bridge_iflist *bif;
3088
3089 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
3090 if (bif == NULL)
3091 return (ENOENT);
3092
3093 bif->bif_addrmax = req->ifbr_addrmax;
3094 return (0);
3095 }
3096
/*
 * bridge_ioctl_addspan:
 *
 *	Add an interface to the bridge's span list.  Span interfaces
 *	must not already be span or regular members of any bridge, and
 *	only Ethernet-like interface types are accepted.
 */
static int
bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif = NULL;
	struct ifnet *ifs;

	if (!bridge_in_bsd_mode(sc)) {
		return (EOPNOTSUPP);
	}
	ifs = ifunit(req->ifbr_ifsname);
	if (ifs == NULL)
		return (ENOENT);

	if (IFNET_IS_INTCOPROC(ifs)) {
		return (EINVAL);
	}

	/* Already on the span list? */
	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
		if (ifs == bif->bif_ifp)
			return (EBUSY);

	/* A regular bridge member cannot also be a span interface */
	if (ifs->if_bridge != NULL)
		return (EBUSY);

	switch (ifs->if_type) {
	case IFT_ETHER:
	case IFT_L2VLAN:
		break;
	case IFT_GIF:
		/* currently not supported */
	/* FALLTHRU */
	default:
		return (EINVAL);
	}

	bif = _MALLOC(sizeof (*bif), M_DEVBUF, M_WAITOK | M_ZERO);
	if (bif == NULL)
		return (ENOMEM);

	bif->bif_ifp = ifs;
	bif->bif_ifflags = IFBIF_SPAN;

	/* Hold a reference on the interface for the span record */
	ifnet_reference(bif->bif_ifp);

	TAILQ_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);

	return (0);
}
3146
3147 static int
3148 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
3149 {
3150 struct ifbreq *req = arg;
3151 struct bridge_iflist *bif;
3152 struct ifnet *ifs;
3153
3154 if (!bridge_in_bsd_mode(sc)) {
3155 return (EOPNOTSUPP);
3156 }
3157 ifs = ifunit(req->ifbr_ifsname);
3158 if (ifs == NULL)
3159 return (ENOENT);
3160
3161 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
3162 if (ifs == bif->bif_ifp)
3163 break;
3164
3165 if (bif == NULL)
3166 return (ENOENT);
3167
3168 bridge_delete_span(sc, bif);
3169
3170 return (0);
3171 }
3172
/*
 * BRIDGE_IOCTL_GBPARAM:
 *
 *	Shared body for bridge_ioctl_gbparam32/64: fills the user
 *	request with the bridge-wide STP operational parameters.
 *	Expects `sc' and `req' in the expanding function's scope.
 *	Timer values are stored shifted left by 8 bits internally.
 */
#define BRIDGE_IOCTL_GBPARAM do { \
	struct bstp_state *bs = &sc->sc_stp; \
	struct bstp_port *root_port; \
	\
	req->ifbop_maxage = bs->bs_bridge_max_age >> 8; \
	req->ifbop_hellotime = bs->bs_bridge_htime >> 8; \
	req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8; \
	\
	root_port = bs->bs_root_port; \
	if (root_port == NULL) \
		req->ifbop_root_port = 0; \
	else \
		req->ifbop_root_port = root_port->bp_ifp->if_index; \
	\
	req->ifbop_holdcount = bs->bs_txholdcount; \
	req->ifbop_priority = bs->bs_bridge_priority; \
	req->ifbop_protocol = bs->bs_protover; \
	req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; \
	req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; \
	req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; \
	req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; \
	req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; \
	req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; \
} while (0)
3197
/* Report bridge STP parameters for a 32-bit user request. */
static int
bridge_ioctl_gbparam32(struct bridge_softc *sc, void *arg)
{
	struct ifbropreq32 *req = arg;

	if (bridge_in_bsd_mode(sc)) {
		BRIDGE_IOCTL_GBPARAM;
	}
	return (0);
}
3208
/* Report bridge STP parameters for a 64-bit user request. */
static int
bridge_ioctl_gbparam64(struct bridge_softc *sc, void *arg)
{
	struct ifbropreq64 *req = arg;

	if (bridge_in_bsd_mode(sc)) {
		BRIDGE_IOCTL_GBPARAM;
	}
	return (0);
}
3219
3220 static int
3221 bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
3222 {
3223 struct ifbrparam *param = arg;
3224
3225 param->ifbrp_cexceeded = sc->sc_brtexceeded;
3226 return (0);
3227 }
3228
/*
 * BRIDGE_IOCTL_GIFSSTP:
 *	Common body for bridge_ioctl_gifsstp32/64: copy one struct
 *	ifbpstpreq per STP-enabled member interface out to user space.
 *	Two-pass protocol: a caller passing ifbpstp_len == 0 gets only
 *	the required buffer size back.  Requires `sc', `bifstp' and
 *	`error' in scope; returns from the enclosing function.
 */
#define	BRIDGE_IOCTL_GIFSSTP do {					\
	struct bridge_iflist *bif;					\
	struct bstp_port *bp;						\
	struct ifbpstpreq bpreq;					\
	char *buf, *outbuf;						\
	unsigned int count, buflen, len;				\
									\
	/* First pass: count STP-enabled members to size the buffer. */	\
	count = 0;							\
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {			\
		if ((bif->bif_ifflags & IFBIF_STP) != 0)		\
			count++;					\
	}								\
									\
	buflen = sizeof (bpreq) * count;				\
	if (bifstp->ifbpstp_len == 0) {					\
		bifstp->ifbpstp_len = buflen;				\
		return (0);						\
	}								\
									\
	/* Drop the bridge lock around the blocking allocation. */	\
	BRIDGE_UNLOCK(sc);						\
	outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO);		\
	BRIDGE_LOCK(sc);						\
									\
	/* Second pass: fill records up to the caller's buffer size. */	\
	count = 0;							\
	buf = outbuf;							\
	len = min(bifstp->ifbpstp_len, buflen);				\
	bzero(&bpreq, sizeof (bpreq));					\
	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {			\
		if (len < sizeof (bpreq))				\
			break;						\
									\
		if ((bif->bif_ifflags & IFBIF_STP) == 0)		\
			continue;					\
									\
		bp = &bif->bif_stp;					\
		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;	\
		bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;	\
		bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;	\
		bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;	\
		bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; \
		bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;	\
									\
		memcpy(buf, &bpreq, sizeof (bpreq));			\
		count++;						\
		buf += sizeof (bpreq);					\
		len -= sizeof (bpreq);					\
	}								\
									\
	/* Unlock for copyout; report only the records actually filled. */ \
	BRIDGE_UNLOCK(sc);						\
	bifstp->ifbpstp_len = sizeof (bpreq) * count;			\
	error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); \
	BRIDGE_LOCK(sc);						\
	_FREE(outbuf, M_TEMP);						\
	return (error);							\
} while (0)
3284
3285 static int
3286 bridge_ioctl_gifsstp32(struct bridge_softc *sc, void *arg)
3287 {
3288 struct ifbpstpconf32 *bifstp = arg;
3289 int error = 0;
3290
3291 if (bridge_in_bsd_mode(sc)) {
3292 BRIDGE_IOCTL_GIFSSTP;
3293 }
3294 return (error);
3295 }
3296
3297 static int
3298 bridge_ioctl_gifsstp64(struct bridge_softc *sc, void *arg)
3299 {
3300 struct ifbpstpconf64 *bifstp = arg;
3301 int error = 0;
3302
3303 if (bridge_in_bsd_mode(sc)) {
3304 BRIDGE_IOCTL_GIFSSTP;
3305 }
3306 return (error);
3307 }
3308
/*
 * bridge_ioctl_sproto:
 *	Set the spanning tree protocol version.  Only supported in BSD
 *	mode and when the kernel is built with BRIDGESTP.
 */
static int
bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
{
#if BRIDGESTP
	struct ifbrparam *param = arg;

	if (!bridge_in_bsd_mode(sc)) {
		return (EOPNOTSUPP);
	}
	return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
	return (EOPNOTSUPP);
#endif /* !BRIDGESTP */
}
3324
/*
 * bridge_ioctl_stxhc:
 *	Set the STP transmit hold count.  Only supported in BSD mode
 *	and when the kernel is built with BRIDGESTP.
 */
static int
bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
{
#if BRIDGESTP
	struct ifbrparam *param = arg;

	if (!bridge_in_bsd_mode(sc)) {
		return (EOPNOTSUPP);
	}
	return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
	return (EOPNOTSUPP);
#endif /* !BRIDGESTP */
}
3340
3341
3342 static int
3343 bridge_ioctl_ghostfilter(struct bridge_softc *sc, void *arg)
3344 {
3345 struct ifbrhostfilter *req = arg;
3346 struct bridge_iflist *bif;
3347
3348 bif = bridge_lookup_member(sc, req->ifbrhf_ifsname);
3349 if (bif == NULL)
3350 return (ENOENT);
3351
3352 bzero(req, sizeof(struct ifbrhostfilter));
3353 if (bif->bif_flags & BIFF_HOST_FILTER) {
3354 req->ifbrhf_flags |= IFBRHF_ENABLED;
3355 bcopy(bif->bif_hf_hwsrc, req->ifbrhf_hwsrca,
3356 ETHER_ADDR_LEN);
3357 req->ifbrhf_ipsrc = bif->bif_hf_ipsrc.s_addr;
3358 }
3359 return (0);
3360 }
3361
3362 static int
3363 bridge_ioctl_shostfilter(struct bridge_softc *sc, void *arg)
3364 {
3365 struct ifbrhostfilter *req = arg;
3366 struct bridge_iflist *bif;
3367
3368 bif = bridge_lookup_member(sc, req->ifbrhf_ifsname);
3369 if (bif == NULL)
3370 return (ENOENT);
3371
3372 INC_ATOMIC_INT64_LIM(net_api_stats.nas_vmnet_total);
3373
3374 if (req->ifbrhf_flags & IFBRHF_ENABLED) {
3375 bif->bif_flags |= BIFF_HOST_FILTER;
3376
3377 if (req->ifbrhf_flags & IFBRHF_HWSRC) {
3378 bcopy(req->ifbrhf_hwsrca, bif->bif_hf_hwsrc,
3379 ETHER_ADDR_LEN);
3380 if (bcmp(req->ifbrhf_hwsrca, ethernulladdr,
3381 ETHER_ADDR_LEN) != 0)
3382 bif->bif_flags |= BIFF_HF_HWSRC;
3383 else
3384 bif->bif_flags &= ~BIFF_HF_HWSRC;
3385 }
3386 if (req->ifbrhf_flags & IFBRHF_IPSRC) {
3387 bif->bif_hf_ipsrc.s_addr = req->ifbrhf_ipsrc;
3388 if (bif->bif_hf_ipsrc.s_addr != INADDR_ANY)
3389 bif->bif_flags |= BIFF_HF_IPSRC;
3390 else
3391 bif->bif_flags &= ~BIFF_HF_IPSRC;
3392 }
3393 } else {
3394 bif->bif_flags &= ~(BIFF_HOST_FILTER | BIFF_HF_HWSRC |
3395 BIFF_HF_IPSRC);
3396 bzero(bif->bif_hf_hwsrc, ETHER_ADDR_LEN);
3397 bif->bif_hf_ipsrc.s_addr = INADDR_ANY;
3398 }
3399
3400 return (0);
3401 }
3402
3403
/*
 * bridge_ifdetach:
 *
 *	Detach an interface from a bridge.  Called when a member
 *	interface is detaching.  Handles both regular members
 *	(ifp->if_bridge set) and span ports (found by scanning every
 *	bridge's span list).
 */
__private_extern__ void
bridge_ifdetach(struct bridge_iflist *bif, struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_bridge;

	/*
	 * NOTE(review): the incoming `bif' argument is never read; it
	 * is overwritten by the lookups below.
	 */
#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
		printf("%s: %s\n", __func__, ifp->if_xname);
#endif /* BRIDGE_DEBUG */

	/* Check if the interface is a bridge member */
	if (sc != NULL) {
		BRIDGE_LOCK(sc);
		bif = bridge_lookup_member_if(sc, ifp);
		if (bif != NULL)
			bridge_delete_member(sc, bif, 1);
		BRIDGE_UNLOCK(sc);
		return;
	}
	/* Not a member: check if the interface is a span port of any bridge */
	lck_mtx_lock(&bridge_list_mtx);
	LIST_FOREACH(sc, &bridge_list, sc_list) {
		if (bridge_in_bsd_mode(sc)) {
			BRIDGE_LOCK(sc);
			TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
				if (ifp == bif->bif_ifp) {
					bridge_delete_span(sc, bif);
					break;
				}
			BRIDGE_UNLOCK(sc);
		}
	}
	lck_mtx_unlock(&bridge_list_mtx);
}
3444
3445 /*
3446 * interface_media_active:
3447 *
3448 * Tells if an interface media is active.
3449 */
3450 static int
3451 interface_media_active(struct ifnet *ifp)
3452 {
3453 struct ifmediareq ifmr;
3454 int status = 0;
3455
3456 bzero(&ifmr, sizeof(ifmr));
3457 if (ifnet_ioctl(ifp, 0, SIOCGIFMEDIA, &ifmr) == 0) {
3458 if ((ifmr.ifm_status & IFM_AVALID) && ifmr.ifm_count > 0)
3459 status = ifmr.ifm_status & IFM_ACTIVE ? 1 : 0;
3460 }
3461
3462 return (status);
3463 }
3464
3465 /*
3466 * bridge_updatelinkstatus:
3467 *
3468 * Update the media active status of the bridge based on the
3469 * media active status of its member.
3470 * If changed, return the corresponding onf/off link event.
3471 */
3472 static u_int32_t
3473 bridge_updatelinkstatus(struct bridge_softc *sc)
3474 {
3475 struct bridge_iflist *bif;
3476 int active_member = 0;
3477 u_int32_t event_code = 0;
3478
3479 BRIDGE_LOCK_ASSERT_HELD(sc);
3480
3481 /*
3482 * Find out if we have an active interface
3483 */
3484 TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
3485 if (bif->bif_flags & BIFF_MEDIA_ACTIVE) {
3486 active_member = 1;
3487 break;
3488 }
3489 }
3490
3491 if (active_member && !(sc->sc_flags & SCF_MEDIA_ACTIVE)) {
3492 sc->sc_flags |= SCF_MEDIA_ACTIVE;
3493 event_code = KEV_DL_LINK_ON;
3494 } else if (!active_member && (sc->sc_flags & SCF_MEDIA_ACTIVE)) {
3495 sc->sc_flags &= ~SCF_MEDIA_ACTIVE;
3496 event_code = KEV_DL_LINK_OFF;
3497 }
3498
3499 return (event_code);
3500 }
3501
/*
 * bridge_iflinkevent:
 *	Handle a link status change on a member interface: refresh the
 *	member's BIFF_MEDIA_ACTIVE flag and, if the bridge's aggregate
 *	link state changed as a result, post the corresponding
 *	KEV_DL_LINK_ON/OFF event on the bridge interface.
 */
static void
bridge_iflinkevent(struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_bridge;
	struct bridge_iflist *bif;
	u_int32_t event_code = 0;

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
		printf("%s: %s\n", __func__, ifp->if_xname);
#endif /* BRIDGE_DEBUG */

	/* Check if the interface is a bridge member */
	if (sc == NULL)
		return;

	BRIDGE_LOCK(sc);
	bif = bridge_lookup_member_if(sc, ifp);
	if (bif != NULL) {
		if (interface_media_active(ifp))
			bif->bif_flags |= BIFF_MEDIA_ACTIVE;
		else
			bif->bif_flags &= ~BIFF_MEDIA_ACTIVE;

		event_code = bridge_updatelinkstatus(sc);
	}
	BRIDGE_UNLOCK(sc);

	/*
	 * NOTE(review): sc is dereferenced after dropping the lock;
	 * presumably the softc cannot be freed while this callback
	 * runs -- confirm against the detach path.
	 */
	if (event_code != 0)
		bridge_link_event(sc->sc_ifp, event_code);
}
3536
/*
 * bridge_delayed_callback:
 *
 *	Thread-call trampoline for delayed calls scheduled via
 *	bridge_schedule_delayed_call().  Runs the stored function
 *	unless the call is being cancelled or the bridge is detaching,
 *	then clears BDCF_OUTSTANDING.
 */
static void
bridge_delayed_callback(void *param)
{
	struct bridge_delayed_call *call = (struct bridge_delayed_call *)param;
	struct bridge_softc *sc = call->bdc_sc;

#if BRIDGE_DEBUG_DELAYED_CALLBACK
	/* Debug aid: artificially delay the callback to widen races. */
	if (bridge_delayed_callback_delay > 0) {
		struct timespec ts;

		ts.tv_sec = bridge_delayed_callback_delay;
		ts.tv_nsec = 0;

		printf("%s: sleeping for %d seconds\n",
		    __func__, bridge_delayed_callback_delay);

		msleep(&bridge_delayed_callback_delay, NULL, PZERO,
		    __func__, &ts);

		printf("%s: awoken\n", __func__);
	}
#endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */

	BRIDGE_LOCK(sc);

#if BRIDGE_DEBUG_DELAYED_CALLBACK
	if (if_bridge_debug & BR_DBGF_DELAYED_CALL)
		printf("%s: %s call 0x%llx flags 0x%x\n", __func__,
		    sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call),
		    call->bdc_flags);
#endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */

	if (call->bdc_flags & BDCF_CANCELLING) {
		/* A canceller is sleeping on `call' -- wake it instead. */
		wakeup(call);
	} else {
		if ((sc->sc_flags & SCF_DETACHING) == 0)
			(*call->bdc_func)(sc);
	}
	call->bdc_flags &= ~BDCF_OUTSTANDING;
	BRIDGE_UNLOCK(sc);
}
3583
/*
 * bridge_schedule_delayed_call:
 *
 *	Schedule a function to be called on a separate thread.
 *	The actual call may be scheduled to run at a given time
 *	(bdc_ts non-zero) or ASAP.  No-op if the bridge is detaching
 *	or the call is already outstanding/being cancelled.
 *	Caller holds the bridge lock.
 */
static void
bridge_schedule_delayed_call(struct bridge_delayed_call *call)
{
	uint64_t deadline = 0;
	struct bridge_softc *sc = call->bdc_sc;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	if ((sc->sc_flags & SCF_DETACHING) ||
	    (call->bdc_flags & (BDCF_OUTSTANDING | BDCF_CANCELLING)))
		return;

	/* Convert the relative timespec into an absolute deadline. */
	if (call->bdc_ts.tv_sec || call->bdc_ts.tv_nsec) {
		nanoseconds_to_absolutetime(
		    (uint64_t)call->bdc_ts.tv_sec * NSEC_PER_SEC +
		    call->bdc_ts.tv_nsec, &deadline);
		clock_absolutetime_interval_to_deadline(deadline, &deadline);
	}

	/*
	 * Plain assignment (not |=) is safe: the guard above ensures
	 * no other flag bits are currently set.
	 */
	call->bdc_flags = BDCF_OUTSTANDING;

#if BRIDGE_DEBUG_DELAYED_CALLBACK
	if (if_bridge_debug & BR_DBGF_DELAYED_CALL)
		printf("%s: %s call 0x%llx flags 0x%x\n", __func__,
		    sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call),
		    call->bdc_flags);
#endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */

	/* Timed calls use the func-based API; ASAP calls use a cached
	 * thread_call object allocated on first use. */
	if (call->bdc_ts.tv_sec || call->bdc_ts.tv_nsec)
		thread_call_func_delayed(
		    (thread_call_func_t)bridge_delayed_callback,
		    call, deadline);
	else {
		if (call->bdc_thread_call == NULL)
			call->bdc_thread_call = thread_call_allocate(
			    (thread_call_func_t)bridge_delayed_callback,
			    call);
		thread_call_enter(call->bdc_thread_call);
	}
}
3630
/*
 * bridge_cancel_delayed_call:
 *
 *	Cancel a queued or running delayed call.
 *	If the call is running, does not return until the call is done,
 *	to prevent a race with the bridge interface being destroyed:
 *	the callback sees BDCF_CANCELLING and wakes us instead of
 *	running the function.  Caller holds the bridge lock.
 */
static void
bridge_cancel_delayed_call(struct bridge_delayed_call *call)
{
	boolean_t result;
	struct bridge_softc *sc = call->bdc_sc;

	/*
	 * The call was never scheduled
	 */
	if (sc == NULL)
		return;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	call->bdc_flags |= BDCF_CANCELLING;

	while (call->bdc_flags & BDCF_OUTSTANDING) {
#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_DELAYED_CALL)
			printf("%s: %s call 0x%llx flags 0x%x\n", __func__,
			    sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call),
			    call->bdc_flags);
#endif /* BRIDGE_DEBUG */
		result = thread_call_func_cancel(
		    (thread_call_func_t)bridge_delayed_callback, call, FALSE);

		if (result) {
			/*
			 * We managed to dequeue the delayed call
			 * before it started running.
			 */
			call->bdc_flags &= ~BDCF_OUTSTANDING;
		} else {
			/*
			 * Already running: sleep until the callback's
			 * wakeup(call); the bridge lock is dropped
			 * while we sleep (msleep on sc_mtx).
			 */
			msleep(call, &sc->sc_mtx, PZERO, __func__, NULL);
		}
	}
	call->bdc_flags &= ~BDCF_CANCELLING;
}
3678
3679 /*
3680 * bridge_cleanup_delayed_call:
3681 *
3682 * Dispose resource allocated for a delayed call
3683 * Assume the delayed call is not queued or running .
3684 */
3685 static void
3686 bridge_cleanup_delayed_call(struct bridge_delayed_call *call)
3687 {
3688 boolean_t result;
3689 struct bridge_softc *sc = call->bdc_sc;
3690
3691 /*
3692 * The call was never scheduled
3693 */
3694 if (sc == NULL)
3695 return;
3696
3697 BRIDGE_LOCK_ASSERT_HELD(sc);
3698
3699 VERIFY((call->bdc_flags & BDCF_OUTSTANDING) == 0);
3700 VERIFY((call->bdc_flags & BDCF_CANCELLING) == 0);
3701
3702 if (call->bdc_thread_call != NULL) {
3703 result = thread_call_free(call->bdc_thread_call);
3704 if (result == FALSE)
3705 panic("%s thread_call_free() failed for call %p",
3706 __func__, call);
3707 call->bdc_thread_call = NULL;
3708 }
3709 }
3710
/*
 * bridge_init:
 *
 *	Initialize a bridge interface: mark it IFF_RUNNING and, in BSD
 *	mode, arm the address aging timer and (with BRIDGESTP) start
 *	the spanning tree machinery.  Caller holds the bridge lock.
 */
static int
bridge_init(struct ifnet *ifp)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifp->if_softc;
	errno_t error;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	/* Already running: nothing to do. */
	if ((ifnet_flags(ifp) & IFF_RUNNING))
		return (0);

	error = ifnet_set_flags(ifp, IFF_RUNNING, IFF_RUNNING);

	if (bridge_in_bsd_mode(sc)) {
		/*
		 * Calling bridge_aging_timer() is OK as there are no entries to
		 * age so we're just going to arm the timer.
		 * NOTE(review): the timer is armed even if
		 * ifnet_set_flags() failed; only bstp_init is gated on
		 * error -- confirm this is intentional.
		 */
		bridge_aging_timer(sc);
#if BRIDGESTP
		if (error == 0)
			bstp_init(&sc->sc_stp);	/* Initialize Spanning Tree */
#endif /* BRIDGESTP */
	}
	return (error);
}
3742
/*
 * bridge_ifstop:
 *
 *	Stop the bridge interface: cancel the aging timer, stop STP,
 *	flush dynamically learned addresses and clear IFF_RUNNING.
 *	Caller holds the bridge lock.
 */
static void
bridge_ifstop(struct ifnet *ifp, int disable)
{
#pragma unused(disable)
	struct bridge_softc *sc = ifp->if_softc;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	/* Already stopped: nothing to do. */
	if ((ifnet_flags(ifp) & IFF_RUNNING) == 0)
		return;

	if (bridge_in_bsd_mode(sc)) {
		/* Blocks until any in-flight aging callback finishes. */
		bridge_cancel_delayed_call(&sc->sc_aging_timer);

#if BRIDGESTP
		bstp_stop(&sc->sc_stp);
#endif /* BRIDGESTP */

		bridge_rtflush(sc, IFBF_FLUSHDYN);
	}
	(void) ifnet_set_flags(ifp, 0, IFF_RUNNING);
}
3770
/*
 * bridge_enqueue:
 *
 *	Enqueue a packet (or chain of fragments linked via m_nextpkt)
 *	on a bridge member interface via dlil_output().  Returns the
 *	first error encountered, with flow-advisory results mapped to
 *	EQFULL/EQSUSPENDED.  Output statistics are accounted against
 *	the bridge interface (sc->sc_ifp), not the member.
 */
static int
bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
{
	int len, error = 0;
	short mflags;
	struct mbuf *m0;

	VERIFY(dst_ifp != NULL);

	/*
	 * We may be sending a fragment so traverse the mbuf
	 *
	 * NOTE: bridge_fragment() is called only when PFIL_HOOKS is enabled.
	 */
	for (; m; m = m0) {
		errno_t _error;
		struct flowadv adv = { FADV_SUCCESS };

		/* Detach this packet from the chain before sending. */
		m0 = m->m_nextpkt;
		m->m_nextpkt = NULL;

		len = m->m_pkthdr.len;
		/* NOTE(review): mflags is saved but never used below. */
		mflags = m->m_flags;
		m->m_flags |= M_PROTO1; /* set to avoid loops */

		bridge_finalize_cksum(dst_ifp, m);

#if HAS_IF_CAP
		/*
		 * If underlying interface can not do VLAN tag insertion itself
		 * then attach a packet tag that holds it.
		 */
		if ((m->m_flags & M_VLANTAG) &&
		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
			if (m == NULL) {
				printf("%s: %s: unable to prepend VLAN "
				    "header\n", __func__, dst_ifp->if_xname);
				(void) ifnet_stat_increment_out(dst_ifp,
				    0, 0, 1);
				continue;
			}
			m->m_flags &= ~M_VLANTAG;
		}
#endif /* HAS_IF_CAP */

		_error = dlil_output(dst_ifp, 0, m, NULL, NULL, 1, &adv);

		/* Preserve existing error value */
		if (error == 0) {
			if (_error != 0)
				error = _error;
			else if (adv.code == FADV_FLOW_CONTROLLED)
				error = EQFULL;
			else if (adv.code == FADV_SUSPENDED)
				error = EQSUSPENDED;
		}

		/* Account success/failure on the bridge interface. */
		if (_error == 0) {
			(void) ifnet_stat_increment_out(sc->sc_ifp, 1, len, 0);
		} else {
			(void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
		}
	}

	return (error);
}
3844
#if HAS_BRIDGE_DUMMYNET
/*
 * bridge_dummynet:
 *
 *	Receive a queued packet from dummynet and pass it on to the output
 *	interface.
 *
 *	The mbuf has the Ethernet header already attached.
 */
static void
bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
{
	struct bridge_softc *sc;

	sc = ifp->if_bridge;

	/*
	 * The packet didn't originate from a member interface. This should only
	 * ever happen if a member interface is removed while packets are
	 * queued for it.
	 */
	if (sc == NULL) {
		m_freem(m);
		return;
	}

	/* Re-run the output filters; bridge_pfil() may consume the mbuf. */
	if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
			return;
		if (m == NULL)
			return;
	}

	(void) bridge_enqueue(sc, ifp, m);
}
#endif /* HAS_BRIDGE_DUMMYNET */
3881
#if BRIDGE_MEMBER_OUT_FILTER
/*
 * bridge_member_output:
 *
 *	Send output from a bridge member interface.  This
 *	performs the bridging function for locally originated
 *	packets.
 *
 *	The mbuf has the Ethernet header already attached.  We must
 *	enqueue or free the mbuf before returning.
 */
static int
bridge_member_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
    struct rtentry *rt)
{
#pragma unused(sa, rt)
	struct ether_header *eh;
	struct ifnet *dst_if;
	struct bridge_softc *sc;
	uint16_t vlan;

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_OUTPUT)
		printf("%s: ifp %s\n", __func__, ifp->if_xname);
#endif /* BRIDGE_DEBUG */

	/* Make sure the Ethernet header is contiguous. */
	if (m->m_len < ETHER_HDR_LEN) {
		m = m_pullup(m, ETHER_HDR_LEN);
		if (m == NULL)
			return (0);
	}

	eh = mtod(m, struct ether_header *);
	sc = ifp->if_bridge;
	vlan = VLANTAGOF(m);

	BRIDGE_LOCK(sc);

	/*
	 * APPLE MODIFICATION
	 * If the packet is an 802.1X ethertype, then only send on the
	 * original output interface.
	 */
	if (eh->ether_type == htons(ETHERTYPE_PAE)) {
		dst_if = ifp;
		goto sendunicast;
	}

	/*
	 * If bridge is down, but the original output interface is up,
	 * go ahead and send out that interface.  Otherwise, the packet
	 * is dropped below.
	 */
	if ((sc->sc_ifp->if_flags & IFF_RUNNING) == 0) {
		dst_if = ifp;
		goto sendunicast;
	}

	/*
	 * If the packet is a multicast, or we don't know a better way to
	 * get there, send to all interfaces.
	 */
	if (ETHER_IS_MULTICAST(eh->ether_dhost))
		dst_if = NULL;
	else
		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
	if (dst_if == NULL) {
		struct bridge_iflist *bif;
		struct mbuf *mc;
		int error = 0, used = 0;

		bridge_span(sc, m);

		/* Converts the lock to a reference; on error the lock
		 * is already released by the macro. */
		BRIDGE_LOCK2REF(sc, error);
		if (error) {
			m_freem(m);
			return (0);
		}

		TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
			dst_if = bif->bif_ifp;

			if (dst_if->if_type == IFT_GIF)
				continue;
			if ((dst_if->if_flags & IFF_RUNNING) == 0)
				continue;

			/*
			 * If this is not the original output interface,
			 * and the interface is participating in spanning
			 * tree, make sure the port is in a state that
			 * allows forwarding.
			 */
			if (dst_if != ifp && (bif->bif_ifflags & IFBIF_STP) &&
			    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
				continue;

			/*
			 * Hand the original mbuf to the last member to
			 * save one copy.  BUG FIX: sc_iflist is a TAILQ
			 * (see TAILQ_FOREACH above), so TAILQ_NEXT must
			 * be used here -- LIST_NEXT reads a `le_next'
			 * field that a TAILQ_ENTRY does not have.
			 */
			if (TAILQ_NEXT(bif, bif_next) == NULL) {
				used = 1;
				mc = m;
			} else {
				mc = m_copypacket(m, M_DONTWAIT);
				if (mc == NULL) {
					(void) ifnet_stat_increment_out(
					    sc->sc_ifp, 0, 0, 1);
					continue;
				}
			}

			(void) bridge_enqueue(sc, dst_if, mc);
		}
		if (used == 0)
			m_freem(m);
		BRIDGE_UNREF(sc);
		return (0);
	}

sendunicast:
	/*
	 * XXX Spanning tree consideration here?
	 */

	bridge_span(sc, m);
	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
		m_freem(m);
		BRIDGE_UNLOCK(sc);
		return (0);
	}

	BRIDGE_UNLOCK(sc);
	(void) bridge_enqueue(sc, dst_if, m);
	return (0);
}
#endif /* BRIDGE_MEMBER_OUT_FILTER */
4016
/*
 * bridge_output:
 *
 *	Output callback for the bridge interface itself: look up the
 *	destination in the forwarding table and enqueue on that member,
 *	or broadcast to all members when unknown or multicast.
 *
 *	This routine is called externally from above only when if_bridge_txstart
 *	is disabled; otherwise it is called internally by bridge_start().
 */
static int
bridge_output(struct ifnet *ifp, struct mbuf *m)
{
	struct bridge_softc *sc = ifnet_softc(ifp);
	struct ether_header *eh;
	struct ifnet *dst_if;
	int error = 0;

	eh = mtod(m, struct ether_header *);
	dst_if = NULL;

	BRIDGE_LOCK(sc);
	ASSERT(bridge_in_bsd_mode(sc));

	/* Only unicast destinations can be found in the table. */
	if (!(m->m_flags & (M_BCAST|M_MCAST)))
		dst_if = bridge_rtlookup(sc, eh->ether_dhost, 0);

	(void) ifnet_stat_increment_out(ifp, 1, m->m_pkthdr.len, 0);

#if NBPFILTER > 0
	if (sc->sc_bpf_output)
		bridge_bpf_output(ifp, m);
#endif

	if (dst_if == NULL) {
		/* callee will unlock */
		bridge_broadcast(sc, ifp, m, 0);
	} else {
		BRIDGE_UNLOCK(sc);
		error = bridge_enqueue(sc, dst_if, m);
	}

	return (error);
}
4057
/*
 * bridge_finalize_cksum:
 *	Finalize outbound checksums before the packet is handed to the
 *	destination interface: compute in software whatever checksum
 *	work `ifp' cannot offload.  If the interface supports partial
 *	(16-bit one's complement) checksum offload, TCP checksums are
 *	converted to that form instead of being computed here.
 */
static void
bridge_finalize_cksum(struct ifnet *ifp, struct mbuf *m)
{
	struct ether_header *eh = mtod(m, struct ether_header *);
	uint32_t sw_csum, hwcap;

	if (ifp != NULL)
		hwcap = (ifp->if_hwassist | CSUM_DATA_VALID);
	else
		hwcap = 0;

	/* do in software what the hardware cannot */
	sw_csum = m->m_pkthdr.csum_flags & ~IF_HWASSIST_CSUM_FLAGS(hwcap);
	sw_csum &= IF_HWASSIST_CSUM_MASK;

	switch (ntohs(eh->ether_type)) {
	case ETHERTYPE_IP:
		if ((hwcap & CSUM_PARTIAL) && !(sw_csum & CSUM_DELAY_DATA) &&
		    (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
			if (m->m_pkthdr.csum_flags & CSUM_TCP) {
				/*
				 * Offload as a partial checksum: record
				 * the TCP start/stuff offsets past the
				 * Ethernet + IP headers.
				 * NOTE(review): uses a fixed
				 * sizeof (struct ip), i.e. appears to
				 * assume no IP options -- confirm.
				 */
				uint16_t start =
				    sizeof (*eh) + sizeof (struct ip);
				uint16_t ulpoff =
				    m->m_pkthdr.csum_data & 0xffff;
				m->m_pkthdr.csum_flags |=
				    (CSUM_DATA_VALID | CSUM_PARTIAL);
				m->m_pkthdr.csum_tx_stuff = (ulpoff + start);
				m->m_pkthdr.csum_tx_start = start;
			} else {
				/* Non-TCP: fall back to software. */
				sw_csum |= (CSUM_DELAY_DATA &
				    m->m_pkthdr.csum_flags);
			}
		}
		(void) in_finalize_cksum(m, sizeof (*eh), sw_csum);
		break;

#if INET6
	case ETHERTYPE_IPV6:
		if ((hwcap & CSUM_PARTIAL) &&
		    !(sw_csum & CSUM_DELAY_IPV6_DATA) &&
		    (m->m_pkthdr.csum_flags & CSUM_DELAY_IPV6_DATA)) {
			if (m->m_pkthdr.csum_flags & CSUM_TCPIPV6) {
				/* Same partial-offload setup for IPv6;
				 * assumes no extension headers. */
				uint16_t start =
				    sizeof (*eh) + sizeof (struct ip6_hdr);
				uint16_t ulpoff =
				    m->m_pkthdr.csum_data & 0xffff;
				m->m_pkthdr.csum_flags |=
				    (CSUM_DATA_VALID | CSUM_PARTIAL);
				m->m_pkthdr.csum_tx_stuff = (ulpoff + start);
				m->m_pkthdr.csum_tx_start = start;
			} else {
				sw_csum |= (CSUM_DELAY_IPV6_DATA &
				    m->m_pkthdr.csum_flags);
			}
		}
		(void) in6_finalize_cksum(m, sizeof (*eh), -1, -1, sw_csum);
		break;
#endif /* INET6 */
	}
}
4118
/*
 * bridge_start:
 *
 *	Output start routine for the bridge: drain the interface output
 *	queue and push each packet through bridge_output().
 *
 *	Invoked only by the ifnet start worker thread (when
 *	if_bridge_txstart is enabled), which already serializes calls,
 *	so no additional locking is required here.
 */
static void
bridge_start(struct ifnet *ifp)
{
	struct mbuf *m;

	while (ifnet_dequeue(ifp, &m) == 0)
		(void) bridge_output(ifp, m);
}
4143
/*
 * bridge_forward:
 *
 *	The forwarding function of the bridge: learn the source
 *	address, apply STP port-state and 802.1D filtering, then either
 *	broadcast or forward to the single known destination member.
 *
 *	NOTE: Releases the lock on return.
 */
static void
bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
    struct mbuf *m)
{
	struct bridge_iflist *dbif;
	struct ifnet *src_if, *dst_if, *ifp;
	struct ether_header *eh;
	uint16_t vlan;
	uint8_t *dst;
	int error;

	BRIDGE_LOCK_ASSERT_HELD(sc);
	ASSERT(bridge_in_bsd_mode(sc));

#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_OUTPUT)
		printf("%s: %s m 0x%llx\n", __func__, sc->sc_ifp->if_xname,
		    (uint64_t)VM_KERNEL_ADDRPERM(m));
#endif /* BRIDGE_DEBUG */

	src_if = m->m_pkthdr.rcvif;
	ifp = sc->sc_ifp;

	(void) ifnet_stat_increment_in(ifp, 1, m->m_pkthdr.len, 0);
	vlan = VLANTAGOF(m);


	/* Discarding ports do not forward at all. */
	if ((sbif->bif_ifflags & IFBIF_STP) &&
	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
		goto drop;

	eh = mtod(m, struct ether_header *);
	dst = eh->ether_dhost;

	/* If the interface is learning, record the address. */
	if (sbif->bif_ifflags & IFBIF_LEARNING) {
		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
		    sbif, 0, IFBAF_DYNAMIC);
		/*
		 * If the interface has addresses limits then deny any source
		 * that is not in the cache.
		 */
		if (error && sbif->bif_addrmax)
			goto drop;
	}

	/* Ports in the LEARNING state learn (above) but do not forward. */
	if ((sbif->bif_ifflags & IFBIF_STP) != 0 &&
	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
		goto drop;

	/*
	 * At this point, the port either doesn't participate
	 * in spanning tree or it is in the forwarding state.
	 */

	/*
	 * If the packet is unicast, destined for someone on
	 * "this" side of the bridge, drop it.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
		dst_if = bridge_rtlookup(sc, dst, vlan);
		if (src_if == dst_if)
			goto drop;
	} else {
		/*
		 * Check if its a reserved multicast address, any address
		 * listed in 802.1D section 7.12.6 may not be forwarded by the
		 * bridge.
		 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
		 */
		if (dst[0] == 0x01 && dst[1] == 0x80 &&
		    dst[2] == 0xc2 && dst[3] == 0x00 &&
		    dst[4] == 0x00 && dst[5] <= 0x0f)
			goto drop;


		/* ...forward it to all interfaces. */
		atomic_add_64(&ifp->if_imcasts, 1);
		dst_if = NULL;
	}

	/*
	 * If we have a destination interface which is a member of our bridge,
	 * OR this is a unicast packet, push it through the bpf(4) machinery.
	 * For broadcast or multicast packets, don't bother because it will
	 * be reinjected into ether_input. We do this before we pass the packets
	 * through the pfil(9) framework, as it is possible that pfil(9) will
	 * drop the packet, or possibly modify it, making it difficult to debug
	 * firewall issues on the bridge.
	 */
#if NBPFILTER > 0
	if (eh->ether_type == htons(ETHERTYPE_RSN_PREAUTH) ||
	    dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0) {
		m->m_pkthdr.rcvif = ifp;
		if (sc->sc_bpf_input)
			bridge_bpf_input(ifp, m);
	}
#endif /* NBPFILTER */

#if defined(PFIL_HOOKS)
	/* run the packet filter; may consume or replace the mbuf */
	if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
		BRIDGE_UNLOCK(sc);
		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
			return;
		if (m == NULL)
			return;
		BRIDGE_LOCK(sc);
	}
#endif /* PFIL_HOOKS */

	if (dst_if == NULL) {
		/* bridge_broadcast releases the lock */
		bridge_broadcast(sc, src_if, m, 1);
		return;
	}

	/*
	 * At this point, we're dealing with a unicast frame
	 * going to a different interface.
	 */
	if ((dst_if->if_flags & IFF_RUNNING) == 0)
		goto drop;

	dbif = bridge_lookup_member_if(sc, dst_if);
	if (dbif == NULL)
		/* Not a member of the bridge (anymore?) */
		goto drop;

	/* Private segments can not talk to each other */
	if (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE)
		goto drop;

	if ((dbif->bif_ifflags & IFBIF_STP) &&
	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
		goto drop;

#if HAS_DHCPRA_MASK
	/* APPLE MODIFICATION <rdar:6985737> */
	if ((dst_if->if_extflags & IFEXTF_DHCPRA_MASK) != 0) {
		m = ip_xdhcpra_output(dst_if, m);
		if (!m) {
			/*
			 * NOTE(review): this path returns with the
			 * bridge lock still held -- confirm if this
			 * code is ever compiled in.
			 */
			++sc->sc_sc.sc_ifp.if_xdhcpra;
			return;
		}
	}
#endif /* HAS_DHCPRA_MASK */

	BRIDGE_UNLOCK(sc);

#if defined(PFIL_HOOKS)
	if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
			return;
		if (m == NULL)
			return;
	}
#endif /* PFIL_HOOKS */

	(void) bridge_enqueue(sc, dst_if, m);
	return;

drop:
	BRIDGE_UNLOCK(sc);
	m_freem(m);
}
4316
4317 #if BRIDGE_DEBUG
4318
4319 char *ether_ntop(char *, size_t, const u_char *);
4320
__private_extern__ char *
ether_ntop(char *buf, size_t len, const u_char *ap)
{
	/*
	 * Format the 6-byte Ethernet address `ap' into `buf' as
	 * "xx:xx:xx:xx:xx:xx" (truncated if `len' is too small) and
	 * return `buf' so the call nests inside printf arguments.
	 */
	snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x",
	    ap[0], ap[1], ap[2], ap[3], ap[4], ap[5]);

	return (buf);
}
4329
4330 #endif /* BRIDGE_DEBUG */
4331
4332 /*
4333 * bridge_input:
4334 *
4335 * Filter input from a member interface. Queue the packet for
4336 * bridging if it is not for us.
4337 */
4338 __private_extern__ errno_t
4339 bridge_input(struct ifnet *ifp, struct mbuf *m, void *frame_header)
4340 {
4341 struct bridge_softc *sc = ifp->if_bridge;
4342 struct bridge_iflist *bif, *bif2;
4343 struct ifnet *bifp;
4344 struct ether_header *eh;
4345 struct mbuf *mc, *mc2;
4346 uint16_t vlan;
4347 int error;
4348
4349 ASSERT(bridge_in_bsd_mode(sc));
4350 #if BRIDGE_DEBUG
4351 if (if_bridge_debug & BR_DBGF_INPUT)
4352 printf("%s: %s from %s m 0x%llx data 0x%llx\n", __func__,
4353 sc->sc_ifp->if_xname, ifp->if_xname,
4354 (uint64_t)VM_KERNEL_ADDRPERM(m),
4355 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)));
4356 #endif /* BRIDGE_DEBUG */
4357
4358 if ((sc->sc_ifp->if_flags & IFF_RUNNING) == 0) {
4359 #if BRIDGE_DEBUG
4360 if (if_bridge_debug & BR_DBGF_INPUT)
4361 printf("%s: %s not running passing along\n",
4362 __func__, sc->sc_ifp->if_xname);
4363 #endif /* BRIDGE_DEBUG */
4364 return (0);
4365 }
4366
4367 bifp = sc->sc_ifp;
4368 vlan = VLANTAGOF(m);
4369
4370 #ifdef IFF_MONITOR
4371 /*
4372 * Implement support for bridge monitoring. If this flag has been
4373 * set on this interface, discard the packet once we push it through
4374 * the bpf(4) machinery, but before we do, increment the byte and
4375 * packet counters associated with this interface.
4376 */
4377 if ((bifp->if_flags & IFF_MONITOR) != 0) {
4378 m->m_pkthdr.rcvif = bifp;
4379 BRIDGE_BPF_MTAP_INPUT(sc, m);
4380 (void) ifnet_stat_increment_in(bifp, 1, m->m_pkthdr.len, 0);
4381 m_freem(m);
4382 return (EJUSTRETURN);
4383 }
4384 #endif /* IFF_MONITOR */
4385
4386 /*
4387 * Need to clear the promiscous flags otherwise it will be
4388 * dropped by DLIL after processing filters
4389 */
4390 if ((mbuf_flags(m) & MBUF_PROMISC))
4391 mbuf_setflags_mask(m, 0, MBUF_PROMISC);
4392
4393 BRIDGE_LOCK(sc);
4394 bif = bridge_lookup_member_if(sc, ifp);
4395 if (bif == NULL) {
4396 BRIDGE_UNLOCK(sc);
4397 #if BRIDGE_DEBUG
4398 if (if_bridge_debug & BR_DBGF_INPUT)
4399 printf("%s: %s bridge_lookup_member_if failed\n",
4400 __func__, sc->sc_ifp->if_xname);
4401 #endif /* BRIDGE_DEBUG */
4402 return (0);
4403 }
4404
4405 if (bif->bif_flags & BIFF_HOST_FILTER) {
4406 error = bridge_host_filter(bif, m);
4407 if (error != 0) {
4408 if (if_bridge_debug & BR_DBGF_INPUT)
4409 printf("%s: %s bridge_host_filter failed\n",
4410 __func__, bif->bif_ifp->if_xname);
4411 BRIDGE_UNLOCK(sc);
4412 return (EJUSTRETURN);
4413 }
4414 }
4415
4416 eh = mtod(m, struct ether_header *);
4417
4418 bridge_span(sc, m);
4419
4420 if (m->m_flags & (M_BCAST|M_MCAST)) {
4421
4422 #if BRIDGE_DEBUG
4423 if (if_bridge_debug & BR_DBGF_MCAST)
4424 if ((m->m_flags & M_MCAST))
4425 printf("%s: multicast: "
4426 "%02x:%02x:%02x:%02x:%02x:%02x\n",
4427 __func__,
4428 eh->ether_dhost[0], eh->ether_dhost[1],
4429 eh->ether_dhost[2], eh->ether_dhost[3],
4430 eh->ether_dhost[4], eh->ether_dhost[5]);
4431 #endif /* BRIDGE_DEBUG */
4432
4433 /* Tap off 802.1D packets; they do not get forwarded. */
4434 if (memcmp(eh->ether_dhost, bstp_etheraddr,
4435 ETHER_ADDR_LEN) == 0) {
4436 #if BRIDGESTP
4437 m = bstp_input(&bif->bif_stp, ifp, m);
4438 #else /* !BRIDGESTP */
4439 m_freem(m);
4440 m = NULL;
4441 #endif /* !BRIDGESTP */
4442 if (m == NULL) {
4443 BRIDGE_UNLOCK(sc);
4444 return (EJUSTRETURN);
4445 }
4446 }
4447
4448 if ((bif->bif_ifflags & IFBIF_STP) &&
4449 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
4450 BRIDGE_UNLOCK(sc);
4451 return (0);
4452 }
4453
4454 /*
4455 * Make a deep copy of the packet and enqueue the copy
4456 * for bridge processing; return the original packet for
4457 * local processing.
4458 */
4459 mc = m_dup(m, M_DONTWAIT);
4460 if (mc == NULL) {
4461 BRIDGE_UNLOCK(sc);
4462 return (0);
4463 }
4464
4465 /*
4466 * Perform the bridge forwarding function with the copy.
4467 *
4468 * Note that bridge_forward calls BRIDGE_UNLOCK
4469 */
4470 bridge_forward(sc, bif, mc);
4471
4472 /*
4473 * Reinject the mbuf as arriving on the bridge so we have a
4474 * chance at claiming multicast packets. We can not loop back
4475 * here from ether_input as a bridge is never a member of a
4476 * bridge.
4477 */
4478 VERIFY(bifp->if_bridge == NULL);
4479 mc2 = m_dup(m, M_DONTWAIT);
4480 if (mc2 != NULL) {
4481 /* Keep the layer3 header aligned */
4482 int i = min(mc2->m_pkthdr.len, max_protohdr);
4483 mc2 = m_copyup(mc2, i, ETHER_ALIGN);
4484 }
4485 if (mc2 != NULL) {
4486 /* mark packet as arriving on the bridge */
4487 mc2->m_pkthdr.rcvif = bifp;
4488 mc2->m_pkthdr.pkt_hdr = mbuf_data(mc2);
4489
4490 #if NBPFILTER > 0
4491 if (sc->sc_bpf_input)
4492 bridge_bpf_input(bifp, mc2);
4493 #endif /* NBPFILTER */
4494 (void) mbuf_setdata(mc2,
4495 (char *)mbuf_data(mc2) + ETHER_HDR_LEN,
4496 mbuf_len(mc2) - ETHER_HDR_LEN);
4497 (void) mbuf_pkthdr_adjustlen(mc2, - ETHER_HDR_LEN);
4498
4499 (void) ifnet_stat_increment_in(bifp, 1,
4500 mbuf_pkthdr_len(mc2), 0);
4501
4502 #if BRIDGE_DEBUG
4503 if (if_bridge_debug & BR_DBGF_MCAST)
4504 printf("%s: %s mcast for us\n", __func__,
4505 sc->sc_ifp->if_xname);
4506 #endif /* BRIDGE_DEBUG */
4507
4508 dlil_input_packet_list(bifp, mc2);
4509 }
4510
4511 /* Return the original packet for local processing. */
4512 return (0);
4513 }
4514
4515 if ((bif->bif_ifflags & IFBIF_STP) &&
4516 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
4517 BRIDGE_UNLOCK(sc);
4518 return (0);
4519 }
4520
4521 #ifdef DEV_CARP
4522 #define CARP_CHECK_WE_ARE_DST(iface) \
4523 ((iface)->if_carp &&\
4524 carp_forus((iface)->if_carp, eh->ether_dhost))
4525 #define CARP_CHECK_WE_ARE_SRC(iface) \
4526 ((iface)->if_carp &&\
4527 carp_forus((iface)->if_carp, eh->ether_shost))
4528 #else
4529 #define CARP_CHECK_WE_ARE_DST(iface) 0
4530 #define CARP_CHECK_WE_ARE_SRC(iface) 0
4531 #endif
4532
4533 #ifdef INET6
4534 #define PFIL_HOOKED_INET6 PFIL_HOOKED(&inet6_pfil_hook)
4535 #else
4536 #define PFIL_HOOKED_INET6 0
4537 #endif
4538
4539 #if defined(PFIL_HOOKS)
4540 #define PFIL_PHYS(sc, ifp, m) do { \
4541 if (pfil_local_phys && \
4542 (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) { \
4543 if (bridge_pfil(&m, NULL, ifp, \
4544 PFIL_IN) != 0 || m == NULL) { \
4545 BRIDGE_UNLOCK(sc); \
4546 return (NULL); \
4547 } \
4548 } \
4549 } while (0)
4550 #else /* PFIL_HOOKS */
4551 #define PFIL_PHYS(sc, ifp, m)
4552 #endif /* PFIL_HOOKS */
4553
4554 #define GRAB_OUR_PACKETS(iface) \
4555 if ((iface)->if_type == IFT_GIF) \
4556 continue; \
4557 /* It is destined for us. */ \
4558 if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, \
4559 ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST((iface))) { \
4560 if ((iface)->if_type == IFT_BRIDGE) { \
4561 BRIDGE_BPF_MTAP_INPUT(sc, m); \
4562 /* Filter on the physical interface. */ \
4563 PFIL_PHYS(sc, iface, m); \
4564 } \
4565 if (bif->bif_ifflags & IFBIF_LEARNING) { \
4566 error = bridge_rtupdate(sc, eh->ether_shost, \
4567 vlan, bif, 0, IFBAF_DYNAMIC); \
4568 if (error && bif->bif_addrmax) { \
4569 BRIDGE_UNLOCK(sc); \
4570 return (EJUSTRETURN); \
4571 } \
4572 } \
4573 m->m_pkthdr.rcvif = iface; \
4574 BRIDGE_UNLOCK(sc); \
4575 return (0); \
4576 } \
4577 \
4578 /* We just received a packet that we sent out. */ \
4579 if (memcmp(IF_LLADDR((iface)), eh->ether_shost, \
4580 ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_SRC((iface))) { \
4581 BRIDGE_UNLOCK(sc); \
4582 return (EJUSTRETURN); \
4583 }
4584
4585 /*
4586 * Unicast.
4587 */
4588 /*
4589 * If the packet is for us, set the packets source as the
4590 * bridge, and return the packet back to ether_input for
4591 * local processing.
4592 */
4593 if (memcmp(eh->ether_dhost, IF_LLADDR(bifp),
4594 ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST(bifp)) {
4595
4596 /* Mark the packet as arriving on the bridge interface */
4597 (void) mbuf_pkthdr_setrcvif(m, bifp);
4598 mbuf_pkthdr_setheader(m, frame_header);
4599
4600 /*
4601 * If the interface is learning, and the source
4602 * address is valid and not multicast, record
4603 * the address.
4604 */
4605 if (bif->bif_ifflags & IFBIF_LEARNING)
4606 (void) bridge_rtupdate(sc, eh->ether_shost,
4607 vlan, bif, 0, IFBAF_DYNAMIC);
4608
4609 BRIDGE_BPF_MTAP_INPUT(sc, m);
4610
4611 (void) mbuf_setdata(m, (char *)mbuf_data(m) + ETHER_HDR_LEN,
4612 mbuf_len(m) - ETHER_HDR_LEN);
4613 (void) mbuf_pkthdr_adjustlen(m, - ETHER_HDR_LEN);
4614
4615 (void) ifnet_stat_increment_in(bifp, 1, mbuf_pkthdr_len(m), 0);
4616
4617 BRIDGE_UNLOCK(sc);
4618
4619 #if BRIDGE_DEBUG
4620 if (if_bridge_debug & BR_DBGF_INPUT)
4621 printf("%s: %s packet for bridge\n", __func__,
4622 sc->sc_ifp->if_xname);
4623 #endif /* BRIDGE_DEBUG */
4624
4625 dlil_input_packet_list(bifp, m);
4626
4627 return (EJUSTRETURN);
4628 }
4629
4630 /*
4631 * if the destination of the packet is for the MAC address of
4632 * the member interface itself, then we don't need to forward
4633 * it -- just pass it back. Note that it'll likely just be
4634 * dropped by the stack, but if something else is bound to
4635 * the interface directly (for example, the wireless stats
4636 * protocol -- although that actually uses BPF right now),
4637 * then it will consume the packet
4638 *
4639 * ALSO, note that we do this check AFTER checking for the
4640 * bridge's own MAC address, because the bridge may be
4641 * using the SAME MAC address as one of its interfaces
4642 */
4643 if (memcmp(eh->ether_dhost, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0) {
4644
4645 #ifdef VERY_VERY_VERY_DIAGNOSTIC
4646 printf("%s: not forwarding packet bound for member "
4647 "interface\n", __func__);
4648 #endif
4649 BRIDGE_UNLOCK(sc);
4650 return (0);
4651 }
4652
4653 /* Now check the all bridge members. */
4654 TAILQ_FOREACH(bif2, &sc->sc_iflist, bif_next) {
4655 GRAB_OUR_PACKETS(bif2->bif_ifp)
4656 }
4657
4658 #undef CARP_CHECK_WE_ARE_DST
4659 #undef CARP_CHECK_WE_ARE_SRC
4660 #undef GRAB_OUR_PACKETS
4661
4662 /*
4663 * Perform the bridge forwarding function.
4664 *
4665 * Note that bridge_forward calls BRIDGE_UNLOCK
4666 */
4667 bridge_forward(sc, bif, m);
4668
4669 return (EJUSTRETURN);
4670 }
4671
4672 /*
4673 * bridge_broadcast:
4674 *
4675 * Send a frame to all interfaces that are members of
4676 * the bridge, except for the one on which the packet
4677 * arrived.
4678 *
4679 * NOTE: Releases the lock on return.
4680 */
static void
bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
    struct mbuf *m, int runfilt)
{
#ifndef PFIL_HOOKS
#pragma unused(runfilt)
#endif
	struct bridge_iflist *dbif, *sbif;
	struct mbuf *mc;
	struct ifnet *dst_if;
	int error = 0, used = 0;

	/* Member entry for the arrival interface; may be NULL. */
	sbif = bridge_lookup_member_if(sc, src_if);

	/*
	 * Trade the bridge lock for a reference so the member list stays
	 * stable while we transmit without holding the lock.
	 */
	BRIDGE_LOCK2REF(sc, error);
	if (error) {
		m_freem(m);
		return;
	}

#ifdef PFIL_HOOKS
	/* Filter on the bridge interface before broadcasting */
	if (runfilt && (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) {
		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
			goto out;
		if (m == NULL)
			goto out;
	}
#endif /* PFIL_HOOKS */

	TAILQ_FOREACH(dbif, &sc->sc_iflist, bif_next) {
		dst_if = dbif->bif_ifp;
		/* Never send the packet back out the interface it came in on. */
		if (dst_if == src_if)
			continue;

		/* Private segments can not talk to each other */
		if (sbif &&
		    (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE))
			continue;

		/* Skip ports STP has placed in the discarding state. */
		if ((dbif->bif_ifflags & IFBIF_STP) &&
		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
			continue;

		/* Unicast floods only go out ports marked for discovery. */
		if ((dbif->bif_ifflags & IFBIF_DISCOVER) == 0 &&
		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
			continue;

		if ((dst_if->if_flags & IFF_RUNNING) == 0)
			continue;

		if (!(dbif->bif_flags & BIFF_MEDIA_ACTIVE)) {
			continue;
		}

		/*
		 * The last member in the list consumes the original mbuf;
		 * every earlier member gets a duplicate.
		 */
		if (TAILQ_NEXT(dbif, bif_next) == NULL) {
			mc = m;
			used = 1;
		} else {
			mc = m_dup(m, M_DONTWAIT);
			if (mc == NULL) {
				/* Duplication failure counts as an output error. */
				(void) ifnet_stat_increment_out(sc->sc_ifp,
				    0, 0, 1);
				continue;
			}
		}

#ifdef PFIL_HOOKS
		/*
		 * Filter on the output interface. Pass a NULL bridge interface
		 * pointer so we do not redundantly filter on the bridge for
		 * each interface we broadcast on.
		 */
		if (runfilt &&
		    (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) {
			if (used == 0) {
				/* Keep the layer3 header aligned */
				int i = min(mc->m_pkthdr.len, max_protohdr);
				mc = m_copyup(mc, i, ETHER_ALIGN);
				if (mc == NULL) {
					(void) ifnet_stat_increment_out(
					    sc->sc_ifp, 0, 0, 1);
					continue;
				}
			}
			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
				continue;
			if (mc == NULL)
				continue;
		}
#endif /* PFIL_HOOKS */

		(void) bridge_enqueue(sc, dst_if, mc);
	}
	/* Free the original if no member ended up consuming it. */
	if (used == 0)
		m_freem(m);

#ifdef PFIL_HOOKS
out:
#endif /* PFIL_HOOKS */

	BRIDGE_UNREF(sc);
}
4784
4785 /*
4786 * bridge_span:
4787 *
4788 * Duplicate a packet out one or more interfaces that are in span mode,
4789 * the original mbuf is unmodified.
4790 */
4791 static void
4792 bridge_span(struct bridge_softc *sc, struct mbuf *m)
4793 {
4794 struct bridge_iflist *bif;
4795 struct ifnet *dst_if;
4796 struct mbuf *mc;
4797
4798 if (TAILQ_EMPTY(&sc->sc_spanlist))
4799 return;
4800
4801 TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) {
4802 dst_if = bif->bif_ifp;
4803
4804 if ((dst_if->if_flags & IFF_RUNNING) == 0)
4805 continue;
4806
4807 mc = m_copypacket(m, M_DONTWAIT);
4808 if (mc == NULL) {
4809 (void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
4810 continue;
4811 }
4812
4813 (void) bridge_enqueue(sc, dst_if, mc);
4814 }
4815 }
4816
4817
4818 /*
4819 * bridge_rtupdate:
4820 *
4821 * Add a bridge routing entry.
4822 */
static int
bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
    struct bridge_iflist *bif, int setflags, uint8_t flags)
{
	struct bridge_rtnode *brt;
	int error;

	BRIDGE_LOCK_ASSERT_HELD(sc);
	ASSERT(bridge_in_bsd_mode(sc));

	/* Check the source address is valid and not multicast. */
	if (ETHER_IS_MULTICAST(dst) ||
	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
	    dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0)
		return (EINVAL);


	/* 802.1p frames map to vlan 1 */
	if (vlan == 0)
		vlan = 1;

	/*
	 * A route for this destination might already exist. If so,
	 * update it, otherwise create a new one.
	 */
	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
		/* Global cap on the number of learned addresses. */
		if (sc->sc_brtcnt >= sc->sc_brtmax) {
			sc->sc_brtexceeded++;
			return (ENOSPC);
		}
		/* Check per interface address limits (if enabled) */
		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
			bif->bif_addrexceeded++;
			return (ENOSPC);
		}

		/*
		 * Allocate a new bridge forwarding node, and
		 * initialize the expiration time and Ethernet
		 * address.
		 * (zalloc_noblock: non-blocking zone allocation while
		 * the bridge lock is held.)
		 */
		brt = zalloc_noblock(bridge_rtnode_pool);
		if (brt == NULL)
			return (ENOMEM);
		bzero(brt, sizeof(struct bridge_rtnode));

		if (bif->bif_ifflags & IFBIF_STICKY)
			brt->brt_flags = IFBAF_STICKY;
		else
			brt->brt_flags = IFBAF_DYNAMIC;

		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
		brt->brt_vlan = vlan;


		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
			zfree(bridge_rtnode_pool, brt);
			return (error);
		}
		brt->brt_dst = bif;
		bif->bif_addrcnt++;
#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_RT_TABLE)
			printf("%s: added %02x:%02x:%02x:%02x:%02x:%02x "
			    "on %s count %u hashsize %u\n", __func__,
			    dst[0], dst[1], dst[2], dst[3], dst[4], dst[5],
			    sc->sc_ifp->if_xname, sc->sc_brtcnt,
			    sc->sc_rthash_size);
#endif
	}

	/* A dynamic entry migrates to the member it was last seen on. */
	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
	    brt->brt_dst != bif) {
		brt->brt_dst->bif_addrcnt--;
		brt->brt_dst = bif;
		brt->brt_dst->bif_addrcnt++;
	}

	/* Refresh the expiration time for dynamic entries. */
	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
		unsigned long now;

		now = (unsigned long) net_uptime();
		brt->brt_expire = now + sc->sc_brttimeout;
	}
	if (setflags)
		brt->brt_flags = flags;


	return (0);
}
4913
4914 /*
4915 * bridge_rtlookup:
4916 *
4917 * Lookup the destination interface for an address.
4918 */
4919 static struct ifnet *
4920 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
4921 {
4922 struct bridge_rtnode *brt;
4923
4924 BRIDGE_LOCK_ASSERT_HELD(sc);
4925
4926 if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
4927 return (NULL);
4928
4929 return (brt->brt_ifp);
4930 }
4931
4932 /*
4933 * bridge_rttrim:
4934 *
4935 * Trim the routine table so that we have a number
4936 * of routing entries less than or equal to the
4937 * maximum number.
4938 */
4939 static void
4940 bridge_rttrim(struct bridge_softc *sc)
4941 {
4942 struct bridge_rtnode *brt, *nbrt;
4943
4944 BRIDGE_LOCK_ASSERT_HELD(sc);
4945
4946 /* Make sure we actually need to do this. */
4947 if (sc->sc_brtcnt <= sc->sc_brtmax)
4948 return;
4949
4950 /* Force an aging cycle; this might trim enough addresses. */
4951 bridge_rtage(sc);
4952 if (sc->sc_brtcnt <= sc->sc_brtmax)
4953 return;
4954
4955 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
4956 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
4957 bridge_rtnode_destroy(sc, brt);
4958 if (sc->sc_brtcnt <= sc->sc_brtmax)
4959 return;
4960 }
4961 }
4962 }
4963
4964 /*
4965 * bridge_aging_timer:
4966 *
4967 * Aging periodic timer for the bridge routing table.
4968 */
4969 static void
4970 bridge_aging_timer(struct bridge_softc *sc)
4971 {
4972 BRIDGE_LOCK_ASSERT_HELD(sc);
4973
4974 bridge_rtage(sc);
4975
4976 if ((sc->sc_ifp->if_flags & IFF_RUNNING) &&
4977 (sc->sc_flags & SCF_DETACHING) == 0) {
4978 sc->sc_aging_timer.bdc_sc = sc;
4979 sc->sc_aging_timer.bdc_func = bridge_aging_timer;
4980 sc->sc_aging_timer.bdc_ts.tv_sec = bridge_rtable_prune_period;
4981 bridge_schedule_delayed_call(&sc->sc_aging_timer);
4982 }
4983 }
4984
4985 /*
4986 * bridge_rtage:
4987 *
4988 * Perform an aging cycle.
4989 */
4990 static void
4991 bridge_rtage(struct bridge_softc *sc)
4992 {
4993 struct bridge_rtnode *brt, *nbrt;
4994 unsigned long now;
4995
4996 BRIDGE_LOCK_ASSERT_HELD(sc);
4997
4998 now = (unsigned long) net_uptime();
4999
5000 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
5001 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
5002 if (now >= brt->brt_expire)
5003 bridge_rtnode_destroy(sc, brt);
5004 }
5005 }
5006 }
5007
5008 /*
5009 * bridge_rtflush:
5010 *
5011 * Remove all dynamic addresses from the bridge.
5012 */
5013 static void
5014 bridge_rtflush(struct bridge_softc *sc, int full)
5015 {
5016 struct bridge_rtnode *brt, *nbrt;
5017
5018 BRIDGE_LOCK_ASSERT_HELD(sc);
5019
5020 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
5021 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
5022 bridge_rtnode_destroy(sc, brt);
5023 }
5024 }
5025
5026 /*
5027 * bridge_rtdaddr:
5028 *
5029 * Remove an address from the table.
5030 */
5031 static int
5032 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
5033 {
5034 struct bridge_rtnode *brt;
5035 int found = 0;
5036
5037 BRIDGE_LOCK_ASSERT_HELD(sc);
5038
5039 /*
5040 * If vlan is zero then we want to delete for all vlans so the lookup
5041 * may return more than one.
5042 */
5043 while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
5044 bridge_rtnode_destroy(sc, brt);
5045 found = 1;
5046 }
5047
5048 return (found ? 0 : ENOENT);
5049 }
5050
5051 /*
5052 * bridge_rtdelete:
5053 *
5054 * Delete routes to a speicifc member interface.
5055 */
5056 static void
5057 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
5058 {
5059 struct bridge_rtnode *brt, *nbrt;
5060
5061 BRIDGE_LOCK_ASSERT_HELD(sc);
5062
5063 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
5064 if (brt->brt_ifp == ifp && (full ||
5065 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
5066 bridge_rtnode_destroy(sc, brt);
5067 }
5068 }
5069
5070 /*
5071 * bridge_rtable_init:
5072 *
5073 * Initialize the route table for this bridge.
5074 */
5075 static int
5076 bridge_rtable_init(struct bridge_softc *sc)
5077 {
5078 u_int32_t i;
5079
5080 ASSERT(bridge_in_bsd_mode(sc));
5081
5082 sc->sc_rthash = _MALLOC(sizeof (*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
5083 M_DEVBUF, M_WAITOK | M_ZERO);
5084 if (sc->sc_rthash == NULL) {
5085 printf("%s: no memory\n", __func__);
5086 return (ENOMEM);
5087 }
5088 sc->sc_rthash_size = BRIDGE_RTHASH_SIZE;
5089
5090 for (i = 0; i < sc->sc_rthash_size; i++)
5091 LIST_INIT(&sc->sc_rthash[i]);
5092
5093 sc->sc_rthash_key = RandomULong();
5094
5095 LIST_INIT(&sc->sc_rtlist);
5096
5097 return (0);
5098 }
5099
5100 /*
5101 * bridge_rthash_delayed_resize:
5102 *
5103 * Resize the routing table hash on a delayed thread call.
5104 */
static void
bridge_rthash_delayed_resize(struct bridge_softc *sc)
{
	u_int32_t new_rthash_size;
	struct _bridge_rtnode_list *new_rthash = NULL;
	struct _bridge_rtnode_list *old_rthash = NULL;
	u_int32_t i;
	struct bridge_rtnode *brt;
	int error = 0;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	/*
	 * Four entries per hash bucket is our ideal load factor
	 */
	if (sc->sc_brtcnt < sc->sc_rthash_size * 4)
		goto out;

	/*
	 * Doubling the number of hash buckets may be too simplistic
	 * especially when facing a spike of new entries
	 */
	new_rthash_size = sc->sc_rthash_size * 2;

	/*
	 * SCF_RESIZING keeps bridge_rthash_resize() from scheduling a
	 * second resize while the lock is dropped for the allocation.
	 */
	sc->sc_flags |= SCF_RESIZING;
	BRIDGE_UNLOCK(sc);

	new_rthash = _MALLOC(sizeof (*sc->sc_rthash) * new_rthash_size,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	BRIDGE_LOCK(sc);
	sc->sc_flags &= ~SCF_RESIZING;

	if (new_rthash == NULL) {
		error = ENOMEM;
		goto out;
	}
	/* The bridge may have started detaching while the lock was dropped. */
	if ((sc->sc_flags & SCF_DETACHING)) {
		error = ENODEV;
		goto out;
	}
	/*
	 * Fail safe from here on
	 */
	old_rthash = sc->sc_rthash;
	sc->sc_rthash = new_rthash;
	sc->sc_rthash_size = new_rthash_size;

	/*
	 * Get a new key to force entries to be shuffled around to reduce
	 * the likelihood they will land in the same buckets
	 */
	sc->sc_rthash_key = RandomULong();

	for (i = 0; i < sc->sc_rthash_size; i++)
		LIST_INIT(&sc->sc_rthash[i]);

	/* Rehash every route node into the new, larger table. */
	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
		LIST_REMOVE(brt, brt_hash);
		(void) bridge_rtnode_hash(sc, brt);
	}
out:
	if (error == 0) {
#if BRIDGE_DEBUG
		if (if_bridge_debug & BR_DBGF_RT_TABLE)
			printf("%s: %s new size %u\n", __func__,
			    sc->sc_ifp->if_xname, sc->sc_rthash_size);
#endif /* BRIDGE_DEBUG */
		if (old_rthash)
			_FREE(old_rthash, M_DEVBUF);
	} else {
#if BRIDGE_DEBUG
		printf("%s: %s failed %d\n", __func__,
		    sc->sc_ifp->if_xname, error);
#endif /* BRIDGE_DEBUG */
		if (new_rthash != NULL)
			_FREE(new_rthash, M_DEVBUF);
	}
}
5184
5185 /*
5186 * Resize the number of hash buckets based on the load factor
5187 * Currently only grow
5188 * Failing to resize the hash table is not fatal
5189 */
5190 static void
5191 bridge_rthash_resize(struct bridge_softc *sc)
5192 {
5193 BRIDGE_LOCK_ASSERT_HELD(sc);
5194
5195 if ((sc->sc_flags & SCF_DETACHING) || (sc->sc_flags & SCF_RESIZING))
5196 return;
5197
5198 /*
5199 * Four entries per hash bucket is our ideal load factor
5200 */
5201 if (sc->sc_brtcnt < sc->sc_rthash_size * 4)
5202 return;
5203 /*
5204 * Hard limit on the size of the routing hash table
5205 */
5206 if (sc->sc_rthash_size >= bridge_rtable_hash_size_max)
5207 return;
5208
5209 sc->sc_resize_call.bdc_sc = sc;
5210 sc->sc_resize_call.bdc_func = bridge_rthash_delayed_resize;
5211 bridge_schedule_delayed_call(&sc->sc_resize_call);
5212 }
5213
5214 /*
5215 * bridge_rtable_fini:
5216 *
5217 * Deconstruct the route table for this bridge.
5218 */
5219 static void
5220 bridge_rtable_fini(struct bridge_softc *sc)
5221 {
5222 KASSERT(sc->sc_brtcnt == 0,
5223 ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
5224 if (sc->sc_rthash) {
5225 _FREE(sc->sc_rthash, M_DEVBUF);
5226 sc->sc_rthash = NULL;
5227 }
5228 }
5229
/*
 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
 */
#define mix(a, b, c) \
do { \
	a -= b; a -= c; a ^= (c >> 13); \
	b -= c; b -= a; b ^= (a << 8); \
	c -= a; c -= b; c ^= (b >> 13); \
	a -= b; a -= c; a ^= (c >> 12); \
	b -= c; b -= a; b ^= (a << 16); \
	c -= a; c -= b; c ^= (b >> 5); \
	a -= b; a -= c; a ^= (c >> 3); \
	b -= c; b -= a; b ^= (a << 10); \
	c -= a; c -= b; c ^= (b >> 15); \
} while (/*CONSTCOND*/0)

/*
 * bridge_rthash:
 *
 *	Map a 6-byte Ethernet address to a bucket index of the routing
 *	hash table, keyed with the per-bridge random sc_rthash_key.
 */
static __inline uint32_t
bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
{
	/* 0x9e3779b9 is the golden-ratio constant from Jenkins' hash. */
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;

	/* Pack the six address bytes into the two 32-bit lanes. */
	b += addr[5] << 8;
	b += addr[4];
	a += addr[3] << 24;
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];

	mix(a, b, c);

	/* Mask-based reduction; presumably the table size is a power of
	 * two and BRIDGE_RTHASH_MASK(sc) == size - 1 — see the macro. */
	return (c & BRIDGE_RTHASH_MASK(sc));
}

#undef mix
5265
5266 static int
5267 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
5268 {
5269 int i, d;
5270
5271 for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
5272 d = ((int)a[i]) - ((int)b[i]);
5273 }
5274
5275 return (d);
5276 }
5277
5278 /*
5279 * bridge_rtnode_lookup:
5280 *
5281 * Look up a bridge route node for the specified destination. Compare the
5282 * vlan id or if zero then just return the first match.
5283 */
5284 static struct bridge_rtnode *
5285 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr,
5286 uint16_t vlan)
5287 {
5288 struct bridge_rtnode *brt;
5289 uint32_t hash;
5290 int dir;
5291
5292 BRIDGE_LOCK_ASSERT_HELD(sc);
5293 ASSERT(bridge_in_bsd_mode(sc));
5294
5295 hash = bridge_rthash(sc, addr);
5296 LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
5297 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
5298 if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
5299 return (brt);
5300 if (dir > 0)
5301 return (NULL);
5302 }
5303
5304 return (NULL);
5305 }
5306
5307 /*
5308 * bridge_rtnode_hash:
5309 *
5310 * Insert the specified bridge node into the route hash table.
5311 * This is used when adding a new node or to rehash when resizing
5312 * the hash table
5313 */
static int
bridge_rtnode_hash(struct bridge_softc *sc, struct bridge_rtnode *brt)
{
	struct bridge_rtnode *lbrt;
	uint32_t hash;
	int dir;

	BRIDGE_LOCK_ASSERT_HELD(sc);

	hash = bridge_rthash(sc, brt->brt_addr);

	/* Empty bucket: the new node becomes the whole chain. */
	lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
	if (lbrt == NULL) {
		LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
		goto out;
	}

	/*
	 * Walk the chain, which is kept sorted by address, and insert
	 * the node in front of the first entry that compares smaller
	 * (or append at the tail when none does).
	 */
	do {
		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
		/* Same address and same vlan: refuse the duplicate. */
		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan) {
#if BRIDGE_DEBUG
			if (if_bridge_debug & BR_DBGF_RT_TABLE)
				printf("%s: %s EEXIST "
				    "%02x:%02x:%02x:%02x:%02x:%02x\n",
				    __func__, sc->sc_ifp->if_xname,
				    brt->brt_addr[0], brt->brt_addr[1],
				    brt->brt_addr[2], brt->brt_addr[3],
				    brt->brt_addr[4], brt->brt_addr[5]);
#endif
			return (EEXIST);
		}
		if (dir > 0) {
			LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
			goto out;
		}
		if (LIST_NEXT(lbrt, brt_hash) == NULL) {
			LIST_INSERT_AFTER(lbrt, brt, brt_hash);
			goto out;
		}
		lbrt = LIST_NEXT(lbrt, brt_hash);
	} while (lbrt != NULL);

	/* Not reachable: the loop always inserts or returns above. */
#if BRIDGE_DEBUG
	if (if_bridge_debug & BR_DBGF_RT_TABLE)
		printf("%s: %s impossible %02x:%02x:%02x:%02x:%02x:%02x\n",
		    __func__, sc->sc_ifp->if_xname,
		    brt->brt_addr[0], brt->brt_addr[1], brt->brt_addr[2],
		    brt->brt_addr[3], brt->brt_addr[4], brt->brt_addr[5]);
#endif

out:
	return (0);
}
5367
5368 /*
5369 * bridge_rtnode_insert:
5370 *
5371 * Insert the specified bridge node into the route table. We
5372 * assume the entry is not already in the table.
5373 */
5374 static int
5375 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
5376 {
5377 int error;
5378
5379 error = bridge_rtnode_hash(sc, brt);
5380 if (error != 0)
5381 return (error);
5382
5383 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
5384 sc->sc_brtcnt++;
5385
5386 bridge_rthash_resize(sc);
5387
5388 return (0);
5389 }
5390
5391 /*
5392 * bridge_rtnode_destroy:
5393 *
5394 * Destroy a bridge rtnode.
5395 */
5396 static void
5397 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
5398 {
5399 BRIDGE_LOCK_ASSERT_HELD(sc);
5400
5401 LIST_REMOVE(brt, brt_hash);
5402
5403 LIST_REMOVE(brt, brt_list);
5404 sc->sc_brtcnt--;
5405 brt->brt_dst->bif_addrcnt--;
5406 zfree(bridge_rtnode_pool, brt);
5407 }
5408
5409 #if BRIDGESTP
5410 /*
5411 * bridge_rtable_expire:
5412 *
5413 * Set the expiry time for all routes on an interface.
5414 */
5415 static void
5416 bridge_rtable_expire(struct ifnet *ifp, int age)
5417 {
5418 struct bridge_softc *sc = ifp->if_bridge;
5419 struct bridge_rtnode *brt;
5420
5421 BRIDGE_LOCK(sc);
5422
5423 /*
5424 * If the age is zero then flush, otherwise set all the expiry times to
5425 * age for the interface
5426 */
5427 if (age == 0) {
5428 bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
5429 } else {
5430 unsigned long now;
5431
5432 now = (unsigned long) net_uptime();
5433
5434 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
5435 /* Cap the expiry time to 'age' */
5436 if (brt->brt_ifp == ifp &&
5437 brt->brt_expire > now + age &&
5438 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
5439 brt->brt_expire = now + age;
5440 }
5441 }
5442 BRIDGE_UNLOCK(sc);
5443 }
5444
5445 /*
5446 * bridge_state_change:
5447 *
5448 * Callback from the bridgestp code when a port changes states.
5449 */
5450 static void
5451 bridge_state_change(struct ifnet *ifp, int state)
5452 {
5453 struct bridge_softc *sc = ifp->if_bridge;
5454 static const char *stpstates[] = {
5455 "disabled",
5456 "listening",
5457 "learning",
5458 "forwarding",
5459 "blocking",
5460 "discarding"
5461 };
5462
5463 if (log_stp)
5464 log(LOG_NOTICE, "%s: state changed to %s on %s\n",
5465 sc->sc_ifp->if_xname,
5466 stpstates[state], ifp->if_xname);
5467 }
5468 #endif /* BRIDGESTP */
5469
5470 #ifdef PFIL_HOOKS
5471 /*
5472 * Send bridge packets through pfil if they are one of the types pfil can deal
5473 * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without
5474 * question.) If *bifp or *ifp are NULL then packet filtering is skipped for
5475 * that interface.
5476 */
5477 static int
5478 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
5479 {
5480 int snap, error, i, hlen;
5481 struct ether_header *eh1, eh2;
5482 struct ip_fw_args args;
5483 struct ip *ip;
5484 struct llc llc1;
5485 u_int16_t ether_type;
5486
5487 snap = 0;
5488 error = -1; /* Default error if not error == 0 */
5489
5490 #if 0
5491 /* we may return with the IP fields swapped, ensure its not shared */
5492 KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
5493 #endif
5494
5495 if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0)
5496 return (0); /* filtering is disabled */
5497
5498 i = min((*mp)->m_pkthdr.len, max_protohdr);
5499 if ((*mp)->m_len < i) {
5500 *mp = m_pullup(*mp, i);
5501 if (*mp == NULL) {
5502 printf("%s: m_pullup failed\n", __func__);
5503 return (-1);
5504 }
5505 }
5506
5507 eh1 = mtod(*mp, struct ether_header *);
5508 ether_type = ntohs(eh1->ether_type);
5509
5510 /*
5511 * Check for SNAP/LLC.
5512 */
5513 if (ether_type < ETHERMTU) {
5514 struct llc *llc2 = (struct llc *)(eh1 + 1);
5515
5516 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
5517 llc2->llc_dsap == LLC_SNAP_LSAP &&
5518 llc2->llc_ssap == LLC_SNAP_LSAP &&
5519 llc2->llc_control == LLC_UI) {
5520 ether_type = htons(llc2->llc_un.type_snap.ether_type);
5521 snap = 1;
5522 }
5523 }
5524
5525 /*
5526 * If we're trying to filter bridge traffic, don't look at anything
5527 * other than IP and ARP traffic. If the filter doesn't understand
5528 * IPv6, don't allow IPv6 through the bridge either. This is lame
5529 * since if we really wanted, say, an AppleTalk filter, we are hosed,
5530 * but of course we don't have an AppleTalk filter to begin with.
5531 * (Note that since pfil doesn't understand ARP it will pass *ALL*
5532 * ARP traffic.)
5533 */
5534 switch (ether_type) {
5535 case ETHERTYPE_ARP:
5536 case ETHERTYPE_REVARP:
5537 if (pfil_ipfw_arp == 0)
5538 return (0); /* Automatically pass */
5539 break;
5540
5541 case ETHERTYPE_IP:
5542 #if INET6
5543 case ETHERTYPE_IPV6:
5544 #endif /* INET6 */
5545 break;
5546 default:
5547 /*
5548 * Check to see if the user wants to pass non-ip
5549 * packets, these will not be checked by pfil(9) and
5550 * passed unconditionally so the default is to drop.
5551 */
5552 if (pfil_onlyip)
5553 goto bad;
5554 }
5555
5556 /* Strip off the Ethernet header and keep a copy. */
5557 m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t)&eh2);
5558 m_adj(*mp, ETHER_HDR_LEN);
5559
5560 /* Strip off snap header, if present */
5561 if (snap) {
5562 m_copydata(*mp, 0, sizeof (struct llc), (caddr_t)&llc1);
5563 m_adj(*mp, sizeof (struct llc));
5564 }
5565
5566 /*
5567 * Check the IP header for alignment and errors
5568 */
5569 if (dir == PFIL_IN) {
5570 switch (ether_type) {
5571 case ETHERTYPE_IP:
5572 error = bridge_ip_checkbasic(mp);
5573 break;
5574 #if INET6
5575 case ETHERTYPE_IPV6:
5576 error = bridge_ip6_checkbasic(mp);
5577 break;
5578 #endif /* INET6 */
5579 default:
5580 error = 0;
5581 }
5582 if (error)
5583 goto bad;
5584 }
5585
5586 if (IPFW_LOADED && pfil_ipfw != 0 && dir == PFIL_OUT && ifp != NULL) {
5587 error = -1;
5588 args.rule = ip_dn_claim_rule(*mp);
5589 if (args.rule != NULL && fw_one_pass)
5590 goto ipfwpass; /* packet already partially processed */
5591
5592 args.m = *mp;
5593 args.oif = ifp;
5594 args.next_hop = NULL;
5595 args.eh = &eh2;
5596 args.inp = NULL; /* used by ipfw uid/gid/jail rules */
5597 i = ip_fw_chk_ptr(&args);
5598 *mp = args.m;
5599
5600 if (*mp == NULL)
5601 return (error);
5602
5603 if (DUMMYNET_LOADED && (i == IP_FW_DUMMYNET)) {
5604
5605 /* put the Ethernet header back on */
5606 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT, 0);
5607 if (*mp == NULL)
5608 return (error);
5609 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
5610
5611 /*
5612 * Pass the pkt to dummynet, which consumes it. The
5613 * packet will return to us via bridge_dummynet().
5614 */
5615 args.oif = ifp;
5616 ip_dn_io_ptr(mp, DN_TO_IFB_FWD, &args, DN_CLIENT_IPFW);
5617 return (error);
5618 }
5619
5620 if (i != IP_FW_PASS) /* drop */
5621 goto bad;
5622 }
5623
5624 ipfwpass:
5625 error = 0;
5626
5627 /*
5628 * Run the packet through pfil
5629 */
5630 switch (ether_type) {
5631 case ETHERTYPE_IP:
5632 /*
5633 * before calling the firewall, swap fields the same as
5634 * IP does. here we assume the header is contiguous
5635 */
5636 ip = mtod(*mp, struct ip *);
5637
5638 ip->ip_len = ntohs(ip->ip_len);
5639 ip->ip_off = ntohs(ip->ip_off);
5640
5641 /*
5642 * Run pfil on the member interface and the bridge, both can
5643 * be skipped by clearing pfil_member or pfil_bridge.
5644 *
5645 * Keep the order:
5646 * in_if -> bridge_if -> out_if
5647 */
5648 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
5649 error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
5650 dir, NULL);
5651
5652 if (*mp == NULL || error != 0) /* filter may consume */
5653 break;
5654
5655 if (pfil_member && ifp != NULL)
5656 error = pfil_run_hooks(&inet_pfil_hook, mp, ifp,
5657 dir, NULL);
5658
5659 if (*mp == NULL || error != 0) /* filter may consume */
5660 break;
5661
5662 if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
5663 error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
5664 dir, NULL);
5665
5666 if (*mp == NULL || error != 0) /* filter may consume */
5667 break;
5668
5669 /* check if we need to fragment the packet */
5670 if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
5671 i = (*mp)->m_pkthdr.len;
5672 if (i > ifp->if_mtu) {
5673 error = bridge_fragment(ifp, *mp, &eh2, snap,
5674 &llc1);
5675 return (error);
5676 }
5677 }
5678
5679 /* Recalculate the ip checksum and restore byte ordering */
5680 ip = mtod(*mp, struct ip *);
5681 hlen = ip->ip_hl << 2;
5682 if (hlen < sizeof (struct ip))
5683 goto bad;
5684 if (hlen > (*mp)->m_len) {
5685 if ((*mp = m_pullup(*mp, hlen)) == 0)
5686 goto bad;
5687 ip = mtod(*mp, struct ip *);
5688 if (ip == NULL)
5689 goto bad;
5690 }
5691 ip->ip_len = htons(ip->ip_len);
5692 ip->ip_off = htons(ip->ip_off);
5693 ip->ip_sum = 0;
5694 if (hlen == sizeof (struct ip))
5695 ip->ip_sum = in_cksum_hdr(ip);
5696 else
5697 ip->ip_sum = in_cksum(*mp, hlen);
5698
5699 break;
5700 #if INET6
5701 case ETHERTYPE_IPV6:
5702 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
5703 error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
5704 dir, NULL);
5705
5706 if (*mp == NULL || error != 0) /* filter may consume */
5707 break;
5708
5709 if (pfil_member && ifp != NULL)
5710 error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
5711 dir, NULL);
5712
5713 if (*mp == NULL || error != 0) /* filter may consume */
5714 break;
5715
5716 if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
5717 error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
5718 dir, NULL);
5719 break;
5720 #endif
5721 default:
5722 error = 0;
5723 break;
5724 }
5725
5726 if (*mp == NULL)
5727 return (error);
5728 if (error != 0)
5729 goto bad;
5730
5731 error = -1;
5732
5733 /*
5734 * Finally, put everything back the way it was and return
5735 */
5736 if (snap) {
5737 M_PREPEND(*mp, sizeof (struct llc), M_DONTWAIT, 0);
5738 if (*mp == NULL)
5739 return (error);
5740 bcopy(&llc1, mtod(*mp, caddr_t), sizeof (struct llc));
5741 }
5742
5743 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT, 0);
5744 if (*mp == NULL)
5745 return (error);
5746 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
5747
5748 return (0);
5749
5750 bad:
5751 m_freem(*mp);
5752 *mp = NULL;
5753 return (error);
5754 }
5755
/*
 * Perform basic checks on header size since
 * pfil assumes ip_input has already processed
 * it for it.  Cut-and-pasted from ip_input.c.
 * Given how simple the IPv6 version is,
 * does the IPv4 version really need to be
 * this complicated?
 *
 * XXX Should we update ipstat here, or not?
 * XXX Right now we update ipstat but not
 * XXX csum_counter.
 *
 * Returns 0 when the packet looks like a valid IPv4 packet and may be
 * handed to the pfil hooks, -1 otherwise.  On return *mp is updated to
 * the (possibly reallocated) mbuf chain; on the error path *mp may be
 * NULL when an m_copyup()/m_pullup() failure already freed the chain.
 */
static int
bridge_ip_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip *ip;
	int len, hlen;
	u_short sum;

	if (*mp == NULL)
		return (-1);

	/*
	 * Make sure the fixed IP header is contiguous and aligned in the
	 * first mbuf.  A misaligned header is copied up into a fresh,
	 * aligned mbuf; a merely-short first mbuf is pulled up in place.
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		/* max_linkhdr is already rounded up to nearest 4-byte */
		if ((m = m_copyup(m, sizeof (struct ip),
		    max_linkhdr)) == NULL) {
			/* XXXJRT new stat, please */
			ipstat.ips_toosmall++;
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
			ipstat.ips_toosmall++;
			goto bad;
		}
	}
	ip = mtod(m, struct ip *);
	if (ip == NULL) goto bad;

	if (ip->ip_v != IPVERSION) {
		ipstat.ips_badvers++;
		goto bad;
	}
	/* Header length is in 32-bit words; convert to bytes */
	hlen = ip->ip_hl << 2;
	if (hlen < sizeof (struct ip)) {	/* minimum header length */
		ipstat.ips_badhlen++;
		goto bad;
	}
	/* Pull up IP options, if any, so the full header is contiguous */
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == 0) {
			ipstat.ips_badhlen++;
			goto bad;
		}
		ip = mtod(m, struct ip *);
		if (ip == NULL) goto bad;
	}

	/*
	 * Use the hardware-verified checksum result when the driver
	 * already checked it; otherwise compute it in software.
	 */
	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		/* non-zero (bad) iff the VALID bit is not set */
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof (struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		ipstat.ips_badsum++;
		goto bad;
	}

	/* Retrieve the packet length. */
	len = ntohs(ip->ip_len);

	/*
	 * Check for additional length bogosity
	 */
	if (len < hlen) {
		ipstat.ips_badlen++;
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is as at least much as the IP header would have us expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < len) {
		ipstat.ips_tooshort++;
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return (0);

bad:
	*mp = m;
	return (-1);
}
5857
5858 #if INET6
5859 /*
5860 * Same as above, but for IPv6.
5861 * Cut-and-pasted from ip6_input.c.
5862 * XXX Should we update ip6stat, or not?
5863 */
5864 static int
5865 bridge_ip6_checkbasic(struct mbuf **mp)
5866 {
5867 struct mbuf *m = *mp;
5868 struct ip6_hdr *ip6;
5869
5870 /*
5871 * If the IPv6 header is not aligned, slurp it up into a new
5872 * mbuf with space for link headers, in the event we forward
5873 * it. Otherwise, if it is aligned, make sure the entire base
5874 * IPv6 header is in the first mbuf of the chain.
5875 */
5876 if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
5877 struct ifnet *inifp = m->m_pkthdr.rcvif;
5878 /* max_linkhdr is already rounded up to nearest 4-byte */
5879 if ((m = m_copyup(m, sizeof (struct ip6_hdr),
5880 max_linkhdr)) == NULL) {
5881 /* XXXJRT new stat, please */
5882 ip6stat.ip6s_toosmall++;
5883 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
5884 goto bad;
5885 }
5886 } else if (__predict_false(m->m_len < sizeof (struct ip6_hdr))) {
5887 struct ifnet *inifp = m->m_pkthdr.rcvif;
5888 if ((m = m_pullup(m, sizeof (struct ip6_hdr))) == NULL) {
5889 ip6stat.ip6s_toosmall++;
5890 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
5891 goto bad;
5892 }
5893 }
5894
5895 ip6 = mtod(m, struct ip6_hdr *);
5896
5897 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
5898 ip6stat.ip6s_badvers++;
5899 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
5900 goto bad;
5901 }
5902
5903 /* Checks out, proceed */
5904 *mp = m;
5905 return (0);
5906
5907 bad:
5908 *mp = m;
5909 return (-1);
5910 }
5911 #endif /* INET6 */
5912
5913 /*
5914 * bridge_fragment:
5915 *
5916 * Return a fragmented mbuf chain.
5917 */
5918 static int
5919 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
5920 int snap, struct llc *llc)
5921 {
5922 struct mbuf *m0;
5923 struct ip *ip;
5924 int error = -1;
5925
5926 if (m->m_len < sizeof (struct ip) &&
5927 (m = m_pullup(m, sizeof (struct ip))) == NULL)
5928 goto out;
5929 ip = mtod(m, struct ip *);
5930
5931 error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
5932 CSUM_DELAY_IP);
5933 if (error)
5934 goto out;
5935
5936 /* walk the chain and re-add the Ethernet header */
5937 for (m0 = m; m0; m0 = m0->m_nextpkt) {
5938 if (error == 0) {
5939 if (snap) {
5940 M_PREPEND(m0, sizeof (struct llc), M_DONTWAIT, 0);
5941 if (m0 == NULL) {
5942 error = ENOBUFS;
5943 continue;
5944 }
5945 bcopy(llc, mtod(m0, caddr_t),
5946 sizeof (struct llc));
5947 }
5948 M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT, 0);
5949 if (m0 == NULL) {
5950 error = ENOBUFS;
5951 continue;
5952 }
5953 bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
5954 } else {
5955 m_freem(m);
5956 }
5957 }
5958
5959 if (error == 0)
5960 ipstat.ips_fragmented++;
5961
5962 return (error);
5963
5964 out:
5965 if (m != NULL)
5966 m_freem(m);
5967 return (error);
5968 }
5969 #endif /* PFIL_HOOKS */
5970
5971 /*
5972 * bridge_set_bpf_tap:
5973 *
5974 * Sets ups the BPF callbacks.
5975 */
5976 static errno_t
5977 bridge_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, bpf_packet_func bpf_callback)
5978 {
5979 struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
5980
5981 /* TBD locking */
5982 if (sc == NULL || (sc->sc_flags & SCF_DETACHING)) {
5983 return (ENODEV);
5984 }
5985 ASSERT(bridge_in_bsd_mode(sc));
5986 switch (mode) {
5987 case BPF_TAP_DISABLE:
5988 sc->sc_bpf_input = sc->sc_bpf_output = NULL;
5989 break;
5990
5991 case BPF_TAP_INPUT:
5992 sc->sc_bpf_input = bpf_callback;
5993 break;
5994
5995 case BPF_TAP_OUTPUT:
5996 sc->sc_bpf_output = bpf_callback;
5997 break;
5998
5999 case BPF_TAP_INPUT_OUTPUT:
6000 sc->sc_bpf_input = sc->sc_bpf_output = bpf_callback;
6001 break;
6002
6003 default:
6004 break;
6005 }
6006
6007 return (0);
6008 }
6009
/*
 * bridge_detach:
 *
 *	Callback when interface has been detached.  Tears down the
 *	remaining per-bridge state and frees the softc.  Must run after
 *	the interface is gone from the stack; the ifnet reference taken
 *	at attach time is dropped here.
 */
static void
bridge_detach(ifnet_t ifp)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);

#if BRIDGESTP
	/* Stop and free spanning-tree state first */
	bstp_detach(&sc->sc_stp);
#endif /* BRIDGESTP */

	if (bridge_in_bsd_mode(sc)) {
		/* Tear down the routing table. */
		bridge_rtable_fini(sc);
	}

	/* Remove this bridge from the global bridge list */
	lck_mtx_lock(&bridge_list_mtx);
	LIST_REMOVE(sc, sc_list);
	lck_mtx_unlock(&bridge_list_mtx);

	/* Drop the reference on the ifnet */
	ifnet_release(ifp);

	/* The softc mutex must be destroyed before the softc is freed */
	lck_mtx_destroy(&sc->sc_mtx, bridge_lock_grp);

	_FREE(sc, M_DEVBUF);
}
6039
6040 /*
6041 * bridge_bpf_input:
6042 *
6043 * Invoke the input BPF callback if enabled
6044 */
6045 __private_extern__ errno_t
6046 bridge_bpf_input(ifnet_t ifp, struct mbuf *m)
6047 {
6048 struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
6049
6050 ASSERT(bridge_in_bsd_mode(sc));
6051 if (sc->sc_bpf_input) {
6052 if (mbuf_pkthdr_rcvif(m) != ifp) {
6053 printf("%s: rcvif: 0x%llx != ifp 0x%llx\n", __func__,
6054 (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m)),
6055 (uint64_t)VM_KERNEL_ADDRPERM(ifp));
6056 }
6057 (*sc->sc_bpf_input)(ifp, m);
6058 }
6059 return (0);
6060 }
6061
6062 /*
6063 * bridge_bpf_output:
6064 *
6065 * Invoke the output BPF callback if enabled
6066 */
6067 __private_extern__ errno_t
6068 bridge_bpf_output(ifnet_t ifp, struct mbuf *m)
6069 {
6070 struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
6071
6072 ASSERT(bridge_in_bsd_mode(sc));
6073 if (sc->sc_bpf_output) {
6074 (*sc->sc_bpf_output)(ifp, m);
6075 }
6076 return (0);
6077 }
6078
6079 /*
6080 * bridge_link_event:
6081 *
6082 * Report a data link event on an interface
6083 */
6084 static void
6085 bridge_link_event(struct ifnet *ifp, u_int32_t event_code)
6086 {
6087 struct {
6088 struct kern_event_msg header;
6089 u_int32_t unit;
6090 char if_name[IFNAMSIZ];
6091 } event;
6092
6093 #if BRIDGE_DEBUG
6094 if (if_bridge_debug & BR_DBGF_LIFECYCLE)
6095 printf("%s: %s event_code %u - %s\n", __func__, ifp->if_xname,
6096 event_code, dlil_kev_dl_code_str(event_code));
6097 #endif /* BRIDGE_DEBUG */
6098
6099 bzero(&event, sizeof (event));
6100 event.header.total_size = sizeof (event);
6101 event.header.vendor_code = KEV_VENDOR_APPLE;
6102 event.header.kev_class = KEV_NETWORK_CLASS;
6103 event.header.kev_subclass = KEV_DL_SUBCLASS;
6104 event.header.event_code = event_code;
6105 event.header.event_data[0] = ifnet_family(ifp);
6106 event.unit = (u_int32_t)ifnet_unit(ifp);
6107 strlcpy(event.if_name, ifnet_name(ifp), IFNAMSIZ);
6108 ifnet_event(ifp, &event.header);
6109 }
6110
/*
 * BRIDGE_HF_DROP:
 *
 *	Account a host-filter drop: bump the named statistic, optionally
 *	log the drop site, and set the local 'error' to EINVAL.  Wrapped
 *	in do { } while (0) so it expands safely as a single statement
 *	(the previous bare-brace form broke under an unbraced if/else).
 *	A trailing newline was added to the debug printf so consecutive
 *	drop messages no longer run together in the kernel log.
 */
#define BRIDGE_HF_DROP(reason, func, line) do { \
	bridge_hostfilter_stats.reason++; \
	if (if_bridge_debug & BR_DBGF_HOSTFILTER) \
		printf("%s.%d" #reason "\n", func, line); \
	error = EINVAL; \
} while (0)
6117
/*
 * Make sure this is a DHCP or Bootp request that match the host filter.
 *
 * bif:    bridge member whose host-filter hardware/IP addresses the
 *         request must match.
 * m:      the mbuf chain containing the packet.
 * offset: byte offset of the DHCP payload within the chain.
 *
 * Returns 0 if the payload is an acceptable BOOTREQUEST, EINVAL (via
 * BRIDGE_HF_DROP, which also bumps the matching statistic) otherwise.
 * The mbuf is not consumed; the caller owns it.
 */
static int
bridge_dhcp_filter(struct bridge_iflist *bif, struct mbuf *m, size_t offset)
{
	int error = EINVAL;
	struct dhcp dhcp;

	/*
	 * Note: We use the dhcp structure because bootp structure definition
	 * is larger and some vendors do not pad the request
	 */
	error = mbuf_copydata(m, offset, sizeof(struct dhcp), &dhcp);
	if (error != 0) {
		BRIDGE_HF_DROP(brhf_dhcp_too_small, __func__, __LINE__);
		goto done;
	}
	/* Only client->server requests are allowed through */
	if (dhcp.dp_op != BOOTREQUEST) {
		BRIDGE_HF_DROP(brhf_dhcp_bad_op, __func__, __LINE__);
		goto done;
	}
	/*
	 * The hardware address must be an exact match
	 */
	if (dhcp.dp_htype != ARPHRD_ETHER) {
		BRIDGE_HF_DROP(brhf_dhcp_bad_htype, __func__, __LINE__);
		goto done;
	}
	if (dhcp.dp_hlen != ETHER_ADDR_LEN) {
		BRIDGE_HF_DROP(brhf_dhcp_bad_hlen, __func__, __LINE__);
		goto done;
	}
	/* Client hardware address must equal the filter's hwsrc */
	if (bcmp(dhcp.dp_chaddr, bif->bif_hf_hwsrc,
	    ETHER_ADDR_LEN) != 0) {
		BRIDGE_HF_DROP(brhf_dhcp_bad_chaddr, __func__, __LINE__);
		goto done;
	}
	/*
	 * Client address must match the host address or be not specified
	 * (INADDR_ANY, as used during initial DHCP DISCOVER/REQUEST)
	 */
	if (dhcp.dp_ciaddr.s_addr != bif->bif_hf_ipsrc.s_addr &&
	    dhcp.dp_ciaddr.s_addr != INADDR_ANY) {
		BRIDGE_HF_DROP(brhf_dhcp_bad_ciaddr, __func__, __LINE__);
		goto done;
	}
	error = 0;
done:
	return (error);
}
6168
/*
 * bridge_host_filter:
 *
 *	Per-member host filter: only lets through ARP and IPv4 traffic
 *	that originates from the configured hardware/IP source addresses
 *	of the member interface 'bif' (with special allowance for
 *	DHCP/Bootp requests from INADDR_ANY).
 *
 *	Returns 0 when the packet is acceptable.  On any failure the
 *	matching brhf_* statistic is bumped via BRIDGE_HF_DROP, EINVAL
 *	is returned, and the mbuf is FREED here — the caller must not
 *	touch 'm' after a non-zero return.
 */
static int
bridge_host_filter(struct bridge_iflist *bif, struct mbuf *m)
{
	int error = EINVAL;
	struct ether_header *eh;
	/* all-zeros address, used to recognize ARP probes */
	static struct in_addr inaddr_any = { .s_addr = INADDR_ANY };

	/*
	 * Check the Ethernet header is large enough
	 */
	if (mbuf_pkthdr_len(m) < sizeof(struct ether_header)) {
		BRIDGE_HF_DROP(brhf_ether_too_small, __func__, __LINE__);
		goto done;
	}
	if (mbuf_len(m) < sizeof(struct ether_header) &&
	    mbuf_pullup(&m, sizeof(struct ether_header)) != 0) {
		BRIDGE_HF_DROP(brhf_ether_pullup_failed, __func__, __LINE__);
		goto done;
	}
	eh = mtod(m, struct ether_header *);

	/*
	 * Restrict the source hardware address: the filter must have a
	 * configured hwsrc (BIFF_HF_HWSRC) and the frame must come from it.
	 */
	if ((bif->bif_flags & BIFF_HF_HWSRC) == 0 ||
	    bcmp(eh->ether_shost, bif->bif_hf_hwsrc,
	    ETHER_ADDR_LEN) != 0) {
		BRIDGE_HF_DROP(brhf_bad_ether_srchw_addr, __func__, __LINE__);
		goto done;
	}

	/*
	 * Restrict Ethernet protocols to ARP and IP
	 */
	if (eh->ether_type == htons(ETHERTYPE_ARP)) {
		struct ether_arp *ea;
		size_t minlen = sizeof(struct ether_header) +
		    sizeof(struct ether_arp);

		/*
		 * Make the Ethernet and ARP headers contiguous
		 */
		if (mbuf_pkthdr_len(m) < minlen) {
			BRIDGE_HF_DROP(brhf_arp_too_small, __func__, __LINE__);
			goto done;
		}
		if (mbuf_len(m) < minlen && mbuf_pullup(&m, minlen) != 0) {
			BRIDGE_HF_DROP(brhf_arp_pullup_failed,
			    __func__, __LINE__);
			goto done;
		}
		/*
		 * Verify this is an ethernet/ip arp
		 */
		eh = mtod(m, struct ether_header *);
		ea = (struct ether_arp *)(eh + 1);
		if (ea->arp_hrd != htons(ARPHRD_ETHER)) {
			BRIDGE_HF_DROP(brhf_arp_bad_hw_type,
			    __func__, __LINE__);
			goto done;
		}
		if (ea->arp_pro != htons(ETHERTYPE_IP)) {
			BRIDGE_HF_DROP(brhf_arp_bad_pro_type,
			    __func__, __LINE__);
			goto done;
		}
		/*
		 * Verify the address lengths are correct
		 */
		if (ea->arp_hln != ETHER_ADDR_LEN) {
			BRIDGE_HF_DROP(brhf_arp_bad_hw_len, __func__, __LINE__);
			goto done;
		}
		if (ea->arp_pln != sizeof(struct in_addr)) {
			BRIDGE_HF_DROP(brhf_arp_bad_pro_len,
			    __func__, __LINE__);
			goto done;
		}

		/*
		 * Allow only ARP request or ARP reply
		 */
		if (ea->arp_op != htons(ARPOP_REQUEST) &&
		    ea->arp_op != htons(ARPOP_REPLY)) {
			BRIDGE_HF_DROP(brhf_arp_bad_op, __func__, __LINE__);
			goto done;
		}
		/*
		 * Verify source hardware address matches
		 */
		if (bcmp(ea->arp_sha, bif->bif_hf_hwsrc,
		    ETHER_ADDR_LEN) != 0) {
			BRIDGE_HF_DROP(brhf_arp_bad_sha, __func__, __LINE__);
			goto done;
		}
		/*
		 * Verify source protocol address:
		 * May be null for an ARP probe
		 */
		if (bcmp(ea->arp_spa, &bif->bif_hf_ipsrc.s_addr,
		    sizeof(struct in_addr)) != 0 &&
		    bcmp(ea->arp_spa, &inaddr_any,
		    sizeof(struct in_addr)) != 0) {
			BRIDGE_HF_DROP(brhf_arp_bad_spa, __func__, __LINE__);
			goto done;
		}
		/*
		 * ARP packet passed every check; let it through
		 */
		bridge_hostfilter_stats.brhf_arp_ok += 1;
		error = 0;
	} else if (eh->ether_type == htons(ETHERTYPE_IP)) {
		size_t minlen = sizeof(struct ether_header) + sizeof(struct ip);
		struct ip iphdr;
		size_t offset;

		/*
		 * Make the Ethernet and IP headers contiguous
		 * (the IP header is copied out rather than pulled up)
		 */
		if (mbuf_pkthdr_len(m) < minlen) {
			BRIDGE_HF_DROP(brhf_ip_too_small, __func__, __LINE__);
			goto done;
		}
		offset = sizeof(struct ether_header);
		error = mbuf_copydata(m, offset, sizeof(struct ip), &iphdr);
		if (error != 0) {
			BRIDGE_HF_DROP(brhf_ip_too_small, __func__, __LINE__);
			goto done;
		}
		/*
		 * Verify the source IP address
		 */
		if (iphdr.ip_p == IPPROTO_UDP) {
			struct udphdr udp;

			minlen += sizeof(struct udphdr);
			if (mbuf_pkthdr_len(m) < minlen) {
				BRIDGE_HF_DROP(brhf_ip_too_small,
				    __func__, __LINE__);
				goto done;
			}

			/*
			 * Allow all zero addresses for DHCP requests
			 */
			if (iphdr.ip_src.s_addr != bif->bif_hf_ipsrc.s_addr &&
			    iphdr.ip_src.s_addr != INADDR_ANY) {
				BRIDGE_HF_DROP(brhf_ip_bad_srcaddr,
				    __func__, __LINE__);
				goto done;
			}
			/* UDP header starts after the (variable) IP header */
			offset = sizeof(struct ether_header) +
			    (IP_VHL_HL(iphdr.ip_vhl) << 2);
			error = mbuf_copydata(m, offset,
			    sizeof(struct udphdr), &udp);
			if (error != 0) {
				BRIDGE_HF_DROP(brhf_ip_too_small,
				    __func__, __LINE__);
				goto done;
			}
			/*
			 * Either it's a Bootp/DHCP packet that we like or
			 * it's a UDP packet from the host IP as source address
			 */
			if (udp.uh_sport == htons(IPPORT_BOOTPC) &&
			    udp.uh_dport == htons(IPPORT_BOOTPS)) {
				minlen += sizeof(struct dhcp);
				if (mbuf_pkthdr_len(m) < minlen) {
					BRIDGE_HF_DROP(brhf_ip_too_small,
					    __func__, __LINE__);
					goto done;
				}
				offset += sizeof(struct udphdr);
				error = bridge_dhcp_filter(bif, m, offset);
				if (error != 0)
					goto done;
			} else if (iphdr.ip_src.s_addr == INADDR_ANY) {
				/* non-DHCP UDP may not use the zero address */
				BRIDGE_HF_DROP(brhf_ip_bad_srcaddr,
				    __func__, __LINE__);
				goto done;
			}
		} else if (iphdr.ip_src.s_addr != bif->bif_hf_ipsrc.s_addr ||
		    bif->bif_hf_ipsrc.s_addr == INADDR_ANY) {
			/* non-UDP requires an exact, configured source IP */
			BRIDGE_HF_DROP(brhf_ip_bad_srcaddr, __func__, __LINE__);
			goto done;
		}
		/*
		 * Allow only boring IP protocols
		 */
		if (iphdr.ip_p != IPPROTO_TCP &&
		    iphdr.ip_p != IPPROTO_UDP &&
		    iphdr.ip_p != IPPROTO_ICMP &&
		    iphdr.ip_p != IPPROTO_ESP &&
		    iphdr.ip_p != IPPROTO_AH &&
		    iphdr.ip_p != IPPROTO_GRE) {
			BRIDGE_HF_DROP(brhf_ip_bad_proto, __func__, __LINE__);
			goto done;
		}
		bridge_hostfilter_stats.brhf_ip_ok += 1;
		error = 0;
	} else {
		BRIDGE_HF_DROP(brhf_bad_ether_type, __func__, __LINE__);
		goto done;
	}
done:
	if (error != 0) {
		/* Optionally dump the offending headers, then free the mbuf */
		if (if_bridge_debug & BR_DBGF_HOSTFILTER) {
			if (m) {
				printf_mbuf_data(m, 0,
				    sizeof(struct ether_header) +
				    sizeof(struct ip));
			}
			printf("\n");
		}

		if (m != NULL)
			m_freem(m);
	}
	return (error);
}
6390
6391