/*
- * Copyright (c) 2004-2017 Apple Inc. All rights reserved.
+ * Copyright (c) 2004-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/cdefs.h>
-#define BRIDGE_DEBUG 1
+#define BRIDGE_DEBUG 1
#include <sys/param.h>
#include <sys/mbuf.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/net_api_stats.h>
+#include <net/pfvar.h>
#include <netinet/in.h> /* for struct arpcom */
+#include <netinet/tcp.h> /* for struct tcphdr */
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
-#define _IP_VHL
+#define _IP_VHL
#include <netinet/ip.h>
#include <netinet/ip_var.h>
-#if INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
-#endif
#ifdef DEV_CARP
#include <netinet/ip_carp.h>
#endif
#include <net/kpi_interfacefilter.h>
#include <net/route.h>
-#ifdef PFIL_HOOKS
-#include <netinet/ip_fw2.h>
-#include <netinet/ip_dummynet.h>
-#endif /* PFIL_HOOKS */
#include <dev/random/randomdev.h>
#include <netinet/bootp.h>
#if BRIDGE_DEBUG
-#define BR_DBGF_LIFECYCLE 0x0001
-#define BR_DBGF_INPUT 0x0002
-#define BR_DBGF_OUTPUT 0x0004
-#define BR_DBGF_RT_TABLE 0x0008
-#define BR_DBGF_DELAYED_CALL 0x0010
-#define BR_DBGF_IOCTL 0x0020
-#define BR_DBGF_MBUF 0x0040
-#define BR_DBGF_MCAST 0x0080
-#define BR_DBGF_HOSTFILTER 0x0100
+#define BR_DBGF_LIFECYCLE 0x0001
+#define BR_DBGF_INPUT 0x0002
+#define BR_DBGF_OUTPUT 0x0004
+#define BR_DBGF_RT_TABLE 0x0008
+#define BR_DBGF_DELAYED_CALL 0x0010
+#define BR_DBGF_IOCTL 0x0020
+#define BR_DBGF_MBUF 0x0040
+#define BR_DBGF_MCAST 0x0080
+#define BR_DBGF_HOSTFILTER 0x0100
+#define BR_DBGF_CHECKSUM 0x0200
+#define BR_DBGF_MAC_NAT 0x0400
+#define BR_DBGF_SEGMENTATION 0x0800
#endif /* BRIDGE_DEBUG */
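/*
 * Illustrative sketch (not part of this change): the BR_DBGF_* bits are
 * tested against the net.link.bridge.debug sysctl (if_bridge_debug)
 * through IF_BRIDGE_DEBUG(), defined further below, so logging for a
 * single area can be enabled at run time.
 */
#if 0 /* example only */
	if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
		printf("%s: MAC-NAT debug logging is enabled\n", __func__);
	}
#endif /* example only */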
-#define _BRIDGE_LOCK(_sc) lck_mtx_lock(&(_sc)->sc_mtx)
-#define _BRIDGE_UNLOCK(_sc) lck_mtx_unlock(&(_sc)->sc_mtx)
-#define BRIDGE_LOCK_ASSERT_HELD(_sc) \
+#define _BRIDGE_LOCK(_sc) lck_mtx_lock(&(_sc)->sc_mtx)
+#define _BRIDGE_UNLOCK(_sc) lck_mtx_unlock(&(_sc)->sc_mtx)
+#define BRIDGE_LOCK_ASSERT_HELD(_sc) \
LCK_MTX_ASSERT(&(_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED)
-#define BRIDGE_LOCK_ASSERT_NOTHELD(_sc) \
+#define BRIDGE_LOCK_ASSERT_NOTHELD(_sc) \
LCK_MTX_ASSERT(&(_sc)->sc_mtx, LCK_MTX_ASSERT_NOTOWNED)
#if BRIDGE_DEBUG
-#define BR_LCKDBG_MAX 4
+#define BR_LCKDBG_MAX 4
-#define BRIDGE_LOCK(_sc) bridge_lock(_sc)
-#define BRIDGE_UNLOCK(_sc) bridge_unlock(_sc)
-#define BRIDGE_LOCK2REF(_sc, _err) _err = bridge_lock2ref(_sc)
-#define BRIDGE_UNREF(_sc) bridge_unref(_sc)
-#define BRIDGE_XLOCK(_sc) bridge_xlock(_sc)
-#define BRIDGE_XDROP(_sc) bridge_xdrop(_sc)
+#define BRIDGE_LOCK(_sc) bridge_lock(_sc)
+#define BRIDGE_UNLOCK(_sc) bridge_unlock(_sc)
+#define BRIDGE_LOCK2REF(_sc, _err) _err = bridge_lock2ref(_sc)
+#define BRIDGE_UNREF(_sc) bridge_unref(_sc)
+#define BRIDGE_XLOCK(_sc) bridge_xlock(_sc)
+#define BRIDGE_XDROP(_sc) bridge_xdrop(_sc)
+#define IF_BRIDGE_DEBUG(f) bridge_debug_flag_is_set(f)
#else /* !BRIDGE_DEBUG */
-#define BRIDGE_LOCK(_sc) _BRIDGE_LOCK(_sc)
-#define BRIDGE_UNLOCK(_sc) _BRIDGE_UNLOCK(_sc)
-#define BRIDGE_LOCK2REF(_sc, _err) do { \
- BRIDGE_LOCK_ASSERT_HELD(_sc); \
- if ((_sc)->sc_iflist_xcnt > 0) \
- (_err) = EBUSY; \
- else \
- (_sc)->sc_iflist_ref++; \
- _BRIDGE_UNLOCK(_sc); \
+#define BRIDGE_LOCK(_sc) _BRIDGE_LOCK(_sc)
+#define BRIDGE_UNLOCK(_sc) _BRIDGE_UNLOCK(_sc)
+#define BRIDGE_LOCK2REF(_sc, _err) do { \
+ BRIDGE_LOCK_ASSERT_HELD(_sc); \
+ if ((_sc)->sc_iflist_xcnt > 0) \
+ (_err) = EBUSY; \
+ else \
+ (_sc)->sc_iflist_ref++; \
+ _BRIDGE_UNLOCK(_sc); \
} while (0)
-#define BRIDGE_UNREF(_sc) do { \
- _BRIDGE_LOCK(_sc); \
- (_sc)->sc_iflist_ref--; \
+#define BRIDGE_UNREF(_sc) do { \
+ _BRIDGE_LOCK(_sc); \
+ (_sc)->sc_iflist_ref--; \
if (((_sc)->sc_iflist_xcnt > 0) && ((_sc)->sc_iflist_ref == 0)) { \
- _BRIDGE_UNLOCK(_sc); \
- wakeup(&(_sc)->sc_cv); \
- } else \
- _BRIDGE_UNLOCK(_sc); \
+ _BRIDGE_UNLOCK(_sc); \
+ wakeup(&(_sc)->sc_cv); \
+ } else \
+ _BRIDGE_UNLOCK(_sc); \
} while (0)
-#define BRIDGE_XLOCK(_sc) do { \
- BRIDGE_LOCK_ASSERT_HELD(_sc); \
- (_sc)->sc_iflist_xcnt++; \
- while ((_sc)->sc_iflist_ref > 0) \
- msleep(&(_sc)->sc_cv, &(_sc)->sc_mtx, PZERO, \
- "BRIDGE_XLOCK", NULL); \
+#define BRIDGE_XLOCK(_sc) do { \
+ BRIDGE_LOCK_ASSERT_HELD(_sc); \
+ (_sc)->sc_iflist_xcnt++; \
+ while ((_sc)->sc_iflist_ref > 0) \
+ msleep(&(_sc)->sc_cv, &(_sc)->sc_mtx, PZERO, \
+ "BRIDGE_XLOCK", NULL); \
} while (0)
-#define BRIDGE_XDROP(_sc) do { \
- BRIDGE_LOCK_ASSERT_HELD(_sc); \
- (_sc)->sc_iflist_xcnt--; \
+#define BRIDGE_XDROP(_sc) do { \
+ BRIDGE_LOCK_ASSERT_HELD(_sc); \
+ (_sc)->sc_iflist_xcnt--; \
} while (0)
+#define IF_BRIDGE_DEBUG(f) FALSE
+
#endif /* BRIDGE_DEBUG */
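/*
 * Illustrative sketch (not part of this change) of the reference/drain
 * pattern behind the macros above: a reader trades the mutex for a
 * reference on sc_iflist so members can be walked without sc_mtx held,
 * while a writer uses BRIDGE_XLOCK() (mutex held) to wait out those
 * readers before modifying the list.  "sc" is assumed in scope.
 */
#if 0 /* example only */
	int error;

	BRIDGE_LOCK(sc);
	BRIDGE_LOCK2REF(sc, error);	/* drops sc_mtx either way */
	if (error != 0) {
		return;			/* a writer is draining readers */
	}
	/* ... walk sc_iflist without holding sc_mtx ... */
	BRIDGE_UNREF(sc);		/* wakes a waiting BRIDGE_XLOCK() */
#endif /* example only */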
#if NBPFILTER > 0
-#define BRIDGE_BPF_MTAP_INPUT(sc, m) \
- if (sc->sc_bpf_input) \
- bridge_bpf_input(sc->sc_ifp, m)
+#define BRIDGE_BPF_MTAP_INPUT(sc, m) \
+ if (sc->sc_bpf_input != NULL) \
+ bridge_bpf_input(sc->sc_ifp, m, __func__, __LINE__)
#else /* NBPFILTER */
-#define BRIDGE_BPF_MTAP_INPUT(ifp, m)
+#define BRIDGE_BPF_MTAP_INPUT(ifp, m)
#endif /* NBPFILTER */
/*
* Initial size of the route hash table. Must be a power of two.
*/
#ifndef BRIDGE_RTHASH_SIZE
-#define BRIDGE_RTHASH_SIZE 16
+#define BRIDGE_RTHASH_SIZE 16
#endif
/*
* Maximum size of the routing hash table
*/
-#define BRIDGE_RTHASH_SIZE_MAX 2048
+#define BRIDGE_RTHASH_SIZE_MAX 2048
-#define BRIDGE_RTHASH_MASK(sc) ((sc)->sc_rthash_size - 1)
+#define BRIDGE_RTHASH_MASK(sc) ((sc)->sc_rthash_size - 1)
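/*
 * Illustrative note (not part of this change): keeping the table size a
 * power of two makes the mask equivalent to a modulo, e.g. with
 * sc_rthash_size == 16 the mask is 0xf and
 * (hash & BRIDGE_RTHASH_MASK(sc)) == (hash % 16).
 */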
/*
* Maximum number of addresses to cache.
*/
#ifndef BRIDGE_RTABLE_MAX
-#define BRIDGE_RTABLE_MAX 100
+#define BRIDGE_RTABLE_MAX 100
#endif
/*
 * Timeout (in seconds) for entries learned dynamically.
*/
#ifndef BRIDGE_RTABLE_TIMEOUT
-#define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
+#define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
#endif
/*
* Number of seconds between walks of the route list.
*/
#ifndef BRIDGE_RTABLE_PRUNE_PERIOD
-#define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
+#define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
#endif
+/*
+ * Number of MAC NAT entries
+ * - sized based on 16 clients (including the MAC NAT interface),
+ *   each with 4 addresses
+ */
+#ifndef BRIDGE_MAC_NAT_ENTRY_MAX
+#define BRIDGE_MAC_NAT_ENTRY_MAX 64
+#endif /* BRIDGE_MAC_NAT_ENTRY_MAX */
+
/*
* List of capabilities to possibly mask on the member interface.
*/
-#define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM)
+#define BRIDGE_IFCAPS_MASK (IFCAP_TSO | IFCAP_TXCSUM)
/*
* List of capabilities to disable on the member interface.
*/
-#define BRIDGE_IFCAPS_STRIP IFCAP_LRO
+#define BRIDGE_IFCAPS_STRIP IFCAP_LRO
/*
* Bridge interface list entry.
*/
struct bridge_iflist {
TAILQ_ENTRY(bridge_iflist) bif_next;
- struct ifnet *bif_ifp; /* member if */
- struct bstp_port bif_stp; /* STP state */
- uint32_t bif_ifflags; /* member if flags */
- int bif_savedcaps; /* saved capabilities */
- uint32_t bif_addrmax; /* max # of addresses */
- uint32_t bif_addrcnt; /* cur. # of addresses */
- uint32_t bif_addrexceeded; /* # of address violations */
-
- interface_filter_t bif_iff_ref;
- struct bridge_softc *bif_sc;
- uint32_t bif_flags;
-
- struct in_addr bif_hf_ipsrc;
- uint8_t bif_hf_hwsrc[ETHER_ADDR_LEN];
+ struct ifnet *bif_ifp; /* member if */
+ struct bstp_port bif_stp; /* STP state */
+ uint32_t bif_ifflags; /* member if flags */
+ int bif_savedcaps; /* saved capabilities */
+ uint32_t bif_addrmax; /* max # of addresses */
+ uint32_t bif_addrcnt; /* cur. # of addresses */
+ uint32_t bif_addrexceeded; /* # of address violations */
+
+ interface_filter_t bif_iff_ref;
+ struct bridge_softc *bif_sc;
+ uint32_t bif_flags;
+
+ /* host filter */
+ struct in_addr bif_hf_ipsrc;
+ uint8_t bif_hf_hwsrc[ETHER_ADDR_LEN];
+};
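/*
 * Illustrative sketch (not part of this change): member lookups walk
 * sc_iflist through bif_next and match on a field such as bif_ifp, in
 * the style of bridge_lookup_member_if(); the helper name below is
 * hypothetical.
 */
#if 0 /* example only */
static struct bridge_iflist *
bridge_iflist_find_example(struct bridge_softc *sc, struct ifnet *ifp)
{
	struct bridge_iflist *bif;

	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
		if (bif->bif_ifp == ifp) {
			return bif;
		}
	}
	return NULL;
}
#endif /* example only */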
+
+#define BIFF_PROMISC 0x01 /* promiscuous mode set */
+#define BIFF_PROTO_ATTACHED 0x02 /* protocol attached */
+#define BIFF_FILTER_ATTACHED 0x04 /* interface filter attached */
+#define BIFF_MEDIA_ACTIVE 0x08 /* interface media active */
+#define BIFF_HOST_FILTER 0x10 /* host filter enabled */
+#define BIFF_HF_HWSRC 0x20 /* host filter source MAC is set */
+#define BIFF_HF_IPSRC 0x40 /* host filter source IP is set */
+#define BIFF_INPUT_BROADCAST 0x80 /* send broadcast packets in */
+
+/*
+ * mac_nat_entry
+ * - translates between an IP address and MAC address on a specific
+ * bridge interface member
+ */
+struct mac_nat_entry {
+ LIST_ENTRY(mac_nat_entry) mne_list; /* list linkage */
+ struct bridge_iflist *mne_bif; /* originating interface */
+ unsigned long mne_expire; /* expiration time */
+ union {
+ struct in_addr mneu_ip; /* originating IPv4 address */
+ struct in6_addr mneu_ip6; /* originating IPv6 address */
+ } mne_u;
+ uint8_t mne_mac[ETHER_ADDR_LEN];
+ uint8_t mne_flags;
+ uint8_t mne_reserved;
+};
+#define mne_ip mne_u.mneu_ip
+#define mne_ip6 mne_u.mneu_ip6
+
+#define MNE_FLAGS_IPV6 0x01 /* IPv6 address */
+
+LIST_HEAD(mac_nat_entry_list, mac_nat_entry);
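/*
 * Illustrative sketch (not part of this change): matching an IPv4
 * mac_nat_entry in one of the lists above; the helper name is
 * hypothetical.
 */
#if 0 /* example only */
static struct mac_nat_entry *
mac_nat_entry_find_v4_example(struct mac_nat_entry_list *list,
    struct in_addr ip)
{
	struct mac_nat_entry *mne;

	LIST_FOREACH(mne, list, mne_list) {
		if (mne->mne_ip.s_addr == ip.s_addr) {
			return mne;
		}
	}
	return NULL;
}
#endif /* example only */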
+
+/*
+ * mac_nat_record
+ * - used by bridge_mac_nat_output() to convey the translation that needs
+ * to take place in bridge_mac_nat_translate
+ * - holds enough information so that the translation can be done later without
+ * holding the bridge lock
+ */
+struct mac_nat_record {
+ uint16_t mnr_ether_type;
+ union {
+ uint16_t mnru_arp_offset;
+ struct {
+ uint16_t mnruip_dhcp_flags;
+ uint16_t mnruip_udp_csum;
+ uint8_t mnruip_header_len;
+ } mnru_ip;
+ struct {
+ uint16_t mnruip6_icmp6_len;
+ uint16_t mnruip6_lladdr_offset;
+ uint8_t mnruip6_icmp6_type;
+ uint8_t mnruip6_header_len;
+ } mnru_ip6;
+ } mnr_u;
};
-#define BIFF_PROMISC 0x01 /* promiscuous mode set */
-#define BIFF_PROTO_ATTACHED 0x02 /* protocol attached */
-#define BIFF_FILTER_ATTACHED 0x04 /* interface filter attached */
-#define BIFF_MEDIA_ACTIVE 0x08 /* interface media active */
-#define BIFF_HOST_FILTER 0x10 /* host filter enabled */
-#define BIFF_HF_HWSRC 0x20 /* host filter source MAC is set */
-#define BIFF_HF_IPSRC 0x40 /* host filter source IP is set */
+#define mnr_arp_offset mnr_u.mnru_arp_offset
+
+#define mnr_ip_header_len mnr_u.mnru_ip.mnruip_header_len
+#define mnr_ip_dhcp_flags mnr_u.mnru_ip.mnruip_dhcp_flags
+#define mnr_ip_udp_csum mnr_u.mnru_ip.mnruip_udp_csum
+
+#define mnr_ip6_icmp6_len mnr_u.mnru_ip6.mnruip6_icmp6_len
+#define mnr_ip6_icmp6_type mnr_u.mnru_ip6.mnruip6_icmp6_type
+#define mnr_ip6_header_len mnr_u.mnru_ip6.mnruip6_header_len
+#define mnr_ip6_lladdr_offset mnr_u.mnru_ip6.mnruip6_lladdr_offset
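/*
 * Illustrative sketch (not part of this change) of the two-phase output
 * path described above: the record is filled while the bridge lock is
 * held, and the packet is rewritten after the lock is dropped.  "sc",
 * "bif" and "m" are assumed in scope; "eaddr" (the MAC address to
 * write) is hypothetical.
 */
#if 0 /* example only */
	struct mac_nat_record mnr;
	boolean_t translate;

	BRIDGE_LOCK(sc);
	translate = bridge_mac_nat_output(sc, bif, &m, &mnr);
	BRIDGE_UNLOCK(sc);
	if (translate && m != NULL) {
		bridge_mac_nat_translate(&m, &mnr, eaddr);
	}
#endif /* example only */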
/*
* Bridge route node.
*/
struct bridge_rtnode {
- LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */
- LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */
- struct bridge_iflist *brt_dst; /* destination if */
- unsigned long brt_expire; /* expiration time */
- uint8_t brt_flags; /* address flags */
- uint8_t brt_addr[ETHER_ADDR_LEN];
- uint16_t brt_vlan; /* vlan id */
+ LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */
+ LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */
+ struct bridge_iflist *brt_dst; /* destination if */
+ unsigned long brt_expire; /* expiration time */
+ uint8_t brt_flags; /* address flags */
+ uint8_t brt_addr[ETHER_ADDR_LEN];
+ uint16_t brt_vlan; /* vlan id */
};
-#define brt_ifp brt_dst->bif_ifp
+#define brt_ifp brt_dst->bif_ifp
/*
 * Bridge delayed function call context
 */
typedef void (*bridge_delayed_func_t)(struct bridge_softc *);
struct bridge_delayed_call {
- struct bridge_softc *bdc_sc;
- bridge_delayed_func_t bdc_func; /* Function to call */
- struct timespec bdc_ts; /* Time to call */
- u_int32_t bdc_flags;
- thread_call_t bdc_thread_call;
+ struct bridge_softc *bdc_sc;
+ bridge_delayed_func_t bdc_func; /* Function to call */
+ struct timespec bdc_ts; /* Time to call */
+ u_int32_t bdc_flags;
+ thread_call_t bdc_thread_call;
};
-#define BDCF_OUTSTANDING 0x01 /* Delayed call has been scheduled */
-#define BDCF_CANCELLING 0x02 /* May be waiting for call completion */
-
+#define BDCF_OUTSTANDING 0x01 /* Delayed call has been scheduled */
+#define BDCF_CANCELLING 0x02 /* May be waiting for call completion */
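/*
 * Illustrative sketch (not part of this change): a delayed call is
 * armed by filling in the context and handing it to
 * bridge_schedule_delayed_call(), declared further below; the interval
 * shown is only an example.
 */
#if 0 /* example only */
	sc->sc_aging_timer.bdc_sc = sc;
	sc->sc_aging_timer.bdc_func = bridge_aging_timer;
	sc->sc_aging_timer.bdc_ts.tv_sec = bridge_rtable_prune_period;
	bridge_schedule_delayed_call(&sc->sc_aging_timer);
#endif /* example only */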
/*
* Software state for each bridge.
*/
LIST_HEAD(_bridge_rtnode_list, bridge_rtnode);
-typedef struct {
- struct _bridge_rtnode_list *bb_rthash; /* our forwarding table */
- struct _bridge_rtnode_list bb_rtlist; /* list version of above */
- uint32_t bb_rthash_key; /* key for hash */
- uint32_t bb_rthash_size; /* size of the hash table */
- struct bridge_delayed_call bb_aging_timer;
- struct bridge_delayed_call bb_resize_call;
- TAILQ_HEAD(, bridge_iflist) bb_spanlist; /* span ports list */
- struct bstp_state bb_stp; /* STP state */
- bpf_packet_func bb_bpf_input;
- bpf_packet_func bb_bpf_output;
-} bridge_bsd, *bridge_bsd_t;
-
-#define sc_rthash sc_u.scu_bsd.bb_rthash
-#define sc_rtlist sc_u.scu_bsd.bb_rtlist
-#define sc_rthash_key sc_u.scu_bsd.bb_rthash_key
-#define sc_rthash_size sc_u.scu_bsd.bb_rthash_size
-#define sc_aging_timer sc_u.scu_bsd.bb_aging_timer
-#define sc_resize_call sc_u.scu_bsd.bb_resize_call
-#define sc_spanlist sc_u.scu_bsd.bb_spanlist
-#define sc_stp sc_u.scu_bsd.bb_stp
-#define sc_bpf_input sc_u.scu_bsd.bb_bpf_input
-#define sc_bpf_output sc_u.scu_bsd.bb_bpf_output
-
struct bridge_softc {
- struct ifnet *sc_ifp; /* make this an interface */
- u_int32_t sc_flags;
- union {
- bridge_bsd scu_bsd;
- } sc_u;
+ struct ifnet *sc_ifp; /* make this an interface */
+ u_int32_t sc_flags;
LIST_ENTRY(bridge_softc) sc_list;
- decl_lck_mtx_data(, sc_mtx);
- void *sc_cv;
- uint32_t sc_brtmax; /* max # of addresses */
- uint32_t sc_brtcnt; /* cur. # of addresses */
- uint32_t sc_brttimeout; /* rt timeout in seconds */
- uint32_t sc_iflist_ref; /* refcount for sc_iflist */
- uint32_t sc_iflist_xcnt; /* refcount for sc_iflist */
- TAILQ_HEAD(, bridge_iflist) sc_iflist; /* member interface list */
- uint32_t sc_brtexceeded; /* # of cache drops */
- uint32_t sc_filter_flags; /* ipf and flags */
- struct ifnet *sc_ifaddr; /* member mac copied from */
- u_char sc_defaddr[6]; /* Default MAC address */
- char sc_if_xname[IFNAMSIZ];
-
+ decl_lck_mtx_data(, sc_mtx);
+ struct _bridge_rtnode_list *sc_rthash; /* our forwarding table */
+ struct _bridge_rtnode_list sc_rtlist; /* list version of above */
+ uint32_t sc_rthash_key; /* key for hash */
+ uint32_t sc_rthash_size; /* size of the hash table */
+ struct bridge_delayed_call sc_aging_timer;
+ struct bridge_delayed_call sc_resize_call;
+ TAILQ_HEAD(, bridge_iflist) sc_spanlist; /* span ports list */
+ struct bstp_state sc_stp; /* STP state */
+ bpf_packet_func sc_bpf_input;
+ bpf_packet_func sc_bpf_output;
+ void *sc_cv;
+ uint32_t sc_brtmax; /* max # of addresses */
+ uint32_t sc_brtcnt; /* cur. # of addresses */
+ uint32_t sc_brttimeout; /* rt timeout in seconds */
+ uint32_t sc_iflist_ref; /* refcount for sc_iflist */
+ uint32_t sc_iflist_xcnt; /* refcount for sc_iflist */
+ TAILQ_HEAD(, bridge_iflist) sc_iflist; /* member interface list */
+ uint32_t sc_brtexceeded; /* # of cache drops */
+ uint32_t sc_filter_flags; /* ipf and flags */
+ struct ifnet *sc_ifaddr; /* member mac copied from */
+ u_char sc_defaddr[6]; /* Default MAC address */
+ char sc_if_xname[IFNAMSIZ];
+
+ struct bridge_iflist *sc_mac_nat_bif; /* single MAC NAT interface */
+ struct mac_nat_entry_list sc_mne_list; /* MAC NAT IPv4 */
+ struct mac_nat_entry_list sc_mne_list_v6;/* MAC NAT IPv6 */
+ uint32_t sc_mne_max; /* max # of entries */
+ uint32_t sc_mne_count; /* cur. # of entries */
+ uint32_t sc_mne_allocation_failures;
#if BRIDGE_DEBUG
/*
* Locking and unlocking calling history
*/
- void *lock_lr[BR_LCKDBG_MAX];
- int next_lock_lr;
- void *unlock_lr[BR_LCKDBG_MAX];
- int next_unlock_lr;
+ void *lock_lr[BR_LCKDBG_MAX];
+ int next_lock_lr;
+ void *unlock_lr[BR_LCKDBG_MAX];
+ int next_unlock_lr;
#endif /* BRIDGE_DEBUG */
};
-#define SCF_DETACHING 0x01
-#define SCF_RESIZING 0x02
-#define SCF_MEDIA_ACTIVE 0x04
-#define SCF_BSD_MODE 0x08
+#define SCF_DETACHING 0x01
+#define SCF_RESIZING 0x02
+#define SCF_MEDIA_ACTIVE 0x04
-static inline void
-bridge_set_bsd_mode(struct bridge_softc * sc)
-{
- sc->sc_flags |= SCF_BSD_MODE;
-}
-
-static inline boolean_t
-bridge_in_bsd_mode(const struct bridge_softc * sc)
-{
- return ((sc->sc_flags & SCF_BSD_MODE) != 0);
-}
+typedef enum {
+ kChecksumOperationNone = 0,
+ kChecksumOperationClear = 1,
+ kChecksumOperationFinalize = 2,
+ kChecksumOperationCompute = 3,
+} ChecksumOperation;
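/*
 * Illustrative sketch (not part of this change): one plausible way
 * bridge_enqueue() could act on its ChecksumOperation argument;
 * "cksum_op", "m" and "dst_ifp" are assumed in scope, and the
 * software-checksum branch is only indicated, not implemented.
 */
#if 0 /* example only */
	switch (cksum_op) {
	case kChecksumOperationClear:
		/* drop checksum offload metadata carried in from the source */
		m->m_pkthdr.csum_flags = 0;
		break;
	case kChecksumOperationFinalize:
		/* complete checksums the source interface left for offload */
		bridge_finalize_cksum(dst_ifp, m);
		break;
	case kChecksumOperationCompute:
		/* compute the checksums in software, e.g. after segmentation */
		break;
	case kChecksumOperationNone:
	default:
		break;
	}
#endif /* example only */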
struct bridge_hostfilter_stats bridge_hostfilter_stats;
decl_lck_mtx_data(static, bridge_list_mtx);
-static int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
+static int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
-static zone_t bridge_rtnode_pool = NULL;
+static ZONE_DECLARE(bridge_rtnode_pool, "bridge_rtnode",
+ sizeof(struct bridge_rtnode), ZC_NONE);
+static ZONE_DECLARE(bridge_mne_pool, "bridge_mac_nat_entry",
+ sizeof(struct mac_nat_entry), ZC_NONE);
-static int bridge_clone_create(struct if_clone *, uint32_t, void *);
-static int bridge_clone_destroy(struct ifnet *);
+static int bridge_clone_create(struct if_clone *, uint32_t, void *);
+static int bridge_clone_destroy(struct ifnet *);
-static errno_t bridge_ioctl(struct ifnet *, u_long, void *);
+static errno_t bridge_ioctl(struct ifnet *, u_long, void *);
#if HAS_IF_CAP
-static void bridge_mutecaps(struct bridge_softc *);
-static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
- int);
+static void bridge_mutecaps(struct bridge_softc *);
+static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
+ int);
#endif
static errno_t bridge_set_tso(struct bridge_softc *);
-__private_extern__ void bridge_ifdetach(struct bridge_iflist *, struct ifnet *);
-static int bridge_init(struct ifnet *);
+static void bridge_ifdetach(struct ifnet *);
+static void bridge_proto_attach_changed(struct ifnet *);
+static int bridge_init(struct ifnet *);
#if HAS_BRIDGE_DUMMYNET
-static void bridge_dummynet(struct mbuf *, struct ifnet *);
-#endif
-static void bridge_ifstop(struct ifnet *, int);
-static int bridge_output(struct ifnet *, struct mbuf *);
-static void bridge_finalize_cksum(struct ifnet *, struct mbuf *);
-static void bridge_start(struct ifnet *);
-__private_extern__ errno_t bridge_input(struct ifnet *, struct mbuf *, void *);
-#if BRIDGE_MEMBER_OUT_FILTER
-static errno_t bridge_iff_output(void *, ifnet_t, protocol_family_t,
- mbuf_t *);
-static int bridge_member_output(struct ifnet *, struct mbuf *,
- struct sockaddr *, struct rtentry *);
+static void bridge_dummynet(struct mbuf *, struct ifnet *);
#endif
-static int bridge_enqueue(struct bridge_softc *, struct ifnet *,
- struct mbuf *);
-static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
-
-static void bridge_forward(struct bridge_softc *, struct bridge_iflist *,
- struct mbuf *);
-
-static void bridge_aging_timer(struct bridge_softc *sc);
-
-static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
- struct mbuf *, int);
-static void bridge_span(struct bridge_softc *, struct mbuf *);
-
-static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
- uint16_t, struct bridge_iflist *, int, uint8_t);
+static void bridge_ifstop(struct ifnet *, int);
+static int bridge_output(struct ifnet *, struct mbuf *);
+static void bridge_finalize_cksum(struct ifnet *, struct mbuf *);
+static void bridge_start(struct ifnet *);
+static errno_t bridge_input(struct ifnet *, mbuf_t *);
+static errno_t bridge_iff_input(void *, ifnet_t, protocol_family_t,
+ mbuf_t *, char **);
+static errno_t bridge_iff_output(void *, ifnet_t, protocol_family_t,
+ mbuf_t *);
+static errno_t bridge_member_output(struct bridge_softc *sc, ifnet_t ifp,
+ mbuf_t *m);
+
+static int bridge_enqueue(ifnet_t, struct ifnet *,
+ struct ifnet *, struct mbuf *, ChecksumOperation);
+static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
+
+static void bridge_forward(struct bridge_softc *, struct bridge_iflist *,
+ struct mbuf *);
+
+static void bridge_aging_timer(struct bridge_softc *sc);
+
+static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
+ struct mbuf *, int);
+static void bridge_span(struct bridge_softc *, struct mbuf *);
+
+static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
+ uint16_t, struct bridge_iflist *, int, uint8_t);
static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
- uint16_t);
-static void bridge_rttrim(struct bridge_softc *);
-static void bridge_rtage(struct bridge_softc *);
-static void bridge_rtflush(struct bridge_softc *, int);
-static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
- uint16_t);
+ uint16_t);
+static void bridge_rttrim(struct bridge_softc *);
+static void bridge_rtage(struct bridge_softc *);
+static void bridge_rtflush(struct bridge_softc *, int);
+static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
+ uint16_t);
-static int bridge_rtable_init(struct bridge_softc *);
-static void bridge_rtable_fini(struct bridge_softc *);
+static int bridge_rtable_init(struct bridge_softc *);
+static void bridge_rtable_fini(struct bridge_softc *);
-static void bridge_rthash_resize(struct bridge_softc *);
+static void bridge_rthash_resize(struct bridge_softc *);
-static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
+static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
- const uint8_t *, uint16_t);
-static int bridge_rtnode_hash(struct bridge_softc *,
- struct bridge_rtnode *);
-static int bridge_rtnode_insert(struct bridge_softc *,
- struct bridge_rtnode *);
-static void bridge_rtnode_destroy(struct bridge_softc *,
- struct bridge_rtnode *);
+ const uint8_t *, uint16_t);
+static int bridge_rtnode_hash(struct bridge_softc *,
+ struct bridge_rtnode *);
+static int bridge_rtnode_insert(struct bridge_softc *,
+ struct bridge_rtnode *);
+static void bridge_rtnode_destroy(struct bridge_softc *,
+ struct bridge_rtnode *);
#if BRIDGESTP
-static void bridge_rtable_expire(struct ifnet *, int);
-static void bridge_state_change(struct ifnet *, int);
+static void bridge_rtable_expire(struct ifnet *, int);
+static void bridge_state_change(struct ifnet *, int);
#endif /* BRIDGESTP */
static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
- const char *name);
+ const char *name);
static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
- struct ifnet *ifp);
-static void bridge_delete_member(struct bridge_softc *,
- struct bridge_iflist *, int);
-static void bridge_delete_span(struct bridge_softc *,
- struct bridge_iflist *);
-
-static int bridge_ioctl_add(struct bridge_softc *, void *);
-static int bridge_ioctl_del(struct bridge_softc *, void *);
-static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
-static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
-static int bridge_ioctl_scache(struct bridge_softc *, void *);
-static int bridge_ioctl_gcache(struct bridge_softc *, void *);
-static int bridge_ioctl_gifs32(struct bridge_softc *, void *);
-static int bridge_ioctl_gifs64(struct bridge_softc *, void *);
-static int bridge_ioctl_rts32(struct bridge_softc *, void *);
-static int bridge_ioctl_rts64(struct bridge_softc *, void *);
-static int bridge_ioctl_saddr32(struct bridge_softc *, void *);
-static int bridge_ioctl_saddr64(struct bridge_softc *, void *);
-static int bridge_ioctl_sto(struct bridge_softc *, void *);
-static int bridge_ioctl_gto(struct bridge_softc *, void *);
-static int bridge_ioctl_daddr32(struct bridge_softc *, void *);
-static int bridge_ioctl_daddr64(struct bridge_softc *, void *);
-static int bridge_ioctl_flush(struct bridge_softc *, void *);
-static int bridge_ioctl_gpri(struct bridge_softc *, void *);
-static int bridge_ioctl_spri(struct bridge_softc *, void *);
-static int bridge_ioctl_ght(struct bridge_softc *, void *);
-static int bridge_ioctl_sht(struct bridge_softc *, void *);
-static int bridge_ioctl_gfd(struct bridge_softc *, void *);
-static int bridge_ioctl_sfd(struct bridge_softc *, void *);
-static int bridge_ioctl_gma(struct bridge_softc *, void *);
-static int bridge_ioctl_sma(struct bridge_softc *, void *);
-static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
-static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
-static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
-static int bridge_ioctl_addspan(struct bridge_softc *, void *);
-static int bridge_ioctl_delspan(struct bridge_softc *, void *);
-static int bridge_ioctl_gbparam32(struct bridge_softc *, void *);
-static int bridge_ioctl_gbparam64(struct bridge_softc *, void *);
-static int bridge_ioctl_grte(struct bridge_softc *, void *);
-static int bridge_ioctl_gifsstp32(struct bridge_softc *, void *);
-static int bridge_ioctl_gifsstp64(struct bridge_softc *, void *);
-static int bridge_ioctl_sproto(struct bridge_softc *, void *);
-static int bridge_ioctl_stxhc(struct bridge_softc *, void *);
-static int bridge_ioctl_purge(struct bridge_softc *sc, void *);
-static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
-static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
-static int bridge_ioctl_ghostfilter(struct bridge_softc *, void *);
-static int bridge_ioctl_shostfilter(struct bridge_softc *, void *);
-#ifdef PFIL_HOOKS
-static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
- int);
-static int bridge_ip_checkbasic(struct mbuf **);
-#ifdef INET6
-static int bridge_ip6_checkbasic(struct mbuf **);
-#endif /* INET6 */
-static int bridge_fragment(struct ifnet *, struct mbuf *,
- struct ether_header *, int, struct llc *);
-#endif /* PFIL_HOOKS */
+ struct ifnet *ifp);
+static void bridge_delete_member(struct bridge_softc *,
+ struct bridge_iflist *, int);
+static void bridge_delete_span(struct bridge_softc *,
+ struct bridge_iflist *);
+
+static int bridge_ioctl_add(struct bridge_softc *, void *);
+static int bridge_ioctl_del(struct bridge_softc *, void *);
+static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
+static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
+static int bridge_ioctl_scache(struct bridge_softc *, void *);
+static int bridge_ioctl_gcache(struct bridge_softc *, void *);
+static int bridge_ioctl_gifs32(struct bridge_softc *, void *);
+static int bridge_ioctl_gifs64(struct bridge_softc *, void *);
+static int bridge_ioctl_rts32(struct bridge_softc *, void *);
+static int bridge_ioctl_rts64(struct bridge_softc *, void *);
+static int bridge_ioctl_saddr32(struct bridge_softc *, void *);
+static int bridge_ioctl_saddr64(struct bridge_softc *, void *);
+static int bridge_ioctl_sto(struct bridge_softc *, void *);
+static int bridge_ioctl_gto(struct bridge_softc *, void *);
+static int bridge_ioctl_daddr32(struct bridge_softc *, void *);
+static int bridge_ioctl_daddr64(struct bridge_softc *, void *);
+static int bridge_ioctl_flush(struct bridge_softc *, void *);
+static int bridge_ioctl_gpri(struct bridge_softc *, void *);
+static int bridge_ioctl_spri(struct bridge_softc *, void *);
+static int bridge_ioctl_ght(struct bridge_softc *, void *);
+static int bridge_ioctl_sht(struct bridge_softc *, void *);
+static int bridge_ioctl_gfd(struct bridge_softc *, void *);
+static int bridge_ioctl_sfd(struct bridge_softc *, void *);
+static int bridge_ioctl_gma(struct bridge_softc *, void *);
+static int bridge_ioctl_sma(struct bridge_softc *, void *);
+static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
+static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
+static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
+static int bridge_ioctl_addspan(struct bridge_softc *, void *);
+static int bridge_ioctl_delspan(struct bridge_softc *, void *);
+static int bridge_ioctl_gbparam32(struct bridge_softc *, void *);
+static int bridge_ioctl_gbparam64(struct bridge_softc *, void *);
+static int bridge_ioctl_grte(struct bridge_softc *, void *);
+static int bridge_ioctl_gifsstp32(struct bridge_softc *, void *);
+static int bridge_ioctl_gifsstp64(struct bridge_softc *, void *);
+static int bridge_ioctl_sproto(struct bridge_softc *, void *);
+static int bridge_ioctl_stxhc(struct bridge_softc *, void *);
+static int bridge_ioctl_purge(struct bridge_softc *sc, void *);
+static int bridge_ioctl_gfilt(struct bridge_softc *, void *);
+static int bridge_ioctl_sfilt(struct bridge_softc *, void *);
+static int bridge_ioctl_ghostfilter(struct bridge_softc *, void *);
+static int bridge_ioctl_shostfilter(struct bridge_softc *, void *);
+static int bridge_ioctl_gmnelist32(struct bridge_softc *, void *);
+static int bridge_ioctl_gmnelist64(struct bridge_softc *, void *);
+
+static int bridge_pf(struct mbuf **, struct ifnet *, uint32_t sc_filter_flags, int input);
+static int bridge_ip_checkbasic(struct mbuf **);
+static int bridge_ip6_checkbasic(struct mbuf **);
static errno_t bridge_set_bpf_tap(ifnet_t, bpf_tap_mode, bpf_packet_func);
-__private_extern__ errno_t bridge_bpf_input(ifnet_t, struct mbuf *);
-__private_extern__ errno_t bridge_bpf_output(ifnet_t, struct mbuf *);
+static errno_t bridge_bpf_input(ifnet_t, struct mbuf *, const char *, int);
+static errno_t bridge_bpf_output(ifnet_t, struct mbuf *);
static void bridge_detach(ifnet_t);
static void bridge_link_event(struct ifnet *, u_int32_t);
static void bridge_schedule_delayed_call(struct bridge_delayed_call *);
static void bridge_cancel_delayed_call(struct bridge_delayed_call *);
static void bridge_cleanup_delayed_call(struct bridge_delayed_call *);
-static int bridge_host_filter(struct bridge_iflist *, struct mbuf *);
+static int bridge_host_filter(struct bridge_iflist *, mbuf_t *);
+
+static errno_t bridge_mac_nat_enable(struct bridge_softc *,
+ struct bridge_iflist *);
+static void bridge_mac_nat_disable(struct bridge_softc *sc);
+static void bridge_mac_nat_age_entries(struct bridge_softc *sc, unsigned long);
+static void bridge_mac_nat_populate_entries(struct bridge_softc *sc);
+static void bridge_mac_nat_flush_entries(struct bridge_softc *sc,
+ struct bridge_iflist *);
+static ifnet_t bridge_mac_nat_input(struct bridge_softc *, mbuf_t *,
+ boolean_t *);
+static boolean_t bridge_mac_nat_output(struct bridge_softc *,
+ struct bridge_iflist *, mbuf_t *, struct mac_nat_record *);
+static void bridge_mac_nat_translate(mbuf_t *, struct mac_nat_record *,
+ const caddr_t);
+static boolean_t is_broadcast_ip_packet(mbuf_t *);
+
-#define m_copypacket(m, how) m_copym(m, 0, M_COPYALL, how)
+#define m_copypacket(m, how) m_copym(m, 0, M_COPYALL, how)
+static int
+gso_ipv4_tcp(struct ifnet *ifp, struct mbuf **mp, u_int mac_hlen,
+    boolean_t is_tx);
+static int
+gso_ipv6_tcp(struct ifnet *ifp, struct mbuf **mp, u_int mac_hlen,
+    boolean_t is_tx);
/* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
-#define VLANTAGOF(_m) 0
+#define VLANTAGOF(_m) 0
u_int8_t bstp_etheraddr[ETHER_ADDR_LEN] =
- { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static u_int8_t ethernulladdr[ETHER_ADDR_LEN] =
- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
#if BRIDGESTP
static struct bstp_cb_ops bridge_ops = {
#endif /* BRIDGESTP */
SYSCTL_DECL(_net_link);
-SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
- "Bridge");
+SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
+ "Bridge");
static int bridge_inherit_mac = 0; /* share MAC with first bridge member */
SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
- CTLFLAG_RW|CTLFLAG_LOCKED,
- &bridge_inherit_mac, 0,
- "Inherit MAC address from the first bridge member");
+ CTLFLAG_RW | CTLFLAG_LOCKED,
+ &bridge_inherit_mac, 0,
+ "Inherit MAC address from the first bridge member");
SYSCTL_INT(_net_link_bridge, OID_AUTO, rtable_prune_period,
- CTLFLAG_RW|CTLFLAG_LOCKED,
- &bridge_rtable_prune_period, 0,
- "Interval between pruning of routing table");
+ CTLFLAG_RW | CTLFLAG_LOCKED,
+ &bridge_rtable_prune_period, 0,
+ "Interval between pruning of routing table");
static unsigned int bridge_rtable_hash_size_max = BRIDGE_RTHASH_SIZE_MAX;
SYSCTL_UINT(_net_link_bridge, OID_AUTO, rtable_hash_size_max,
- CTLFLAG_RW|CTLFLAG_LOCKED,
- &bridge_rtable_hash_size_max, 0,
- "Maximum size of the routing hash table");
+ CTLFLAG_RW | CTLFLAG_LOCKED,
+ &bridge_rtable_hash_size_max, 0,
+ "Maximum size of the routing hash table");
#if BRIDGE_DEBUG_DELAYED_CALLBACK
static int bridge_delayed_callback_delay = 0;
SYSCTL_INT(_net_link_bridge, OID_AUTO, delayed_callback_delay,
- CTLFLAG_RW|CTLFLAG_LOCKED,
- &bridge_delayed_callback_delay, 0,
- "Delay before calling delayed function");
+ CTLFLAG_RW | CTLFLAG_LOCKED,
+ &bridge_delayed_callback_delay, 0,
+ "Delay before calling delayed function");
#endif
-static int bridge_bsd_mode = 1;
-#if (DEVELOPMENT || DEBUG)
-SYSCTL_INT(_net_link_bridge, OID_AUTO, bsd_mode,
- CTLFLAG_RW|CTLFLAG_LOCKED,
- &bridge_bsd_mode, 0,
- "Bridge using bsd mode");
-#endif /* (DEVELOPMENT || DEBUG) */
-
SYSCTL_STRUCT(_net_link_bridge, OID_AUTO,
- hostfilterstats, CTLFLAG_RD | CTLFLAG_LOCKED,
- &bridge_hostfilter_stats, bridge_hostfilter_stats, "");
-
-#if defined(PFIL_HOOKS)
-static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
-static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
-static int pfil_member = 1; /* run pfil hooks on the member interface */
-static int pfil_ipfw = 0; /* layer2 filter with ipfw */
-static int pfil_ipfw_arp = 0; /* layer2 filter with ipfw */
-static int pfil_local_phys = 0; /* run pfil hooks on the physical interface */
- /* for locally destined packets */
-SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW|CTLFLAG_LOCKED,
- &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
-SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW|CTLFLAG_LOCKED,
- &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2");
-SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW|CTLFLAG_LOCKED,
- &pfil_bridge, 0, "Packet filter on the bridge interface");
-SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW|CTLFLAG_LOCKED,
- &pfil_member, 0, "Packet filter on the member interface");
-SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
- CTLFLAG_RW|CTLFLAG_LOCKED, &pfil_local_phys, 0,
- "Packet filter on the physical interface for locally destined packets");
-#endif /* PFIL_HOOKS */
+ hostfilterstats, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &bridge_hostfilter_stats, bridge_hostfilter_stats, "");
+
#if BRIDGESTP
static int log_stp = 0; /* log STP state changes */
SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW,
- &log_stp, 0, "Log STP state changes");
+ &log_stp, 0, "Log STP state changes");
#endif /* BRIDGESTP */
struct bridge_control {
- int (*bc_func)(struct bridge_softc *, void *);
- unsigned int bc_argsize;
- unsigned int bc_flags;
+ int (*bc_func)(struct bridge_softc *, void *);
+ unsigned int bc_argsize;
+ unsigned int bc_flags;
};
-#define BC_F_COPYIN 0x01 /* copy arguments in */
-#define BC_F_COPYOUT 0x02 /* copy arguments out */
-#define BC_F_SUSER 0x04 /* do super-user check */
+#define BC_F_COPYIN 0x01 /* copy arguments in */
+#define BC_F_COPYOUT 0x02 /* copy arguments out */
+#define BC_F_SUSER 0x04 /* do super-user check */
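/*
 * Illustrative sketch (not part of this change): how bc_argsize and
 * bc_flags drive bridge_ioctl()'s table-driven dispatch.  The index
 * "cmd", the user pointer "user_addr" and the fixed-size "args" buffer
 * are hypothetical, and the bounds and privilege checks are only
 * indicated.
 */
#if 0 /* example only */
	const struct bridge_control *bc = &bridge_control_table32[cmd];
	char args[256];		/* assumed >= bc->bc_argsize here */
	int error = 0;

	if ((bc->bc_flags & BC_F_SUSER) != 0) {
		/* reject callers that lack super-user privilege */
	}
	if ((bc->bc_flags & BC_F_COPYIN) != 0) {
		error = copyin(user_addr, args, bc->bc_argsize);
	}
	if (error == 0) {
		error = (*bc->bc_func)(sc, args);
	}
	if (error == 0 && (bc->bc_flags & BC_F_COPYOUT) != 0) {
		error = copyout(args, user_addr, bc->bc_argsize);
	}
#endif /* example only */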
static const struct bridge_control bridge_control_table32[] = {
- { bridge_ioctl_add, sizeof (struct ifbreq), /* 0 */
- BC_F_COPYIN|BC_F_SUSER },
- { bridge_ioctl_del, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_add, .bc_argsize = sizeof(struct ifbreq), /* 0 */
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
+ { .bc_func = bridge_ioctl_del, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_gifflags, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_COPYOUT },
- { bridge_ioctl_sifflags, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_gifflags, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_sifflags, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_scache, sizeof (struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
- { bridge_ioctl_gcache, sizeof (struct ifbrparam),
- BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_scache, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
+ { .bc_func = bridge_ioctl_gcache, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYOUT },
- { bridge_ioctl_gifs32, sizeof (struct ifbifconf32),
- BC_F_COPYIN|BC_F_COPYOUT },
- { bridge_ioctl_rts32, sizeof (struct ifbaconf32),
- BC_F_COPYIN|BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_gifs32, .bc_argsize = sizeof(struct ifbifconf32),
+ .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_rts32, .bc_argsize = sizeof(struct ifbaconf32),
+ .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
- { bridge_ioctl_saddr32, sizeof (struct ifbareq32),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_saddr32, .bc_argsize = sizeof(struct ifbareq32),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_sto, sizeof (struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
- { bridge_ioctl_gto, sizeof (struct ifbrparam), /* 10 */
- BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_sto, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
+ { .bc_func = bridge_ioctl_gto, .bc_argsize = sizeof(struct ifbrparam), /* 10 */
+ .bc_flags = BC_F_COPYOUT },
- { bridge_ioctl_daddr32, sizeof (struct ifbareq32),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_daddr32, .bc_argsize = sizeof(struct ifbareq32),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_flush, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_flush, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_gpri, sizeof (struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_spri, sizeof (struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_gpri, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_spri, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_ght, sizeof (struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_sht, sizeof (struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_ght, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_sht, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_gfd, sizeof (struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_sfd, sizeof (struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_gfd, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_sfd, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_gma, sizeof (struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_sma, sizeof (struct ifbrparam), /* 20 */
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_gma, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_sma, .bc_argsize = sizeof(struct ifbrparam), /* 20 */
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_sifprio, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_sifprio, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_sifcost, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_sifcost, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_gfilt, sizeof (struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_sfilt, sizeof (struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_gfilt, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_sfilt, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_purge, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_purge, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_addspan, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
- { bridge_ioctl_delspan, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_addspan, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
+ { .bc_func = bridge_ioctl_delspan, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_gbparam32, sizeof (struct ifbropreq32),
- BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_gbparam32, .bc_argsize = sizeof(struct ifbropreq32),
+ .bc_flags = BC_F_COPYOUT },
- { bridge_ioctl_grte, sizeof (struct ifbrparam),
- BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_grte, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYOUT },
- { bridge_ioctl_gifsstp32, sizeof (struct ifbpstpconf32), /* 30 */
- BC_F_COPYIN|BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_gifsstp32, .bc_argsize = sizeof(struct ifbpstpconf32), /* 30 */
+ .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
- { bridge_ioctl_sproto, sizeof (struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_sproto, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_stxhc, sizeof (struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_stxhc, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_sifmaxaddr, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_sifmaxaddr, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_ghostfilter, sizeof (struct ifbrhostfilter),
- BC_F_COPYIN|BC_F_COPYOUT },
- { bridge_ioctl_shostfilter, sizeof (struct ifbrhostfilter),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_ghostfilter, .bc_argsize = sizeof(struct ifbrhostfilter),
+ .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_shostfilter, .bc_argsize = sizeof(struct ifbrhostfilter),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
+
+ { .bc_func = bridge_ioctl_gmnelist32, .bc_argsize = sizeof(struct ifbrmnelist32),
+ .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
};
static const struct bridge_control bridge_control_table64[] = {
- { bridge_ioctl_add, sizeof (struct ifbreq), /* 0 */
- BC_F_COPYIN|BC_F_SUSER },
- { bridge_ioctl_del, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_add, .bc_argsize = sizeof(struct ifbreq), /* 0 */
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
+ { .bc_func = bridge_ioctl_del, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
+
+ { .bc_func = bridge_ioctl_gifflags, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_sifflags, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_gifflags, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_COPYOUT },
- { bridge_ioctl_sifflags, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_scache, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
+ { .bc_func = bridge_ioctl_gcache, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYOUT },
- { bridge_ioctl_scache, sizeof (struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
- { bridge_ioctl_gcache, sizeof (struct ifbrparam),
- BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_gifs64, .bc_argsize = sizeof(struct ifbifconf64),
+ .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_rts64, .bc_argsize = sizeof(struct ifbaconf64),
+ .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
- { bridge_ioctl_gifs64, sizeof (struct ifbifconf64),
- BC_F_COPYIN|BC_F_COPYOUT },
- { bridge_ioctl_rts64, sizeof (struct ifbaconf64),
- BC_F_COPYIN|BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_saddr64, .bc_argsize = sizeof(struct ifbareq64),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_saddr64, sizeof (struct ifbareq64),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_sto, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
+ { .bc_func = bridge_ioctl_gto, .bc_argsize = sizeof(struct ifbrparam), /* 10 */
+ .bc_flags = BC_F_COPYOUT },
- { bridge_ioctl_sto, sizeof (struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
- { bridge_ioctl_gto, sizeof (struct ifbrparam), /* 10 */
- BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_daddr64, .bc_argsize = sizeof(struct ifbareq64),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_daddr64, sizeof (struct ifbareq64),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_flush, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_flush, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_gpri, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_spri, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_gpri, sizeof (struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_spri, sizeof (struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_ght, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_sht, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_ght, sizeof (struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_sht, sizeof (struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_gfd, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_sfd, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_gfd, sizeof (struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_sfd, sizeof (struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_gma, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_sma, .bc_argsize = sizeof(struct ifbrparam), /* 20 */
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_gma, sizeof (struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_sma, sizeof (struct ifbrparam), /* 20 */
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_sifprio, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_sifprio, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_sifcost, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_sifcost, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_gfilt, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_sfilt, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_gfilt, sizeof (struct ifbrparam),
- BC_F_COPYOUT },
- { bridge_ioctl_sfilt, sizeof (struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_purge, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_purge, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_addspan, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
+ { .bc_func = bridge_ioctl_delspan, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_addspan, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
- { bridge_ioctl_delspan, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_gbparam64, .bc_argsize = sizeof(struct ifbropreq64),
+ .bc_flags = BC_F_COPYOUT },
- { bridge_ioctl_gbparam64, sizeof (struct ifbropreq64),
- BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_grte, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYOUT },
- { bridge_ioctl_grte, sizeof (struct ifbrparam),
- BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_gifsstp64, .bc_argsize = sizeof(struct ifbpstpconf64), /* 30 */
+ .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
- { bridge_ioctl_gifsstp64, sizeof (struct ifbpstpconf64), /* 30 */
- BC_F_COPYIN|BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_sproto, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_sproto, sizeof (struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_stxhc, .bc_argsize = sizeof(struct ifbrparam),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_stxhc, sizeof (struct ifbrparam),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_sifmaxaddr, .bc_argsize = sizeof(struct ifbreq),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_sifmaxaddr, sizeof (struct ifbreq),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_ghostfilter, .bc_argsize = sizeof(struct ifbrhostfilter),
+ .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
+ { .bc_func = bridge_ioctl_shostfilter, .bc_argsize = sizeof(struct ifbrhostfilter),
+ .bc_flags = BC_F_COPYIN | BC_F_SUSER },
- { bridge_ioctl_ghostfilter, sizeof (struct ifbrhostfilter),
- BC_F_COPYIN|BC_F_COPYOUT },
- { bridge_ioctl_shostfilter, sizeof (struct ifbrhostfilter),
- BC_F_COPYIN|BC_F_SUSER },
+ { .bc_func = bridge_ioctl_gmnelist64, .bc_argsize = sizeof(struct ifbrmnelist64),
+ .bc_flags = BC_F_COPYIN | BC_F_COPYOUT },
};
static const unsigned int bridge_control_table_size =
- sizeof (bridge_control_table32) / sizeof (bridge_control_table32[0]);
+ sizeof(bridge_control_table32) / sizeof(bridge_control_table32[0]);
static LIST_HEAD(, bridge_softc) bridge_list =
- LIST_HEAD_INITIALIZER(bridge_list);
+ LIST_HEAD_INITIALIZER(bridge_list);
static lck_grp_t *bridge_lock_grp = NULL;
static lck_attr_t *bridge_lock_attr = NULL;
-static if_clone_t bridge_cloner = NULL;
+#define BRIDGENAME "bridge"
+#define BRIDGES_MAX IF_MAXUNIT
+#define BRIDGE_ZONE_MAX_ELEM MIN(IFNETS_MAX, BRIDGES_MAX)
+
+static struct if_clone bridge_cloner =
+ IF_CLONE_INITIALIZER(BRIDGENAME, bridge_clone_create, bridge_clone_destroy,
+ 0, BRIDGES_MAX, BRIDGE_ZONE_MAX_ELEM, sizeof(struct bridge_softc));
static int if_bridge_txstart = 0;
SYSCTL_INT(_net_link_bridge, OID_AUTO, txstart, CTLFLAG_RW | CTLFLAG_LOCKED,
- &if_bridge_txstart, 0, "Bridge interface uses TXSTART model");
+ &if_bridge_txstart, 0, "Bridge interface uses TXSTART model");
#if BRIDGE_DEBUG
static int if_bridge_debug = 0;
SYSCTL_INT(_net_link_bridge, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
- &if_bridge_debug, 0, "Bridge debug");
+ &if_bridge_debug, 0, "Bridge debug");
+
+static int if_bridge_segmentation = 1;
+SYSCTL_INT(_net_link_bridge, OID_AUTO, segmentation,
+ CTLFLAG_RW | CTLFLAG_LOCKED,
+ &if_bridge_segmentation, 0, "Bridge interface enable segmentation");
static void printf_ether_header(struct ether_header *);
static void printf_mbuf_data(mbuf_t, size_t, size_t);
_BRIDGE_LOCK(sc);
sc->lock_lr[sc->next_lock_lr] = lr_saved;
- sc->next_lock_lr = (sc->next_lock_lr+1) % SO_LCKDBG_MAX;
+ sc->next_lock_lr = (sc->next_lock_lr + 1) % SO_LCKDBG_MAX;
}
static void
BRIDGE_LOCK_ASSERT_HELD(sc);
sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
- sc->next_unlock_lr = (sc->next_unlock_lr+1) % SO_LCKDBG_MAX;
+ sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;
_BRIDGE_UNLOCK(sc);
}
BRIDGE_LOCK_ASSERT_HELD(sc);
- if (sc->sc_iflist_xcnt > 0)
+ if (sc->sc_iflist_xcnt > 0) {
error = EBUSY;
- else
+ } else {
sc->sc_iflist_ref++;
+ }
sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
- sc->next_unlock_lr = (sc->next_unlock_lr+1) % SO_LCKDBG_MAX;
+ sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;
_BRIDGE_UNLOCK(sc);
- return (error);
+ return error;
}
static void
_BRIDGE_LOCK(sc);
sc->lock_lr[sc->next_lock_lr] = lr_saved;
- sc->next_lock_lr = (sc->next_lock_lr+1) % SO_LCKDBG_MAX;
+ sc->next_lock_lr = (sc->next_lock_lr + 1) % SO_LCKDBG_MAX;
sc->sc_iflist_ref--;
sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
- sc->next_unlock_lr = (sc->next_unlock_lr+1) % SO_LCKDBG_MAX;
+ sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;
if ((sc->sc_iflist_xcnt > 0) && (sc->sc_iflist_ref == 0)) {
_BRIDGE_UNLOCK(sc);
wakeup(&sc->sc_cv);
- } else
+ } else {
_BRIDGE_UNLOCK(sc);
+ }
}
static void
sc->sc_iflist_xcnt++;
while (sc->sc_iflist_ref > 0) {
sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
- sc->next_unlock_lr = (sc->next_unlock_lr+1) % SO_LCKDBG_MAX;
+ sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;
msleep(&sc->sc_cv, &sc->sc_mtx, PZERO, "BRIDGE_XLOCK", NULL);
sc->lock_lr[sc->next_lock_lr] = lr_saved;
- sc->next_lock_lr = (sc->next_lock_lr+1) % SO_LCKDBG_MAX;
+ sc->next_lock_lr = (sc->next_lock_lr + 1) % SO_LCKDBG_MAX;
}
}
void
printf_mbuf_pkthdr(mbuf_t m, const char *prefix, const char *suffix)
{
- if (m)
+ if (m) {
printf("%spktlen: %u rcvif: 0x%llx header: 0x%llx "
"nextpkt: 0x%llx%s",
prefix ? prefix : "", (unsigned int)mbuf_pkthdr_len(m),
(uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m)),
(uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_header(m)),
(uint64_t)VM_KERNEL_ADDRPERM(mbuf_nextpkt(m)),
suffix ? suffix : "");
- else
+ } else {
printf("%s<NULL>%s\n", prefix, suffix);
+ }
}
void
(uint64_t)VM_KERNEL_ADDRPERM(mbuf_datastart(m)),
(uint64_t)VM_KERNEL_ADDRPERM(mbuf_next(m)),
!suffix || (mbuf_flags(m) & MBUF_PKTHDR) ? "" : suffix);
- if ((mbuf_flags(m) & MBUF_PKTHDR))
+ if ((mbuf_flags(m) & MBUF_PKTHDR)) {
printf_mbuf_pkthdr(m, " ", suffix);
- } else
+ }
+ } else {
printf("%s<NULL>%s\n", prefix, suffix);
+ }
}
void
printf_mbuf_data(mbuf_t m, size_t offset, size_t len)
{
- mbuf_t n;
- size_t i, j;
- size_t pktlen, mlen, maxlen;
- unsigned char *ptr;
+ mbuf_t n;
+ size_t i, j;
+ size_t pktlen, mlen, maxlen;
+ unsigned char *ptr;
pktlen = mbuf_pkthdr_len(m);
- if (offset > pktlen)
+ if (offset > pktlen) {
return;
+ }
maxlen = (pktlen - offset > len) ? len : pktlen - offset;
n = m;
for (i = 0, j = 0; i < maxlen; i++, j++) {
if (j >= mlen) {
n = mbuf_next(n);
- if (n == 0)
+ if (n == 0) {
break;
+ }
ptr = mbuf_data(n);
mlen = mbuf_len(n);
j = 0;
{
int i;
uint32_t sdl_buffer[offsetof(struct sockaddr_dl, sdl_data) +
- IFNAMSIZ + ETHER_ADDR_LEN];
+ IFNAMSIZ + ETHER_ADDR_LEN];
struct sockaddr_dl *sdl = (struct sockaddr_dl *)sdl_buffer;
- memset(sdl, 0, sizeof (sdl_buffer));
+ memset(sdl, 0, sizeof(sdl_buffer));
sdl->sdl_family = AF_LINK;
sdl->sdl_nlen = strlen(sc->sc_if_xname);
sdl->sdl_alen = ETHER_ADDR_LEN;
sdl->sdl_family, sdl->sdl_type, sdl->sdl_nlen,
sdl->sdl_alen, sdl->sdl_slen);
#endif
- for (i = 0; i < sdl->sdl_alen; i++)
+ for (i = 0; i < sdl->sdl_alen; i++) {
printf("%s%x", i ? ":" : "", (CONST_LLADDR(sdl))[i]);
+ }
printf("\n");
}
+static boolean_t
+bridge_debug_flag_is_set(uint32_t flag)
+{
+ return (if_bridge_debug & flag) != 0;
+}
+
#endif /* BRIDGE_DEBUG */
/*
#pragma unused(n)
int error;
lck_grp_attr_t *lck_grp_attr = NULL;
- struct ifnet_clone_params ifnet_clone_params;
-
- bridge_rtnode_pool = zinit(sizeof (struct bridge_rtnode),
- 1024 * sizeof (struct bridge_rtnode), 0, "bridge_rtnode");
- zone_change(bridge_rtnode_pool, Z_CALLERACCT, FALSE);
lck_grp_attr = lck_grp_attr_alloc_init();
bstp_sys_init();
#endif /* BRIDGESTP */
- ifnet_clone_params.ifc_name = "bridge";
- ifnet_clone_params.ifc_create = bridge_clone_create;
- ifnet_clone_params.ifc_destroy = bridge_clone_destroy;
-
- error = ifnet_clone_attach(&ifnet_clone_params, &bridge_cloner);
- if (error != 0)
+ error = if_clone_attach(&bridge_cloner);
+ if (error != 0) {
printf("%s: ifnet_clone_attach failed %d\n", __func__, error);
-
- return (error);
-}
-
-#if defined(PFIL_HOOKS)
-/*
- * handler for net.link.bridge.pfil_ipfw
- */
-static int
-sysctl_pfil_ipfw SYSCTL_HANDLER_ARGS
-{
-#pragma unused(arg1, arg2)
- int enable = pfil_ipfw;
- int error;
-
- error = sysctl_handle_int(oidp, &enable, 0, req);
- enable = (enable) ? 1 : 0;
-
- if (enable != pfil_ipfw) {
- pfil_ipfw = enable;
-
- /*
- * Disable pfil so that ipfw doesnt run twice, if the user
- * really wants both then they can re-enable pfil_bridge and/or
- * pfil_member. Also allow non-ip packets as ipfw can filter by
- * layer2 type.
- */
- if (pfil_ipfw) {
- pfil_onlyip = 0;
- pfil_bridge = 0;
- pfil_member = 0;
- }
}
- return (error);
+ return error;
}
-SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW,
- &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW");
-#endif /* PFIL_HOOKS */
static errno_t
bridge_ifnet_set_attrs(struct ifnet * ifp)
{
- errno_t error;
+ errno_t error;
error = ifnet_set_mtu(ifp, ETHERMTU);
if (error != 0) {
printf("%s: ifnet_set_flags failed %d\n", __func__, error);
goto done;
}
- done:
- return (error);
+done:
+ return error;
}
/*
{
#pragma unused(params)
struct ifnet *ifp = NULL;
- struct bridge_softc *sc, *sc2;
+ struct bridge_softc *sc = NULL;
+ struct bridge_softc *sc2 = NULL;
struct ifnet_init_eparams init_params;
errno_t error = 0;
uint8_t eth_hostid[ETHER_ADDR_LEN];
int fb, retry, has_hostid;
- sc = _MALLOC(sizeof (*sc), M_DEVBUF, M_WAITOK | M_ZERO);
+ sc = if_clone_softc_allocate(&bridge_cloner);
+ if (sc == NULL) {
+ error = ENOMEM;
+ goto done;
+ }
lck_mtx_init(&sc->sc_mtx, bridge_lock_grp, bridge_lock_attr);
sc->sc_brtmax = BRIDGE_RTABLE_MAX;
+ sc->sc_mne_max = BRIDGE_MAC_NAT_ENTRY_MAX;
sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
- sc->sc_filter_flags = IFBF_FILT_DEFAULT;
-#ifndef BRIDGE_IPF
- /*
- * For backwards compatibility with previous behaviour...
- * Switch off filtering on the bridge itself if BRIDGE_IPF is
- * not defined.
- */
- sc->sc_filter_flags &= ~IFBF_FILT_USEIPF;
-#endif
-
- if (bridge_bsd_mode != 0) {
- bridge_set_bsd_mode(sc);
- }
+ sc->sc_filter_flags = 0;
TAILQ_INIT(&sc->sc_iflist);
/* use the interface name as the unique id for ifp recycle */
- snprintf(sc->sc_if_xname, sizeof (sc->sc_if_xname), "%s%d",
+ snprintf(sc->sc_if_xname, sizeof(sc->sc_if_xname), "%s%d",
ifc->ifc_name, unit);
- bzero(&init_params, sizeof (init_params));
- init_params.ver = IFNET_INIT_CURRENT_VERSION;
- init_params.len = sizeof (init_params);
- if (bridge_in_bsd_mode(sc)) {
- /* Initialize our routing table. */
- error = bridge_rtable_init(sc);
- if (error != 0) {
- printf("%s: bridge_rtable_init failed %d\n",
- __func__, error);
- goto done;
- }
- TAILQ_INIT(&sc->sc_spanlist);
- if (if_bridge_txstart) {
- init_params.start = bridge_start;
- } else {
- init_params.flags = IFNET_INIT_LEGACY;
- init_params.output = bridge_output;
- }
- init_params.set_bpf_tap = bridge_set_bpf_tap;
- }
- init_params.uniqueid = sc->sc_if_xname;
- init_params.uniqueid_len = strlen(sc->sc_if_xname);
- init_params.sndq_maxlen = IFQ_MAXLEN;
- init_params.name = ifc->ifc_name;
- init_params.unit = unit;
- init_params.family = IFNET_FAMILY_ETHERNET;
- init_params.type = IFT_BRIDGE;
- init_params.demux = ether_demux;
- init_params.add_proto = ether_add_proto;
- init_params.del_proto = ether_del_proto;
- init_params.check_multi = ether_check_multi;
- init_params.framer_extended = ether_frameout_extended;
- init_params.softc = sc;
- init_params.ioctl = bridge_ioctl;
- init_params.detach = bridge_detach;
- init_params.broadcast_addr = etherbroadcastaddr;
- init_params.broadcast_len = ETHER_ADDR_LEN;
-
- if (bridge_in_bsd_mode(sc)) {
- error = ifnet_allocate_extended(&init_params, &ifp);
- if (error != 0) {
- printf("%s: ifnet_allocate failed %d\n",
- __func__, error);
- goto done;
- }
- sc->sc_ifp = ifp;
- error = bridge_ifnet_set_attrs(ifp);
- if (error != 0) {
- printf("%s: bridge_ifnet_set_attrs failed %d\n",
- __func__, error);
- goto done;
- }
+ bzero(&init_params, sizeof(init_params));
+ init_params.ver = IFNET_INIT_CURRENT_VERSION;
+ init_params.len = sizeof(init_params);
+ /* Initialize our routing table. */
+ error = bridge_rtable_init(sc);
+ if (error != 0) {
+ printf("%s: bridge_rtable_init failed %d\n",
+ __func__, error);
+ goto done;
+ }
+ TAILQ_INIT(&sc->sc_spanlist);
+ if (if_bridge_txstart) {
+ init_params.start = bridge_start;
+ } else {
+ init_params.flags = IFNET_INIT_LEGACY;
+ init_params.output = bridge_output;
+ }
+ init_params.set_bpf_tap = bridge_set_bpf_tap;
+ init_params.uniqueid = sc->sc_if_xname;
+ init_params.uniqueid_len = strlen(sc->sc_if_xname);
+ init_params.sndq_maxlen = IFQ_MAXLEN;
+ init_params.name = ifc->ifc_name;
+ init_params.unit = unit;
+ init_params.family = IFNET_FAMILY_ETHERNET;
+ init_params.type = IFT_BRIDGE;
+ init_params.demux = ether_demux;
+ init_params.add_proto = ether_add_proto;
+ init_params.del_proto = ether_del_proto;
+ init_params.check_multi = ether_check_multi;
+ init_params.framer_extended = ether_frameout_extended;
+ init_params.softc = sc;
+ init_params.ioctl = bridge_ioctl;
+ init_params.detach = bridge_detach;
+ init_params.broadcast_addr = etherbroadcastaddr;
+ init_params.broadcast_len = ETHER_ADDR_LEN;
+
+ error = ifnet_allocate_extended(&init_params, &ifp);
+ if (error != 0) {
+ printf("%s: ifnet_allocate failed %d\n",
+ __func__, error);
+ goto done;
+ }
+ LIST_INIT(&sc->sc_mne_list);
+ LIST_INIT(&sc->sc_mne_list_v6);
+ sc->sc_ifp = ifp;
+ error = bridge_ifnet_set_attrs(ifp);
+ if (error != 0) {
+ printf("%s: bridge_ifnet_set_attrs failed %d\n",
+ __func__, error);
+ goto done;
}
-
/*
* Generate an ethernet address with a locally administered address.
*
*/
fb = 0;
has_hostid = (uuid_get_ethernet(ð_hostid[0]) == 0);
- for (retry = 1; retry != 0; ) {
+ for (retry = 1; retry != 0;) {
if (fb || has_hostid == 0) {
read_frandom(&sc->sc_defaddr, ETHER_ADDR_LEN);
sc->sc_defaddr[0] &= ~1; /* clear multicast bit */
ETHER_ADDR_LEN);
sc->sc_defaddr[0] &= ~1; /* clear multicast bit */
sc->sc_defaddr[0] |= 2; /* set the LAA bit */
- sc->sc_defaddr[3] = /* stir it up a bit */
+ sc->sc_defaddr[3] = /* stir it up a bit */
((sc->sc_defaddr[3] & 0x0f) << 4) |
((sc->sc_defaddr[3] & 0xf0) >> 4);
/*
lck_mtx_lock(&bridge_list_mtx);
LIST_FOREACH(sc2, &bridge_list, sc_list) {
if (memcmp(sc->sc_defaddr,
- IF_LLADDR(sc2->sc_ifp), ETHER_ADDR_LEN) == 0)
+ IF_LLADDR(sc2->sc_ifp), ETHER_ADDR_LEN) == 0) {
retry = 1;
+ }
}
lck_mtx_unlock(&bridge_list_mtx);
}
sc->sc_flags &= ~SCF_MEDIA_ACTIVE;
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_LIFECYCLE)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_LIFECYCLE)) {
link_print(sc);
+ }
#endif
- if (bridge_in_bsd_mode(sc)) {
- error = ifnet_attach(ifp, NULL);
- if (error != 0) {
- printf("%s: ifnet_attach failed %d\n", __func__, error);
- goto done;
- }
+ error = ifnet_attach(ifp, NULL);
+ if (error != 0) {
+ printf("%s: ifnet_attach failed %d\n", __func__, error);
+ goto done;
}
error = ifnet_set_lladdr_and_type(ifp, sc->sc_defaddr, ETHER_ADDR_LEN,
goto done;
}
- if (bridge_in_bsd_mode(sc)) {
- ifnet_set_offload(ifp,
- IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP |
- IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 | IFNET_MULTIPAGES);
- error = bridge_set_tso(sc);
- if (error != 0) {
- printf("%s: bridge_set_tso failed %d\n",
- __func__, error);
- goto done;
- }
+ ifnet_set_offload(ifp,
+ IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP |
+ IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 | IFNET_MULTIPAGES);
+ error = bridge_set_tso(sc);
+ if (error != 0) {
+ printf("%s: bridge_set_tso failed %d\n",
+ __func__, error);
+ goto done;
+ }
#if BRIDGESTP
- bstp_attach(&sc->sc_stp, &bridge_ops);
+ bstp_attach(&sc->sc_stp, &bridge_ops);
#endif /* BRIDGESTP */
- }
lck_mtx_lock(&bridge_list_mtx);
LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
lck_mtx_unlock(&bridge_list_mtx);
/* attach as ethernet */
- error = bpf_attach(ifp, DLT_EN10MB, sizeof (struct ether_header),
+ error = bpf_attach(ifp, DLT_EN10MB, sizeof(struct ether_header),
NULL, NULL);
done:
if (error != 0) {
printf("%s failed error %d\n", __func__, error);
- /* Cleanup TBD */
+ /* TBD: clean up sc, sc_rthash, etc. */
}
- return (error);
+ return error;
}
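/*
 * bridge_clone_create() runs when a bridge interface is cloned, i.e. the
 * equivalent of "ifconfig bridge0 create" from userland.  A minimal sketch
 * of that request, assuming the standard SIOCIFCREATE ioctl ("bridge0" and
 * the socket setup are only illustrative):
 *
 *	struct ifreq ifr;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	bzero(&ifr, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "bridge0", sizeof(ifr.ifr_name));
 *	ioctl(s, SIOCIFCREATE, &ifr);
 */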
/*
BRIDGE_LOCK(sc);
if ((sc->sc_flags & SCF_DETACHING)) {
BRIDGE_UNLOCK(sc);
- return (0);
+ return 0;
}
sc->sc_flags |= SCF_DETACHING;
bridge_ifstop(ifp, 1);
- if (bridge_in_bsd_mode(sc)) {
- bridge_cancel_delayed_call(&sc->sc_resize_call);
+ bridge_cancel_delayed_call(&sc->sc_resize_call);
- bridge_cleanup_delayed_call(&sc->sc_resize_call);
- bridge_cleanup_delayed_call(&sc->sc_aging_timer);
- }
+ bridge_cleanup_delayed_call(&sc->sc_resize_call);
+ bridge_cleanup_delayed_call(&sc->sc_aging_timer);
error = ifnet_set_flags(ifp, 0, IFF_UP);
if (error != 0) {
printf("%s: ifnet_set_flags failed %d\n", __func__, error);
}
- while ((bif = TAILQ_FIRST(&sc->sc_iflist)) != NULL)
+ while ((bif = TAILQ_FIRST(&sc->sc_iflist)) != NULL) {
bridge_delete_member(sc, bif, 0);
+ }
- if (bridge_in_bsd_mode(sc)) {
- while ((bif = TAILQ_FIRST(&sc->sc_spanlist)) != NULL) {
- bridge_delete_span(sc, bif);
- }
- BRIDGE_UNLOCK(sc);
+ while ((bif = TAILQ_FIRST(&sc->sc_spanlist)) != NULL) {
+ bridge_delete_span(sc, bif);
}
+ BRIDGE_UNLOCK(sc);
error = ifnet_detach(ifp);
if (error != 0) {
panic("%s: ifnet_detach(%p) failed %d\n",
- __func__, ifp, error);
- }
- return (0);
-}
-
-#define DRVSPEC do { \
- if (ifd->ifd_cmd >= bridge_control_table_size) { \
- error = EINVAL; \
- break; \
- } \
- bc = &bridge_control_table[ifd->ifd_cmd]; \
- \
- if (cmd == SIOCGDRVSPEC && \
- (bc->bc_flags & BC_F_COPYOUT) == 0) { \
- error = EINVAL; \
- break; \
- } else if (cmd == SIOCSDRVSPEC && \
- (bc->bc_flags & BC_F_COPYOUT) != 0) { \
- error = EINVAL; \
- break; \
- } \
- \
- if (bc->bc_flags & BC_F_SUSER) { \
- error = kauth_authorize_generic(kauth_cred_get(), \
- KAUTH_GENERIC_ISSUSER); \
- if (error) \
- break; \
- } \
- \
- if (ifd->ifd_len != bc->bc_argsize || \
- ifd->ifd_len > sizeof (args)) { \
- error = EINVAL; \
- break; \
- } \
- \
- bzero(&args, sizeof (args)); \
- if (bc->bc_flags & BC_F_COPYIN) { \
- error = copyin(ifd->ifd_data, &args, ifd->ifd_len); \
- if (error) \
- break; \
- } \
- \
- BRIDGE_LOCK(sc); \
- error = (*bc->bc_func)(sc, &args); \
- BRIDGE_UNLOCK(sc); \
- if (error) \
- break; \
- \
- if (bc->bc_flags & BC_F_COPYOUT) \
- error = copyout(&args, ifd->ifd_data, ifd->ifd_len); \
+ __func__, ifp, error);
+ }
+ return 0;
+}
+
+#define DRVSPEC do { \
+ if (ifd->ifd_cmd >= bridge_control_table_size) { \
+ error = EINVAL; \
+ break; \
+ } \
+ bc = &bridge_control_table[ifd->ifd_cmd]; \
+ \
+ if (cmd == SIOCGDRVSPEC && \
+ (bc->bc_flags & BC_F_COPYOUT) == 0) { \
+ error = EINVAL; \
+ break; \
+ } else if (cmd == SIOCSDRVSPEC && \
+ (bc->bc_flags & BC_F_COPYOUT) != 0) { \
+ error = EINVAL; \
+ break; \
+ } \
+ \
+ if (bc->bc_flags & BC_F_SUSER) { \
+ error = kauth_authorize_generic(kauth_cred_get(), \
+ KAUTH_GENERIC_ISSUSER); \
+ if (error) \
+ break; \
+ } \
+ \
+ if (ifd->ifd_len != bc->bc_argsize || \
+ ifd->ifd_len > sizeof (args)) { \
+ error = EINVAL; \
+ break; \
+ } \
+ \
+ bzero(&args, sizeof (args)); \
+ if (bc->bc_flags & BC_F_COPYIN) { \
+ error = copyin(ifd->ifd_data, &args, ifd->ifd_len); \
+ if (error) \
+ break; \
+ } \
+ \
+ BRIDGE_LOCK(sc); \
+ error = (*bc->bc_func)(sc, &args); \
+ BRIDGE_UNLOCK(sc); \
+ if (error) \
+ break; \
+ \
+ if (bc->bc_flags & BC_F_COPYOUT) \
+ error = copyout(&args, ifd->ifd_data, ifd->ifd_len); \
} while (0)
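/*
 * The DRVSPEC block above implements the SIOCSDRVSPEC/SIOCGDRVSPEC
 * dispatch: it validates the command index against bridge_control_table,
 * checks the copy direction and (when BC_F_SUSER is set) the caller's
 * privilege, bounds the argument length, copies the argument in, runs the
 * handler under the bridge lock, and copies the result back out for "get"
 * commands.  A minimal userland sketch, assuming the standard struct ifdrv
 * from <net/if.h>; 's' is an already-open socket and SOME_BRDG_CMD is a
 * placeholder for a command index, not a name from this file:
 *
 *	struct ifdrv ifd;
 *	struct ifbreq breq;
 *
 *	bzero(&ifd, sizeof(ifd));
 *	bzero(&breq, sizeof(breq));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = SOME_BRDG_CMD;
 *	ifd.ifd_len = sizeof(breq);
 *	ifd.ifd_data = &breq;
 *	ioctl(s, SIOCGDRVSPEC, &ifd);
 */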
/*
BRIDGE_LOCK_ASSERT_NOTHELD(sc);
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_IOCTL)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_IOCTL)) {
printf("%s: ifp %s cmd 0x%08lx (%c%c [%lu] %c %lu)\n",
__func__, ifp->if_xname, cmd, (cmd & IOC_IN) ? 'I' : ' ',
(cmd & IOC_OUT) ? 'O' : ' ', IOCPARM_LEN(cmd),
(char)IOCGROUP(cmd), cmd & 0xff);
+ }
#endif /* BRIDGE_DEBUG */
switch (cmd) {
-
case SIOCSIFADDR:
case SIOCAIFADDR:
ifnet_set_flags(ifp, IFF_UP, IFF_UP);
if (user_addr != USER_ADDR_NULL) {
error = copyout(&ifmr->ifm_current, user_addr,
- sizeof (int));
+ sizeof(int));
}
break;
}
case SIOCSIFLLADDR:
error = ifnet_set_lladdr(ifp, ifr->ifr_addr.sa_data,
ifr->ifr_addr.sa_len);
- if (error != 0)
+ if (error != 0) {
printf("%s: SIOCSIFLLADDR error %d\n", ifp->if_xname,
error);
+ }
break;
case SIOCSIFMTU:
break;
}
}
- if (!error)
+ if (!error) {
sc->sc_ifp->if_mtu = ifr->ifr_mtu;
+ }
BRIDGE_UNLOCK(sc);
break;
default:
error = ether_ioctl(ifp, cmd, data);
#if BRIDGE_DEBUG
- if (error != 0 && error != EOPNOTSUPP)
+ if (error != 0 && error != EOPNOTSUPP) {
printf("%s: ifp %s cmd 0x%08lx "
"(%c%c [%lu] %c %lu) failed error: %d\n",
__func__, ifp->if_xname, cmd,
(cmd & IOC_OUT) ? 'O' : ' ',
IOCPARM_LEN(cmd), (char)IOCGROUP(cmd),
cmd & 0xff, error);
+ }
#endif /* BRIDGE_DEBUG */
break;
}
BRIDGE_LOCK_ASSERT_NOTHELD(sc);
- return (error);
+ return error;
}
#if HAS_IF_CAP
bridge_set_ifcap(sc, bif, enabled);
}
-
}
static void
struct ifreq ifr;
int error;
- bzero(&ifr, sizeof (ifr));
+ bzero(&ifr, sizeof(ifr));
ifr.ifr_reqcap = set;
if (ifp->if_capenable != set) {
IFF_LOCKGIANT(ifp);
error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
IFF_UNLOCKGIANT(ifp);
- if (error)
+ if (error) {
printf("%s: %s error setting interface capabilities "
"on %s\n", __func__, sc->sc_ifp->if_xname,
ifp->if_xname);
+ }
}
}
#endif /* HAS_IF_CAP */
TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
ifnet_t ifp = bif->bif_ifp;
- if (ifp == NULL)
+ if (ifp == NULL) {
continue;
+ }
if (offload & IFNET_TSO_IPV4) {
if (ifp->if_hwassist & IFNET_TSO_IPV4) {
- if (tso_v4_mtu > ifp->if_tso_v4_mtu)
+ if (tso_v4_mtu > ifp->if_tso_v4_mtu) {
tso_v4_mtu = ifp->if_tso_v4_mtu;
+ }
} else {
offload &= ~IFNET_TSO_IPV4;
tso_v4_mtu = 0;
}
if (offload & IFNET_TSO_IPV6) {
if (ifp->if_hwassist & IFNET_TSO_IPV6) {
- if (tso_v6_mtu > ifp->if_tso_v6_mtu)
+ if (tso_v6_mtu > ifp->if_tso_v6_mtu) {
tso_v6_mtu = ifp->if_tso_v6_mtu;
+ }
} else {
offload &= ~IFNET_TSO_IPV6;
tso_v6_mtu = 0;
error = ifnet_set_offload(sc->sc_ifp, offload);
if (error != 0) {
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_LIFECYCLE)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_LIFECYCLE)) {
printf("%s: ifnet_set_offload(%s, 0x%x) "
"failed %d\n", __func__,
sc->sc_ifp->if_xname, offload, error);
+ }
#endif /* BRIDGE_DEBUG */
goto done;
}
* as large as the interface MTU
*/
if (sc->sc_ifp->if_hwassist & IFNET_TSO_IPV4) {
- if (tso_v4_mtu < sc->sc_ifp->if_mtu)
+ if (tso_v4_mtu < sc->sc_ifp->if_mtu) {
tso_v4_mtu = sc->sc_ifp->if_mtu;
+ }
error = ifnet_set_tso_mtu(sc->sc_ifp, AF_INET,
tso_v4_mtu);
if (error != 0) {
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_LIFECYCLE)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_LIFECYCLE)) {
printf("%s: ifnet_set_tso_mtu(%s, "
"AF_INET, %u) failed %d\n",
__func__, sc->sc_ifp->if_xname,
tso_v4_mtu, error);
+ }
#endif /* BRIDGE_DEBUG */
goto done;
}
}
if (sc->sc_ifp->if_hwassist & IFNET_TSO_IPV6) {
- if (tso_v6_mtu < sc->sc_ifp->if_mtu)
+ if (tso_v6_mtu < sc->sc_ifp->if_mtu) {
tso_v6_mtu = sc->sc_ifp->if_mtu;
+ }
error = ifnet_set_tso_mtu(sc->sc_ifp, AF_INET6,
tso_v6_mtu);
if (error != 0) {
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_LIFECYCLE)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_LIFECYCLE)) {
printf("%s: ifnet_set_tso_mtu(%s, "
"AF_INET6, %u) failed %d\n",
__func__, sc->sc_ifp->if_xname,
tso_v6_mtu, error);
+ }
#endif /* BRIDGE_DEBUG */
goto done;
}
}
}
done:
- return (error);
+ return error;
}
/*
TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
ifp = bif->bif_ifp;
- if (strcmp(ifp->if_xname, name) == 0)
- return (bif);
+ if (strcmp(ifp->if_xname, name) == 0) {
+ return bif;
+ }
}
- return (NULL);
+ return NULL;
}
/*
BRIDGE_LOCK_ASSERT_HELD(sc);
TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
- if (bif->bif_ifp == member_ifp)
- return (bif);
+ if (bif->bif_ifp == member_ifp) {
+ return bif;
+ }
}
- return (NULL);
+ return NULL;
}
static errno_t
bridge_iff_input(void *cookie, ifnet_t ifp, protocol_family_t protocol,
- mbuf_t *data, char **frame_ptr)
+ mbuf_t *data, char **frame_ptr)
{
#pragma unused(protocol)
errno_t error = 0;
size_t frmlen = 0;
mbuf_t m = *data;
- if ((m->m_flags & M_PROTO1))
+ if ((m->m_flags & M_PROTO1)) {
goto out;
+ }
if (*frame_ptr >= (char *)mbuf_datastart(m) &&
*frame_ptr <= (char *)mbuf_data(m)) {
frmlen = (char *)mbuf_data(m) - *frame_ptr;
}
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_INPUT) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_INPUT)) {
printf("%s: %s from %s m 0x%llx data 0x%llx frame 0x%llx %s "
"frmlen %lu\n", __func__, sc->sc_ifp->if_xname,
ifp->if_xname, (uint64_t)VM_KERNEL_ADDRPERM(m),
(uint64_t)VM_KERNEL_ADDRPERM(*frame_ptr),
included ? "inside" : "outside", frmlen);
- if (if_bridge_debug & BR_DBGF_MBUF) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MBUF)) {
printf_mbuf(m, "bridge_iff_input[", "\n");
printf_ether_header((struct ether_header *)
(void *)*frame_ptr);
}
}
#endif /* BRIDGE_DEBUG */
+ if (included == 0) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_INPUT)) {
+ printf("%s: frame_ptr outside mbuf\n", __func__);
+ }
+ goto out;
+ }
/* Move data pointer to start of frame to the link layer header */
- if (included) {
- (void) mbuf_setdata(m, (char *)mbuf_data(m) - frmlen,
- mbuf_len(m) + frmlen);
- (void) mbuf_pkthdr_adjustlen(m, frmlen);
- } else {
- printf("%s: frame_ptr outside mbuf\n", __func__);
+ (void) mbuf_setdata(m, (char *)mbuf_data(m) - frmlen,
+ mbuf_len(m) + frmlen);
+ (void) mbuf_pkthdr_adjustlen(m, frmlen);
+
+ /* make sure we can access the ethernet header */
+ if (mbuf_pkthdr_len(m) < sizeof(struct ether_header)) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_INPUT)) {
+ printf("%s: short frame %lu < %lu\n", __func__,
+ mbuf_pkthdr_len(m), sizeof(struct ether_header));
+ }
goto out;
}
+ if (mbuf_len(m) < sizeof(struct ether_header)) {
+ error = mbuf_pullup(data, sizeof(struct ether_header));
+ if (error != 0) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_INPUT)) {
+ printf("%s: mbuf_pullup(%lu) failed %d\n",
+ __func__, sizeof(struct ether_header),
+ error);
+ }
+ error = EJUSTRETURN;
+ goto out;
+ }
+ if (m != *data) {
+ m = *data;
+ *frame_ptr = mbuf_data(m);
+ }
+ }
- error = bridge_input(ifp, m, *frame_ptr);
+ error = bridge_input(ifp, data);
/* Adjust packet back to original */
if (error == 0) {
+ /* bridge_input might have modified *data */
+ if (*data != m) {
+ m = *data;
+ *frame_ptr = mbuf_data(m);
+ }
(void) mbuf_setdata(m, (char *)mbuf_data(m) + frmlen,
mbuf_len(m) - frmlen);
(void) mbuf_pkthdr_adjustlen(m, -frmlen);
}
#if BRIDGE_DEBUG
- if ((if_bridge_debug & BR_DBGF_INPUT) &&
- (if_bridge_debug & BR_DBGF_MBUF)) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_INPUT) &&
+ IF_BRIDGE_DEBUG(BR_DBGF_MBUF)) {
printf("\n");
printf_mbuf(m, "bridge_iff_input]", "\n");
}
out:
BRIDGE_LOCK_ASSERT_NOTHELD(sc);
- return (error);
+ return error;
}
-#if BRIDGE_MEMBER_OUT_FILTER
static errno_t
bridge_iff_output(void *cookie, ifnet_t ifp, protocol_family_t protocol,
- mbuf_t *data)
+ mbuf_t *data)
{
#pragma unused(protocol)
errno_t error = 0;
struct bridge_softc *sc = bif->bif_sc;
mbuf_t m = *data;
- if ((m->m_flags & M_PROTO1))
+ if ((m->m_flags & M_PROTO1)) {
goto out;
+ }
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_OUTPUT) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_OUTPUT)) {
printf("%s: %s from %s m 0x%llx data 0x%llx\n", __func__,
sc->sc_ifp->if_xname, ifp->if_xname,
(uint64_t)VM_KERNEL_ADDRPERM(m),
}
#endif /* BRIDGE_DEBUG */
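	/*
	 * EJUSTRETURN is excluded from the error log below because, in the
	 * interface filter KPI, it indicates the output path took ownership
	 * of the packet and processing should simply stop.
	 */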
- error = bridge_member_output(sc, ifp, m);
- if (error != 0) {
+ error = bridge_member_output(sc, ifp, data);
+ if (error != 0 && error != EJUSTRETURN) {
printf("%s: bridge_member_output failed error %d\n", __func__,
error);
}
-
out:
BRIDGE_LOCK_ASSERT_NOTHELD(sc);
- return (error);
+ return error;
}
-#endif /* BRIDGE_MEMBER_OUT_FILTER */
static void
bridge_iff_event(void *cookie, ifnet_t ifp, protocol_family_t protocol,
- const struct kev_msg *event_msg)
+ const struct kev_msg *event_msg)
{
#pragma unused(protocol)
struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
event_msg->kev_class == KEV_NETWORK_CLASS &&
event_msg->kev_subclass == KEV_DL_SUBCLASS) {
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_LIFECYCLE)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_LIFECYCLE)) {
printf("%s: %s event_code %u - %s\n", __func__,
ifp->if_xname, event_msg->event_code,
dlil_kev_dl_code_str(event_msg->event_code));
+ }
#endif /* BRIDGE_DEBUG */
switch (event_msg->event_code) {
- case KEV_DL_IF_DETACHING:
- case KEV_DL_IF_DETACHED: {
- bridge_ifdetach(bif, ifp);
- break;
- }
- case KEV_DL_LINK_OFF:
- case KEV_DL_LINK_ON: {
- bridge_iflinkevent(ifp);
+ case KEV_DL_IF_DETACHING:
+ case KEV_DL_IF_DETACHED: {
+ bridge_ifdetach(ifp);
+ break;
+ }
+ case KEV_DL_LINK_OFF:
+ case KEV_DL_LINK_ON: {
+ bridge_iflinkevent(ifp);
#if BRIDGESTP
- bstp_linkstate(ifp, event_msg->event_code);
+ bstp_linkstate(ifp, event_msg->event_code);
#endif /* BRIDGESTP */
- break;
- }
- case KEV_DL_SIFFLAGS: {
- if ((bif->bif_flags & BIFF_PROMISC) == 0 &&
- (ifp->if_flags & IFF_UP)) {
- errno_t error;
-
- error = ifnet_set_promiscuous(ifp, 1);
- if (error != 0) {
- printf("%s: "
- "ifnet_set_promiscuous (%s)"
- " failed %d\n",
- __func__, ifp->if_xname,
- error);
- } else {
- bif->bif_flags |= BIFF_PROMISC;
- }
+ break;
+ }
+ case KEV_DL_SIFFLAGS: {
+ if ((bif->bif_flags & BIFF_PROMISC) == 0 &&
+ (ifp->if_flags & IFF_UP)) {
+ errno_t error;
+
+ error = ifnet_set_promiscuous(ifp, 1);
+ if (error != 0) {
+ printf("%s: "
+ "ifnet_set_promiscuous (%s)"
+ " failed %d\n",
+ __func__, ifp->if_xname,
+ error);
+ } else {
+ bif->bif_flags |= BIFF_PROMISC;
}
- break;
}
- case KEV_DL_IFCAP_CHANGED: {
- BRIDGE_LOCK(sc);
- bridge_set_tso(sc);
- BRIDGE_UNLOCK(sc);
- break;
- }
- default:
- break;
+ break;
+ }
+ case KEV_DL_IFCAP_CHANGED: {
+ BRIDGE_LOCK(sc);
+ bridge_set_tso(sc);
+ BRIDGE_UNLOCK(sc);
+ break;
+ }
+ case KEV_DL_PROTO_DETACHED:
+ case KEV_DL_PROTO_ATTACHED: {
+ bridge_proto_attach_changed(ifp);
+ break;
+ }
+ default:
+ break;
}
}
}
struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_LIFECYCLE)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_LIFECYCLE)) {
printf("%s: %s\n", __func__, ifp->if_xname);
+ }
#endif /* BRIDGE_DEBUG */
- bridge_ifdetach(bif, ifp);
+ bridge_ifdetach(ifp);
_FREE(bif, M_DEVBUF);
}
static errno_t
bridge_proto_input(ifnet_t ifp, protocol_family_t protocol, mbuf_t packet,
- char *header)
+ char *header)
{
#pragma unused(protocol, packet, header)
#if BRIDGE_DEBUG
printf("%s: unexpected packet from %s\n", __func__,
ifp->if_xname);
#endif /* BRIDGE_DEBUG */
- return (0);
+ return 0;
}
static int
bridge_attach_protocol(struct ifnet *ifp)
{
- int error;
- struct ifnet_attach_proto_param reg;
+ int error;
+ struct ifnet_attach_proto_param reg;
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_LIFECYCLE)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_LIFECYCLE)) {
printf("%s: %s\n", __func__, ifp->if_xname);
+ }
#endif /* BRIDGE_DEBUG */
- bzero(®, sizeof (reg));
+ bzero(®, sizeof(reg));
reg.input = bridge_proto_input;
error = ifnet_attach_protocol(ifp, PF_BRIDGE, ®);
- if (error)
+ if (error) {
printf("%s: ifnet_attach_protocol(%s) failed, %d\n",
__func__, ifp->if_xname, error);
+ }
- return (error);
+ return error;
}
static int
bridge_detach_protocol(struct ifnet *ifp)
{
- int error;
+ int error;
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_LIFECYCLE)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_LIFECYCLE)) {
printf("%s: %s\n", __func__, ifp->if_xname);
+ }
#endif /* BRIDGE_DEBUG */
error = ifnet_detach_protocol(ifp, PF_BRIDGE);
- if (error)
+ if (error) {
printf("%s: ifnet_detach_protocol(%s) failed, %d\n",
__func__, ifp->if_xname, error);
+ }
- return (error);
+ return error;
}
/*
*/
static void
bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
- int gone)
+ int gone)
{
struct ifnet *ifs = bif->bif_ifp, *bifp = sc->sc_ifp;
int lladdr_changed = 0, error, filt_attached;
uint8_t eaddr[ETHER_ADDR_LEN];
u_int32_t event_code = 0;
- boolean_t bsd_mode;
BRIDGE_LOCK_ASSERT_HELD(sc);
VERIFY(ifs != NULL);
- bsd_mode = bridge_in_bsd_mode(sc);
-
/*
- * First, remove the member from the list first so it cannot be found anymore
+ * Remove the member from the list first so it cannot be found anymore
* when we release the bridge lock below
*/
BRIDGE_XLOCK(sc);
TAILQ_REMOVE(&sc->sc_iflist, bif, bif_next);
BRIDGE_XDROP(sc);
+ if (sc->sc_mac_nat_bif != NULL) {
+ if (bif == sc->sc_mac_nat_bif) {
+ bridge_mac_nat_disable(sc);
+ } else {
+ bridge_mac_nat_flush_entries(sc, bif);
+ }
+ }
+
if (!gone) {
switch (ifs->if_type) {
case IFT_ETHER:
break;
case IFT_GIF:
- /* currently not supported */
- /* FALLTHRU */
+ /* currently not supported */
+ /* FALLTHRU */
default:
VERIFY(0);
/* NOTREACHED */
BRIDGE_LOCK(sc);
}
#if BRIDGESTP
- if (bsd_mode && (bif->bif_ifflags & IFBIF_STP) != 0) {
+ if ((bif->bif_ifflags & IFBIF_STP) != 0) {
bstp_disable(&bif->bif_stp);
}
#endif /* BRIDGESTP */
TAILQ_FIRST(&sc->sc_iflist)->bif_ifp;
bcopy(IF_LLADDR(fif), eaddr, ETHER_ADDR_LEN);
sc->sc_ifaddr = fif;
- ifnet_reference(fif); /* for sc_ifaddr */
+ ifnet_reference(fif); /* for sc_ifaddr */
}
lladdr_changed = 1;
}
#if HAS_IF_CAP
- bridge_mutecaps(sc); /* recalculate now this interface is removed */
+ bridge_mutecaps(sc); /* recalculate now this interface is removed */
#endif /* HAS_IF_CAP */
error = bridge_set_tso(sc);
printf("%s: bridge_set_tso failed %d\n", __func__, error);
}
- if (bsd_mode) {
- bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
- }
+ bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
KASSERT(bif->bif_addrcnt == 0,
("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
*/
event_code = bridge_updatelinkstatus(sc);
- if (bsd_mode) {
- BRIDGE_UNLOCK(sc);
- }
+ BRIDGE_UNLOCK(sc);
+
if (lladdr_changed &&
- (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0)
+ (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0) {
printf("%s: ifnet_set_lladdr failed %d\n", __func__, error);
+ }
- if (event_code != 0)
+ if (event_code != 0) {
bridge_link_event(bifp, event_code);
+ }
#if BRIDGESTP
- if (bsd_mode) {
- bstp_destroy(&bif->bif_stp); /* prepare to free */
- }
+ bstp_destroy(&bif->bif_stp); /* prepare to free */
#endif /* BRIDGESTP */
- if (filt_attached)
+ if (filt_attached) {
iflt_detach(bif->bif_iff_ref);
- else
+ } else {
_FREE(bif, M_DEVBUF);
+ }
ifs->if_bridge = NULL;
ifnet_release(ifs);
uint8_t eaddr[ETHER_ADDR_LEN];
struct iff_filter iff;
u_int32_t event_code = 0;
- boolean_t bsd_mode = bridge_in_bsd_mode(sc);
+ boolean_t mac_nat = FALSE;
ifs = ifunit(req->ifbr_ifsname);
- if (ifs == NULL)
- return (ENOENT);
- if (ifs->if_ioctl == NULL) /* must be supported */
- return (EINVAL);
+ if (ifs == NULL) {
+ return ENOENT;
+ }
+ if (ifs->if_ioctl == NULL) { /* must be supported */
+ return EINVAL;
+ }
if (IFNET_IS_INTCOPROC(ifs)) {
- return (EINVAL);
+ return EINVAL;
}
- if (bsd_mode) {
- /* If it's in the span list, it can't be a member. */
- TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
- if (ifs == bif->bif_ifp)
- return (EBUSY);
+ /* If it's in the span list, it can't be a member. */
+ TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) {
+ if (ifs == bif->bif_ifp) {
+ return EBUSY;
+ }
}
- if (ifs->if_bridge == sc)
- return (EEXIST);
+ if (ifs->if_bridge == sc) {
+ return EEXIST;
+ }
- if (ifs->if_bridge != NULL)
- return (EBUSY);
+ if (ifs->if_bridge != NULL) {
+ return EBUSY;
+ }
switch (ifs->if_type) {
case IFT_ETHER:
+ if (strcmp(ifs->if_name, "en") == 0 &&
+ ifs->if_subfamily == IFNET_SUBFAMILY_WIFI &&
+ (ifs->if_eflags & IFEF_IPV4_ROUTER) == 0) {
+ /* XXX is there a better way to identify Wi-Fi STA? */
+ mac_nat = TRUE;
+ }
+ break;
case IFT_L2VLAN:
- /* permitted interface types */
break;
case IFT_GIF:
- /* currently not supported */
- /* FALLTHRU */
+ /* currently not supported */
+ /* FALLTHRU */
default:
- return (EINVAL);
+ return EINVAL;
}
- bif = _MALLOC(sizeof (*bif), M_DEVBUF, M_WAITOK | M_ZERO);
- if (bif == NULL)
- return (ENOMEM);
+ /* fail to add the interface if the MTU doesn't match */
+ if (!TAILQ_EMPTY(&sc->sc_iflist) && sc->sc_ifp->if_mtu != ifs->if_mtu) {
+ printf("%s: %s: invalid MTU for %s", __func__,
+ sc->sc_ifp->if_xname,
+ ifs->if_xname);
+ return EINVAL;
+ }
+ /* there's already an interface that's doing MAC NAT */
+ if (mac_nat && sc->sc_mac_nat_bif != NULL) {
+ return EBUSY;
+ }
+ bif = _MALLOC(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
+ if (bif == NULL) {
+ return ENOMEM;
+ }
bif->bif_ifp = ifs;
ifnet_reference(ifs);
- bif->bif_ifflags = IFBIF_LEARNING | IFBIF_DISCOVER;
+ bif->bif_ifflags |= IFBIF_LEARNING | IFBIF_DISCOVER;
#if HAS_IF_CAP
bif->bif_savedcaps = ifs->if_capenable;
#endif /* HAS_IF_CAP */
bif->bif_sc = sc;
+ if (mac_nat) {
+ (void)bridge_mac_nat_enable(sc, bif);
+ }
/* Allow the first Ethernet member to define the MTU */
- if (TAILQ_EMPTY(&sc->sc_iflist))
+ if (TAILQ_EMPTY(&sc->sc_iflist)) {
sc->sc_ifp->if_mtu = ifs->if_mtu;
- else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
- printf("%s: %s: invalid MTU for %s", __func__,
- sc->sc_ifp->if_xname,
- ifs->if_xname);
- return (EINVAL);
}
/*
!memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN)) {
bcopy(IF_LLADDR(ifs), eaddr, ETHER_ADDR_LEN);
sc->sc_ifaddr = ifs;
- ifnet_reference(ifs); /* for sc_ifaddr */
+ ifnet_reference(ifs); /* for sc_ifaddr */
lladdr_changed = 1;
}
ifs->if_bridge = sc;
#if BRIDGESTP
- if (bsd_mode) {
- bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
- }
+ bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
#endif /* BRIDGESTP */
/*
error = ifnet_set_promiscuous(ifs, 1);
if (error) {
/* Ignore error when device is not up */
- if (error != ENETDOWN)
+ if (error != ENETDOWN) {
goto out;
+ }
error = 0;
} else {
bif->bif_flags |= BIFF_PROMISC;
/*
* The new member may change the link status of the bridge interface
*/
- if (interface_media_active(ifs))
+ if (interface_media_active(ifs)) {
bif->bif_flags |= BIFF_MEDIA_ACTIVE;
- else
+ } else {
bif->bif_flags &= ~BIFF_MEDIA_ACTIVE;
+ }
event_code = bridge_updatelinkstatus(sc);
/*
* Respect lock ordering with DLIL lock for the following operations
*/
- if (bsd_mode) {
- BRIDGE_UNLOCK(sc);
- }
+ BRIDGE_UNLOCK(sc);
+
/*
* install an interface filter
*/
- memset(&iff, 0, sizeof (struct iff_filter));
+ memset(&iff, 0, sizeof(struct iff_filter));
iff.iff_cookie = bif;
iff.iff_name = "com.apple.kernel.bsd.net.if_bridge";
- if (bsd_mode) {
- iff.iff_input = bridge_iff_input;
-#if BRIDGE_MEMBER_OUT_FILTER
- iff.iff_output = bridge_iff_output;
-#endif /* BRIDGE_MEMBER_OUT_FILTER */
- }
+ iff.iff_input = bridge_iff_input;
+ iff.iff_output = bridge_iff_output;
iff.iff_event = bridge_iff_event;
iff.iff_detached = bridge_iff_detached;
error = dlil_attach_filter(ifs, &iff, &bif->bif_iff_ref,
BRIDGE_LOCK(sc);
goto out;
}
+ BRIDGE_LOCK(sc);
bif->bif_flags |= BIFF_FILTER_ATTACHED;
+ BRIDGE_UNLOCK(sc);
/*
- * install an dummy "bridge" protocol
+ * install a dummy "bridge" protocol
*/
if ((error = bridge_attach_protocol(ifs)) != 0) {
if (error != 0) {
goto out;
}
}
+ BRIDGE_LOCK(sc);
bif->bif_flags |= BIFF_PROTO_ATTACHED;
+ BRIDGE_UNLOCK(sc);
if (lladdr_changed &&
- (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0)
+ (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0) {
printf("%s: ifnet_set_lladdr failed %d\n", __func__, error);
+ }
- if (event_code != 0)
+ if (event_code != 0) {
bridge_link_event(bifp, event_code);
+ }
BRIDGE_LOCK(sc);
out:
- if (error && bif != NULL)
+ if (error && bif != NULL) {
bridge_delete_member(sc, bif, 1);
+ }
- return (error);
+ return error;
}
static int
struct bridge_iflist *bif;
bif = bridge_lookup_member(sc, req->ifbr_ifsname);
- if (bif == NULL)
- return (ENOENT);
+ if (bif == NULL) {
+ return ENOENT;
+ }
bridge_delete_member(sc, bif, 0);
- return (0);
+ return 0;
}
static int
bridge_ioctl_purge(struct bridge_softc *sc, void *arg)
{
#pragma unused(sc, arg)
- return (0);
+ return 0;
}
static int
struct bridge_iflist *bif;
bif = bridge_lookup_member(sc, req->ifbr_ifsname);
- if (bif == NULL)
- return (ENOENT);
-
- if (bridge_in_bsd_mode(sc)) {
- struct bstp_port *bp;
-
- bp = &bif->bif_stp;
- req->ifbr_state = bp->bp_state;
- req->ifbr_priority = bp->bp_priority;
- req->ifbr_path_cost = bp->bp_path_cost;
- req->ifbr_proto = bp->bp_protover;
- req->ifbr_role = bp->bp_role;
- req->ifbr_stpflags = bp->bp_flags;
- /* Copy STP state options as flags */
- if (bp->bp_operedge)
- req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
- if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
- req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
- if (bp->bp_ptp_link)
- req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
- if (bp->bp_flags & BSTP_PORT_AUTOPTP)
- req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
- if (bp->bp_flags & BSTP_PORT_ADMEDGE)
- req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
- if (bp->bp_flags & BSTP_PORT_ADMCOST)
- req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
+ if (bif == NULL) {
+ return ENOENT;
}
+
+ struct bstp_port *bp;
+
+ bp = &bif->bif_stp;
+ req->ifbr_state = bp->bp_state;
+ req->ifbr_priority = bp->bp_priority;
+ req->ifbr_path_cost = bp->bp_path_cost;
+ req->ifbr_proto = bp->bp_protover;
+ req->ifbr_role = bp->bp_role;
+ req->ifbr_stpflags = bp->bp_flags;
req->ifbr_ifsflags = bif->bif_ifflags;
+
+ /* Copy STP state options as flags */
+ if (bp->bp_operedge) {
+ req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
+ }
+ if (bp->bp_flags & BSTP_PORT_AUTOEDGE) {
+ req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
+ }
+ if (bp->bp_ptp_link) {
+ req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
+ }
+ if (bp->bp_flags & BSTP_PORT_AUTOPTP) {
+ req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
+ }
+ if (bp->bp_flags & BSTP_PORT_ADMEDGE) {
+ req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
+ }
+ if (bp->bp_flags & BSTP_PORT_ADMCOST) {
+ req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
+ }
+
req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
req->ifbr_addrcnt = bif->bif_addrcnt;
req->ifbr_addrmax = bif->bif_addrmax;
req->ifbr_addrexceeded = bif->bif_addrexceeded;
- return (0);
+ return 0;
}
static int
int error;
#endif /* BRIDGESTP */
- if (!bridge_in_bsd_mode(sc)) {
- return (EINVAL);
- }
-
bif = bridge_lookup_member(sc, req->ifbr_ifsname);
- if (bif == NULL)
- return (ENOENT);
+ if (bif == NULL) {
+ return ENOENT;
+ }
- if (req->ifbr_ifsflags & IFBIF_SPAN)
+ if (req->ifbr_ifsflags & IFBIF_SPAN) {
/* SPAN is readonly */
- return (EINVAL);
+ return EINVAL;
+ }
+ if ((req->ifbr_ifsflags & IFBIF_MAC_NAT) != 0) {
+ errno_t error;
+ error = bridge_mac_nat_enable(sc, bif);
+ if (error != 0) {
+ return error;
+ }
+ } else if (sc->sc_mac_nat_bif != NULL) {
+ bridge_mac_nat_disable(sc);
+ }
#if BRIDGESTP
if (req->ifbr_ifsflags & IFBIF_STP) {
if ((bif->bif_ifflags & IFBIF_STP) == 0) {
error = bstp_enable(&bif->bif_stp);
- if (error)
- return (error);
+ if (error) {
+ return error;
+ }
}
} else {
- if ((bif->bif_ifflags & IFBIF_STP) != 0)
+ if ((bif->bif_ifflags & IFBIF_STP) != 0) {
bstp_disable(&bif->bif_stp);
+ }
}
/* Pass on STP flags */
bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
#else /* !BRIDGESTP */
- if (req->ifbr_ifsflags & IFBIF_STP)
- return (EOPNOTSUPP);
+ if (req->ifbr_ifsflags & IFBIF_STP) {
+ return EOPNOTSUPP;
+ }
#endif /* !BRIDGESTP */
/* Save the bits relating to the bridge */
bif->bif_ifflags = req->ifbr_ifsflags & IFBIFMASK;
- return (0);
+ return 0;
}
static int
struct ifbrparam *param = arg;
sc->sc_brtmax = param->ifbrp_csize;
- if (bridge_in_bsd_mode(sc)) {
- bridge_rttrim(sc);
- }
- return (0);
+ bridge_rttrim(sc);
+ return 0;
}
static int
param->ifbrp_csize = sc->sc_brtmax;
- return (0);
-}
-
-#define BRIDGE_IOCTL_GIFS do { \
- struct bridge_iflist *bif; \
- struct ifbreq breq; \
- char *buf, *outbuf; \
- unsigned int count, buflen, len; \
- \
- count = 0; \
- TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) \
- count++; \
- if (bridge_in_bsd_mode(sc)) { \
- TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) \
- count++; \
- } \
- \
- buflen = sizeof (breq) * count; \
- if (bifc->ifbic_len == 0) { \
- bifc->ifbic_len = buflen; \
- return (0); \
- } \
- BRIDGE_UNLOCK(sc); \
- outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
- BRIDGE_LOCK(sc); \
- \
- count = 0; \
- buf = outbuf; \
- len = min(bifc->ifbic_len, buflen); \
- bzero(&breq, sizeof (breq)); \
- TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
- if (len < sizeof (breq)) \
- break; \
- \
- snprintf(breq.ifbr_ifsname, sizeof (breq.ifbr_ifsname), \
- "%s", bif->bif_ifp->if_xname); \
- /* Fill in the ifbreq structure */ \
- error = bridge_ioctl_gifflags(sc, &breq); \
- if (error) \
- break; \
- memcpy(buf, &breq, sizeof (breq)); \
- count++; \
- buf += sizeof (breq); \
- len -= sizeof (breq); \
- } \
- if (bridge_in_bsd_mode(sc)) { \
- TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) { \
- if (len < sizeof (breq)) \
- break; \
- \
- snprintf(breq.ifbr_ifsname, \
- sizeof (breq.ifbr_ifsname), \
- "%s", bif->bif_ifp->if_xname); \
- breq.ifbr_ifsflags = bif->bif_ifflags; \
- breq.ifbr_portno \
- = bif->bif_ifp->if_index & 0xfff; \
- memcpy(buf, &breq, sizeof (breq)); \
- count++; \
- buf += sizeof (breq); \
- len -= sizeof (breq); \
- } \
- } \
- \
- BRIDGE_UNLOCK(sc); \
- bifc->ifbic_len = sizeof (breq) * count; \
- error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); \
- BRIDGE_LOCK(sc); \
- _FREE(outbuf, M_TEMP); \
+ return 0;
+}
+
+#define BRIDGE_IOCTL_GIFS do { \
+ struct bridge_iflist *bif; \
+ struct ifbreq breq; \
+ char *buf, *outbuf; \
+ unsigned int count, buflen, len; \
+ \
+ count = 0; \
+ TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) \
+ count++; \
+ TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) \
+ count++; \
+ \
+ buflen = sizeof (breq) * count; \
+ if (bifc->ifbic_len == 0) { \
+ bifc->ifbic_len = buflen; \
+ return (0); \
+ } \
+ BRIDGE_UNLOCK(sc); \
+ outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
+ BRIDGE_LOCK(sc); \
+ \
+ count = 0; \
+ buf = outbuf; \
+ len = min(bifc->ifbic_len, buflen); \
+ bzero(&breq, sizeof (breq)); \
+ TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
+ if (len < sizeof (breq)) \
+ break; \
+ \
+ snprintf(breq.ifbr_ifsname, sizeof (breq.ifbr_ifsname), \
+ "%s", bif->bif_ifp->if_xname); \
+ /* Fill in the ifbreq structure */ \
+ error = bridge_ioctl_gifflags(sc, &breq); \
+ if (error) \
+ break; \
+ memcpy(buf, &breq, sizeof (breq)); \
+ count++; \
+ buf += sizeof (breq); \
+ len -= sizeof (breq); \
+ } \
+ TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) { \
+ if (len < sizeof (breq)) \
+ break; \
+ \
+ snprintf(breq.ifbr_ifsname, \
+ sizeof (breq.ifbr_ifsname), \
+ "%s", bif->bif_ifp->if_xname); \
+ breq.ifbr_ifsflags = bif->bif_ifflags; \
+ breq.ifbr_portno \
+ = bif->bif_ifp->if_index & 0xfff; \
+ memcpy(buf, &breq, sizeof (breq)); \
+ count++; \
+ buf += sizeof (breq); \
+ len -= sizeof (breq); \
+ } \
+ \
+ BRIDGE_UNLOCK(sc); \
+ bifc->ifbic_len = sizeof (breq) * count; \
+ error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); \
+ BRIDGE_LOCK(sc); \
+ _FREE(outbuf, M_TEMP); \
} while (0)
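/*
 * BRIDGE_IOCTL_GIFS follows the usual two-pass sizing convention: a request
 * with ifbic_len == 0 only reports the space required, so userland can size
 * its buffer and repeat the call.  Roughly (get_bridge_iflist() and bifc are
 * placeholders; ifbic_len/ifbic_req are the fields used above):
 *
 *	bifc.ifbic_len = 0;
 *	get_bridge_iflist(&bifc);		// pass 1: learn required size
 *	bifc.ifbic_req = malloc(bifc.ifbic_len);
 *	get_bridge_iflist(&bifc);		// pass 2: kernel fills the buffer
 */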
static int
BRIDGE_IOCTL_GIFS;
- return (error);
+ return error;
}
static int
BRIDGE_IOCTL_GIFS;
- return (error);
-}
-
-#define BRIDGE_IOCTL_RTS do { \
- struct bridge_rtnode *brt; \
- char *buf; \
- char *outbuf = NULL; \
- unsigned int count, buflen, len; \
- unsigned long now; \
- \
- if (bac->ifbac_len == 0) \
- return (0); \
- \
- bzero(&bareq, sizeof (bareq)); \
- count = 0; \
- if (!bridge_in_bsd_mode(sc)) { \
- goto out; \
- } \
- LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) \
- count++; \
- buflen = sizeof (bareq) * count; \
- \
- BRIDGE_UNLOCK(sc); \
- outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
- BRIDGE_LOCK(sc); \
- \
- count = 0; \
- buf = outbuf; \
- len = min(bac->ifbac_len, buflen); \
- LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { \
- if (len < sizeof (bareq)) \
- goto out; \
- snprintf(bareq.ifba_ifsname, sizeof (bareq.ifba_ifsname), \
- "%s", brt->brt_ifp->if_xname); \
- memcpy(bareq.ifba_dst, brt->brt_addr, sizeof (brt->brt_addr)); \
- bareq.ifba_vlan = brt->brt_vlan; \
- if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { \
- now = (unsigned long) net_uptime(); \
- if (now < brt->brt_expire) \
- bareq.ifba_expire = \
- brt->brt_expire - now; \
- } else \
- bareq.ifba_expire = 0; \
- bareq.ifba_flags = brt->brt_flags; \
- \
- memcpy(buf, &bareq, sizeof (bareq)); \
- count++; \
- buf += sizeof (bareq); \
- len -= sizeof (bareq); \
- } \
-out: \
- bac->ifbac_len = sizeof (bareq) * count; \
- if (outbuf != NULL) { \
- BRIDGE_UNLOCK(sc); \
- error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); \
- _FREE(outbuf, M_TEMP); \
- BRIDGE_LOCK(sc); \
- } \
- return (error); \
+ return error;
+}
+
+#define BRIDGE_IOCTL_RTS do { \
+ struct bridge_rtnode *brt; \
+ char *buf; \
+ char *outbuf = NULL; \
+ unsigned int count, buflen, len; \
+ unsigned long now; \
+ \
+ if (bac->ifbac_len == 0) \
+ return (0); \
+ \
+ bzero(&bareq, sizeof (bareq)); \
+ count = 0; \
+ LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) \
+ count++; \
+ buflen = sizeof (bareq) * count; \
+ \
+ BRIDGE_UNLOCK(sc); \
+ outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
+ BRIDGE_LOCK(sc); \
+ \
+ count = 0; \
+ buf = outbuf; \
+ len = min(bac->ifbac_len, buflen); \
+ LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { \
+ if (len < sizeof (bareq)) \
+ goto out; \
+ snprintf(bareq.ifba_ifsname, sizeof (bareq.ifba_ifsname), \
+ "%s", brt->brt_ifp->if_xname); \
+ memcpy(bareq.ifba_dst, brt->brt_addr, sizeof (brt->brt_addr)); \
+ bareq.ifba_vlan = brt->brt_vlan; \
+ if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { \
+ now = (unsigned long) net_uptime(); \
+ if (now < brt->brt_expire) \
+ bareq.ifba_expire = \
+ brt->brt_expire - now; \
+ } else \
+ bareq.ifba_expire = 0; \
+ bareq.ifba_flags = brt->brt_flags; \
+ \
+ memcpy(buf, &bareq, sizeof (bareq)); \
+ count++; \
+ buf += sizeof (bareq); \
+ len -= sizeof (bareq); \
+ } \
+out: \
+ bac->ifbac_len = sizeof (bareq) * count; \
+ if (outbuf != NULL) { \
+ BRIDGE_UNLOCK(sc); \
+ error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); \
+ _FREE(outbuf, M_TEMP); \
+ BRIDGE_LOCK(sc); \
+ } \
+ return (error); \
} while (0)
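/*
 * In BRIDGE_IOCTL_RTS above, ifba_expire reports the remaining lifetime of
 * a learned (IFBAF_DYNAMIC) entry in seconds relative to net_uptime(); for
 * example, an entry with brt_expire == now + 240 is reported as 240.
 * Entries that are not dynamic are reported with ifba_expire == 0.
 */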
static int
int error = 0;
BRIDGE_IOCTL_RTS;
- return (error);
+ return error;
}
static int
int error = 0;
BRIDGE_IOCTL_RTS;
- return (error);
+ return error;
}
static int
struct bridge_iflist *bif;
int error;
- if (!bridge_in_bsd_mode(sc)) {
- return (0);
- }
-
bif = bridge_lookup_member(sc, req->ifba_ifsname);
- if (bif == NULL)
- return (ENOENT);
+ if (bif == NULL) {
+ return ENOENT;
+ }
error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
req->ifba_flags);
- return (error);
+ return error;
}
static int
struct bridge_iflist *bif;
int error;
- if (!bridge_in_bsd_mode(sc)) {
- return (0);
- }
-
bif = bridge_lookup_member(sc, req->ifba_ifsname);
- if (bif == NULL)
- return (ENOENT);
+ if (bif == NULL) {
+ return ENOENT;
+ }
error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
req->ifba_flags);
- return (error);
+ return error;
}
static int
struct ifbrparam *param = arg;
sc->sc_brttimeout = param->ifbrp_ctime;
- return (0);
+ return 0;
}
static int
struct ifbrparam *param = arg;
param->ifbrp_ctime = sc->sc_brttimeout;
- return (0);
+ return 0;
}
static int
{
struct ifbareq32 *req = arg;
- if (!bridge_in_bsd_mode(sc)) {
- return (0);
- }
- return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
+ return bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan);
}
static int
{
struct ifbareq64 *req = arg;
- if (!bridge_in_bsd_mode(sc)) {
- return (0);
- }
- return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
+ return bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan);
}
static int
{
struct ifbreq *req = arg;
- if (!bridge_in_bsd_mode(sc)) {
- return (0);
- }
bridge_rtflush(sc, req->ifbr_ifsflags);
- return (0);
+ return 0;
}
static int
struct ifbrparam *param = arg;
struct bstp_state *bs = &sc->sc_stp;
- if (!bridge_in_bsd_mode(sc)) {
- return (0);
- }
param->ifbrp_prio = bs->bs_bridge_priority;
- return (0);
+ return 0;
}
static int
#if BRIDGESTP
struct ifbrparam *param = arg;
- if (!bridge_in_bsd_mode(sc)) {
- return (EOPNOTSUPP);
- }
- return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
+ return bstp_set_priority(&sc->sc_stp, param->ifbrp_prio);
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
- return (EOPNOTSUPP);
+ return EOPNOTSUPP;
#endif /* !BRIDGESTP */
}
struct ifbrparam *param = arg;
struct bstp_state *bs = &sc->sc_stp;
- if (!bridge_in_bsd_mode(sc)) {
- return (0);
- }
param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
- return (0);
+ return 0;
}
static int
#if BRIDGESTP
struct ifbrparam *param = arg;
- if (!bridge_in_bsd_mode(sc)) {
- return (EOPNOTSUPP);
- }
- return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
+ return bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime);
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
- return (EOPNOTSUPP);
+ return EOPNOTSUPP;
#endif /* !BRIDGESTP */
}
struct ifbrparam *param;
struct bstp_state *bs;
- if (!bridge_in_bsd_mode(sc)) {
- return (0);
- }
param = arg;
bs = &sc->sc_stp;
param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
- return (0);
+ return 0;
}
static int
#if BRIDGESTP
struct ifbrparam *param = arg;
- if (!bridge_in_bsd_mode(sc)) {
- return (EOPNOTSUPP);
- }
- return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
+ return bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay);
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
- return (EOPNOTSUPP);
+ return EOPNOTSUPP;
#endif /* !BRIDGESTP */
}
struct ifbrparam *param;
struct bstp_state *bs;
- if (!bridge_in_bsd_mode(sc)) {
- return (EOPNOTSUPP);
- }
param = arg;
bs = &sc->sc_stp;
param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
- return (0);
+ return 0;
}
static int
#if BRIDGESTP
struct ifbrparam *param = arg;
- if (!bridge_in_bsd_mode(sc)) {
- return (EOPNOTSUPP);
- }
- return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
+ return bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage);
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
- return (EOPNOTSUPP);
+ return EOPNOTSUPP;
#endif /* !BRIDGESTP */
}
struct ifbreq *req = arg;
struct bridge_iflist *bif;
- if (!bridge_in_bsd_mode(sc)) {
- return (EOPNOTSUPP);
- }
bif = bridge_lookup_member(sc, req->ifbr_ifsname);
- if (bif == NULL)
- return (ENOENT);
+ if (bif == NULL) {
+ return ENOENT;
+ }
- return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
+ return bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority);
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
- return (EOPNOTSUPP);
+ return EOPNOTSUPP;
#endif /* !BRIDGESTP */
}
struct ifbreq *req = arg;
struct bridge_iflist *bif;
- if (!bridge_in_bsd_mode(sc)) {
- return (EOPNOTSUPP);
- }
bif = bridge_lookup_member(sc, req->ifbr_ifsname);
- if (bif == NULL)
- return (ENOENT);
+ if (bif == NULL) {
+ return ENOENT;
+ }
- return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
+ return bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost);
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
- return (EOPNOTSUPP);
+ return EOPNOTSUPP;
#endif /* !BRIDGESTP */
}
param->ifbrp_filter = sc->sc_filter_flags;
- return (0);
+ return 0;
}
static int
{
struct ifbrparam *param = arg;
- if (param->ifbrp_filter & ~IFBF_FILT_MASK)
- return (EINVAL);
+ if (param->ifbrp_filter & ~IFBF_FILT_MASK) {
+ return EINVAL;
+ }
-#ifndef BRIDGE_IPF
- if (param->ifbrp_filter & IFBF_FILT_USEIPF)
- return (EINVAL);
-#endif
+ if (param->ifbrp_filter & IFBF_FILT_USEIPF) {
+ return EINVAL;
+ }
sc->sc_filter_flags = param->ifbrp_filter;
- return (0);
+ return 0;
}
static int
struct bridge_iflist *bif;
bif = bridge_lookup_member(sc, req->ifbr_ifsname);
- if (bif == NULL)
- return (ENOENT);
+ if (bif == NULL) {
+ return ENOENT;
+ }
bif->bif_addrmax = req->ifbr_addrmax;
- return (0);
+ return 0;
}
static int
struct bridge_iflist *bif = NULL;
struct ifnet *ifs;
- if (!bridge_in_bsd_mode(sc)) {
- return (EOPNOTSUPP);
- }
ifs = ifunit(req->ifbr_ifsname);
- if (ifs == NULL)
- return (ENOENT);
+ if (ifs == NULL) {
+ return ENOENT;
+ }
if (IFNET_IS_INTCOPROC(ifs)) {
- return (EINVAL);
+ return EINVAL;
}
TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
- if (ifs == bif->bif_ifp)
- return (EBUSY);
+ if (ifs == bif->bif_ifp) {
+ return EBUSY;
+ }
- if (ifs->if_bridge != NULL)
- return (EBUSY);
+ if (ifs->if_bridge != NULL) {
+ return EBUSY;
+ }
switch (ifs->if_type) {
- case IFT_ETHER:
- case IFT_L2VLAN:
- break;
- case IFT_GIF:
- /* currently not supported */
- /* FALLTHRU */
- default:
- return (EINVAL);
+ case IFT_ETHER:
+ case IFT_L2VLAN:
+ break;
+ case IFT_GIF:
+ /* currently not supported */
+ /* FALLTHRU */
+ default:
+ return EINVAL;
}
- bif = _MALLOC(sizeof (*bif), M_DEVBUF, M_WAITOK | M_ZERO);
- if (bif == NULL)
- return (ENOMEM);
+ bif = _MALLOC(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
+ if (bif == NULL) {
+ return ENOMEM;
+ }
bif->bif_ifp = ifs;
bif->bif_ifflags = IFBIF_SPAN;
TAILQ_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
- return (0);
+ return 0;
}
static int
struct bridge_iflist *bif;
struct ifnet *ifs;
- if (!bridge_in_bsd_mode(sc)) {
- return (EOPNOTSUPP);
- }
ifs = ifunit(req->ifbr_ifsname);
- if (ifs == NULL)
- return (ENOENT);
+ if (ifs == NULL) {
+ return ENOENT;
+ }
TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
- if (ifs == bif->bif_ifp)
- break;
+ if (ifs == bif->bif_ifp) {
+ break;
+ }
- if (bif == NULL)
- return (ENOENT);
+ if (bif == NULL) {
+ return ENOENT;
+ }
bridge_delete_span(sc, bif);
- return (0);
-}
-
-#define BRIDGE_IOCTL_GBPARAM do { \
- struct bstp_state *bs = &sc->sc_stp; \
- struct bstp_port *root_port; \
- \
- req->ifbop_maxage = bs->bs_bridge_max_age >> 8; \
- req->ifbop_hellotime = bs->bs_bridge_htime >> 8; \
- req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8; \
- \
- root_port = bs->bs_root_port; \
- if (root_port == NULL) \
- req->ifbop_root_port = 0; \
- else \
- req->ifbop_root_port = root_port->bp_ifp->if_index; \
- \
- req->ifbop_holdcount = bs->bs_txholdcount; \
- req->ifbop_priority = bs->bs_bridge_priority; \
- req->ifbop_protocol = bs->bs_protover; \
- req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; \
- req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; \
- req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; \
- req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; \
- req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; \
- req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; \
+ return 0;
+}
+
+#define BRIDGE_IOCTL_GBPARAM do { \
+ struct bstp_state *bs = &sc->sc_stp; \
+ struct bstp_port *root_port; \
+ \
+ req->ifbop_maxage = bs->bs_bridge_max_age >> 8; \
+ req->ifbop_hellotime = bs->bs_bridge_htime >> 8; \
+ req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8; \
+ \
+ root_port = bs->bs_root_port; \
+ if (root_port == NULL) \
+ req->ifbop_root_port = 0; \
+ else \
+ req->ifbop_root_port = root_port->bp_ifp->if_index; \
+ \
+ req->ifbop_holdcount = bs->bs_txholdcount; \
+ req->ifbop_priority = bs->bs_bridge_priority; \
+ req->ifbop_protocol = bs->bs_protover; \
+ req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; \
+ req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; \
+ req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; \
+ req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; \
+ req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; \
+ req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; \
} while (0)
static int
{
struct ifbropreq32 *req = arg;
- if (bridge_in_bsd_mode(sc)) {
- BRIDGE_IOCTL_GBPARAM;
- }
- return (0);
+ BRIDGE_IOCTL_GBPARAM;
+ return 0;
}
static int
{
struct ifbropreq64 *req = arg;
- if (bridge_in_bsd_mode(sc)) {
- BRIDGE_IOCTL_GBPARAM;
- }
- return (0);
+ BRIDGE_IOCTL_GBPARAM;
+ return 0;
}
static int
struct ifbrparam *param = arg;
param->ifbrp_cexceeded = sc->sc_brtexceeded;
- return (0);
-}
-
-#define BRIDGE_IOCTL_GIFSSTP do { \
- struct bridge_iflist *bif; \
- struct bstp_port *bp; \
- struct ifbpstpreq bpreq; \
- char *buf, *outbuf; \
- unsigned int count, buflen, len; \
- \
- count = 0; \
- TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
- if ((bif->bif_ifflags & IFBIF_STP) != 0) \
- count++; \
- } \
- \
- buflen = sizeof (bpreq) * count; \
- if (bifstp->ifbpstp_len == 0) { \
- bifstp->ifbpstp_len = buflen; \
- return (0); \
- } \
- \
- BRIDGE_UNLOCK(sc); \
- outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
- BRIDGE_LOCK(sc); \
- \
- count = 0; \
- buf = outbuf; \
- len = min(bifstp->ifbpstp_len, buflen); \
- bzero(&bpreq, sizeof (bpreq)); \
- TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
- if (len < sizeof (bpreq)) \
- break; \
- \
- if ((bif->bif_ifflags & IFBIF_STP) == 0) \
- continue; \
- \
- bp = &bif->bif_stp; \
- bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff; \
- bpreq.ifbp_fwd_trans = bp->bp_forward_transitions; \
- bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost; \
- bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id; \
- bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; \
- bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id; \
- \
- memcpy(buf, &bpreq, sizeof (bpreq)); \
- count++; \
- buf += sizeof (bpreq); \
- len -= sizeof (bpreq); \
- } \
- \
- BRIDGE_UNLOCK(sc); \
- bifstp->ifbpstp_len = sizeof (bpreq) * count; \
+ return 0;
+}
+
+#define BRIDGE_IOCTL_GIFSSTP do { \
+ struct bridge_iflist *bif; \
+ struct bstp_port *bp; \
+ struct ifbpstpreq bpreq; \
+ char *buf, *outbuf; \
+ unsigned int count, buflen, len; \
+ \
+ count = 0; \
+ TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
+ if ((bif->bif_ifflags & IFBIF_STP) != 0) \
+ count++; \
+ } \
+ \
+ buflen = sizeof (bpreq) * count; \
+ if (bifstp->ifbpstp_len == 0) { \
+ bifstp->ifbpstp_len = buflen; \
+ return (0); \
+ } \
+ \
+ BRIDGE_UNLOCK(sc); \
+ outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \
+ BRIDGE_LOCK(sc); \
+ \
+ count = 0; \
+ buf = outbuf; \
+ len = min(bifstp->ifbpstp_len, buflen); \
+ bzero(&bpreq, sizeof (bpreq)); \
+ TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \
+ if (len < sizeof (bpreq)) \
+ break; \
+ \
+ if ((bif->bif_ifflags & IFBIF_STP) == 0) \
+ continue; \
+ \
+ bp = &bif->bif_stp; \
+ bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff; \
+ bpreq.ifbp_fwd_trans = bp->bp_forward_transitions; \
+ bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost; \
+ bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id; \
+ bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; \
+ bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id; \
+ \
+ memcpy(buf, &bpreq, sizeof (bpreq)); \
+ count++; \
+ buf += sizeof (bpreq); \
+ len -= sizeof (bpreq); \
+ } \
+ \
+ BRIDGE_UNLOCK(sc); \
+ bifstp->ifbpstp_len = sizeof (bpreq) * count; \
error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); \
- BRIDGE_LOCK(sc); \
- _FREE(outbuf, M_TEMP); \
- return (error); \
+ BRIDGE_LOCK(sc); \
+ _FREE(outbuf, M_TEMP); \
+ return (error); \
} while (0)
static int
struct ifbpstpconf32 *bifstp = arg;
int error = 0;
- if (bridge_in_bsd_mode(sc)) {
- BRIDGE_IOCTL_GIFSSTP;
- }
- return (error);
+ BRIDGE_IOCTL_GIFSSTP;
+ return error;
}
static int
struct ifbpstpconf64 *bifstp = arg;
int error = 0;
- if (bridge_in_bsd_mode(sc)) {
- BRIDGE_IOCTL_GIFSSTP;
- }
- return (error);
+ BRIDGE_IOCTL_GIFSSTP;
+ return error;
}
static int
#if BRIDGESTP
struct ifbrparam *param = arg;
- if (!bridge_in_bsd_mode(sc)) {
- return (EOPNOTSUPP);
- }
- return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
+ return bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto);
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
- return (EOPNOTSUPP);
+ return EOPNOTSUPP;
#endif /* !BRIDGESTP */
}
#if BRIDGESTP
struct ifbrparam *param = arg;
- if (!bridge_in_bsd_mode(sc)) {
- return (EOPNOTSUPP);
- }
- return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
+ return bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc);
#else /* !BRIDGESTP */
#pragma unused(sc, arg)
- return (EOPNOTSUPP);
+ return EOPNOTSUPP;
#endif /* !BRIDGESTP */
}
struct bridge_iflist *bif;
bif = bridge_lookup_member(sc, req->ifbrhf_ifsname);
- if (bif == NULL)
- return (ENOENT);
+ if (bif == NULL) {
+ return ENOENT;
+ }
bzero(req, sizeof(struct ifbrhostfilter));
if (bif->bif_flags & BIFF_HOST_FILTER) {
ETHER_ADDR_LEN);
req->ifbrhf_ipsrc = bif->bif_hf_ipsrc.s_addr;
}
- return (0);
+ return 0;
}
static int
struct bridge_iflist *bif;
bif = bridge_lookup_member(sc, req->ifbrhf_ifsname);
- if (bif == NULL)
- return (ENOENT);
+ if (bif == NULL) {
+ return ENOENT;
+ }
INC_ATOMIC_INT64_LIM(net_api_stats.nas_vmnet_total);
bcopy(req->ifbrhf_hwsrca, bif->bif_hf_hwsrc,
ETHER_ADDR_LEN);
if (bcmp(req->ifbrhf_hwsrca, ethernulladdr,
- ETHER_ADDR_LEN) != 0)
+ ETHER_ADDR_LEN) != 0) {
bif->bif_flags |= BIFF_HF_HWSRC;
- else
+ } else {
bif->bif_flags &= ~BIFF_HF_HWSRC;
+ }
}
if (req->ifbrhf_flags & IFBRHF_IPSRC) {
bif->bif_hf_ipsrc.s_addr = req->ifbrhf_ipsrc;
- if (bif->bif_hf_ipsrc.s_addr != INADDR_ANY)
+ if (bif->bif_hf_ipsrc.s_addr != INADDR_ANY) {
bif->bif_flags |= BIFF_HF_IPSRC;
- else
+ } else {
bif->bif_flags &= ~BIFF_HF_IPSRC;
+ }
}
} else {
bif->bif_flags &= ~(BIFF_HOST_FILTER | BIFF_HF_HWSRC |
bif->bif_hf_ipsrc.s_addr = INADDR_ANY;
}
- return (0);
+ return 0;
+}
+
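/*
 * bridge_mac_nat_entry_out:
 *	Serialize one MAC NAT entry list into an array of struct ifbrmne
 *	records at 'buf', stopping when fewer than sizeof(ifbmne) bytes
 *	remain.  The running count, output pointer, and remaining length are
 *	returned through the parameters so the caller can chain the IPv4 and
 *	IPv6 lists into a single buffer.
 */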
+static char *
+bridge_mac_nat_entry_out(struct mac_nat_entry_list * list,
+ unsigned int * count_p, char *buf, unsigned int *len_p)
+{
+ unsigned int count = *count_p;
+ struct ifbrmne ifbmne;
+ unsigned int len = *len_p;
+ struct mac_nat_entry *mne;
+ unsigned long now;
+
+ bzero(&ifbmne, sizeof(ifbmne));
+ LIST_FOREACH(mne, list, mne_list) {
+ if (len < sizeof(ifbmne)) {
+ break;
+ }
+ snprintf(ifbmne.ifbmne_ifname, sizeof(ifbmne.ifbmne_ifname),
+ "%s", mne->mne_bif->bif_ifp->if_xname);
+ memcpy(ifbmne.ifbmne_mac, mne->mne_mac,
+ sizeof(ifbmne.ifbmne_mac));
+ now = (unsigned long) net_uptime();
+ if (now < mne->mne_expire) {
+ ifbmne.ifbmne_expire = mne->mne_expire - now;
+ } else {
+ ifbmne.ifbmne_expire = 0;
+ }
+ if ((mne->mne_flags & MNE_FLAGS_IPV6) != 0) {
+ ifbmne.ifbmne_af = AF_INET6;
+ ifbmne.ifbmne_ip6_addr = mne->mne_ip6;
+ } else {
+ ifbmne.ifbmne_af = AF_INET;
+ ifbmne.ifbmne_ip_addr = mne->mne_ip;
+ }
+ memcpy(buf, &ifbmne, sizeof(ifbmne));
+ count++;
+ buf += sizeof(ifbmne);
+ len -= sizeof(ifbmne);
+ }
+ *count_p = count;
+ *len_p = len;
+ return buf;
+}
+
+/*
+ * bridge_ioctl_gmnelist()
+ * Perform the get mac_nat_entry list ioctl.
+ *
+ * Note:
+ * The struct ifbrmnelist32 and struct ifbrmnelist64 have the same
+ * field size/layout except for the last field ifbml_buf, the user-supplied
+ * buffer pointer. That is passed in separately via the 'user_addr'
+ * parameter from the respective 32-bit or 64-bit ioctl routine.
+ */
+static int
+bridge_ioctl_gmnelist(struct bridge_softc *sc, struct ifbrmnelist32 *mnl,
+ user_addr_t user_addr)
+{
+ unsigned int count;
+ char *buf;
+ int error = 0;
+ char *outbuf = NULL;
+ struct mac_nat_entry *mne;
+ unsigned int buflen;
+ unsigned int len;
+
+ mnl->ifbml_elsize = sizeof(struct ifbrmne);
+ count = 0;
+ LIST_FOREACH(mne, &sc->sc_mne_list, mne_list)
+ count++;
+ LIST_FOREACH(mne, &sc->sc_mne_list_v6, mne_list)
+ count++;
+ buflen = sizeof(struct ifbrmne) * count;
+ if (buflen == 0 || mnl->ifbml_len == 0) {
+ mnl->ifbml_len = buflen;
+ return error;
+ }
+ BRIDGE_UNLOCK(sc);
+ outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO);
+ BRIDGE_LOCK(sc);
+ count = 0;
+ buf = outbuf;
+ len = min(mnl->ifbml_len, buflen);
+ buf = bridge_mac_nat_entry_out(&sc->sc_mne_list, &count, buf, &len);
+ buf = bridge_mac_nat_entry_out(&sc->sc_mne_list_v6, &count, buf, &len);
+ mnl->ifbml_len = count * sizeof(struct ifbrmne);
+ BRIDGE_UNLOCK(sc);
+ error = copyout(outbuf, user_addr, mnl->ifbml_len);
+ _FREE(outbuf, M_TEMP);
+ BRIDGE_LOCK(sc);
+ return error;
+}
+
+static int
+bridge_ioctl_gmnelist64(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrmnelist64 *mnl = arg;
+
+ return bridge_ioctl_gmnelist(sc, arg, mnl->ifbml_buf);
}
+static int
+bridge_ioctl_gmnelist32(struct bridge_softc *sc, void *arg)
+{
+ struct ifbrmnelist32 *mnl = arg;
+
+ return bridge_ioctl_gmnelist(sc, arg,
+ CAST_USER_ADDR_T(mnl->ifbml_buf));
+}
/*
* bridge_ifdetach:
* Detach an interface from a bridge. Called when a member
* interface is detaching.
*/
-__private_extern__ void
-bridge_ifdetach(struct bridge_iflist *bif, struct ifnet *ifp)
+static void
+bridge_ifdetach(struct ifnet *ifp)
{
+ struct bridge_iflist *bif;
struct bridge_softc *sc = ifp->if_bridge;
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_LIFECYCLE)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_LIFECYCLE)) {
printf("%s: %s\n", __func__, ifp->if_xname);
+ }
#endif /* BRIDGE_DEBUG */
/* Check if the interface is a bridge member */
if (sc != NULL) {
BRIDGE_LOCK(sc);
bif = bridge_lookup_member_if(sc, ifp);
- if (bif != NULL)
+ if (bif != NULL) {
bridge_delete_member(sc, bif, 1);
+ }
BRIDGE_UNLOCK(sc);
return;
}
/* Check if the interface is a span port */
lck_mtx_lock(&bridge_list_mtx);
LIST_FOREACH(sc, &bridge_list, sc_list) {
- if (bridge_in_bsd_mode(sc)) {
- BRIDGE_LOCK(sc);
- TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
- if (ifp == bif->bif_ifp) {
- bridge_delete_span(sc, bif);
- break;
- }
- BRIDGE_UNLOCK(sc);
+ BRIDGE_LOCK(sc);
+ TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
+ if (ifp == bif->bif_ifp) {
+ bridge_delete_span(sc, bif);
+ break;
}
+ BRIDGE_UNLOCK(sc);
}
lck_mtx_unlock(&bridge_list_mtx);
}
+/*
+ * bridge_proto_attach_changed
+ *
+ * Called when protocol attachment on the interface changes.
+ */
+static void
+bridge_proto_attach_changed(struct ifnet *ifp)
+{
+ boolean_t changed = FALSE;
+ struct bridge_iflist *bif;
+ boolean_t input_broadcast;
+ struct bridge_softc *sc = ifp->if_bridge;
+
+#if BRIDGE_DEBUG
+ if (IF_BRIDGE_DEBUG(BR_DBGF_LIFECYCLE)) {
+ printf("%s: %s\n", __func__, ifp->if_xname);
+ }
+#endif /* BRIDGE_DEBUG */
+ if (sc == NULL) {
+ return;
+ }
+ /*
+ * Selectively enable input broadcast only when necessary.
+ * The bridge interface itself attaches a fake protocol
+ * so checking for at least two protocols means that the
+ * interface is being used for something besides bridging.
+ */
+ input_broadcast = if_get_protolist(ifp, NULL, 0) >= 2;
+ BRIDGE_LOCK(sc);
+ bif = bridge_lookup_member_if(sc, ifp);
+ if (bif != NULL) {
+ if (input_broadcast) {
+ if ((bif->bif_flags & BIFF_INPUT_BROADCAST) == 0) {
+ bif->bif_flags |= BIFF_INPUT_BROADCAST;
+ changed = TRUE;
+ }
+ } else if ((bif->bif_flags & BIFF_INPUT_BROADCAST) != 0) {
+ changed = TRUE;
+ bif->bif_flags &= ~BIFF_INPUT_BROADCAST;
+ }
+ }
+ BRIDGE_UNLOCK(sc);
+#if BRIDGE_DEBUG
+ if (IF_BRIDGE_DEBUG(BR_DBGF_LIFECYCLE)) {
+		printf("%s: input broadcast %s\n", ifp->if_xname,
+ input_broadcast ? "ENABLED" : "DISABLED");
+ }
+#endif /* BRIDGE_DEBUG */
+ return;
+}
+
/*
* interface_media_active:
*
bzero(&ifmr, sizeof(ifmr));
if (ifnet_ioctl(ifp, 0, SIOCGIFMEDIA, &ifmr) == 0) {
- if ((ifmr.ifm_status & IFM_AVALID) && ifmr.ifm_count > 0)
+ if ((ifmr.ifm_status & IFM_AVALID) && ifmr.ifm_count > 0) {
status = ifmr.ifm_status & IFM_ACTIVE ? 1 : 0;
+ }
}
- return (status);
+ return status;
}
/*
* bridge_updatelinkstatus:
*
- * Update the media active status of the bridge based on the
+ * Update the media active status of the bridge based on the
* media active status of its member.
* If changed, return the corresponding on/off link event.
*/
event_code = KEV_DL_LINK_OFF;
}
- return (event_code);
+ return event_code;
}
/*
struct bridge_softc *sc = ifp->if_bridge;
struct bridge_iflist *bif;
u_int32_t event_code = 0;
+ int media_active;
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_LIFECYCLE)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_LIFECYCLE)) {
printf("%s: %s\n", __func__, ifp->if_xname);
+ }
#endif /* BRIDGE_DEBUG */
/* Check if the interface is a bridge member */
- if (sc == NULL)
+ if (sc == NULL) {
return;
+ }
+ media_active = interface_media_active(ifp);
BRIDGE_LOCK(sc);
bif = bridge_lookup_member_if(sc, ifp);
if (bif != NULL) {
- if (interface_media_active(ifp))
+ if (media_active) {
bif->bif_flags |= BIFF_MEDIA_ACTIVE;
- else
+ } else {
bif->bif_flags &= ~BIFF_MEDIA_ACTIVE;
+ }
+ if (sc->sc_mac_nat_bif != NULL) {
+ bridge_mac_nat_flush_entries(sc, bif);
+ }
- event_code = bridge_updatelinkstatus(sc);
+ event_code = bridge_updatelinkstatus(sc);
}
BRIDGE_UNLOCK(sc);
- if (event_code != 0)
+ if (event_code != 0) {
bridge_link_event(sc->sc_ifp, event_code);
+ }
}
/*
BRIDGE_LOCK(sc);
#if BRIDGE_DEBUG_DELAYED_CALLBACK
- if (if_bridge_debug & BR_DBGF_DELAYED_CALL)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_DELAYED_CALL)) {
printf("%s: %s call 0x%llx flags 0x%x\n", __func__,
sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call),
call->bdc_flags);
+ }
#endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */
if (call->bdc_flags & BDCF_CANCELLING) {
wakeup(call);
} else {
- if ((sc->sc_flags & SCF_DETACHING) == 0)
+ if ((sc->sc_flags & SCF_DETACHING) == 0) {
(*call->bdc_func)(sc);
}
+ }
call->bdc_flags &= ~BDCF_OUTSTANDING;
BRIDGE_UNLOCK(sc);
}
* bridge_schedule_delayed_call:
*
* Schedule a function to be called on a separate thread
- * The actual call may be scheduled to run at a given time or ASAP.
+ * The actual call may be scheduled to run at a given time or ASAP.
*/
static void
bridge_schedule_delayed_call(struct bridge_delayed_call *call)
BRIDGE_LOCK_ASSERT_HELD(sc);
if ((sc->sc_flags & SCF_DETACHING) ||
- (call->bdc_flags & (BDCF_OUTSTANDING | BDCF_CANCELLING)))
+ (call->bdc_flags & (BDCF_OUTSTANDING | BDCF_CANCELLING))) {
return;
+ }
if (call->bdc_ts.tv_sec || call->bdc_ts.tv_nsec) {
nanoseconds_to_absolutetime(
- (uint64_t)call->bdc_ts.tv_sec * NSEC_PER_SEC +
- call->bdc_ts.tv_nsec, &deadline);
+ (uint64_t)call->bdc_ts.tv_sec * NSEC_PER_SEC +
+ call->bdc_ts.tv_nsec, &deadline);
clock_absolutetime_interval_to_deadline(deadline, &deadline);
}
call->bdc_flags = BDCF_OUTSTANDING;
#if BRIDGE_DEBUG_DELAYED_CALLBACK
- if (if_bridge_debug & BR_DBGF_DELAYED_CALL)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_DELAYED_CALL)) {
printf("%s: %s call 0x%llx flags 0x%x\n", __func__,
sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call),
call->bdc_flags);
+ }
#endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */
- if (call->bdc_ts.tv_sec || call->bdc_ts.tv_nsec)
+ if (call->bdc_ts.tv_sec || call->bdc_ts.tv_nsec) {
thread_call_func_delayed(
(thread_call_func_t)bridge_delayed_callback,
call, deadline);
- else {
- if (call->bdc_thread_call == NULL)
+ } else {
+ if (call->bdc_thread_call == NULL) {
call->bdc_thread_call = thread_call_allocate(
(thread_call_func_t)bridge_delayed_callback,
call);
+ }
thread_call_enter(call->bdc_thread_call);
}
}
/*
* The call was never scheduled
*/
- if (sc == NULL)
+ if (sc == NULL) {
return;
+ }
BRIDGE_LOCK_ASSERT_HELD(sc);
while (call->bdc_flags & BDCF_OUTSTANDING) {
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_DELAYED_CALL)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_DELAYED_CALL)) {
printf("%s: %s call 0x%llx flags 0x%x\n", __func__,
sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call),
call->bdc_flags);
+ }
#endif /* BRIDGE_DEBUG */
result = thread_call_func_cancel(
- (thread_call_func_t)bridge_delayed_callback, call, FALSE);
+ (thread_call_func_t)bridge_delayed_callback, call, FALSE);
if (result) {
/*
/*
* The call was never scheduled
*/
- if (sc == NULL)
+ if (sc == NULL) {
return;
+ }
BRIDGE_LOCK_ASSERT_HELD(sc);
if (call->bdc_thread_call != NULL) {
result = thread_call_free(call->bdc_thread_call);
- if (result == FALSE)
+ if (result == FALSE) {
panic("%s thread_call_free() failed for call %p",
- __func__, call);
+ __func__, call);
+ }
call->bdc_thread_call = NULL;
}
}
BRIDGE_LOCK_ASSERT_HELD(sc);
- if ((ifnet_flags(ifp) & IFF_RUNNING))
- return (0);
+ if ((ifnet_flags(ifp) & IFF_RUNNING)) {
+ return 0;
+ }
error = ifnet_set_flags(ifp, IFF_RUNNING, IFF_RUNNING);
- if (bridge_in_bsd_mode(sc)) {
- /*
- * Calling bridge_aging_timer() is OK as there are no entries to
- * age so we're just going to arm the timer
- */
- bridge_aging_timer(sc);
+ /*
+ * Calling bridge_aging_timer() is OK as there are no entries to
+ * age so we're just going to arm the timer
+ */
+ bridge_aging_timer(sc);
#if BRIDGESTP
- if (error == 0)
- bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */
-#endif /* BRIDGESTP */
+ if (error == 0) {
+ bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */
}
- return (error);
+#endif /* BRIDGESTP */
+ return error;
}
/*
BRIDGE_LOCK_ASSERT_HELD(sc);
- if ((ifnet_flags(ifp) & IFF_RUNNING) == 0)
+ if ((ifnet_flags(ifp) & IFF_RUNNING) == 0) {
return;
+ }
- if (bridge_in_bsd_mode(sc)) {
- bridge_cancel_delayed_call(&sc->sc_aging_timer);
+ bridge_cancel_delayed_call(&sc->sc_aging_timer);
#if BRIDGESTP
- bstp_stop(&sc->sc_stp);
+ bstp_stop(&sc->sc_stp);
#endif /* BRIDGESTP */
- bridge_rtflush(sc, IFBF_FLUSHDYN);
- }
+ bridge_rtflush(sc, IFBF_FLUSHDYN);
(void) ifnet_set_flags(ifp, 0, IFF_RUNNING);
}
/*
- * bridge_enqueue:
- *
- * Enqueue a packet on a bridge member interface.
+ * bridge_compute_cksum:
*
+ * If the packet has checksum flags, compare the hardware checksum
+ * capabilities of the source and destination interfaces. If they
+ * are the same, there's nothing to do. If they are different,
+ * finalize the checksum so that it can be sent on the destination
+ * interface.
*/
-static int
-bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
+static void
+bridge_compute_cksum(struct ifnet *src_if, struct ifnet *dst_if, struct mbuf *m)
{
- int len, error = 0;
- short mflags;
- struct mbuf *m0;
+ uint32_t csum_flags;
+ uint16_t dst_hw_csum;
+ uint32_t did_sw;
+ struct ether_header *eh;
+ uint16_t src_hw_csum;
- VERIFY(dst_ifp != NULL);
+ csum_flags = m->m_pkthdr.csum_flags & IF_HWASSIST_CSUM_MASK;
+ if (csum_flags == 0) {
+ /* no checksum offload */
+ return;
+ }
/*
- * We may be sending a fragment so traverse the mbuf
- *
- * NOTE: bridge_fragment() is called only when PFIL_HOOKS is enabled.
+ * if destination/source differ in checksum offload
+ * capabilities, finalize/compute the checksum
*/
- for (; m; m = m0) {
- errno_t _error;
- struct flowadv adv = { FADV_SUCCESS };
+ dst_hw_csum = IF_HWASSIST_CSUM_FLAGS(dst_if->if_hwassist);
+ src_hw_csum = IF_HWASSIST_CSUM_FLAGS(src_if->if_hwassist);
+ if (dst_hw_csum == src_hw_csum) {
+ return;
+ }
+ eh = mtod(m, struct ether_header *);
+ switch (ntohs(eh->ether_type)) {
+ case ETHERTYPE_IP:
+ did_sw = in_finalize_cksum(m, sizeof(*eh), csum_flags);
+ break;
+ case ETHERTYPE_IPV6:
+ did_sw = in6_finalize_cksum(m, sizeof(*eh), -1, -1, csum_flags);
+ break;
+	default:
+		/* non-IP frame: nothing finalized in software */
+		did_sw = 0;
+		break;
+	}
+#if BRIDGE_DEBUG
+ if (IF_BRIDGE_DEBUG(BR_DBGF_CHECKSUM)) {
+ printf("%s: [%s -> %s] before 0x%x did 0x%x after 0x%x\n",
+ __func__,
+ src_if->if_xname, dst_if->if_xname, csum_flags, did_sw,
+ m->m_pkthdr.csum_flags);
+ }
+#endif /* BRIDGE_DEBUG */
+}
- m0 = m->m_nextpkt;
- m->m_nextpkt = NULL;
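+
+/*
+ * bridge_transmit:
+ *	Hand the packet to the destination interface via dlil_output() and
+ *	map flow advisory feedback into an error code (EQFULL when the
+ *	flow is controlled, EQSUSPENDED when it is suspended) so that
+ *	callers can propagate back-pressure.
+ */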
+static int
+bridge_transmit(struct ifnet * ifp, struct mbuf *m)
+{
+ struct flowadv adv = { .code = FADV_SUCCESS };
+ errno_t error;
- len = m->m_pkthdr.len;
- mflags = m->m_flags;
- m->m_flags |= M_PROTO1; /* set to avoid loops */
+ error = dlil_output(ifp, 0, m, NULL, NULL, 1, &adv);
+ if (error == 0) {
+ if (adv.code == FADV_FLOW_CONTROLLED) {
+ error = EQFULL;
+ } else if (adv.code == FADV_SUSPENDED) {
+ error = EQSUSPENDED;
+ }
+ }
+ return error;
+}
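+
+/*
+ * bridge_send:
+ *	Prepare the packet's checksum state according to 'cksum_op':
+ *	clear the offload flags, finalize a possibly deferred checksum,
+ *	or (for kChecksumOperationCompute) finalize it only when the
+ *	source and destination interfaces differ in checksum offload
+ *	capabilities.  The packet is then transmitted on the destination
+ *	interface.
+ */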
+static int
+bridge_send(struct ifnet *src_ifp,
+ struct ifnet *dst_ifp, struct mbuf *m, ChecksumOperation cksum_op)
+{
+ switch (cksum_op) {
+ case kChecksumOperationClear:
+ m->m_pkthdr.csum_flags = 0;
+ break;
+ case kChecksumOperationFinalize:
+ /* the checksum might not be correct, finalize now */
bridge_finalize_cksum(dst_ifp, m);
-
+ break;
+ case kChecksumOperationCompute:
+ bridge_compute_cksum(src_ifp, dst_ifp, m);
+ break;
+ default:
+ break;
+ }
#if HAS_IF_CAP
- /*
- * If underlying interface can not do VLAN tag insertion itself
- * then attach a packet tag that holds it.
- */
- if ((m->m_flags & M_VLANTAG) &&
- (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
- m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
- if (m == NULL) {
- printf("%s: %s: unable to prepend VLAN "
- "header\n", __func__, dst_ifp->if_xname);
- (void) ifnet_stat_increment_out(dst_ifp,
- 0, 0, 1);
- continue;
- }
- m->m_flags &= ~M_VLANTAG;
+ /*
+ * If underlying interface can not do VLAN tag insertion itself
+ * then attach a packet tag that holds it.
+ */
+ if ((m->m_flags & M_VLANTAG) &&
+ (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
+ m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
+ if (m == NULL) {
+ printf("%s: %s: unable to prepend VLAN "
+ "header\n", __func__, dst_ifp->if_xname);
+ (void) ifnet_stat_increment_out(dst_ifp,
+ 0, 0, 1);
+ return 0;
}
+ m->m_flags &= ~M_VLANTAG;
+ }
#endif /* HAS_IF_CAP */
+ return bridge_transmit(dst_ifp, m);
+}
- _error = dlil_output(dst_ifp, 0, m, NULL, NULL, 1, &adv);
-
- /* Preserve existing error value */
- if (error == 0) {
- if (_error != 0)
- error = _error;
- else if (adv.code == FADV_FLOW_CONTROLLED)
- error = EQFULL;
- else if (adv.code == FADV_SUSPENDED)
- error = EQSUSPENDED;
- }
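+
+/*
+ * bridge_send_tso:
+ *	Segment a large IPv4 or IPv6 TCP frame in software using
+ *	gso_ipv4_tcp()/gso_ipv6_tcp() before handing it to a destination
+ *	interface that does not support TSO.  Frames that are neither
+ *	IPv4 nor IPv6 are dropped.
+ */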
+static int
+bridge_send_tso(struct ifnet *dst_ifp, struct mbuf *m)
+{
+ struct ether_header *eh;
+ uint16_t ether_type;
+ errno_t error;
+ boolean_t is_ipv4;
+ u_int mac_hlen;
+ eh = mtod(m, struct ether_header *);
+ ether_type = ntohs(eh->ether_type);
+ switch (ether_type) {
+ case ETHERTYPE_IP:
+ is_ipv4 = TRUE;
+ break;
+ case ETHERTYPE_IPV6:
+ is_ipv4 = FALSE;
+ break;
+ default:
+ printf("%s: large non IPv4/IPv6 packet\n", __func__);
+ m_freem(m);
+ error = EINVAL;
+ goto done;
+ }
+ mac_hlen = sizeof(*eh);
+
+#if HAS_IF_CAP
+ /*
+ * If underlying interface can not do VLAN tag insertion itself
+ * then attach a packet tag that holds it.
+ */
+ if ((m->m_flags & M_VLANTAG) &&
+ (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
+ m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
+ if (m == NULL) {
+ printf("%s: %s: unable to prepend VLAN "
+ "header\n", __func__, dst_ifp->if_xname);
+ (void) ifnet_stat_increment_out(dst_ifp,
+ 0, 0, 1);
+ error = ENOBUFS;
+ goto done;
+ }
+ m->m_flags &= ~M_VLANTAG;
+ mac_hlen += ETHER_VLAN_ENCAP_LEN;
+ }
+#endif /* HAS_IF_CAP */
+ if (is_ipv4) {
+ error = gso_ipv4_tcp(dst_ifp, &m, mac_hlen, TRUE);
+ } else {
+ error = gso_ipv6_tcp(dst_ifp, &m, mac_hlen, TRUE);
+ }
+
+done:
+ return error;
+}
+
+/*
+ * bridge_enqueue:
+ *
+ * Enqueue a packet on a bridge member interface.
+ *
+ */
+static int
+bridge_enqueue(ifnet_t bridge_ifp, struct ifnet *src_ifp,
+ struct ifnet *dst_ifp, struct mbuf *m, ChecksumOperation cksum_op)
+{
+ errno_t error = 0;
+ int len;
+
+ VERIFY(dst_ifp != NULL);
+
+ /*
+ * We may be sending a fragment so traverse the mbuf
+ *
+ * NOTE: bridge_fragment() is called only when PFIL_HOOKS is enabled.
+ */
+ for (struct mbuf *next_m = NULL; m != NULL; m = next_m) {
+ errno_t _error;
+
+ len = m->m_pkthdr.len;
+ m->m_flags |= M_PROTO1; /* set to avoid loops */
+ next_m = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+ /*
+ * need to segment the packet if it is a large frame
+ * and the destination interface does not support TSO
+ */
+ if (if_bridge_segmentation != 0 &&
+ len > (bridge_ifp->if_mtu + ETHER_HDR_LEN) &&
+ (dst_ifp->if_capabilities & IFCAP_TSO) != IFCAP_TSO) {
+ _error = bridge_send_tso(dst_ifp, m);
+ } else {
+ _error = bridge_send(src_ifp, dst_ifp, m, cksum_op);
+ }
+ /* Preserve first error value */
+ if (error == 0 && _error != 0) {
+ error = _error;
+ }
if (_error == 0) {
- (void) ifnet_stat_increment_out(sc->sc_ifp, 1, len, 0);
+ (void) ifnet_stat_increment_out(bridge_ifp, 1, len, 0);
} else {
- (void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
+ (void) ifnet_stat_increment_out(bridge_ifp, 0, 0, 1);
}
}
- return (error);
+ return error;
}
#if HAS_BRIDGE_DUMMYNET
sc = ifp->if_bridge;
/*
- * The packet didnt originate from a member interface. This should only
+ * The packet didn't originate from a member interface. This should only
* ever happen if a member interface is removed while packets are
* queued for it.
*/
}
if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
- if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
+ if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0) {
return;
- if (m == NULL)
+ }
+ if (m == NULL) {
return;
+ }
}
-
- (void) bridge_enqueue(sc, ifp, m);
+ (void) bridge_enqueue(sc->sc_ifp, NULL, ifp, m, kChecksumOperationNone);
}
+
#endif /* HAS_BRIDGE_DUMMYNET */
-#if BRIDGE_MEMBER_OUT_FILTER
/*
* bridge_member_output:
*
* performs the bridging function for locally originated
* packets.
*
- * The mbuf has the Ethernet header already attached. We must
- * enqueue or free the mbuf before returning.
+ * The mbuf has the Ethernet header already attached.
*/
-static int
-bridge_member_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
- struct rtentry *rt)
+static errno_t
+bridge_member_output(struct bridge_softc *sc, ifnet_t ifp, mbuf_t *data)
{
-#pragma unused(sa, rt)
+ ifnet_t bridge_ifp;
struct ether_header *eh;
struct ifnet *dst_if;
- struct bridge_softc *sc;
uint16_t vlan;
+ struct bridge_iflist *mac_nat_bif;
+ ifnet_t mac_nat_ifp;
+ mbuf_t m = *data;
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_OUTPUT)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_OUTPUT)) {
printf("%s: ifp %s\n", __func__, ifp->if_xname);
+ }
#endif /* BRIDGE_DEBUG */
if (m->m_len < ETHER_HDR_LEN) {
m = m_pullup(m, ETHER_HDR_LEN);
- if (m == NULL)
- return (0);
+ if (m == NULL) {
+ *data = NULL;
+ return EJUSTRETURN;
+ }
}
eh = mtod(m, struct ether_header *);
- sc = ifp->if_bridge;
vlan = VLANTAGOF(m);
BRIDGE_LOCK(sc);
+ mac_nat_bif = sc->sc_mac_nat_bif;
+ mac_nat_ifp = (mac_nat_bif != NULL) ? mac_nat_bif->bif_ifp : NULL;
+ if (mac_nat_ifp == ifp) {
+ /* record the IP address used by the MAC NAT interface */
+ (void)bridge_mac_nat_output(sc, mac_nat_bif, data, NULL);
+ m = *data;
+ if (m == NULL) {
+ /* packet was deallocated */
+ BRIDGE_UNLOCK(sc);
+ return EJUSTRETURN;
+ }
+ }
+ bridge_ifp = sc->sc_ifp;
/*
* APPLE MODIFICATION
* go ahead and send out that interface. Otherwise, the packet
* is dropped below.
*/
- if ((sc->sc_ifp->if_flags & IFF_RUNNING) == 0) {
+ if ((bridge_ifp->if_flags & IFF_RUNNING) == 0) {
dst_if = ifp;
goto sendunicast;
}
* If the packet is a multicast, or we don't know a better way to
* get there, send to all interfaces.
*/
- if (ETHER_IS_MULTICAST(eh->ether_dhost))
+ if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
dst_if = NULL;
- else
+ } else {
dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
+ }
if (dst_if == NULL) {
struct bridge_iflist *bif;
struct mbuf *mc;
- int error = 0, used = 0;
+ int used = 0;
+ errno_t error;
+
bridge_span(sc, m);
BRIDGE_LOCK2REF(sc, error);
- if (error) {
+ if (error != 0) {
m_freem(m);
- return (0);
+ return EJUSTRETURN;
}
TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
+ /* skip interface with inactive link status */
+ if ((bif->bif_flags & BIFF_MEDIA_ACTIVE) == 0) {
+ continue;
+ }
dst_if = bif->bif_ifp;
- if (dst_if->if_type == IFT_GIF)
- continue;
- if ((dst_if->if_flags & IFF_RUNNING) == 0)
+#if 0
+ if (dst_if->if_type == IFT_GIF) {
continue;
-
- /*
- * If this is not the original output interface,
- * and the interface is participating in spanning
- * tree, make sure the port is in a state that
- * allows forwarding.
- */
- if (dst_if != ifp && (bif->bif_ifflags & IFBIF_STP) &&
- bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
+ }
+#endif
+ if ((dst_if->if_flags & IFF_RUNNING) == 0) {
continue;
-
- if (LIST_NEXT(bif, bif_next) == NULL) {
+ }
+ if (dst_if != ifp) {
+ /*
+ * If this is not the original output interface,
+ * and the interface is participating in spanning
+ * tree, make sure the port is in a state that
+ * allows forwarding.
+ */
+ if ((bif->bif_ifflags & IFBIF_STP) &&
+ bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
+ continue;
+ }
+ /*
+ * If this is not the original output interface,
+ * and the destination is the MAC NAT interface,
+ * drop the packet. The packet can't be sent
+ * if the source MAC is incorrect.
+ */
+ if (dst_if == mac_nat_ifp) {
+ continue;
+ }
+ }
+ if (TAILQ_NEXT(bif, bif_next) == NULL) {
used = 1;
mc = m;
} else {
- mc = m_copypacket(m, M_DONTWAIT);
+ mc = m_dup(m, M_DONTWAIT);
if (mc == NULL) {
(void) ifnet_stat_increment_out(
- sc->sc_ifp, 0, 0, 1);
+ bridge_ifp, 0, 0, 1);
continue;
}
}
-
- (void) bridge_enqueue(sc, dst_if, mc);
+ (void) bridge_enqueue(bridge_ifp, ifp, dst_if,
+ mc, kChecksumOperationCompute);
}
- if (used == 0)
+ if (used == 0) {
m_freem(m);
+ }
BRIDGE_UNREF(sc);
- return (0);
+ return EJUSTRETURN;
}
sendunicast:
if ((dst_if->if_flags & IFF_RUNNING) == 0) {
m_freem(m);
BRIDGE_UNLOCK(sc);
- return (0);
+ return EJUSTRETURN;
}
BRIDGE_UNLOCK(sc);
- (void) bridge_enqueue(sc, dst_if, m);
- return (0);
+ if (dst_if == ifp) {
+ /* just let the packet continue on its way */
+ return 0;
+ }
+ if (dst_if != mac_nat_ifp) {
+ (void) bridge_enqueue(bridge_ifp, ifp, dst_if, m,
+ kChecksumOperationCompute);
+ } else {
+ /*
+ * This is not the original output interface
+ * and the destination is the MAC NAT interface.
+ * Drop the packet because the packet can't be sent
+ * if the source MAC is incorrect.
+ */
+ m_freem(m);
+ }
+ return EJUSTRETURN;
}
-#endif /* BRIDGE_MEMBER_OUT_FILTER */
/*
* Output callback.
{
struct bridge_softc *sc = ifnet_softc(ifp);
struct ether_header *eh;
- struct ifnet *dst_if;
+ struct ifnet *dst_if = NULL;
int error = 0;
eh = mtod(m, struct ether_header *);
- dst_if = NULL;
BRIDGE_LOCK(sc);
- ASSERT(bridge_in_bsd_mode(sc));
- if (!(m->m_flags & (M_BCAST|M_MCAST)))
+ if (!(m->m_flags & (M_BCAST | M_MCAST))) {
dst_if = bridge_rtlookup(sc, eh->ether_dhost, 0);
+ }
(void) ifnet_stat_increment_out(ifp, 1, m->m_pkthdr.len, 0);
#if NBPFILTER > 0
- if (sc->sc_bpf_output)
+ if (sc->sc_bpf_output) {
bridge_bpf_output(ifp, m);
+ }
#endif
if (dst_if == NULL) {
/* callee will unlock */
- bridge_broadcast(sc, ifp, m, 0);
+ bridge_broadcast(sc, NULL, m, 0);
} else {
+ ifnet_t bridge_ifp;
+
+ bridge_ifp = sc->sc_ifp;
BRIDGE_UNLOCK(sc);
- error = bridge_enqueue(sc, dst_if, m);
+ error = bridge_enqueue(bridge_ifp, NULL, dst_if, m,
+ kChecksumOperationFinalize);
}
- return (error);
+ return error;
}
static void
struct ether_header *eh = mtod(m, struct ether_header *);
uint32_t sw_csum, hwcap;
- if (ifp != NULL)
+
+ if (ifp != NULL) {
hwcap = (ifp->if_hwassist | CSUM_DATA_VALID);
- else
+ } else {
hwcap = 0;
+ }
/* do in software what the hardware cannot */
sw_csum = m->m_pkthdr.csum_flags & ~IF_HWASSIST_CSUM_FLAGS(hwcap);
(m->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
if (m->m_pkthdr.csum_flags & CSUM_TCP) {
uint16_t start =
- sizeof (*eh) + sizeof (struct ip);
+ sizeof(*eh) + sizeof(struct ip);
uint16_t ulpoff =
m->m_pkthdr.csum_data & 0xffff;
m->m_pkthdr.csum_flags |=
m->m_pkthdr.csum_flags);
}
}
- (void) in_finalize_cksum(m, sizeof (*eh), sw_csum);
+ (void) in_finalize_cksum(m, sizeof(*eh), sw_csum);
break;
-#if INET6
case ETHERTYPE_IPV6:
if ((hwcap & CSUM_PARTIAL) &&
!(sw_csum & CSUM_DELAY_IPV6_DATA) &&
(m->m_pkthdr.csum_flags & CSUM_DELAY_IPV6_DATA)) {
if (m->m_pkthdr.csum_flags & CSUM_TCPIPV6) {
uint16_t start =
- sizeof (*eh) + sizeof (struct ip6_hdr);
+ sizeof(*eh) + sizeof(struct ip6_hdr);
uint16_t ulpoff =
m->m_pkthdr.csum_data & 0xffff;
m->m_pkthdr.csum_flags |=
m->m_pkthdr.csum_flags);
}
}
- (void) in6_finalize_cksum(m, sizeof (*eh), -1, -1, sw_csum);
+ (void) in6_finalize_cksum(m, sizeof(*eh), -1, -1, sw_csum);
break;
-#endif /* INET6 */
}
}
struct mbuf *m;
for (;;) {
- if (ifnet_dequeue(ifp, &m) != 0)
+ if (ifnet_dequeue(ifp, &m) != 0) {
break;
+ }
(void) bridge_output(ifp, m);
}
*/
static void
bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
- struct mbuf *m)
+ struct mbuf *m)
{
struct bridge_iflist *dbif;
- struct ifnet *src_if, *dst_if, *ifp;
+ ifnet_t bridge_ifp;
+ struct ifnet *src_if, *dst_if;
struct ether_header *eh;
uint16_t vlan;
uint8_t *dst;
int error;
+ struct mac_nat_record mnr;
+ boolean_t translate_mac = FALSE;
+ uint32_t sc_filter_flags = 0;
BRIDGE_LOCK_ASSERT_HELD(sc);
- ASSERT(bridge_in_bsd_mode(sc));
+ bridge_ifp = sc->sc_ifp;
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_OUTPUT)
- printf("%s: %s m 0x%llx\n", __func__, sc->sc_ifp->if_xname,
+ if (IF_BRIDGE_DEBUG(BR_DBGF_OUTPUT)) {
+ printf("%s: %s m 0x%llx\n", __func__, bridge_ifp->if_xname,
(uint64_t)VM_KERNEL_ADDRPERM(m));
+ }
#endif /* BRIDGE_DEBUG */
src_if = m->m_pkthdr.rcvif;
- ifp = sc->sc_ifp;
- (void) ifnet_stat_increment_in(ifp, 1, m->m_pkthdr.len, 0);
+ (void) ifnet_stat_increment_in(bridge_ifp, 1, m->m_pkthdr.len, 0);
vlan = VLANTAGOF(m);
if ((sbif->bif_ifflags & IFBIF_STP) &&
- sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
+ sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
goto drop;
+ }
eh = mtod(m, struct ether_header *);
dst = eh->ether_dhost;
* If the interface has addresses limits then deny any source
* that is not in the cache.
*/
- if (error && sbif->bif_addrmax)
+ if (error && sbif->bif_addrmax) {
goto drop;
+ }
}
if ((sbif->bif_ifflags & IFBIF_STP) != 0 &&
- sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
+ sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING) {
goto drop;
+ }
/*
* At this point, the port either doesn't participate
* If the packet is unicast, destined for someone on
* "this" side of the bridge, drop it.
*/
- if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
+ if ((m->m_flags & (M_BCAST | M_MCAST)) == 0) {
+ /* unicast */
dst_if = bridge_rtlookup(sc, dst, vlan);
- if (src_if == dst_if)
+ if (src_if == dst_if) {
goto drop;
+ }
} else {
+ /* broadcast/multicast */
+
/*
		 * Check if it's a reserved multicast address; any address
* listed in 802.1D section 7.12.6 may not be forwarded by the
*/
if (dst[0] == 0x01 && dst[1] == 0x80 &&
dst[2] == 0xc2 && dst[3] == 0x00 &&
- dst[4] == 0x00 && dst[5] <= 0x0f)
+ dst[4] == 0x00 && dst[5] <= 0x0f) {
goto drop;
+ }
/* ...forward it to all interfaces. */
- atomic_add_64(&ifp->if_imcasts, 1);
+ atomic_add_64(&bridge_ifp->if_imcasts, 1);
dst_if = NULL;
}
#if NBPFILTER > 0
if (eh->ether_type == htons(ETHERTYPE_RSN_PREAUTH) ||
dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0) {
- m->m_pkthdr.rcvif = ifp;
- if (sc->sc_bpf_input)
- bridge_bpf_input(ifp, m);
+ m->m_pkthdr.rcvif = bridge_ifp;
+ BRIDGE_BPF_MTAP_INPUT(sc, m);
}
#endif /* NBPFILTER */
-#if defined(PFIL_HOOKS)
- /* run the packet filter */
- if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
- BRIDGE_UNLOCK(sc);
- if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
- return;
- if (m == NULL)
- return;
- BRIDGE_LOCK(sc);
- }
-#endif /* PFIL_HOOKS */
-
if (dst_if == NULL) {
+ /* bridge_broadcast will unlock */
bridge_broadcast(sc, src_if, m, 1);
return;
}
+ /*
+ * Unicast.
+ */
/*
* At this point, we're dealing with a unicast frame
* going to a different interface.
*/
- if ((dst_if->if_flags & IFF_RUNNING) == 0)
+ if ((dst_if->if_flags & IFF_RUNNING) == 0) {
goto drop;
+ }
dbif = bridge_lookup_member_if(sc, dst_if);
- if (dbif == NULL)
+ if (dbif == NULL) {
/* Not a member of the bridge (anymore?) */
goto drop;
+ }
/* Private segments can not talk to each other */
- if (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE)
+ if (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE) {
goto drop;
+ }
if ((dbif->bif_ifflags & IFBIF_STP) &&
- dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
+ dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
goto drop;
+ }
#if HAS_DHCPRA_MASK
/* APPLE MODIFICATION <rdar:6985737> */
if ((dst_if->if_extflags & IFEXTF_DHCPRA_MASK) != 0) {
m = ip_xdhcpra_output(dst_if, m);
if (!m) {
- ++sc->sc_sc.sc_ifp.if_xdhcpra;
+			++bridge_ifp->if_xdhcpra;
+ BRIDGE_UNLOCK(sc);
return;
}
}
#endif /* HAS_DHCPRA_MASK */
- BRIDGE_UNLOCK(sc);
+ if (dbif == sc->sc_mac_nat_bif) {
+ /* determine how to translate the packet */
+ translate_mac
+ = bridge_mac_nat_output(sc, sbif, &m, &mnr);
+ if (m == NULL) {
+ /* packet was deallocated */
+ BRIDGE_UNLOCK(sc);
+ return;
+ }
+ }
-#if defined(PFIL_HOOKS)
- if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
- if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
+ sc_filter_flags = sc->sc_filter_flags;
+ BRIDGE_UNLOCK(sc);
+ if (PF_IS_ENABLED && (sc_filter_flags & IFBF_FILT_MEMBER)) {
+ if (bridge_pf(&m, dst_if, sc_filter_flags, FALSE) != 0) {
return;
- if (m == NULL)
+ }
+ if (m == NULL) {
return;
+ }
}
-#endif /* PFIL_HOOKS */
- (void) bridge_enqueue(sc, dst_if, m);
+ /* if we need to, translate the MAC address */
+ if (translate_mac) {
+ bridge_mac_nat_translate(&m, &mnr, IF_LLADDR(dst_if));
+ }
+ /*
+ * This is an inbound packet where the checksum
+ * (if applicable) is already present/valid. Since
+ * we are just doing layer 2 forwarding (not IP
+ * forwarding), there's no need to validate the checksum.
+ * Clear the checksum offload flags and send it along.
+ */
+ if (m != NULL) {
+ (void) bridge_enqueue(bridge_ifp, NULL, dst_if, m,
+ kChecksumOperationClear);
+ }
return;
drop:
#if BRIDGE_DEBUG
-char *ether_ntop(char *, size_t, const u_char *);
-
-__private_extern__ char *
+static char *
ether_ntop(char *buf, size_t len, const u_char *ap)
{
snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x",
ap[0], ap[1], ap[2], ap[3], ap[4], ap[5]);
- return (buf);
+ return buf;
+}
+
+#endif /* BRIDGE_DEBUG */
+
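+/*
+ * inject_input_packet:
+ *	Deliver a frame to the networking stack as if it had been received
+ *	on 'ifp': set the receive interface, record the frame header and
+ *	strip it from the data, and mark the mbuf with M_PROTO1 so that
+ *	the bridge does not process it again.
+ */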
+static void
+inject_input_packet(ifnet_t ifp, mbuf_t m)
+{
+ mbuf_pkthdr_setrcvif(m, ifp);
+ mbuf_pkthdr_setheader(m, mbuf_data(m));
+ mbuf_setdata(m, (char *)mbuf_data(m) + ETHER_HDR_LEN,
+ mbuf_len(m) - ETHER_HDR_LEN);
+ mbuf_pkthdr_adjustlen(m, -ETHER_HDR_LEN);
+ m->m_flags |= M_PROTO1; /* set to avoid loops */
+ dlil_input_packet_list(ifp, m);
+ return;
+}
+
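+/*
+ * in_addr_is_ours / in6_addr_is_ours:
+ *	Return TRUE if the given IPv4/IPv6 address is assigned to one of
+ *	the system's own interfaces, by searching the global interface
+ *	address hash under its lock.
+ */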
+static boolean_t
+in_addr_is_ours(struct in_addr ip)
+{
+ struct in_ifaddr *ia;
+ boolean_t ours = FALSE;
+
+ lck_rw_lock_shared(in_ifaddr_rwlock);
+ TAILQ_FOREACH(ia, INADDR_HASH(ip.s_addr), ia_hash) {
+ if (IA_SIN(ia)->sin_addr.s_addr == ip.s_addr) {
+ ours = TRUE;
+ break;
+ }
+ }
+ lck_rw_done(in_ifaddr_rwlock);
+ return ours;
+}
+
+static boolean_t
+in6_addr_is_ours(const struct in6_addr * ip6_p)
+{
+ struct in6_ifaddr *ia6;
+ boolean_t ours = FALSE;
+
+ lck_rw_lock_shared(&in6_ifaddr_rwlock);
+ TAILQ_FOREACH(ia6, IN6ADDR_HASH(ip6_p), ia6_hash) {
+ if (IN6_ARE_ADDR_EQUAL(&ia6->ia_addr.sin6_addr, ip6_p)) {
+ ours = TRUE;
+ break;
+ }
+ }
+ lck_rw_done(&in6_ifaddr_rwlock);
+ return ours;
}
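+
+/*
+ * bridge_interface_input:
+ *	Pass a frame addressed to the bridge interface itself up to the
+ *	networking stack.  When segmentation is enabled, large IPv4/IPv6
+ *	TCP frames are first split using gso_ipv4_tcp()/gso_ipv6_tcp().
+ *	Each resulting packet is marked as arriving on the bridge
+ *	interface, tapped for BPF, stripped of its Ethernet header,
+ *	counted, and handed to dlil_input_packet_list().
+ */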
+static void
+bridge_interface_input(ifnet_t bridge_ifp, mbuf_t m,
+ bpf_packet_func bpf_input_func)
+{
+ size_t byte_count;
+ struct ether_header *eh;
+ uint16_t ether_type;
+ errno_t error;
+ boolean_t is_ipv4;
+ int len;
+ u_int mac_hlen;
+ int pkt_count;
+
+ /* segment large packets before sending them up */
+ if (if_bridge_segmentation == 0) {
+ goto done;
+ }
+ len = m->m_pkthdr.len;
+ if (len <= (bridge_ifp->if_mtu + ETHER_HDR_LEN)) {
+ goto done;
+ }
+ eh = mtod(m, struct ether_header *);
+ ether_type = ntohs(eh->ether_type);
+ switch (ether_type) {
+ case ETHERTYPE_IP:
+ is_ipv4 = TRUE;
+ break;
+ case ETHERTYPE_IPV6:
+ is_ipv4 = FALSE;
+ break;
+ default:
+ printf("%s: large non IPv4/IPv6 packet\n", __func__);
+ m_freem(m);
+ return;
+ }
+
+ /*
+ * We have a large IPv4/IPv6 TCP packet. Segment it if required.
+ *
+ * If gso_ipv[46]_tcp() returns success (0), the packet(s) are
+ * ready to be passed up. If the destination is a local IP address,
+ * the packet will be passed up as a large, single packet.
+ *
+ * If gso_ipv[46]_tcp() returns an error, the packet has already
+ * been freed.
+ */
+ mac_hlen = sizeof(*eh);
+ if (is_ipv4) {
+ error = gso_ipv4_tcp(bridge_ifp, &m, mac_hlen, FALSE);
+ } else {
+ error = gso_ipv6_tcp(bridge_ifp, &m, mac_hlen, FALSE);
+ }
+ if (error != 0) {
+ return;
+ }
+
+done:
+ pkt_count = 0;
+ byte_count = 0;
+ for (mbuf_t scan = m; scan != NULL; scan = scan->m_nextpkt) {
+ /* Mark the packet as arriving on the bridge interface */
+ mbuf_pkthdr_setrcvif(scan, bridge_ifp);
+ mbuf_pkthdr_setheader(scan, mbuf_data(scan));
+ if (bpf_input_func != NULL) {
+ (*bpf_input_func)(bridge_ifp, scan);
+ }
+ mbuf_setdata(scan, (char *)mbuf_data(scan) + ETHER_HDR_LEN,
+ mbuf_len(scan) - ETHER_HDR_LEN);
+ mbuf_pkthdr_adjustlen(scan, -ETHER_HDR_LEN);
+ byte_count += mbuf_pkthdr_len(scan);
+ pkt_count++;
+ }
+ (void)ifnet_stat_increment_in(bridge_ifp, pkt_count, byte_count, 0);
+#if BRIDGE_DEBUG
+ if (IF_BRIDGE_DEBUG(BR_DBGF_INPUT)) {
+ printf("%s: %s %d packet(s) %ld bytes\n", __func__,
+ bridge_ifp->if_xname, pkt_count, byte_count);
+ }
#endif /* BRIDGE_DEBUG */
+ dlil_input_packet_list(bridge_ifp, m);
+ return;
+}
+
/*
* bridge_input:
*
* Filter input from a member interface. Queue the packet for
* bridging if it is not for us.
*/
-__private_extern__ errno_t
-bridge_input(struct ifnet *ifp, struct mbuf *m, void *frame_header)
+errno_t
+bridge_input(struct ifnet *ifp, mbuf_t *data)
{
struct bridge_softc *sc = ifp->if_bridge;
struct bridge_iflist *bif, *bif2;
- struct ifnet *bifp;
+ ifnet_t bridge_ifp;
struct ether_header *eh;
struct mbuf *mc, *mc2;
uint16_t vlan;
- int error;
+ errno_t error;
+ boolean_t is_broadcast;
+ boolean_t is_ip_broadcast = FALSE;
+ boolean_t is_ifp_mac = FALSE;
+ mbuf_t m = *data;
+ uint32_t sc_filter_flags = 0;
- ASSERT(bridge_in_bsd_mode(sc));
+ bridge_ifp = sc->sc_ifp;
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_INPUT)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_INPUT)) {
printf("%s: %s from %s m 0x%llx data 0x%llx\n", __func__,
- sc->sc_ifp->if_xname, ifp->if_xname,
+ bridge_ifp->if_xname, ifp->if_xname,
(uint64_t)VM_KERNEL_ADDRPERM(m),
(uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)));
+ }
#endif /* BRIDGE_DEBUG */
if ((sc->sc_ifp->if_flags & IFF_RUNNING) == 0) {
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_INPUT)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_INPUT)) {
printf("%s: %s not running passing along\n",
- __func__, sc->sc_ifp->if_xname);
+ __func__, bridge_ifp->if_xname);
+ }
#endif /* BRIDGE_DEBUG */
- return (0);
+ return 0;
}
- bifp = sc->sc_ifp;
vlan = VLANTAGOF(m);
#ifdef IFF_MONITOR
* the bpf(4) machinery, but before we do, increment the byte and
* packet counters associated with this interface.
*/
- if ((bifp->if_flags & IFF_MONITOR) != 0) {
- m->m_pkthdr.rcvif = bifp;
+ if ((bridge_ifp->if_flags & IFF_MONITOR) != 0) {
+ m->m_pkthdr.rcvif = bridge_ifp;
BRIDGE_BPF_MTAP_INPUT(sc, m);
- (void) ifnet_stat_increment_in(bifp, 1, m->m_pkthdr.len, 0);
+ (void) ifnet_stat_increment_in(bridge_ifp, 1, m->m_pkthdr.len, 0);
m_freem(m);
- return (EJUSTRETURN);
+ return EJUSTRETURN;
}
#endif /* IFF_MONITOR */
* Need to clear the promiscous flags otherwise it will be
* dropped by DLIL after processing filters
*/
- if ((mbuf_flags(m) & MBUF_PROMISC))
+ if ((mbuf_flags(m) & MBUF_PROMISC)) {
mbuf_setflags_mask(m, 0, MBUF_PROMISC);
+ }
+
+ sc_filter_flags = sc->sc_filter_flags;
+ if (PF_IS_ENABLED && (sc_filter_flags & IFBF_FILT_MEMBER)) {
+ error = bridge_pf(&m, ifp, sc_filter_flags, TRUE);
+ if (error != 0) {
+ return EJUSTRETURN;
+ }
+ if (m == NULL) {
+ return EJUSTRETURN;
+ }
+ /*
+ * bridge_pf could have modified the pointer on success in order
+		 * bridge_pf may have replaced the mbuf while processing the
+		 * packet.  Update the caller's pointer so that we don't use
+		 * a stale one.
+ *data = m;
+ }
BRIDGE_LOCK(sc);
bif = bridge_lookup_member_if(sc, ifp);
if (bif == NULL) {
BRIDGE_UNLOCK(sc);
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_INPUT)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_INPUT)) {
printf("%s: %s bridge_lookup_member_if failed\n",
- __func__, sc->sc_ifp->if_xname);
+ __func__, bridge_ifp->if_xname);
+ }
#endif /* BRIDGE_DEBUG */
- return (0);
+ return 0;
}
if (bif->bif_flags & BIFF_HOST_FILTER) {
- error = bridge_host_filter(bif, m);
+ error = bridge_host_filter(bif, data);
if (error != 0) {
- if (if_bridge_debug & BR_DBGF_INPUT)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_INPUT)) {
printf("%s: %s bridge_host_filter failed\n",
__func__, bif->bif_ifp->if_xname);
+ }
BRIDGE_UNLOCK(sc);
- return (EJUSTRETURN);
+ return EJUSTRETURN;
}
+ m = *data;
}
+ is_broadcast = (m->m_flags & (M_BCAST | M_MCAST)) != 0;
eh = mtod(m, struct ether_header *);
+ if (!is_broadcast &&
+ memcmp(eh->ether_dhost, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0) {
+ if (sc->sc_mac_nat_bif == bif) {
+ /* doing MAC-NAT, check if destination is broadcast */
+ is_ip_broadcast = is_broadcast_ip_packet(data);
+ if (*data == NULL) {
+ BRIDGE_UNLOCK(sc);
+ return EJUSTRETURN;
+ }
+ m = *data;
+ }
+ if (!is_ip_broadcast) {
+ is_ifp_mac = TRUE;
+ }
+ }
bridge_span(sc, m);
- if (m->m_flags & (M_BCAST|M_MCAST)) {
-
+ if (is_broadcast || is_ip_broadcast) {
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_MCAST)
- if ((m->m_flags & M_MCAST))
+ if (is_broadcast && IF_BRIDGE_DEBUG(BR_DBGF_MCAST)) {
+ if ((m->m_flags & M_MCAST)) {
printf("%s: multicast: "
"%02x:%02x:%02x:%02x:%02x:%02x\n",
__func__,
eh->ether_dhost[0], eh->ether_dhost[1],
eh->ether_dhost[2], eh->ether_dhost[3],
eh->ether_dhost[4], eh->ether_dhost[5]);
+ }
+ }
#endif /* BRIDGE_DEBUG */
/* Tap off 802.1D packets; they do not get forwarded. */
- if (memcmp(eh->ether_dhost, bstp_etheraddr,
+ if (is_broadcast && memcmp(eh->ether_dhost, bstp_etheraddr,
ETHER_ADDR_LEN) == 0) {
#if BRIDGESTP
m = bstp_input(&bif->bif_stp, ifp, m);
#endif /* !BRIDGESTP */
if (m == NULL) {
BRIDGE_UNLOCK(sc);
- return (EJUSTRETURN);
+ return EJUSTRETURN;
}
}
if ((bif->bif_ifflags & IFBIF_STP) &&
bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
BRIDGE_UNLOCK(sc);
- return (0);
+ return 0;
}
/*
mc = m_dup(m, M_DONTWAIT);
if (mc == NULL) {
BRIDGE_UNLOCK(sc);
- return (0);
+ return 0;
}
/*
*
* Note that bridge_forward calls BRIDGE_UNLOCK
*/
+ if (is_ip_broadcast) {
+ /* make the copy look like it is actually broadcast */
+ mc->m_flags |= M_BCAST;
+ eh = mtod(mc, struct ether_header *);
+ bcopy(etherbroadcastaddr, eh->ether_dhost,
+ ETHER_ADDR_LEN);
+ }
bridge_forward(sc, bif, mc);
/*
* here from ether_input as a bridge is never a member of a
* bridge.
*/
- VERIFY(bifp->if_bridge == NULL);
+ VERIFY(bridge_ifp->if_bridge == NULL);
mc2 = m_dup(m, M_DONTWAIT);
if (mc2 != NULL) {
/* Keep the layer3 header aligned */
}
if (mc2 != NULL) {
/* mark packet as arriving on the bridge */
- mc2->m_pkthdr.rcvif = bifp;
+ mc2->m_pkthdr.rcvif = bridge_ifp;
mc2->m_pkthdr.pkt_hdr = mbuf_data(mc2);
-#if NBPFILTER > 0
- if (sc->sc_bpf_input)
- bridge_bpf_input(bifp, mc2);
-#endif /* NBPFILTER */
+ BRIDGE_BPF_MTAP_INPUT(sc, m);
+
(void) mbuf_setdata(mc2,
(char *)mbuf_data(mc2) + ETHER_HDR_LEN,
mbuf_len(mc2) - ETHER_HDR_LEN);
- (void) mbuf_pkthdr_adjustlen(mc2, - ETHER_HDR_LEN);
+ (void) mbuf_pkthdr_adjustlen(mc2, -ETHER_HDR_LEN);
- (void) ifnet_stat_increment_in(bifp, 1,
+ (void) ifnet_stat_increment_in(bridge_ifp, 1,
mbuf_pkthdr_len(mc2), 0);
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_MCAST)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MCAST)) {
printf("%s: %s mcast for us\n", __func__,
- sc->sc_ifp->if_xname);
+ bridge_ifp->if_xname);
+ }
#endif /* BRIDGE_DEBUG */
- dlil_input_packet_list(bifp, mc2);
+ dlil_input_packet_list(bridge_ifp, mc2);
}
/* Return the original packet for local processing. */
- return (0);
+ return 0;
}
if ((bif->bif_ifflags & IFBIF_STP) &&
bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
BRIDGE_UNLOCK(sc);
- return (0);
+ return 0;
}
#ifdef DEV_CARP
-#define CARP_CHECK_WE_ARE_DST(iface) \
+#define CARP_CHECK_WE_ARE_DST(iface) \
((iface)->if_carp &&\
- carp_forus((iface)->if_carp, eh->ether_dhost))
-#define CARP_CHECK_WE_ARE_SRC(iface) \
+ carp_forus((iface)->if_carp, eh->ether_dhost))
+#define CARP_CHECK_WE_ARE_SRC(iface) \
((iface)->if_carp &&\
- carp_forus((iface)->if_carp, eh->ether_shost))
-#else
-#define CARP_CHECK_WE_ARE_DST(iface) 0
-#define CARP_CHECK_WE_ARE_SRC(iface) 0
-#endif
-
-#ifdef INET6
-#define PFIL_HOOKED_INET6 PFIL_HOOKED(&inet6_pfil_hook)
+ carp_forus((iface)->if_carp, eh->ether_shost))
#else
-#define PFIL_HOOKED_INET6 0
+#define CARP_CHECK_WE_ARE_DST(iface) 0
+#define CARP_CHECK_WE_ARE_SRC(iface) 0
#endif
-#if defined(PFIL_HOOKS)
-#define PFIL_PHYS(sc, ifp, m) do { \
- if (pfil_local_phys && \
- (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) { \
- if (bridge_pfil(&m, NULL, ifp, \
- PFIL_IN) != 0 || m == NULL) { \
- BRIDGE_UNLOCK(sc); \
- return (NULL); \
- } \
- } \
-} while (0)
-#else /* PFIL_HOOKS */
-#define PFIL_PHYS(sc, ifp, m)
-#endif /* PFIL_HOOKS */
-
-#define GRAB_OUR_PACKETS(iface) \
- if ((iface)->if_type == IFT_GIF) \
- continue; \
- /* It is destined for us. */ \
- if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, \
- ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST((iface))) { \
- if ((iface)->if_type == IFT_BRIDGE) { \
- BRIDGE_BPF_MTAP_INPUT(sc, m); \
- /* Filter on the physical interface. */ \
- PFIL_PHYS(sc, iface, m); \
- } \
- if (bif->bif_ifflags & IFBIF_LEARNING) { \
- error = bridge_rtupdate(sc, eh->ether_shost, \
- vlan, bif, 0, IFBAF_DYNAMIC); \
- if (error && bif->bif_addrmax) { \
- BRIDGE_UNLOCK(sc); \
- return (EJUSTRETURN); \
- } \
- } \
- m->m_pkthdr.rcvif = iface; \
- BRIDGE_UNLOCK(sc); \
- return (0); \
- } \
- \
- /* We just received a packet that we sent out. */ \
- if (memcmp(IF_LLADDR((iface)), eh->ether_shost, \
- ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_SRC((iface))) { \
- BRIDGE_UNLOCK(sc); \
- return (EJUSTRETURN); \
+#define PFIL_HOOKED_INET6 PFIL_HOOKED(&inet6_pfil_hook)
+
+#define PFIL_PHYS(sc, ifp, m)
+
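+/*
+ * GRAB_OUR_PACKETS:
+ *	If the frame is addressed to 'iface', learn the source address
+ *	(when learning is enabled), drop the bridge lock and inject the
+ *	frame as input on that member interface.  If the frame was sent
+ *	by 'iface', i.e. we are seeing our own transmission, free it.
+ *	In both cases bridge_input() returns EJUSTRETURN.
+ */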
+#define GRAB_OUR_PACKETS(iface) \
+ if ((iface)->if_type == IFT_GIF) \
+ continue; \
+ /* It is destined for us. */ \
+ if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, \
+ ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST((iface))) { \
+ if ((iface)->if_type == IFT_BRIDGE) { \
+ BRIDGE_BPF_MTAP_INPUT(sc, m); \
+ /* Filter on the physical interface. */ \
+ PFIL_PHYS(sc, iface, m); \
+ } else { \
+ bpf_tap_in(iface, DLT_EN10MB, m, NULL, 0); \
+ } \
+ if (bif->bif_ifflags & IFBIF_LEARNING) { \
+ error = bridge_rtupdate(sc, eh->ether_shost, \
+ vlan, bif, 0, IFBAF_DYNAMIC); \
+ if (error && bif->bif_addrmax) { \
+ BRIDGE_UNLOCK(sc); \
+ m_freem(m); \
+ return (EJUSTRETURN); \
+ } \
+ } \
+ BRIDGE_UNLOCK(sc); \
+ inject_input_packet(iface, m); \
+ return (EJUSTRETURN); \
+ } \
+ \
+ /* We just received a packet that we sent out. */ \
+ if (memcmp(IF_LLADDR((iface)), eh->ether_shost, \
+ ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_SRC((iface))) { \
+ BRIDGE_UNLOCK(sc); \
+ m_freem(m); \
+ return (EJUSTRETURN); \
}
/*
* Unicast.
*/
+
+ /* handle MAC-NAT if enabled */
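+	/*
+	 * bridge_mac_nat_input() returns the member interface, if any, to
+	 * which the inbound packet should be delivered.  If that is the
+	 * MAC-NAT interface itself, the packet continues on the normal
+	 * input path.  Otherwise the packet is handed to that member,
+	 * either injected as input or enqueued for output, and processing
+	 * of the original packet ends here.  A NULL mbuf means the packet
+	 * was consumed.
+	 */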
+ if (is_ifp_mac && sc->sc_mac_nat_bif == bif) {
+ ifnet_t dst_if;
+ boolean_t is_input = FALSE;
+
+ dst_if = bridge_mac_nat_input(sc, data, &is_input);
+ m = *data;
+ if (dst_if == ifp) {
+ /* our input packet */
+ } else if (dst_if != NULL || m == NULL) {
+ BRIDGE_UNLOCK(sc);
+ if (dst_if != NULL) {
+ ASSERT(m != NULL);
+ if (is_input) {
+ inject_input_packet(dst_if, m);
+ } else {
+ (void)bridge_enqueue(bridge_ifp, NULL,
+ dst_if, m,
+ kChecksumOperationClear);
+ }
+ }
+ return EJUSTRETURN;
+ }
+ }
+
/*
- * If the packet is for us, set the packets source as the
- * bridge, and return the packet back to ether_input for
- * local processing.
+ * If the packet is for the bridge, pass it up for local processing.
*/
- if (memcmp(eh->ether_dhost, IF_LLADDR(bifp),
- ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST(bifp)) {
-
- /* Mark the packet as arriving on the bridge interface */
- (void) mbuf_pkthdr_setrcvif(m, bifp);
- mbuf_pkthdr_setheader(m, frame_header);
+ if (memcmp(eh->ether_dhost, IF_LLADDR(bridge_ifp),
+ ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST(bridge_ifp)) {
+ bpf_packet_func bpf_input_func = sc->sc_bpf_input;
/*
* If the interface is learning, and the source
* address is valid and not multicast, record
* the address.
*/
- if (bif->bif_ifflags & IFBIF_LEARNING)
+ if (bif->bif_ifflags & IFBIF_LEARNING) {
(void) bridge_rtupdate(sc, eh->ether_shost,
vlan, bif, 0, IFBAF_DYNAMIC);
-
- BRIDGE_BPF_MTAP_INPUT(sc, m);
-
- (void) mbuf_setdata(m, (char *)mbuf_data(m) + ETHER_HDR_LEN,
- mbuf_len(m) - ETHER_HDR_LEN);
- (void) mbuf_pkthdr_adjustlen(m, - ETHER_HDR_LEN);
-
- (void) ifnet_stat_increment_in(bifp, 1, mbuf_pkthdr_len(m), 0);
-
+ }
BRIDGE_UNLOCK(sc);
-#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_INPUT)
- printf("%s: %s packet for bridge\n", __func__,
- sc->sc_ifp->if_xname);
-#endif /* BRIDGE_DEBUG */
-
- dlil_input_packet_list(bifp, m);
-
- return (EJUSTRETURN);
+ bridge_interface_input(bridge_ifp, m, bpf_input_func);
+ return EJUSTRETURN;
}
/*
* bridge's own MAC address, because the bridge may be
* using the SAME MAC address as one of its interfaces
*/
- if (memcmp(eh->ether_dhost, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0) {
+ if (is_ifp_mac) {
#ifdef VERY_VERY_VERY_DIAGNOSTIC
- printf("%s: not forwarding packet bound for member "
- "interface\n", __func__);
+ printf("%s: not forwarding packet bound for member "
+ "interface\n", __func__);
#endif
- BRIDGE_UNLOCK(sc);
- return (0);
+
+ BRIDGE_UNLOCK(sc);
+ return 0;
}
- /* Now check the all bridge members. */
+ /* Now check the remaining bridge members. */
TAILQ_FOREACH(bif2, &sc->sc_iflist, bif_next) {
- GRAB_OUR_PACKETS(bif2->bif_ifp)
+ if (bif2->bif_ifp != ifp) {
+ GRAB_OUR_PACKETS(bif2->bif_ifp);
+ }
}
#undef CARP_CHECK_WE_ARE_DST
*/
bridge_forward(sc, bif, m);
- return (EJUSTRETURN);
+ return EJUSTRETURN;
}
/*
*/
static void
bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
- struct mbuf *m, int runfilt)
+ struct mbuf *m, int runfilt)
{
-#ifndef PFIL_HOOKS
-#pragma unused(runfilt)
-#endif
+ ifnet_t bridge_ifp;
struct bridge_iflist *dbif, *sbif;
struct mbuf *mc;
+ struct mbuf *mc_in;
struct ifnet *dst_if;
int error = 0, used = 0;
-
- sbif = bridge_lookup_member_if(sc, src_if);
+ boolean_t bridge_if_out;
+ ChecksumOperation cksum_op;
+ struct mac_nat_record mnr;
+ struct bridge_iflist *mac_nat_bif = sc->sc_mac_nat_bif;
+ boolean_t translate_mac = FALSE;
+ uint32_t sc_filter_flags = 0;
+
+ bridge_ifp = sc->sc_ifp;
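+	/*
+	 * For a packet that arrived on a member interface, the checksum
+	 * (if applicable) is already present and valid, so the offload
+	 * flags are simply cleared.  For a packet originated by the
+	 * bridge interface itself, a deferred checksum may still need to
+	 * be finalized.
+	 */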
+ if (src_if != NULL) {
+ bridge_if_out = FALSE;
+ cksum_op = kChecksumOperationClear;
+ sbif = bridge_lookup_member_if(sc, src_if);
+ if (sbif != NULL && mac_nat_bif != NULL && sbif != mac_nat_bif) {
+ /* get the translation record while holding the lock */
+ translate_mac
+ = bridge_mac_nat_output(sc, sbif, &m, &mnr);
+ if (m == NULL) {
+ /* packet was deallocated */
+ BRIDGE_UNLOCK(sc);
+ return;
+ }
+ }
+ } else {
+ /*
+ * src_if is NULL when the bridge interface calls
+ * bridge_broadcast().
+ */
+ bridge_if_out = TRUE;
+ cksum_op = kChecksumOperationFinalize;
+ sbif = NULL;
+ }
BRIDGE_LOCK2REF(sc, error);
if (error) {
return;
}
-#ifdef PFIL_HOOKS
- /* Filter on the bridge interface before broadcasting */
- if (runfilt && (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) {
- if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
- goto out;
- if (m == NULL)
- goto out;
- }
-#endif /* PFIL_HOOKS */
-
TAILQ_FOREACH(dbif, &sc->sc_iflist, bif_next) {
dst_if = dbif->bif_ifp;
- if (dst_if == src_if)
+ if (dst_if == src_if) {
+ /* skip the interface that the packet came in on */
continue;
+ }
/* Private segments can not talk to each other */
- if (sbif &&
- (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE))
+ if (sbif != NULL &&
+ (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE)) {
continue;
+ }
if ((dbif->bif_ifflags & IFBIF_STP) &&
- dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
+ dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
continue;
+ }
if ((dbif->bif_ifflags & IFBIF_DISCOVER) == 0 &&
- (m->m_flags & (M_BCAST|M_MCAST)) == 0)
+ (m->m_flags & (M_BCAST | M_MCAST)) == 0) {
continue;
+ }
- if ((dst_if->if_flags & IFF_RUNNING) == 0)
+ if ((dst_if->if_flags & IFF_RUNNING) == 0) {
continue;
+ }
if (!(dbif->bif_flags & BIFF_MEDIA_ACTIVE)) {
continue;
} else {
mc = m_dup(m, M_DONTWAIT);
if (mc == NULL) {
- (void) ifnet_stat_increment_out(sc->sc_ifp,
+ (void) ifnet_stat_increment_out(bridge_ifp,
0, 0, 1);
continue;
}
}
-#ifdef PFIL_HOOKS
/*
- * Filter on the output interface. Pass a NULL bridge interface
- * pointer so we do not redundantly filter on the bridge for
- * each interface we broadcast on.
+		 * If input broadcast is enabled on this member, also deliver
+		 * a copy of the packet as local input on it, but only for
+		 * packets that arrived on another member, not for packets
+		 * originated by the bridge interface itself.
*/
+ if (!bridge_if_out &&
+ (dbif->bif_flags & BIFF_INPUT_BROADCAST) != 0) {
+ mc_in = m_dup(mc, M_DONTWAIT);
+			/* the copy may fail, but we continue anyway */
+ } else {
+ mc_in = NULL;
+ }
+
+		/* out: forward a copy to the destination member */
+ if (translate_mac && mac_nat_bif == dbif) {
+ /* translate the packet without holding the lock */
+ bridge_mac_nat_translate(&mc, &mnr, IF_LLADDR(dst_if));
+ }
+
+ sc_filter_flags = sc->sc_filter_flags;
if (runfilt &&
- (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) {
+ PF_IS_ENABLED && (sc_filter_flags & IFBF_FILT_MEMBER)) {
if (used == 0) {
/* Keep the layer3 header aligned */
int i = min(mc->m_pkthdr.len, max_protohdr);
mc = m_copyup(mc, i, ETHER_ALIGN);
if (mc == NULL) {
(void) ifnet_stat_increment_out(
- sc->sc_ifp, 0, 0, 1);
+ sc->sc_ifp, 0, 0, 1);
+ if (mc_in != NULL) {
+ m_freem(mc_in);
+ mc_in = NULL;
+ }
continue;
}
}
- if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
+ if (bridge_pf(&mc, dst_if, sc_filter_flags, FALSE) != 0) {
+ if (mc_in != NULL) {
+ m_freem(mc_in);
+ mc_in = NULL;
+ }
continue;
- if (mc == NULL)
+ }
+ if (mc == NULL) {
+ if (mc_in != NULL) {
+ m_freem(mc_in);
+ mc_in = NULL;
+ }
continue;
+ }
}
-#endif /* PFIL_HOOKS */
- (void) bridge_enqueue(sc, dst_if, mc);
- }
- if (used == 0)
+ if (mc != NULL) {
+ (void) bridge_enqueue(bridge_ifp,
+ NULL, dst_if, mc, cksum_op);
+ }
+
+		/* in: deliver the copy as input on the destination member */
+ if (mc_in == NULL) {
+ continue;
+ }
+ bpf_tap_in(dst_if, DLT_EN10MB, mc_in, NULL, 0);
+ mbuf_pkthdr_setrcvif(mc_in, dst_if);
+ mbuf_pkthdr_setheader(mc_in, mbuf_data(mc_in));
+ mbuf_setdata(mc_in, (char *)mbuf_data(mc_in) + ETHER_HDR_LEN,
+ mbuf_len(mc_in) - ETHER_HDR_LEN);
+ mbuf_pkthdr_adjustlen(mc_in, -ETHER_HDR_LEN);
+ mc_in->m_flags |= M_PROTO1; /* set to avoid loops */
+ dlil_input_packet_list(dst_if, mc_in);
+ }
+ if (used == 0) {
m_freem(m);
+ }
-#ifdef PFIL_HOOKS
-out:
-#endif /* PFIL_HOOKS */
BRIDGE_UNREF(sc);
}
struct ifnet *dst_if;
struct mbuf *mc;
- if (TAILQ_EMPTY(&sc->sc_spanlist))
+ if (TAILQ_EMPTY(&sc->sc_spanlist)) {
return;
+ }
TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) {
dst_if = bif->bif_ifp;
- if ((dst_if->if_flags & IFF_RUNNING) == 0)
+ if ((dst_if->if_flags & IFF_RUNNING) == 0) {
continue;
+ }
mc = m_copypacket(m, M_DONTWAIT);
if (mc == NULL) {
continue;
}
- (void) bridge_enqueue(sc, dst_if, mc);
+ (void) bridge_enqueue(sc->sc_ifp, NULL, dst_if, mc,
+ kChecksumOperationNone);
}
}
*/
static int
bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
- struct bridge_iflist *bif, int setflags, uint8_t flags)
+ struct bridge_iflist *bif, int setflags, uint8_t flags)
{
struct bridge_rtnode *brt;
int error;
BRIDGE_LOCK_ASSERT_HELD(sc);
- ASSERT(bridge_in_bsd_mode(sc));
/* Check the source address is valid and not multicast. */
if (ETHER_IS_MULTICAST(dst) ||
(dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
- dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0)
- return (EINVAL);
+ dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0) {
+ return EINVAL;
+ }
/* 802.1p frames map to vlan 1 */
- if (vlan == 0)
+ if (vlan == 0) {
vlan = 1;
+ }
/*
* A route for this destination might already exist. If so,
if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
if (sc->sc_brtcnt >= sc->sc_brtmax) {
sc->sc_brtexceeded++;
- return (ENOSPC);
+ return ENOSPC;
}
/* Check per interface address limits (if enabled) */
if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
bif->bif_addrexceeded++;
- return (ENOSPC);
+ return ENOSPC;
}
/*
* address.
*/
brt = zalloc_noblock(bridge_rtnode_pool);
- if (brt == NULL)
- return (ENOMEM);
+ if (brt == NULL) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_RT_TABLE)) {
+			printf("%s: zalloc_noblock failed\n", __func__);
+ }
+ return ENOMEM;
+ }
bzero(brt, sizeof(struct bridge_rtnode));
- if (bif->bif_ifflags & IFBIF_STICKY)
+ if (bif->bif_ifflags & IFBIF_STICKY) {
brt->brt_flags = IFBAF_STICKY;
- else
+ } else {
brt->brt_flags = IFBAF_DYNAMIC;
+ }
memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
brt->brt_vlan = vlan;
if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
zfree(bridge_rtnode_pool, brt);
- return (error);
+ return error;
}
brt->brt_dst = bif;
bif->bif_addrcnt++;
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_RT_TABLE)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_RT_TABLE)) {
printf("%s: added %02x:%02x:%02x:%02x:%02x:%02x "
"on %s count %u hashsize %u\n", __func__,
dst[0], dst[1], dst[2], dst[3], dst[4], dst[5],
sc->sc_ifp->if_xname, sc->sc_brtcnt,
sc->sc_rthash_size);
+ }
#endif
}
now = (unsigned long) net_uptime();
brt->brt_expire = now + sc->sc_brttimeout;
}
- if (setflags)
+ if (setflags) {
brt->brt_flags = flags;
+ }
- return (0);
+ return 0;
}
/*
BRIDGE_LOCK_ASSERT_HELD(sc);
- if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
- return (NULL);
+ if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL) {
+ return NULL;
+ }
- return (brt->brt_ifp);
+ return brt->brt_ifp;
}
/*
BRIDGE_LOCK_ASSERT_HELD(sc);
/* Make sure we actually need to do this. */
- if (sc->sc_brtcnt <= sc->sc_brtmax)
+ if (sc->sc_brtcnt <= sc->sc_brtmax) {
return;
+ }
/* Force an aging cycle; this might trim enough addresses. */
bridge_rtage(sc);
- if (sc->sc_brtcnt <= sc->sc_brtmax)
+ if (sc->sc_brtcnt <= sc->sc_brtmax) {
return;
+ }
LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
bridge_rtnode_destroy(sc, brt);
- if (sc->sc_brtcnt <= sc->sc_brtmax)
+ if (sc->sc_brtcnt <= sc->sc_brtmax) {
return;
+ }
}
}
}
BRIDGE_LOCK_ASSERT_HELD(sc);
bridge_rtage(sc);
-
if ((sc->sc_ifp->if_flags & IFF_RUNNING) &&
(sc->sc_flags & SCF_DETACHING) == 0) {
sc->sc_aging_timer.bdc_sc = sc;
LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
- if (now >= brt->brt_expire)
+ if (now >= brt->brt_expire) {
bridge_rtnode_destroy(sc, brt);
+ }
}
}
+ if (sc->sc_mac_nat_bif != NULL) {
+ bridge_mac_nat_age_entries(sc, now);
+ }
}
/*
BRIDGE_LOCK_ASSERT_HELD(sc);
LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
- if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
+ if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
bridge_rtnode_destroy(sc, brt);
+ }
}
}
found = 1;
}
- return (found ? 0 : ENOENT);
+ return found ? 0 : ENOENT;
}
/*
* bridge_rtdelete:
*
- * Delete routes to a speicifc member interface.
+ * Delete routes to a specific member interface.
*/
static void
bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
if (brt->brt_ifp == ifp && (full ||
- (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
+ (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) {
bridge_rtnode_destroy(sc, brt);
+ }
}
}
{
u_int32_t i;
- ASSERT(bridge_in_bsd_mode(sc));
-
- sc->sc_rthash = _MALLOC(sizeof (*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
+ sc->sc_rthash = _MALLOC(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
M_DEVBUF, M_WAITOK | M_ZERO);
if (sc->sc_rthash == NULL) {
printf("%s: no memory\n", __func__);
- return (ENOMEM);
+ return ENOMEM;
}
sc->sc_rthash_size = BRIDGE_RTHASH_SIZE;
- for (i = 0; i < sc->sc_rthash_size; i++)
+ for (i = 0; i < sc->sc_rthash_size; i++) {
LIST_INIT(&sc->sc_rthash[i]);
+ }
sc->sc_rthash_key = RandomULong();
LIST_INIT(&sc->sc_rtlist);
- return (0);
+ return 0;
}
/*
/*
* Four entries per hash bucket is our ideal load factor
*/
- if (sc->sc_brtcnt < sc->sc_rthash_size * 4)
+ if (sc->sc_brtcnt < sc->sc_rthash_size * 4) {
goto out;
+ }
/*
* Doubling the number of hash buckets may be too simplistic
sc->sc_flags |= SCF_RESIZING;
BRIDGE_UNLOCK(sc);
- new_rthash = _MALLOC(sizeof (*sc->sc_rthash) * new_rthash_size,
+ new_rthash = _MALLOC(sizeof(*sc->sc_rthash) * new_rthash_size,
M_DEVBUF, M_WAITOK | M_ZERO);
BRIDGE_LOCK(sc);
*/
sc->sc_rthash_key = RandomULong();
- for (i = 0; i < sc->sc_rthash_size; i++)
+ for (i = 0; i < sc->sc_rthash_size; i++) {
LIST_INIT(&sc->sc_rthash[i]);
+ }
- LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
+ LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
LIST_REMOVE(brt, brt_hash);
(void) bridge_rtnode_hash(sc, brt);
}
out:
if (error == 0) {
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_RT_TABLE)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_RT_TABLE)) {
printf("%s: %s new size %u\n", __func__,
sc->sc_ifp->if_xname, sc->sc_rthash_size);
+ }
#endif /* BRIDGE_DEBUG */
- if (old_rthash)
+ if (old_rthash) {
_FREE(old_rthash, M_DEVBUF);
+ }
} else {
#if BRIDGE_DEBUG
printf("%s: %s failed %d\n", __func__,
sc->sc_ifp->if_xname, error);
#endif /* BRIDGE_DEBUG */
- if (new_rthash != NULL)
+ if (new_rthash != NULL) {
_FREE(new_rthash, M_DEVBUF);
+ }
}
}
{
BRIDGE_LOCK_ASSERT_HELD(sc);
- if ((sc->sc_flags & SCF_DETACHING) || (sc->sc_flags & SCF_RESIZING))
+ if ((sc->sc_flags & SCF_DETACHING) || (sc->sc_flags & SCF_RESIZING)) {
return;
+ }
/*
* Four entries per hash bucket is our ideal load factor
*/
- if (sc->sc_brtcnt < sc->sc_rthash_size * 4)
+ if (sc->sc_brtcnt < sc->sc_rthash_size * 4) {
return;
+ }
/*
* Hard limit on the size of the routing hash table
*/
- if (sc->sc_rthash_size >= bridge_rtable_hash_size_max)
+ if (sc->sc_rthash_size >= bridge_rtable_hash_size_max) {
return;
+ }
sc->sc_resize_call.bdc_sc = sc;
sc->sc_resize_call.bdc_func = bridge_rthash_delayed_resize;
* The following hash function is adapted from "Hash Functions" by Bob Jenkins
* ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
*/
-#define mix(a, b, c) \
-do { \
- a -= b; a -= c; a ^= (c >> 13); \
- b -= c; b -= a; b ^= (a << 8); \
- c -= a; c -= b; c ^= (b >> 13); \
- a -= b; a -= c; a ^= (c >> 12); \
- b -= c; b -= a; b ^= (a << 16); \
- c -= a; c -= b; c ^= (b >> 5); \
- a -= b; a -= c; a ^= (c >> 3); \
- b -= c; b -= a; b ^= (a << 10); \
- c -= a; c -= b; c ^= (b >> 15); \
-} while (/*CONSTCOND*/0)
+#define mix(a, b, c) \
+do { \
+ a -= b; a -= c; a ^= (c >> 13); \
+ b -= c; b -= a; b ^= (a << 8); \
+ c -= a; c -= b; c ^= (b >> 13); \
+ a -= b; a -= c; a ^= (c >> 12); \
+ b -= c; b -= a; b ^= (a << 16); \
+ c -= a; c -= b; c ^= (b >> 5); \
+ a -= b; a -= c; a ^= (c >> 3); \
+ b -= c; b -= a; b ^= (a << 10); \
+ c -= a; c -= b; c ^= (b >> 15); \
+} while ( /*CONSTCOND*/ 0)
static __inline uint32_t
bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
mix(a, b, c);
- return (c & BRIDGE_RTHASH_MASK(sc));
+ return c & BRIDGE_RTHASH_MASK(sc);
}
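+
+/*
+ * Illustrative sketch (not part of the bridge code): the mix() step above
+ * can fold a 6-byte Ethernet address and a random key into a bucket index,
+ * assuming a power-of-two table size. The helper name, constants and
+ * parameters below are hypothetical and only make the hashing scheme
+ * concrete.
+ */
+#if 0 /* example only, not compiled */
+static uint32_t
+example_mac_hash(const uint8_t addr[ETHER_ADDR_LEN], uint32_t key,
+    uint32_t nbuckets)
+{
+	/* 0x9e3779b9 is the golden-ratio seed from Jenkins' construction */
+	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key;
+
+	b += (uint32_t)addr[5] << 8;
+	b += addr[4];
+	a += (uint32_t)addr[3] << 24;
+	a += (uint32_t)addr[2] << 16;
+	a += (uint32_t)addr[1] << 8;
+	a += addr[0];
+	mix(a, b, c);
+
+	/* nbuckets must be a power of two for the mask to be valid */
+	return c & (nbuckets - 1);
+}
+#endif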
#undef mix
d = ((int)a[i]) - ((int)b[i]);
}
- return (d);
+ return d;
}
/*
*/
static struct bridge_rtnode *
bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr,
- uint16_t vlan)
+ uint16_t vlan)
{
struct bridge_rtnode *brt;
uint32_t hash;
int dir;
BRIDGE_LOCK_ASSERT_HELD(sc);
- ASSERT(bridge_in_bsd_mode(sc));
hash = bridge_rthash(sc, addr);
LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
- if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
- return (brt);
- if (dir > 0)
- return (NULL);
+ if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0)) {
+ return brt;
+ }
+ if (dir > 0) {
+ return NULL;
+ }
}
- return (NULL);
+ return NULL;
}
/*
dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan) {
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_RT_TABLE)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_RT_TABLE)) {
printf("%s: %s EEXIST "
"%02x:%02x:%02x:%02x:%02x:%02x\n",
__func__, sc->sc_ifp->if_xname,
brt->brt_addr[0], brt->brt_addr[1],
brt->brt_addr[2], brt->brt_addr[3],
brt->brt_addr[4], brt->brt_addr[5]);
+ }
#endif
- return (EEXIST);
+ return EEXIST;
}
if (dir > 0) {
LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
} while (lbrt != NULL);
#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_RT_TABLE)
+ if (IF_BRIDGE_DEBUG(BR_DBGF_RT_TABLE)) {
printf("%s: %s impossible %02x:%02x:%02x:%02x:%02x:%02x\n",
__func__, sc->sc_ifp->if_xname,
brt->brt_addr[0], brt->brt_addr[1], brt->brt_addr[2],
brt->brt_addr[3], brt->brt_addr[4], brt->brt_addr[5]);
+ }
#endif
out:
- return (0);
+ return 0;
}
/*
int error;
error = bridge_rtnode_hash(sc, brt);
- if (error != 0)
- return (error);
+ if (error != 0) {
+ return error;
+ }
LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
sc->sc_brtcnt++;
bridge_rthash_resize(sc);
- return (0);
+ return 0;
}
/*
/* Cap the expiry time to 'age' */
if (brt->brt_ifp == ifp &&
brt->brt_expire > now + age &&
- (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
+ (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
brt->brt_expire = now + age;
+ }
}
}
BRIDGE_UNLOCK(sc);
"discarding"
};
- if (log_stp)
+ if (log_stp) {
log(LOG_NOTICE, "%s: state changed to %s on %s\n",
sc->sc_ifp->if_xname,
stpstates[state], ifp->if_xname);
+ }
}
#endif /* BRIDGESTP */
-#ifdef PFIL_HOOKS
/*
- * Send bridge packets through pfil if they are one of the types pfil can deal
- * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without
- * question.) If *bifp or *ifp are NULL then packet filtering is skipped for
- * that interface.
+ * bridge_set_bpf_tap:
+ *
+ * Sets up the BPF callbacks.
*/
-static int
-bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
+static errno_t
+bridge_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, bpf_packet_func bpf_callback)
{
- int snap, error, i, hlen;
- struct ether_header *eh1, eh2;
- struct ip_fw_args args;
- struct ip *ip;
- struct llc llc1;
- u_int16_t ether_type;
-
- snap = 0;
- error = -1; /* Default error if not error == 0 */
-
-#if 0
- /* we may return with the IP fields swapped, ensure its not shared */
- KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
-#endif
-
- if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0)
- return (0); /* filtering is disabled */
+ struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
- i = min((*mp)->m_pkthdr.len, max_protohdr);
- if ((*mp)->m_len < i) {
- *mp = m_pullup(*mp, i);
- if (*mp == NULL) {
- printf("%s: m_pullup failed\n", __func__);
- return (-1);
- }
+ /* TBD locking */
+ if (sc == NULL || (sc->sc_flags & SCF_DETACHING)) {
+ return ENODEV;
}
+ switch (mode) {
+ case BPF_TAP_DISABLE:
+ sc->sc_bpf_input = sc->sc_bpf_output = NULL;
+ break;
- eh1 = mtod(*mp, struct ether_header *);
- ether_type = ntohs(eh1->ether_type);
-
- /*
- * Check for SNAP/LLC.
- */
- if (ether_type < ETHERMTU) {
- struct llc *llc2 = (struct llc *)(eh1 + 1);
+ case BPF_TAP_INPUT:
+ sc->sc_bpf_input = bpf_callback;
+ break;
- if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
- llc2->llc_dsap == LLC_SNAP_LSAP &&
- llc2->llc_ssap == LLC_SNAP_LSAP &&
- llc2->llc_control == LLC_UI) {
- ether_type = htons(llc2->llc_un.type_snap.ether_type);
- snap = 1;
- }
- }
+ case BPF_TAP_OUTPUT:
+ sc->sc_bpf_output = bpf_callback;
+ break;
- /*
- * If we're trying to filter bridge traffic, don't look at anything
- * other than IP and ARP traffic. If the filter doesn't understand
- * IPv6, don't allow IPv6 through the bridge either. This is lame
- * since if we really wanted, say, an AppleTalk filter, we are hosed,
- * but of course we don't have an AppleTalk filter to begin with.
- * (Note that since pfil doesn't understand ARP it will pass *ALL*
- * ARP traffic.)
- */
- switch (ether_type) {
- case ETHERTYPE_ARP:
- case ETHERTYPE_REVARP:
- if (pfil_ipfw_arp == 0)
- return (0); /* Automatically pass */
- break;
+ case BPF_TAP_INPUT_OUTPUT:
+ sc->sc_bpf_input = sc->sc_bpf_output = bpf_callback;
+ break;
- case ETHERTYPE_IP:
-#if INET6
- case ETHERTYPE_IPV6:
-#endif /* INET6 */
- break;
- default:
- /*
- * Check to see if the user wants to pass non-ip
- * packets, these will not be checked by pfil(9) and
- * passed unconditionally so the default is to drop.
- */
- if (pfil_onlyip)
- goto bad;
+ default:
+ break;
}
- /* Strip off the Ethernet header and keep a copy. */
- m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t)&eh2);
- m_adj(*mp, ETHER_HDR_LEN);
+ return 0;
+}
- /* Strip off snap header, if present */
- if (snap) {
- m_copydata(*mp, 0, sizeof (struct llc), (caddr_t)&llc1);
- m_adj(*mp, sizeof (struct llc));
- }
+/*
+ * bridge_detach:
+ *
+ * Callback when interface has been detached.
+ */
+static void
+bridge_detach(ifnet_t ifp)
+{
+ struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
- /*
- * Check the IP header for alignment and errors
- */
- if (dir == PFIL_IN) {
- switch (ether_type) {
- case ETHERTYPE_IP:
- error = bridge_ip_checkbasic(mp);
- break;
-#if INET6
- case ETHERTYPE_IPV6:
- error = bridge_ip6_checkbasic(mp);
- break;
-#endif /* INET6 */
- default:
- error = 0;
- }
- if (error)
- goto bad;
- }
+#if BRIDGESTP
+ bstp_detach(&sc->sc_stp);
+#endif /* BRIDGESTP */
- if (IPFW_LOADED && pfil_ipfw != 0 && dir == PFIL_OUT && ifp != NULL) {
- error = -1;
- args.rule = ip_dn_claim_rule(*mp);
- if (args.rule != NULL && fw_one_pass)
- goto ipfwpass; /* packet already partially processed */
+ /* Tear down the routing table. */
+ bridge_rtable_fini(sc);
- args.m = *mp;
- args.oif = ifp;
- args.next_hop = NULL;
- args.eh = &eh2;
- args.inp = NULL; /* used by ipfw uid/gid/jail rules */
- i = ip_fw_chk_ptr(&args);
- *mp = args.m;
+ lck_mtx_lock(&bridge_list_mtx);
+ LIST_REMOVE(sc, sc_list);
+ lck_mtx_unlock(&bridge_list_mtx);
- if (*mp == NULL)
- return (error);
+ ifnet_release(ifp);
- if (DUMMYNET_LOADED && (i == IP_FW_DUMMYNET)) {
+ lck_mtx_destroy(&sc->sc_mtx, bridge_lock_grp);
+ if_clone_softc_deallocate(&bridge_cloner, sc);
+}
- /* put the Ethernet header back on */
- M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT, 0);
- if (*mp == NULL)
- return (error);
- bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
+/*
+ * bridge_bpf_input:
+ *
+ * Invoke the input BPF callback if enabled
+ */
+static errno_t
+bridge_bpf_input(ifnet_t ifp, struct mbuf *m, const char * func, int line)
+{
+ struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
+ bpf_packet_func input_func = sc->sc_bpf_input;
- /*
- * Pass the pkt to dummynet, which consumes it. The
- * packet will return to us via bridge_dummynet().
- */
- args.oif = ifp;
- ip_dn_io_ptr(mp, DN_TO_IFB_FWD, &args, DN_CLIENT_IPFW);
- return (error);
+ if (input_func != NULL) {
+ if (mbuf_pkthdr_rcvif(m) != ifp) {
+ printf("%s.%d: rcvif: 0x%llx != ifp 0x%llx\n", func, line,
+ (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m)),
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp));
}
-
- if (i != IP_FW_PASS) /* drop */
- goto bad;
+ (*input_func)(ifp, m);
}
+ return 0;
+}
-ipfwpass:
- error = 0;
-
- /*
- * Run the packet through pfil
- */
- switch (ether_type) {
- case ETHERTYPE_IP:
- /*
- * before calling the firewall, swap fields the same as
- * IP does. here we assume the header is contiguous
- */
- ip = mtod(*mp, struct ip *);
-
- ip->ip_len = ntohs(ip->ip_len);
- ip->ip_off = ntohs(ip->ip_off);
-
- /*
- * Run pfil on the member interface and the bridge, both can
- * be skipped by clearing pfil_member or pfil_bridge.
- *
- * Keep the order:
- * in_if -> bridge_if -> out_if
- */
- if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
- error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
- dir, NULL);
+/*
+ * bridge_bpf_output:
+ *
+ * Invoke the output BPF callback if enabled
+ */
+static errno_t
+bridge_bpf_output(ifnet_t ifp, struct mbuf *m)
+{
+ struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
+ bpf_packet_func output_func = sc->sc_bpf_output;
- if (*mp == NULL || error != 0) /* filter may consume */
- break;
+ if (output_func != NULL) {
+ (*output_func)(ifp, m);
+ }
+ return 0;
+}
- if (pfil_member && ifp != NULL)
- error = pfil_run_hooks(&inet_pfil_hook, mp, ifp,
- dir, NULL);
+/*
+ * bridge_link_event:
+ *
+ * Report a data link event on an interface
+ */
+static void
+bridge_link_event(struct ifnet *ifp, u_int32_t event_code)
+{
+ struct event {
+ u_int32_t ifnet_family;
+ u_int32_t unit;
+ char if_name[IFNAMSIZ];
+ };
+ _Alignas(struct kern_event_msg) char message[sizeof(struct kern_event_msg) + sizeof(struct event)] = { 0 };
+ struct kern_event_msg *header = (struct kern_event_msg*)message;
+ struct event *data = (struct event *)(header + 1);
- if (*mp == NULL || error != 0) /* filter may consume */
- break;
+#if BRIDGE_DEBUG
+ if (IF_BRIDGE_DEBUG(BR_DBGF_LIFECYCLE)) {
+ printf("%s: %s event_code %u - %s\n", __func__, ifp->if_xname,
+ event_code, dlil_kev_dl_code_str(event_code));
+ }
+#endif /* BRIDGE_DEBUG */
- if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
- error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
- dir, NULL);
+ header->total_size = sizeof(message);
+ header->vendor_code = KEV_VENDOR_APPLE;
+ header->kev_class = KEV_NETWORK_CLASS;
+ header->kev_subclass = KEV_DL_SUBCLASS;
+ header->event_code = event_code;
+ data->ifnet_family = ifnet_family(ifp);
+ data->unit = (u_int32_t)ifnet_unit(ifp);
+ strlcpy(data->if_name, ifnet_name(ifp), IFNAMSIZ);
+ ifnet_event(ifp, header);
+}
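+
+/*
+ * Usage sketch (illustrative only): a caller reporting that the bridge
+ * came up would invoke the helper roughly as below, assuming the standard
+ * KEV_DL_LINK_ON data-link event code; the existing call sites elsewhere
+ * in this file are the definitive usage.
+ */
+#if 0 /* example only, not compiled */
+	bridge_link_event(sc->sc_ifp, KEV_DL_LINK_ON);
+#endif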
- if (*mp == NULL || error != 0) /* filter may consume */
- break;
+#define BRIDGE_HF_DROP(reason, func, line) { \
+ bridge_hostfilter_stats.reason++; \
+ if (IF_BRIDGE_DEBUG(BR_DBGF_HOSTFILTER)) { \
+ printf("%s.%d " #reason, func, line); \
+ } \
+ error = EINVAL; \
+ }
- /* check if we need to fragment the packet */
- if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
- i = (*mp)->m_pkthdr.len;
- if (i > ifp->if_mtu) {
- error = bridge_fragment(ifp, *mp, &eh2, snap,
- &llc1);
- return (error);
- }
- }
+/*
+ * Make sure this is a DHCP or Bootp request that matches the host filter
+ */
+static int
+bridge_dhcp_filter(struct bridge_iflist *bif, struct mbuf *m, size_t offset)
+{
+ int error = EINVAL;
+ struct dhcp dhcp;
- /* Recalculate the ip checksum and restore byte ordering */
- ip = mtod(*mp, struct ip *);
- hlen = ip->ip_hl << 2;
- if (hlen < sizeof (struct ip))
- goto bad;
- if (hlen > (*mp)->m_len) {
- if ((*mp = m_pullup(*mp, hlen)) == 0)
- goto bad;
- ip = mtod(*mp, struct ip *);
- if (ip == NULL)
- goto bad;
+ /*
+ * Note: We use the dhcp structure because the bootp structure definition
+ * is larger and some vendors do not pad the request
+ */
+ error = mbuf_copydata(m, offset, sizeof(struct dhcp), &dhcp);
+ if (error != 0) {
+ BRIDGE_HF_DROP(brhf_dhcp_too_small, __func__, __LINE__);
+ goto done;
+ }
+ if (dhcp.dp_op != BOOTREQUEST) {
+ BRIDGE_HF_DROP(brhf_dhcp_bad_op, __func__, __LINE__);
+ goto done;
+ }
+ /*
+ * The hardware address must be an exact match
+ */
+ if (dhcp.dp_htype != ARPHRD_ETHER) {
+ BRIDGE_HF_DROP(brhf_dhcp_bad_htype, __func__, __LINE__);
+ goto done;
+ }
+ if (dhcp.dp_hlen != ETHER_ADDR_LEN) {
+ BRIDGE_HF_DROP(brhf_dhcp_bad_hlen, __func__, __LINE__);
+ goto done;
+ }
+ if (bcmp(dhcp.dp_chaddr, bif->bif_hf_hwsrc,
+ ETHER_ADDR_LEN) != 0) {
+ BRIDGE_HF_DROP(brhf_dhcp_bad_chaddr, __func__, __LINE__);
+ goto done;
+ }
+ /*
+ * Client address must match the host address or be not specified
+ */
+ if (dhcp.dp_ciaddr.s_addr != bif->bif_hf_ipsrc.s_addr &&
+ dhcp.dp_ciaddr.s_addr != INADDR_ANY) {
+ BRIDGE_HF_DROP(brhf_dhcp_bad_ciaddr, __func__, __LINE__);
+ goto done;
+ }
+ error = 0;
+done:
+ return error;
+}
+
+static int
+bridge_host_filter(struct bridge_iflist *bif, mbuf_t *data)
+{
+ int error = EINVAL;
+ struct ether_header *eh;
+ static struct in_addr inaddr_any = { .s_addr = INADDR_ANY };
+ mbuf_t m = *data;
+
+ eh = mtod(m, struct ether_header *);
+
+ /*
+ * Restrict the source hardware address
+ */
+ if ((bif->bif_flags & BIFF_HF_HWSRC) == 0 ||
+ bcmp(eh->ether_shost, bif->bif_hf_hwsrc,
+ ETHER_ADDR_LEN) != 0) {
+ BRIDGE_HF_DROP(brhf_bad_ether_srchw_addr, __func__, __LINE__);
+ goto done;
+ }
+
+ /*
+ * Restrict Ethernet protocols to ARP and IP
+ */
+ if (eh->ether_type == htons(ETHERTYPE_ARP)) {
+ struct ether_arp *ea;
+ size_t minlen = sizeof(struct ether_header) +
+ sizeof(struct ether_arp);
+
+ /*
+ * Make the Ethernet and ARP headers contiguous
+ */
+ if (mbuf_pkthdr_len(m) < minlen) {
+ BRIDGE_HF_DROP(brhf_arp_too_small, __func__, __LINE__);
+ goto done;
+ }
+ if (mbuf_len(m) < minlen && mbuf_pullup(data, minlen) != 0) {
+ BRIDGE_HF_DROP(brhf_arp_pullup_failed,
+ __func__, __LINE__);
+ goto done;
+ }
+ m = *data;
+
+ /*
+ * Verify this is an ethernet/ip arp
+ */
+ eh = mtod(m, struct ether_header *);
+ ea = (struct ether_arp *)(eh + 1);
+ if (ea->arp_hrd != htons(ARPHRD_ETHER)) {
+ BRIDGE_HF_DROP(brhf_arp_bad_hw_type,
+ __func__, __LINE__);
+ goto done;
+ }
+ if (ea->arp_pro != htons(ETHERTYPE_IP)) {
+ BRIDGE_HF_DROP(brhf_arp_bad_pro_type,
+ __func__, __LINE__);
+ goto done;
+ }
+ /*
+ * Verify the address lengths are correct
+ */
+ if (ea->arp_hln != ETHER_ADDR_LEN) {
+ BRIDGE_HF_DROP(brhf_arp_bad_hw_len, __func__, __LINE__);
+ goto done;
+ }
+ if (ea->arp_pln != sizeof(struct in_addr)) {
+ BRIDGE_HF_DROP(brhf_arp_bad_pro_len,
+ __func__, __LINE__);
+ goto done;
+ }
+
+ /*
+ * Allow only ARP request or ARP reply
+ */
+ if (ea->arp_op != htons(ARPOP_REQUEST) &&
+ ea->arp_op != htons(ARPOP_REPLY)) {
+ BRIDGE_HF_DROP(brhf_arp_bad_op, __func__, __LINE__);
+ goto done;
+ }
+ /*
+ * Verify source hardware address matches
+ */
+ if (bcmp(ea->arp_sha, bif->bif_hf_hwsrc,
+ ETHER_ADDR_LEN) != 0) {
+ BRIDGE_HF_DROP(brhf_arp_bad_sha, __func__, __LINE__);
+ goto done;
+ }
+ /*
+ * Verify source protocol address:
+ * May be null for an ARP probe
+ */
+ if (bcmp(ea->arp_spa, &bif->bif_hf_ipsrc.s_addr,
+ sizeof(struct in_addr)) != 0 &&
+ bcmp(ea->arp_spa, &inaddr_any,
+ sizeof(struct in_addr)) != 0) {
+ BRIDGE_HF_DROP(brhf_arp_bad_spa, __func__, __LINE__);
+ goto done;
+ }
+ bridge_hostfilter_stats.brhf_arp_ok += 1;
+ error = 0;
+ } else if (eh->ether_type == htons(ETHERTYPE_IP)) {
+ size_t minlen = sizeof(struct ether_header) + sizeof(struct ip);
+ struct ip iphdr;
+ size_t offset;
+
+ /*
+ * Make the Ethernet and IP headers contiguous
+ */
+ if (mbuf_pkthdr_len(m) < minlen) {
+ BRIDGE_HF_DROP(brhf_ip_too_small, __func__, __LINE__);
+ goto done;
+ }
+ offset = sizeof(struct ether_header);
+ error = mbuf_copydata(m, offset, sizeof(struct ip), &iphdr);
+ if (error != 0) {
+ BRIDGE_HF_DROP(brhf_ip_too_small, __func__, __LINE__);
+ goto done;
+ }
+ /*
+ * Verify the source IP address
+ */
+ if (iphdr.ip_p == IPPROTO_UDP) {
+ struct udphdr udp;
+
+ minlen += sizeof(struct udphdr);
+ if (mbuf_pkthdr_len(m) < minlen) {
+ BRIDGE_HF_DROP(brhf_ip_too_small,
+ __func__, __LINE__);
+ goto done;
+ }
+
+ /*
+ * Allow all zero addresses for DHCP requests
+ */
+ if (iphdr.ip_src.s_addr != bif->bif_hf_ipsrc.s_addr &&
+ iphdr.ip_src.s_addr != INADDR_ANY) {
+ BRIDGE_HF_DROP(brhf_ip_bad_srcaddr,
+ __func__, __LINE__);
+ goto done;
+ }
+ offset = sizeof(struct ether_header) +
+ (IP_VHL_HL(iphdr.ip_vhl) << 2);
+ error = mbuf_copydata(m, offset,
+ sizeof(struct udphdr), &udp);
+ if (error != 0) {
+ BRIDGE_HF_DROP(brhf_ip_too_small,
+ __func__, __LINE__);
+ goto done;
+ }
+ /*
+ * Either it's a Bootp/DHCP packet that we like or
+ * it's a UDP packet with the host IP as its source address
+ */
+ if (udp.uh_sport == htons(IPPORT_BOOTPC) &&
+ udp.uh_dport == htons(IPPORT_BOOTPS)) {
+ minlen += sizeof(struct dhcp);
+ if (mbuf_pkthdr_len(m) < minlen) {
+ BRIDGE_HF_DROP(brhf_ip_too_small,
+ __func__, __LINE__);
+ goto done;
+ }
+ offset += sizeof(struct udphdr);
+ error = bridge_dhcp_filter(bif, m, offset);
+ if (error != 0) {
+ goto done;
+ }
+ } else if (iphdr.ip_src.s_addr == INADDR_ANY) {
+ BRIDGE_HF_DROP(brhf_ip_bad_srcaddr,
+ __func__, __LINE__);
+ goto done;
+ }
+ } else if (iphdr.ip_src.s_addr != bif->bif_hf_ipsrc.s_addr ||
+ bif->bif_hf_ipsrc.s_addr == INADDR_ANY) {
+ BRIDGE_HF_DROP(brhf_ip_bad_srcaddr, __func__, __LINE__);
+ goto done;
+ }
+ /*
+ * Allow only boring IP protocols
+ */
+ if (iphdr.ip_p != IPPROTO_TCP &&
+ iphdr.ip_p != IPPROTO_UDP &&
+ iphdr.ip_p != IPPROTO_ICMP &&
+ iphdr.ip_p != IPPROTO_ESP &&
+ iphdr.ip_p != IPPROTO_AH &&
+ iphdr.ip_p != IPPROTO_GRE) {
+ BRIDGE_HF_DROP(brhf_ip_bad_proto, __func__, __LINE__);
+ goto done;
+ }
+ bridge_hostfilter_stats.brhf_ip_ok += 1;
+ error = 0;
+ } else {
+ BRIDGE_HF_DROP(brhf_bad_ether_type, __func__, __LINE__);
+ goto done;
+ }
+done:
+ if (error != 0) {
+ /* the chain may have been freed or replaced by mbuf_pullup() */
+ m = *data;
+ if (IF_BRIDGE_DEBUG(BR_DBGF_HOSTFILTER)) {
+ if (m) {
+ printf_mbuf_data(m, 0,
+ sizeof(struct ether_header) +
+ sizeof(struct ip));
+ }
+ printf("\n");
+ }
+
+ if (m != NULL) {
+ m_freem(m);
+ *data = NULL;
+ }
+ }
+ return error;
+}
+
+/*
+ * MAC NAT
+ */
+
+static errno_t
+bridge_mac_nat_enable(struct bridge_softc *sc, struct bridge_iflist *bif)
+{
+ errno_t error = 0;
+
+ BRIDGE_LOCK_ASSERT_HELD(sc);
+
+ if (sc->sc_mac_nat_bif != NULL) {
+ if (sc->sc_mac_nat_bif != bif) {
+ error = EBUSY;
+ }
+ goto done;
+ }
+ sc->sc_mac_nat_bif = bif;
+ bif->bif_ifflags |= IFBIF_MAC_NAT;
+ bridge_mac_nat_populate_entries(sc);
+
+done:
+ return error;
+}
+
+static void
+bridge_mac_nat_disable(struct bridge_softc *sc)
+{
+ struct bridge_iflist *mac_nat_bif = sc->sc_mac_nat_bif;
+
+ assert(mac_nat_bif != NULL);
+ bridge_mac_nat_flush_entries(sc, mac_nat_bif);
+ mac_nat_bif->bif_ifflags &= ~IFBIF_MAC_NAT;
+ sc->sc_mac_nat_bif = NULL;
+ return;
+}
+
+static void
+mac_nat_entry_print2(struct mac_nat_entry *mne,
+ char *ifname, const char *msg1, const char *msg2)
+{
+ int af;
+ char etopbuf[24];
+ char ntopbuf[MAX_IPv6_STR_LEN];
+ const char *space;
+
+ af = ((mne->mne_flags & MNE_FLAGS_IPV6) != 0) ? AF_INET6 : AF_INET;
+ ether_ntop(etopbuf, sizeof(etopbuf), mne->mne_mac);
+ (void)inet_ntop(af, &mne->mne_u, ntopbuf, sizeof(ntopbuf));
+ if (msg2 == NULL) {
+ msg2 = "";
+ space = "";
+ } else {
+ space = " ";
+ }
+ printf("%s %s%s%s %p (%s, %s, %s)\n",
+ ifname, msg1, space, msg2, mne, mne->mne_bif->bif_ifp->if_xname,
+ ntopbuf, etopbuf);
+}
+
+static void
+mac_nat_entry_print(struct mac_nat_entry *mne,
+ char *ifname, const char *msg)
+{
+ mac_nat_entry_print2(mne, ifname, msg, NULL);
+}
+
+static struct mac_nat_entry *
+bridge_lookup_mac_nat_entry(struct bridge_softc *sc, int af, void * ip)
+{
+ struct mac_nat_entry *mne;
+ struct mac_nat_entry *ret_mne = NULL;
+
+ if (af == AF_INET) {
+ in_addr_t s_addr = ((struct in_addr *)ip)->s_addr;
+
+ LIST_FOREACH(mne, &sc->sc_mne_list, mne_list) {
+ if (mne->mne_ip.s_addr == s_addr) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ mac_nat_entry_print(mne, sc->sc_if_xname,
+ "found");
+ }
+ ret_mne = mne;
+ break;
+ }
+ }
+ } else {
+ const struct in6_addr *ip6 = (const struct in6_addr *)ip;
+
+ LIST_FOREACH(mne, &sc->sc_mne_list_v6, mne_list) {
+ if (IN6_ARE_ADDR_EQUAL(&mne->mne_ip6, ip6)) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ mac_nat_entry_print(mne, sc->sc_if_xname,
+ "found");
+ }
+ ret_mne = mne;
+ break;
+ }
+ }
+ }
+ return ret_mne;
+}
+
+static void
+bridge_destroy_mac_nat_entry(struct bridge_softc *sc,
+ struct mac_nat_entry *mne, const char *reason)
+{
+ LIST_REMOVE(mne, mne_list);
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ mac_nat_entry_print(mne, sc->sc_if_xname, reason);
+ }
+ zfree(bridge_mne_pool, mne);
+ sc->sc_mne_count--;
+}
+
+static struct mac_nat_entry *
+bridge_create_mac_nat_entry(struct bridge_softc *sc,
+ struct bridge_iflist *bif, int af, const void *ip, uint8_t *eaddr)
+{
+ struct mac_nat_entry_list *list;
+ struct mac_nat_entry *mne;
+
+ if (sc->sc_mne_count >= sc->sc_mne_max) {
+ sc->sc_mne_allocation_failures++;
+ return NULL;
+ }
+ mne = zalloc_noblock(bridge_mne_pool);
+ if (mne == NULL) {
+ sc->sc_mne_allocation_failures++;
+ return NULL;
+ }
+ sc->sc_mne_count++;
+ bzero(mne, sizeof(*mne));
+ bcopy(eaddr, mne->mne_mac, sizeof(mne->mne_mac));
+ mne->mne_bif = bif;
+ if (af == AF_INET) {
+ bcopy(ip, &mne->mne_ip, sizeof(mne->mne_ip));
+ list = &sc->sc_mne_list;
+ } else {
+ bcopy(ip, &mne->mne_ip6, sizeof(mne->mne_ip6));
+ mne->mne_flags |= MNE_FLAGS_IPV6;
+ list = &sc->sc_mne_list_v6;
+ }
+ LIST_INSERT_HEAD(list, mne, mne_list);
+ mne->mne_expire = (unsigned long)net_uptime() + sc->sc_brttimeout;
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ mac_nat_entry_print(mne, sc->sc_if_xname, "created");
+ }
+ return mne;
+}
+
+static struct mac_nat_entry *
+bridge_update_mac_nat_entry(struct bridge_softc *sc,
+ struct bridge_iflist *bif, int af, void *ip, uint8_t *eaddr)
+{
+ struct mac_nat_entry *mne;
+
+ mne = bridge_lookup_mac_nat_entry(sc, af, ip);
+ if (mne != NULL) {
+ struct bridge_iflist *mac_nat_bif = sc->sc_mac_nat_bif;
+
+ if (mne->mne_bif == mac_nat_bif) {
+ /* the MAC NAT interface takes precedence */
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ if (mne->mne_bif != bif) {
+ mac_nat_entry_print2(mne,
+ sc->sc_if_xname, "reject",
+ bif->bif_ifp->if_xname);
+ }
+ }
+ } else if (mne->mne_bif != bif) {
+ const char *old_if = mne->mne_bif->bif_ifp->if_xname;
+
+ mne->mne_bif = bif;
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ mac_nat_entry_print2(mne,
+ sc->sc_if_xname, "replaced",
+ old_if);
+ }
+ bcopy(eaddr, mne->mne_mac, sizeof(mne->mne_mac));
+ }
+ mne->mne_expire = (unsigned long)net_uptime() +
+ sc->sc_brttimeout;
+ } else {
+ mne = bridge_create_mac_nat_entry(sc, bif, af, ip, eaddr);
+ }
+ return mne;
+}
+
+static void
+bridge_mac_nat_flush_entries_common(struct bridge_softc *sc,
+ struct mac_nat_entry_list *list, struct bridge_iflist *bif)
+{
+ struct mac_nat_entry *mne;
+ struct mac_nat_entry *tmne;
+
+ LIST_FOREACH_SAFE(mne, list, mne_list, tmne) {
+ if (bif != NULL && mne->mne_bif != bif) {
+ continue;
+ }
+ bridge_destroy_mac_nat_entry(sc, mne, "flushed");
+ }
+}
+
+/*
+ * bridge_mac_nat_flush_entries:
+ *
+ * Flush MAC NAT entries for the specified member. If the member is the
+ * one that requires MAC NAT, flush all entries; otherwise flush only the
+ * entries that belong to that member.
+ */
+static void
+bridge_mac_nat_flush_entries(struct bridge_softc *sc, struct bridge_iflist * bif)
+{
+ struct bridge_iflist *flush_bif;
+
+ flush_bif = (bif == sc->sc_mac_nat_bif) ? NULL : bif;
+ bridge_mac_nat_flush_entries_common(sc, &sc->sc_mne_list, flush_bif);
+ bridge_mac_nat_flush_entries_common(sc, &sc->sc_mne_list_v6, flush_bif);
+}
+
+static void
+bridge_mac_nat_populate_entries(struct bridge_softc *sc)
+{
+ errno_t error;
+ ifnet_t ifp;
+ ifaddr_t *list;
+ struct bridge_iflist *mac_nat_bif = sc->sc_mac_nat_bif;
+
+ assert(mac_nat_bif != NULL);
+ ifp = mac_nat_bif->bif_ifp;
+ error = ifnet_get_address_list(ifp, &list);
+ if (error != 0) {
+ printf("%s: ifnet_get_address_list(%s) failed %d\n",
+ __func__, ifp->if_xname, error);
+ return;
+ }
+ for (ifaddr_t *scan = list; *scan != NULL; scan++) {
+ sa_family_t af;
+ void *ip;
+
+ union {
+ struct sockaddr sa;
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ } u;
+ af = ifaddr_address_family(*scan);
+ switch (af) {
+ case AF_INET:
+ case AF_INET6:
+ error = ifaddr_address(*scan, &u.sa, sizeof(u));
+ if (error != 0) {
+ printf("%s: ifaddr_address failed %d\n",
+ __func__, error);
+ break;
+ }
+ if (af == AF_INET) {
+ ip = (void *)&u.sin.sin_addr;
+ } else {
+ if (IN6_IS_ADDR_LINKLOCAL(&u.sin6.sin6_addr)) {
+ /* remove scope ID */
+ u.sin6.sin6_addr.s6_addr16[1] = 0;
+ }
+ ip = (void *)&u.sin6.sin6_addr;
+ }
+ bridge_create_mac_nat_entry(sc, mac_nat_bif, af, ip,
+ (uint8_t *)IF_LLADDR(ifp));
+ break;
+ default:
+ break;
+ }
+ }
+ ifnet_free_address_list(list);
+ return;
+}
+
+static void
+bridge_mac_nat_age_entries_common(struct bridge_softc *sc,
+ struct mac_nat_entry_list *list, unsigned long now)
+{
+ struct mac_nat_entry *mne;
+ struct mac_nat_entry *tmne;
+
+ LIST_FOREACH_SAFE(mne, list, mne_list, tmne) {
+ if (now >= mne->mne_expire) {
+ bridge_destroy_mac_nat_entry(sc, mne, "aged out");
+ }
+ }
+}
+
+static void
+bridge_mac_nat_age_entries(struct bridge_softc *sc, unsigned long now)
+{
+ if (sc->sc_mac_nat_bif == NULL) {
+ return;
+ }
+ bridge_mac_nat_age_entries_common(sc, &sc->sc_mne_list, now);
+ bridge_mac_nat_age_entries_common(sc, &sc->sc_mne_list_v6, now);
+}
+
+static const char *
+get_in_out_string(boolean_t is_output)
+{
+ return is_output ? "OUT" : "IN";
+}
+
+/*
+ * is_valid_arp_packet:
+ * Verify that this is a valid ARP packet.
+ *
+ * Returns TRUE if the packet is valid, FALSE otherwise.
+ */
+static boolean_t
+is_valid_arp_packet(mbuf_t *data, boolean_t is_output,
+ struct ether_header **eh_p, struct ether_arp **ea_p)
+{
+ struct ether_arp *ea;
+ struct ether_header *eh;
+ size_t minlen = sizeof(struct ether_header) + sizeof(struct ether_arp);
+ boolean_t is_valid = FALSE;
+ int flags = is_output ? BR_DBGF_OUTPUT : BR_DBGF_INPUT;
+
+ if (mbuf_pkthdr_len(*data) < minlen) {
+ if (IF_BRIDGE_DEBUG(flags)) {
+ printf("%s: ARP %s short frame %lu < %lu\n",
+ __func__,
+ get_in_out_string(is_output),
+ mbuf_pkthdr_len(*data), minlen);
+ }
+ goto done;
+ }
+ if (mbuf_len(*data) < minlen && mbuf_pullup(data, minlen) != 0) {
+ if (IF_BRIDGE_DEBUG(flags)) {
+ printf("%s: ARP %s size %lu mbuf_pullup fail\n",
+ __func__,
+ get_in_out_string(is_output),
+ minlen);
+ }
+ *data = NULL;
+ goto done;
+ }
+
+ /* validate ARP packet */
+ eh = mtod(*data, struct ether_header *);
+ ea = (struct ether_arp *)(eh + 1);
+ if (ntohs(ea->arp_hrd) != ARPHRD_ETHER) {
+ if (IF_BRIDGE_DEBUG(flags)) {
+ printf("%s: ARP %s htype not ethernet\n",
+ __func__,
+ get_in_out_string(is_output));
+ }
+ goto done;
+ }
+ if (ea->arp_hln != ETHER_ADDR_LEN) {
+ if (IF_BRIDGE_DEBUG(flags)) {
+ printf("%s: ARP %s hlen not ethernet\n",
+ __func__,
+ get_in_out_string(is_output));
+ }
+ goto done;
+ }
+ if (ntohs(ea->arp_pro) != ETHERTYPE_IP) {
+ if (IF_BRIDGE_DEBUG(flags)) {
+ printf("%s: ARP %s ptype not IP\n",
+ __func__,
+ get_in_out_string(is_output));
+ }
+ goto done;
+ }
+ if (ea->arp_pln != sizeof(struct in_addr)) {
+ if (IF_BRIDGE_DEBUG(flags)) {
+ printf("%s: ARP %s plen not IP\n",
+ __func__,
+ get_in_out_string(is_output));
+ }
+ goto done;
+ }
+ is_valid = TRUE;
+ *ea_p = ea;
+ *eh_p = eh;
+done:
+ return is_valid;
+}
+
+static struct mac_nat_entry *
+bridge_mac_nat_arp_input(struct bridge_softc *sc, mbuf_t *data)
+{
+ struct ether_arp *ea;
+ struct ether_header *eh;
+ struct mac_nat_entry *mne = NULL;
+ u_short op;
+ struct in_addr tpa;
+
+ if (!is_valid_arp_packet(data, FALSE, &eh, &ea)) {
+ goto done;
+ }
+ op = ntohs(ea->arp_op);
+ switch (op) {
+ case ARPOP_REQUEST:
+ case ARPOP_REPLY:
+ /* only care about REQUEST and REPLY */
+ break;
+ default:
+ goto done;
+ }
+
+ /* check the target IP address for a NAT entry */
+ bcopy(ea->arp_tpa, &tpa, sizeof(tpa));
+ if (tpa.s_addr != 0) {
+ mne = bridge_lookup_mac_nat_entry(sc, AF_INET, &tpa);
+ }
+ if (mne != NULL) {
+ if (op == ARPOP_REPLY) {
+ /* translate the MAC address */
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ char mac_src[24];
+ char mac_dst[24];
+
+ ether_ntop(mac_src, sizeof(mac_src),
+ ea->arp_tha);
+ ether_ntop(mac_dst, sizeof(mac_dst),
+ mne->mne_mac);
+ printf("%s %s ARP %s -> %s\n",
+ sc->sc_if_xname,
+ mne->mne_bif->bif_ifp->if_xname,
+ mac_src, mac_dst);
+ }
+ bcopy(mne->mne_mac, ea->arp_tha, sizeof(ea->arp_tha));
+ }
+ } else {
+ /* handle conflicting ARP (sender matches mne) */
+ struct in_addr spa;
+
+ bcopy(ea->arp_spa, &spa, sizeof(spa));
+ if (spa.s_addr != 0 && spa.s_addr != tpa.s_addr) {
+ /* check the source IP for a NAT entry */
+ mne = bridge_lookup_mac_nat_entry(sc, AF_INET, &spa);
+ }
+ }
+
+done:
+ return mne;
+}
+
+static boolean_t
+bridge_mac_nat_arp_output(struct bridge_softc *sc,
+ struct bridge_iflist *bif, mbuf_t *data, struct mac_nat_record *mnr)
+{
+ struct ether_arp *ea;
+ struct ether_header *eh;
+ struct in_addr ip;
+ struct mac_nat_entry *mne = NULL;
+ u_short op;
+ boolean_t translate = FALSE;
+
+ if (!is_valid_arp_packet(data, TRUE, &eh, &ea)) {
+ goto done;
+ }
+ op = ntohs(ea->arp_op);
+ switch (op) {
+ case ARPOP_REQUEST:
+ case ARPOP_REPLY:
+ /* only care about REQUEST and REPLY */
+ break;
+ default:
+ goto done;
+ }
+
+ bcopy(ea->arp_spa, &ip, sizeof(ip));
+ if (ip.s_addr == 0) {
+ goto done;
+ }
+ /* XXX validate IP address: no multicast/broadcast */
+ mne = bridge_update_mac_nat_entry(sc, bif, AF_INET, &ip, ea->arp_sha);
+ if (mnr != NULL && mne != NULL) {
+ /* record the offset to do the replacement */
+ translate = TRUE;
+ mnr->mnr_arp_offset = (char *)ea->arp_sha - (char *)eh;
+ }
+
+done:
+ return translate;
+}
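+
+/*
+ * Worked example (sketch): for an Ethernet/IPv4 ARP packet the recorded
+ * mnr_arp_offset is the distance from the start of the Ethernet header to
+ * the sender hardware address, i.e. sizeof(struct ether_header) (14 bytes)
+ * plus the 8-byte fixed ARP header that precedes arp_sha, giving 22.
+ * The translation step later rewrites ETHER_ADDR_LEN bytes at that offset.
+ * The compile-time check below is illustrative only.
+ */
+#if 0 /* example only, not compiled */
+_Static_assert(sizeof(struct ether_header) +
+    offsetof(struct ether_arp, arp_sha) == 22,
+    "Ethernet/IPv4 ARP sender hardware address offset");
+#endif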
+
+#define ETHER_IPV4_HEADER_LEN (sizeof(struct ether_header) + \
+ sizeof(struct ip))
+static struct ether_header *
+get_ether_ip_header(mbuf_t *data, boolean_t is_output)
+{
+ struct ether_header *eh = NULL;
+ int flags = is_output ? BR_DBGF_OUTPUT : BR_DBGF_INPUT;
+ size_t minlen = ETHER_IPV4_HEADER_LEN;
+
+ if (mbuf_pkthdr_len(*data) < minlen) {
+ if (IF_BRIDGE_DEBUG(flags)) {
+ printf("%s: IP %s short frame %lu < %lu\n",
+ __func__,
+ get_in_out_string(is_output),
+ mbuf_pkthdr_len(*data), minlen);
}
- ip->ip_len = htons(ip->ip_len);
- ip->ip_off = htons(ip->ip_off);
- ip->ip_sum = 0;
- if (hlen == sizeof (struct ip))
- ip->ip_sum = in_cksum_hdr(ip);
- else
- ip->ip_sum = in_cksum(*mp, hlen);
+ goto done;
+ }
+ if (mbuf_len(*data) < minlen && mbuf_pullup(data, minlen) != 0) {
+ if (IF_BRIDGE_DEBUG(flags)) {
+ printf("%s: IP %s size %lu mbuf_pullup fail\n",
+ __func__,
+ get_in_out_string(is_output),
+ minlen);
+ }
+ *data = NULL;
+ goto done;
+ }
+ eh = mtod(*data, struct ether_header *);
+done:
+ return eh;
+}
+
+static boolean_t
+is_broadcast_ip_packet(mbuf_t *data)
+{
+ struct ether_header *eh;
+ uint16_t ether_type;
+ boolean_t is_broadcast = FALSE;
+ eh = mtod(*data, struct ether_header *);
+ ether_type = ntohs(eh->ether_type);
+ switch (ether_type) {
+ case ETHERTYPE_IP:
+ eh = get_ether_ip_header(data, FALSE);
+ if (eh != NULL) {
+ struct in_addr dst;
+ struct ip *iphdr;
+
+ iphdr = (struct ip *)(void *)(eh + 1);
+ bcopy(&iphdr->ip_dst, &dst, sizeof(dst));
+ is_broadcast = (dst.s_addr == INADDR_BROADCAST);
+ }
break;
-#if INET6
- case ETHERTYPE_IPV6:
- if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
- error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
- dir, NULL);
+ default:
+ break;
+ }
+ return is_broadcast;
+}
- if (*mp == NULL || error != 0) /* filter may consume */
- break;
+static struct mac_nat_entry *
+bridge_mac_nat_ip_input(struct bridge_softc *sc, mbuf_t *data)
+{
+ struct in_addr dst;
+ struct ether_header *eh;
+ struct ip *iphdr;
+ struct mac_nat_entry *mne = NULL;
+
+ eh = get_ether_ip_header(data, FALSE);
+ if (eh == NULL) {
+ goto done;
+ }
+ iphdr = (struct ip *)(void *)(eh + 1);
+ bcopy(&iphdr->ip_dst, &dst, sizeof(dst));
+ /* XXX validate IP address */
+ if (dst.s_addr == 0) {
+ goto done;
+ }
+ mne = bridge_lookup_mac_nat_entry(sc, AF_INET, &dst);
+done:
+ return mne;
+}
+
+static void
+bridge_mac_nat_udp_output(struct bridge_softc *sc,
+ struct bridge_iflist *bif, mbuf_t m,
+ uint8_t ip_header_len, struct mac_nat_record *mnr)
+{
+ uint16_t dp_flags;
+ errno_t error;
+ size_t offset;
+ struct udphdr udphdr;
+
+ /* copy the UDP header */
+ offset = sizeof(struct ether_header) + ip_header_len;
+ error = mbuf_copydata(m, offset, sizeof(struct udphdr), &udphdr);
+ if (error != 0) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ printf("%s: mbuf_copydata udphdr failed %d",
+ __func__, error);
+ }
+ return;
+ }
+ if (ntohs(udphdr.uh_sport) != IPPORT_BOOTPC ||
+ ntohs(udphdr.uh_dport) != IPPORT_BOOTPS) {
+ /* not a BOOTP/DHCP packet */
+ return;
+ }
+ /* check whether the broadcast bit is already set */
+ offset += sizeof(struct udphdr) + offsetof(struct dhcp, dp_flags);
+ error = mbuf_copydata(m, offset, sizeof(dp_flags), &dp_flags);
+ if (error != 0) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ printf("%s: mbuf_copydata dp_flags failed %d",
+ __func__, error);
+ }
+ return;
+ }
+ if ((ntohs(dp_flags) & DHCP_FLAGS_BROADCAST) != 0) {
+ /* it's already set, nothing to do */
+ return;
+ }
+ /* broadcast bit needs to be set */
+ mnr->mnr_ip_dhcp_flags = dp_flags | htons(DHCP_FLAGS_BROADCAST);
+ mnr->mnr_ip_header_len = ip_header_len;
+ if (udphdr.uh_sum != 0) {
+ uint16_t delta;
+
+ /* adjust checksum to take modified dp_flags into account */
+ delta = dp_flags - mnr->mnr_ip_dhcp_flags;
+ mnr->mnr_ip_udp_csum = udphdr.uh_sum + delta;
+ }
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ printf("%s %s DHCP dp_flags 0x%x UDP cksum 0x%x\n",
+ sc->sc_if_xname,
+ bif->bif_ifp->if_xname,
+ ntohs(mnr->mnr_ip_dhcp_flags),
+ ntohs(mnr->mnr_ip_udp_csum));
+ }
+ return;
+}
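+
+/*
+ * Illustrative sketch (separate from the code above): the general way to
+ * update a UDP or IP checksum incrementally when a single 16-bit field
+ * changes is the RFC 1624 formula HC' = ~(~HC + ~m + m'), computed in
+ * one's-complement arithmetic. The helper below is hypothetical and only
+ * demonstrates the arithmetic; all values are taken in network byte order.
+ */
+#if 0 /* example only, not compiled */
+static uint16_t
+example_cksum_adjust(uint16_t cksum, uint16_t old_field, uint16_t new_field)
+{
+	uint32_t sum;
+
+	sum = (uint16_t)~cksum + (uint16_t)~old_field + new_field;
+	sum = (sum & 0xffff) + (sum >> 16); /* fold the carry */
+	sum = (sum & 0xffff) + (sum >> 16); /* fold a possible second carry */
+	return (uint16_t)~sum;
+}
+#endif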
+
+static boolean_t
+bridge_mac_nat_ip_output(struct bridge_softc *sc,
+ struct bridge_iflist *bif, mbuf_t *data, struct mac_nat_record *mnr)
+{
+#pragma unused(mnr)
+ struct ether_header *eh;
+ struct in_addr ip;
+ struct ip *iphdr;
+ uint8_t ip_header_len;
+ struct mac_nat_entry *mne = NULL;
+ boolean_t translate = FALSE;
+
+ eh = get_ether_ip_header(data, TRUE);
+ if (eh == NULL) {
+ goto done;
+ }
+ iphdr = (struct ip *)(void *)(eh + 1);
+ ip_header_len = IP_VHL_HL(iphdr->ip_vhl) << 2;
+ if (ip_header_len < sizeof(ip)) {
+ /* bogus IP header */
+ goto done;
+ }
+ bcopy(&iphdr->ip_src, &ip, sizeof(ip));
+ /* XXX validate the source address */
+ if (ip.s_addr != 0) {
+ mne = bridge_update_mac_nat_entry(sc, bif, AF_INET, &ip,
+ eh->ether_shost);
+ }
+ if (mnr != NULL) {
+ if (iphdr->ip_p == IPPROTO_UDP) {
+ /* handle DHCP must broadcast */
+ bridge_mac_nat_udp_output(sc, bif, *data,
+ ip_header_len, mnr);
+ }
+ translate = TRUE;
+ }
+done:
+ return translate;
+}
+
+#define ETHER_IPV6_HEADER_LEN (sizeof(struct ether_header) + \
+ sizeof(struct ip6_hdr))
+static struct ether_header *
+get_ether_ipv6_header(mbuf_t *data, boolean_t is_output)
+{
+ struct ether_header *eh = NULL;
+ int flags = is_output ? BR_DBGF_OUTPUT : BR_DBGF_INPUT;
+ size_t minlen = ETHER_IPV6_HEADER_LEN;
+
+ if (mbuf_pkthdr_len(*data) < minlen) {
+ if (IF_BRIDGE_DEBUG(flags)) {
+ printf("%s: IP %s short frame %lu < %lu\n",
+ __func__,
+ get_in_out_string(is_output),
+ mbuf_pkthdr_len(*data), minlen);
+ }
+ goto done;
+ }
+ if (mbuf_len(*data) < minlen && mbuf_pullup(data, minlen) != 0) {
+ if (IF_BRIDGE_DEBUG(flags)) {
+ printf("%s: IP %s size %lu mbuf_pullup fail\n",
+ __func__,
+ get_in_out_string(is_output),
+ minlen);
+ }
+ *data = NULL;
+ goto done;
+ }
+ eh = mtod(*data, struct ether_header *);
+done:
+ return eh;
+}
+
+#include <netinet/icmp6.h>
+#include <netinet6/nd6.h>
+
+#define ETHER_ND_LLADDR_LEN (ETHER_ADDR_LEN + sizeof(struct nd_opt_hdr))
+
+static void
+bridge_mac_nat_icmpv6_output(struct bridge_softc *sc, struct bridge_iflist *bif,
+ mbuf_t *data, struct ether_header *eh,
+ struct ip6_hdr *ip6h, struct in6_addr *saddrp, struct mac_nat_record *mnr)
+{
+ struct icmp6_hdr *icmp6;
+ unsigned int icmp6len;
+ int lladdrlen = 0;
+ char *lladdr = NULL;
+ mbuf_t m = *data;
+ unsigned int off = sizeof(*ip6h);
+
+ icmp6len = m->m_pkthdr.len - sizeof(*eh) - off;
+ if (icmp6len < sizeof(*icmp6)) {
+ printf("%s: short packet %d < %lu\n", __func__,
+ icmp6len, sizeof(*icmp6));
+ return;
+ }
+ icmp6 = (struct icmp6_hdr *)((caddr_t)ip6h + off);
+ switch (icmp6->icmp6_type) {
+ case ND_NEIGHBOR_SOLICIT: {
+ struct nd_neighbor_solicit *nd_ns;
+ union nd_opts ndopts;
+ boolean_t is_dad_probe;
+ struct in6_addr taddr;
+
+ if (icmp6len < sizeof(*nd_ns)) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ printf("%s: short nd_ns %d < %lu\n", __func__,
+ icmp6len, sizeof(*nd_ns));
+ }
+ return;
+ }
+
+ nd_ns = (struct nd_neighbor_solicit *)(void *)icmp6;
+ bcopy(&nd_ns->nd_ns_target, &taddr, sizeof(taddr));
+ if (IN6_IS_ADDR_MULTICAST(&taddr) ||
+ IN6_IS_ADDR_UNSPECIFIED(&taddr)) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ printf("%s: invalid target ignored\n", __func__);
+ }
+ return;
+ }
+ /* parse options */
+ nd6_option_init(nd_ns + 1, icmp6len - sizeof(*nd_ns), &ndopts);
+ if (nd6_options(&ndopts) < 0) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ printf("%s: invalid ND6 NS option\n", __func__);
+ }
+ return;
+ }
+ if (ndopts.nd_opts_src_lladdr != NULL) {
+ lladdr = (char *)(ndopts.nd_opts_src_lladdr + 1);
+ lladdrlen = ndopts.nd_opts_src_lladdr->nd_opt_len << 3;
+ }
+ is_dad_probe = IN6_IS_ADDR_UNSPECIFIED(saddrp);
+ if (lladdr != NULL) {
+ if (is_dad_probe) {
+ printf("%s: bad ND6 DAD packet\n", __func__);
+ return;
+ }
+ if (lladdrlen != ETHER_ND_LLADDR_LEN) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ printf("%s: source lladdrlen %d != %lu\n",
+ __func__,
+ lladdrlen, ETHER_ND_LLADDR_LEN);
+ }
+ return;
+ }
+ mnr->mnr_ip6_lladdr_offset = (void *)lladdr -
+ (void *)eh;
+ mnr->mnr_ip6_icmp6_len = icmp6len;
+ mnr->mnr_ip6_icmp6_type = icmp6->icmp6_type;
+ mnr->mnr_ip6_header_len = off;
+ }
+ if (is_dad_probe) {
+ /* the node is trying to use taddr; create an mne using taddr */
+ *saddrp = taddr;
+ }
+ break;
+ }
+ case ND_NEIGHBOR_ADVERT: {
+ struct nd_neighbor_advert *nd_na;
+ union nd_opts ndopts;
+ struct in6_addr taddr;
+
+
+ nd_na = (struct nd_neighbor_advert *)(void *)icmp6;
+
+ if (icmp6len < sizeof(*nd_na)) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ printf("%s: short nd_na %d < %lu\n", __func__,
+ icmp6len, sizeof(*nd_na));
+ }
+ return;
+ }
+
+ bcopy(&nd_na->nd_na_target, &taddr, sizeof(taddr));
+ if (IN6_IS_ADDR_MULTICAST(&taddr) ||
+ IN6_IS_ADDR_UNSPECIFIED(&taddr)) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ printf("%s: invalid target ignored\n", __func__);
+ }
+ return;
+ }
+ /* parse options */
+ nd6_option_init(nd_na + 1, icmp6len - sizeof(*nd_na), &ndopts);
+ if (nd6_options(&ndopts) < 0) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ printf("%s: invalid ND6 NA option\n", __func__);
+ }
+ return;
+ }
+ if (ndopts.nd_opts_tgt_lladdr == NULL) {
+ /* no target link-layer address option, nothing to do */
+ return;
+ }
+ lladdr = (char *)(ndopts.nd_opts_tgt_lladdr + 1);
+ lladdrlen = ndopts.nd_opts_tgt_lladdr->nd_opt_len << 3;
+ if (lladdrlen != ETHER_ND_LLADDR_LEN) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ printf("%s: target lladdrlen %d != %lu\n",
+ __func__, lladdrlen, ETHER_ND_LLADDR_LEN);
+ }
+ return;
+ }
+ mnr->mnr_ip6_lladdr_offset = (void *)lladdr - (void *)eh;
+ mnr->mnr_ip6_icmp6_len = icmp6len;
+ mnr->mnr_ip6_header_len = off;
+ mnr->mnr_ip6_icmp6_type = icmp6->icmp6_type;
+ break;
+ }
+ case ND_ROUTER_SOLICIT: {
+ struct nd_router_solicit *nd_rs;
+ union nd_opts ndopts;
+
+ if (icmp6len < sizeof(*nd_rs)) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ printf("%s: short nd_rs %d < %lu\n", __func__,
+ icmp6len, sizeof(*nd_rs));
+ }
+ return;
+ }
+ nd_rs = (struct nd_router_solicit *)(void *)icmp6;
- if (pfil_member && ifp != NULL)
- error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
- dir, NULL);
+ /* parse options */
+ nd6_option_init(nd_rs + 1, icmp6len - sizeof(*nd_rs), &ndopts);
+ if (nd6_options(&ndopts) < 0) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ printf("%s: invalid ND6 RS option\n", __func__);
+ }
+ return;
+ }
+ if (ndopts.nd_opts_src_lladdr != NULL) {
+ lladdr = (char *)(ndopts.nd_opts_src_lladdr + 1);
+ lladdrlen = ndopts.nd_opts_src_lladdr->nd_opt_len << 3;
+ }
+ if (lladdr != NULL) {
+ if (lladdrlen != ETHER_ND_LLADDR_LEN) {
+ if (IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ printf("%s: source lladdrlen %d != %lu\n",
+ __func__,
+ lladdrlen, ETHER_ND_LLADDR_LEN);
+ }
+ return;
+ }
+ mnr->mnr_ip6_lladdr_offset = (void *)lladdr -
+ (void *)eh;
+ mnr->mnr_ip6_icmp6_len = icmp6len;
+ mnr->mnr_ip6_icmp6_type = icmp6->icmp6_type;
+ mnr->mnr_ip6_header_len = off;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ if (mnr->mnr_ip6_lladdr_offset != 0 &&
+ IF_BRIDGE_DEBUG(BR_DBGF_MAC_NAT)) {
+ const char *str;
- if (*mp == NULL || error != 0) /* filter may consume */
+ switch (mnr->mnr_ip6_icmp6_type) {
+ case ND_ROUTER_SOLICIT:
+ str = "ROUTER SOLICIT";
+ break;
+ case ND_NEIGHBOR_ADVERT:
+ str = "NEIGHBOR ADVERT";
+ break;
+ case ND_NEIGHBOR_SOLICIT:
+ str = "NEIGHBOR SOLICIT";
break;
+ default:
+ str = "";
+ break;
+ }
+ printf("%s %s %s ip6len %d icmp6len %d lladdr offset %d\n",
+ sc->sc_if_xname, bif->bif_ifp->if_xname, str,
+ mnr->mnr_ip6_header_len,
+ mnr->mnr_ip6_icmp6_len, mnr->mnr_ip6_lladdr_offset);
+ }
+}
+
+static struct mac_nat_entry *
+bridge_mac_nat_ipv6_input(struct bridge_softc *sc, mbuf_t *data)
+{
+ struct in6_addr dst;
+ struct ether_header *eh;
+ struct ip6_hdr *ip6h;
+ struct mac_nat_entry *mne = NULL;
+
+ eh = get_ether_ipv6_header(data, FALSE);
+ if (eh == NULL) {
+ goto done;
+ }
+ ip6h = (struct ip6_hdr *)(void *)(eh + 1);
+ bcopy(&ip6h->ip6_dst, &dst, sizeof(dst));
+ /* XXX validate IPv6 address */
+ if (IN6_IS_ADDR_UNSPECIFIED(&dst)) {
+ goto done;
+ }
+ mne = bridge_lookup_mac_nat_entry(sc, AF_INET6, &dst);
- if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
- error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
- dir, NULL);
+done:
+ return mne;
+}
+
+static boolean_t
+bridge_mac_nat_ipv6_output(struct bridge_softc *sc,
+ struct bridge_iflist *bif, mbuf_t *data, struct mac_nat_record *mnr)
+{
+ struct ether_header *eh;
+ struct ip6_hdr *ip6h;
+ struct in6_addr saddr;
+ boolean_t translate;
+
+ translate = (bif == sc->sc_mac_nat_bif) ? FALSE : TRUE;
+ eh = get_ether_ipv6_header(data, TRUE);
+ if (eh == NULL) {
+ translate = FALSE;
+ goto done;
+ }
+ ip6h = (struct ip6_hdr *)(void *)(eh + 1);
+ bcopy(&ip6h->ip6_src, &saddr, sizeof(saddr));
+ if (mnr != NULL && ip6h->ip6_nxt == IPPROTO_ICMPV6) {
+ bridge_mac_nat_icmpv6_output(sc, bif, data,
+ eh, ip6h, &saddr, mnr);
+ }
+ if (IN6_IS_ADDR_UNSPECIFIED(&saddr)) {
+ goto done;
+ }
+ (void)bridge_update_mac_nat_entry(sc, bif, AF_INET6, &saddr,
+ eh->ether_shost);
+
+done:
+ return translate;
+}
+
+/*
+ * bridge_mac_nat_input:
+ * Process a packet arriving on the MAC NAT interface (sc_mac_nat_bif).
+ * This interface is the "external" interface with respect to NAT.
+ * The interface can only receive packets addressed to a single MAC
+ * address (e.g. a Wi-Fi STA interface).
+ *
+ * When a packet arrives on the external interface, look up the destination
+ * IP address in the mac_nat_entry table. If there is a match, the
+ * destination MAC address of a unicast packet is rewritten to that of the
+ * matching host, and *is_input is set to TRUE if the match is for the
+ * MAC NAT interface itself, FALSE otherwise.
+ *
+ * Returns:
+ * The internal interface to direct the packet to, or NULL if the packet
+ * should not be redirected.
+ *
+ * *data may be updated to point at a different mbuf chain, or set to NULL
+ * if the chain was deallocated during processing.
+ */
+static ifnet_t
+bridge_mac_nat_input(struct bridge_softc *sc, mbuf_t *data,
+ boolean_t *is_input)
+{
+ ifnet_t dst_if = NULL;
+ struct ether_header *eh;
+ uint16_t ether_type;
+ boolean_t is_unicast;
+ mbuf_t m = *data;
+ struct mac_nat_entry *mne = NULL;
+
+ BRIDGE_LOCK_ASSERT_HELD(sc);
+ *is_input = FALSE;
+ assert(sc->sc_mac_nat_bif != NULL);
+ is_unicast = ((m->m_flags & (M_BCAST | M_MCAST)) == 0);
+ eh = mtod(m, struct ether_header *);
+ ether_type = ntohs(eh->ether_type);
+ switch (ether_type) {
+ case ETHERTYPE_ARP:
+ mne = bridge_mac_nat_arp_input(sc, data);
+ break;
+ case ETHERTYPE_IP:
+ if (is_unicast) {
+ mne = bridge_mac_nat_ip_input(sc, data);
+ }
+ break;
+ case ETHERTYPE_IPV6:
+ if (is_unicast) {
+ mne = bridge_mac_nat_ipv6_input(sc, data);
+ }
+ break;
+ default:
+ break;
+ }
+ if (mne != NULL) {
+ if (is_unicast) {
+ if (m != *data) {
+ /* it may have changed */
+ eh = mtod(*data, struct ether_header *);
+ }
+ bcopy(mne->mne_mac, eh->ether_dhost,
+ sizeof(eh->ether_dhost));
+ }
+ dst_if = mne->mne_bif->bif_ifp;
+ *is_input = (mne->mne_bif == sc->sc_mac_nat_bif);
+ }
+ return dst_if;
+}
+
+/*
+ * bridge_mac_nat_output:
+ * Process a packet destined to the MAC NAT interface (sc_mac_nat_bif)
+ * from the interface 'bif'.
+ *
+ * Create a mac_nat_entry containing the source IP address and MAC address
+ * from the packet. Populate a mac_nat_record with information detailing
+ * how to translate the packet. Translation takes place later when
+ * the bridge lock is no longer held.
+ *
+ * If 'bif' == sc_mac_nat_bif, the stack over the MAC NAT interface is
+ * generating an output packet. No translation is required in this case;
+ * we just record the IP address in use to prevent another bif from
+ * claiming it.
+ *
+ * Returns:
+ * TRUE if the packet should be translated (*mnr updated as well),
+ * FALSE otherwise.
+ *
+ * *data may be updated to point at a different mbuf chain or NULL if
+ * the chain was deallocated during processing.
+ */
+
+static boolean_t
+bridge_mac_nat_output(struct bridge_softc *sc,
+ struct bridge_iflist *bif, mbuf_t *data, struct mac_nat_record *mnr)
+{
+ struct ether_header *eh;
+ uint16_t ether_type;
+ boolean_t translate = FALSE;
+
+ BRIDGE_LOCK_ASSERT_HELD(sc);
+ assert(sc->sc_mac_nat_bif != NULL);
+
+ eh = mtod(*data, struct ether_header *);
+ ether_type = ntohs(eh->ether_type);
+ if (mnr != NULL) {
+ bzero(mnr, sizeof(*mnr));
+ mnr->mnr_ether_type = ether_type;
+ }
+ switch (ether_type) {
+ case ETHERTYPE_ARP:
+ translate = bridge_mac_nat_arp_output(sc, bif, data, mnr);
+ break;
+ case ETHERTYPE_IP:
+ translate = bridge_mac_nat_ip_output(sc, bif, data, mnr);
+ break;
+ case ETHERTYPE_IPV6:
+ translate = bridge_mac_nat_ipv6_output(sc, bif, data, mnr);
break;
-#endif
default:
- error = 0;
break;
}
+ return translate;
+}
- if (*mp == NULL)
- return (error);
- if (error != 0)
- goto bad;
+static void
+bridge_mac_nat_arp_translate(mbuf_t *data, struct mac_nat_record *mnr,
+ const caddr_t eaddr)
+{
+ errno_t error;
- error = -1;
+ if (mnr->mnr_arp_offset == 0) {
+ return;
+ }
+ /* replace the source hardware address */
+ error = mbuf_copyback(*data, mnr->mnr_arp_offset,
+ ETHER_ADDR_LEN, eaddr,
+ MBUF_DONTWAIT);
+ if (error != 0) {
+ printf("%s: mbuf_copyback failed\n",
+ __func__);
+ m_freem(*data);
+ *data = NULL;
+ }
+ return;
+}
+
+static void
+bridge_mac_nat_ip_translate(mbuf_t *data, struct mac_nat_record *mnr)
+{
+ errno_t error;
+ size_t offset;
+
+ if (mnr->mnr_ip_header_len == 0) {
+ return;
+ }
+ /* update the UDP checksum */
+ offset = sizeof(struct ether_header) + mnr->mnr_ip_header_len;
+ error = mbuf_copyback(*data, offset + offsetof(struct udphdr, uh_sum),
+ sizeof(mnr->mnr_ip_udp_csum),
+ &mnr->mnr_ip_udp_csum,
+ MBUF_DONTWAIT);
+ if (error != 0) {
+ printf("%s: mbuf_copyback uh_sum failed\n",
+ __func__);
+ m_freem(*data);
+ *data = NULL;
+ }
+ /* update the DHCP must broadcast flag */
+ offset += sizeof(struct udphdr);
+ error = mbuf_copyback(*data, offset + offsetof(struct dhcp, dp_flags),
+ sizeof(mnr->mnr_ip_dhcp_flags),
+ &mnr->mnr_ip_dhcp_flags,
+ MBUF_DONTWAIT);
+ if (error != 0) {
+ printf("%s: mbuf_copyback dp_flags failed\n",
+ __func__);
+ m_freem(*data);
+ *data = NULL;
+ }
+}
+
+static void
+bridge_mac_nat_ipv6_translate(mbuf_t *data, struct mac_nat_record *mnr,
+ const caddr_t eaddr)
+{
+ uint16_t cksum;
+ errno_t error;
+ mbuf_t m = *data;
+
+ if (mnr->mnr_ip6_header_len == 0) {
+ return;
+ }
+ switch (mnr->mnr_ip6_icmp6_type) {
+ case ND_ROUTER_SOLICIT:
+ case ND_NEIGHBOR_SOLICIT:
+ case ND_NEIGHBOR_ADVERT:
+ if (mnr->mnr_ip6_lladdr_offset == 0) {
+ /* nothing to do */
+ return;
+ }
+ break;
+ default:
+ return;
+ }
+
+ /*
+ * replace the lladdr
+ */
+ error = mbuf_copyback(m, mnr->mnr_ip6_lladdr_offset,
+ ETHER_ADDR_LEN, eaddr,
+ MBUF_DONTWAIT);
+ if (error != 0) {
+ printf("%s: mbuf_copyback lladdr failed\n",
+ __func__);
+ m_freem(m);
+ *data = NULL;
+ return;
+ }
/*
- * Finally, put everything back the way it was and return
+ * recompute the icmp6 checksum
*/
- if (snap) {
- M_PREPEND(*mp, sizeof (struct llc), M_DONTWAIT, 0);
- if (*mp == NULL)
- return (error);
- bcopy(&llc1, mtod(*mp, caddr_t), sizeof (struct llc));
+
+ /* skip past the ethernet header */
+ mbuf_setdata(m, (char *)mbuf_data(m) + ETHER_HDR_LEN,
+ mbuf_len(m) - ETHER_HDR_LEN);
+ mbuf_pkthdr_adjustlen(m, -ETHER_HDR_LEN);
+
+#define CKSUM_OFFSET_ICMP6 offsetof(struct icmp6_hdr, icmp6_cksum)
+ /* set the checksum to zero */
+ cksum = 0;
+ error = mbuf_copyback(m, mnr->mnr_ip6_header_len + CKSUM_OFFSET_ICMP6,
+ sizeof(cksum), &cksum, MBUF_DONTWAIT);
+ if (error != 0) {
+ printf("%s: mbuf_copyback cksum=0 failed\n",
+ __func__);
+ m_freem(m);
+ *data = NULL;
+ return;
}
+ /* compute and set the new checksum */
+ cksum = in6_cksum(m, IPPROTO_ICMPV6, mnr->mnr_ip6_header_len,
+ mnr->mnr_ip6_icmp6_len);
+ error = mbuf_copyback(m, mnr->mnr_ip6_header_len + CKSUM_OFFSET_ICMP6,
+ sizeof(cksum), &cksum, MBUF_DONTWAIT);
+ if (error != 0) {
+ printf("%s: mbuf_copyback cksum failed\n",
+ __func__);
+ m_freem(m);
+ *data = NULL;
+ return;
+ }
+ /* restore the ethernet header */
+ mbuf_setdata(m, (char *)mbuf_data(m) - ETHER_HDR_LEN,
+ mbuf_len(m) + ETHER_HDR_LEN);
+ mbuf_pkthdr_adjustlen(m, ETHER_HDR_LEN);
+ return;
+}
- M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT, 0);
- if (*mp == NULL)
- return (error);
- bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
+static void
+bridge_mac_nat_translate(mbuf_t *data, struct mac_nat_record *mnr,
+ const caddr_t eaddr)
+{
+ struct ether_header *eh;
+
+ /* replace the source ethernet address with the single MAC */
+ eh = mtod(*data, struct ether_header *);
+ bcopy(eaddr, eh->ether_shost, sizeof(eh->ether_shost));
+ switch (mnr->mnr_ether_type) {
+ case ETHERTYPE_ARP:
+ bridge_mac_nat_arp_translate(data, mnr, eaddr);
+ break;
- return (0);
+ case ETHERTYPE_IP:
+ bridge_mac_nat_ip_translate(data, mnr);
+ break;
-bad:
- m_freem(*mp);
- *mp = NULL;
- return (error);
+ case ETHERTYPE_IPV6:
+ bridge_mac_nat_ipv6_translate(data, mnr, eaddr);
+ break;
+
+ default:
+ break;
+ }
+ return;
}
+/*
+ * bridge packet filtering
+ */
+
/*
* Perform basic checks on header size since
* pfil assumes ip_input has already processed
int len, hlen;
u_short sum;
- if (*mp == NULL)
- return (-1);
+ if (*mp == NULL) {
+ return -1;
+ }
if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
/* max_linkhdr is already rounded up to nearest 4-byte */
- if ((m = m_copyup(m, sizeof (struct ip),
+ if ((m = m_copyup(m, sizeof(struct ip),
max_linkhdr)) == NULL) {
/* XXXJRT new stat, please */
ipstat.ips_toosmall++;
goto bad;
}
- } else if (__predict_false(m->m_len < sizeof (struct ip))) {
- if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
+ } else if (OS_EXPECT((size_t)m->m_len < sizeof(struct ip), 0)) {
+ if ((m = m_pullup(m, sizeof(struct ip))) == NULL) {
ipstat.ips_toosmall++;
goto bad;
}
}
ip = mtod(m, struct ip *);
- if (ip == NULL) goto bad;
+ if (ip == NULL) {
+ goto bad;
+ }
- if (ip->ip_v != IPVERSION) {
+ if (IP_VHL_V(ip->ip_vhl) != IPVERSION) {
ipstat.ips_badvers++;
goto bad;
}
- hlen = ip->ip_hl << 2;
- if (hlen < sizeof (struct ip)) { /* minimum header length */
+ hlen = IP_VHL_HL(ip->ip_vhl) << 2;
+ if (hlen < (int)sizeof(struct ip)) { /* minimum header length */
ipstat.ips_badhlen++;
goto bad;
}
goto bad;
}
ip = mtod(m, struct ip *);
- if (ip == NULL) goto bad;
+ if (ip == NULL) {
+ goto bad;
+ }
}
if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
} else {
- if (hlen == sizeof (struct ip)) {
+ if (hlen == sizeof(struct ip)) {
sum = in_cksum_hdr(ip);
} else {
sum = in_cksum(m, hlen);
}
}
- if (sum) {
- ipstat.ips_badsum++;
+ if (sum) {
+ ipstat.ips_badsum++;
+ goto bad;
+ }
+
+ /* Retrieve the packet length. */
+ len = ntohs(ip->ip_len);
+
+ /*
+ * Check for additional length bogosity
+ */
+ if (len < hlen) {
+ ipstat.ips_badlen++;
+ goto bad;
+ }
+
+ /*
+ * Check that the amount of data in the buffers
+ * is at least as much as the IP header would have us expect.
+ * Drop the packet if it is shorter than we expect.
+ */
+ if (m->m_pkthdr.len < len) {
+ ipstat.ips_tooshort++;
+ goto bad;
+ }
+
+ /* Checks out, proceed */
+ *mp = m;
+ return 0;
+
+bad:
+ *mp = m;
+ return -1;
+}
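+
+/*
+ * Illustrative sketch: the header checksum verified above is the usual
+ * RFC 1071 one's-complement sum over the IPv4 header; a minimal standalone
+ * version over a contiguous, 16-bit aligned header could look like the
+ * hypothetical helper below (in_cksum_hdr() and in_cksum() are the
+ * implementations actually used above).
+ */
+#if 0 /* example only, not compiled */
+static uint16_t
+example_ip_hdr_cksum(const void *hdr, int hlen)
+{
+	const uint16_t *p = hdr;
+	uint32_t sum = 0;
+
+	/* hlen is a multiple of 4 for a valid IPv4 header, so no odd byte */
+	for (; hlen > 1; hlen -= 2) {
+		sum += *p++;
+	}
+	while (sum >> 16) {
+		sum = (sum & 0xffff) + (sum >> 16); /* end-around carry */
+	}
+	return (uint16_t)~sum;
+}
+#endif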
+
+/*
+ * Same as above, but for IPv6.
+ * Cut-and-pasted from ip6_input.c.
+ * XXX Should we update ip6stat, or not?
+ */
+static int
+bridge_ip6_checkbasic(struct mbuf **mp)
+{
+ struct mbuf *m = *mp;
+ struct ip6_hdr *ip6;
+
+ /*
+ * If the IPv6 header is not aligned, slurp it up into a new
+ * mbuf with space for link headers, in the event we forward
+ * it. Otherwise, if it is aligned, make sure the entire base
+ * IPv6 header is in the first mbuf of the chain.
+ */
+ if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
+ struct ifnet *inifp = m->m_pkthdr.rcvif;
+ /* max_linkhdr is already rounded up to nearest 4-byte */
+ if ((m = m_copyup(m, sizeof(struct ip6_hdr),
+ max_linkhdr)) == NULL) {
+ /* XXXJRT new stat, please */
+ ip6stat.ip6s_toosmall++;
+ in6_ifstat_inc(inifp, ifs6_in_hdrerr);
+ goto bad;
+ }
+ } else if (OS_EXPECT((size_t)m->m_len < sizeof(struct ip6_hdr), 0)) {
+ struct ifnet *inifp = m->m_pkthdr.rcvif;
+ if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
+ ip6stat.ip6s_toosmall++;
+ in6_ifstat_inc(inifp, ifs6_in_hdrerr);
+ goto bad;
+ }
+ }
+
+ ip6 = mtod(m, struct ip6_hdr *);
+
+ if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
+ ip6stat.ip6s_badvers++;
+ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
+ goto bad;
+ }
+
+ /* Checks out, proceed */
+ *mp = m;
+ return 0;
+
+bad:
+ *mp = m;
+ return -1;
+}
+
+/*
+ * The PF routines expect to be called from ip_input, so we
+ * need to do, and then undo, some of the same processing here.
+ *
+ * XXX : this is heavily inspired by bridge_pfil()
+ */
+static int
+bridge_pf(struct mbuf **mp, struct ifnet *ifp, uint32_t sc_filter_flags,
+ int input)
+{
+ /*
+ * XXX : mpetit : heavily inspired by bridge_pfil()
+ */
+
+ int snap, error, i, hlen;
+ struct ether_header *eh1, eh2;
+ struct ip *ip;
+ struct llc llc1;
+ u_int16_t ether_type;
+
+ snap = 0;
+ error = -1; /* default to an error unless explicitly cleared below */
+
+ if ((sc_filter_flags & IFBF_FILT_MEMBER) == 0) {
+ return 0; /* filtering is disabled */
+ }
+ i = min((*mp)->m_pkthdr.len, max_protohdr);
+ if ((*mp)->m_len < i) {
+ *mp = m_pullup(*mp, i);
+ if (*mp == NULL) {
+ printf("%s: m_pullup failed\n", __func__);
+ return -1;
+ }
+ }
+
+ eh1 = mtod(*mp, struct ether_header *);
+ ether_type = ntohs(eh1->ether_type);
+
+ /*
+ * Check for SNAP/LLC.
+ */
+ if (ether_type < ETHERMTU) {
+ struct llc *llc2 = (struct llc *)(eh1 + 1);
+
+ if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
+ llc2->llc_dsap == LLC_SNAP_LSAP &&
+ llc2->llc_ssap == LLC_SNAP_LSAP &&
+ llc2->llc_control == LLC_UI) {
+ ether_type = htons(llc2->llc_un.type_snap.ether_type);
+ snap = 1;
+ }
+ }
+
+ /*
+ * If we're trying to filter bridge traffic, don't look at anything
+ * other than IP and ARP traffic. If the filter doesn't understand
+ * IPv6, don't allow IPv6 through the bridge either. This is lame
+ * since if we really wanted, say, an AppleTalk filter, we are hosed,
+ * but of course we don't have an AppleTalk filter to begin with.
+ * (Note that since pfil doesn't understand ARP it will pass *ALL*
+ * ARP traffic.)
+ */
+ switch (ether_type) {
+ case ETHERTYPE_ARP:
+ case ETHERTYPE_REVARP:
+ return 0; /* Automatically pass */
+
+ case ETHERTYPE_IP:
+ case ETHERTYPE_IPV6:
+ break;
+ default:
+ /*
+ * Check to see if the user wants to pass non-IP
+ * packets; these are not checked by pf and would be
+ * passed unconditionally, so the default is to drop.
+ */
+ if ((sc_filter_flags & IFBF_FILT_ONLYIP)) {
+ goto bad;
+ }
+ break;
+ }
+
+ /* Strip off the Ethernet header and keep a copy. */
+ m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t)&eh2);
+ m_adj(*mp, ETHER_HDR_LEN);
+
+ /* Strip off snap header, if present */
+ if (snap) {
+ m_copydata(*mp, 0, sizeof(struct llc), (caddr_t)&llc1);
+ m_adj(*mp, sizeof(struct llc));
+ }
+
+ /*
+ * Check the IP header for alignment and errors
+ */
+ switch (ether_type) {
+ case ETHERTYPE_IP:
+ error = bridge_ip_checkbasic(mp);
+ break;
+ case ETHERTYPE_IPV6:
+ error = bridge_ip6_checkbasic(mp);
+ break;
+ default:
+ error = 0;
+ break;
+ }
+ if (error) {
+ goto bad;
+ }
+
+ error = 0;
+
+ /*
+ * Run the packet through pf rules
+ */
+ switch (ether_type) {
+ case ETHERTYPE_IP:
+ /*
+ * before calling the firewall, swap fields the same as
+ * IP does. here we assume the header is contiguous
+ */
+ ip = mtod(*mp, struct ip *);
+
+ ip->ip_len = ntohs(ip->ip_len);
+ ip->ip_off = ntohs(ip->ip_off);
+
+ if (ifp != NULL) {
+ error = pf_af_hook(ifp, 0, mp, AF_INET, input, NULL);
+ }
+
+ if (*mp == NULL || error != 0) { /* filter may consume */
+ break;
+ }
+
+ /* Recalculate the ip checksum and restore byte ordering */
+ ip = mtod(*mp, struct ip *);
+ hlen = IP_VHL_HL(ip->ip_vhl) << 2;
+ if (hlen < (int)sizeof(struct ip)) {
+ goto bad;
+ }
+ if (hlen > (*mp)->m_len) {
+ if ((*mp = m_pullup(*mp, hlen)) == 0) {
+ goto bad;
+ }
+ ip = mtod(*mp, struct ip *);
+ if (ip == NULL) {
+ goto bad;
+ }
+ }
+ ip->ip_len = htons(ip->ip_len);
+ ip->ip_off = htons(ip->ip_off);
+ ip->ip_sum = 0;
+ if (hlen == sizeof(struct ip)) {
+ ip->ip_sum = in_cksum_hdr(ip);
+ } else {
+ ip->ip_sum = in_cksum(*mp, hlen);
+ }
+ break;
+
+ case ETHERTYPE_IPV6:
+ if (ifp != NULL) {
+ error = pf_af_hook(ifp, 0, mp, AF_INET6, input, NULL);
+ }
+
+ if (*mp == NULL || error != 0) { /* filter may consume */
+ break;
+ }
+ break;
+ default:
+ error = 0;
+ break;
+ }
+
+ if (*mp == NULL) {
+ return error;
+ }
+ if (error != 0) {
goto bad;
}
- /* Retrieve the packet length. */
- len = ntohs(ip->ip_len);
+ error = -1;
/*
- * Check for additional length bogosity
+ * Finally, put everything back the way it was and return
*/
- if (len < hlen) {
- ipstat.ips_badlen++;
- goto bad;
+ if (snap) {
+ M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT, 0);
+ if (*mp == NULL) {
+ return error;
+ }
+ bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
}
- /*
- * Check that the amount of data in the buffers
- * is as at least much as the IP header would have us expect.
- * Drop packet if shorter than we expect.
- */
- if (m->m_pkthdr.len < len) {
- ipstat.ips_tooshort++;
- goto bad;
+ M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT, 0);
+ if (*mp == NULL) {
+ return error;
}
+ bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
- /* Checks out, proceed */
- *mp = m;
- return (0);
+ return 0;
bad:
- *mp = m;
- return (-1);
+ m_freem(*mp);
+ *mp = NULL;
+ return error;
}
-#if INET6
/*
- * Same as above, but for IPv6.
- * Cut-and-pasted from ip6_input.c.
- * XXX Should we update ip6stat, or not?
+ * Copyright (C) 2014, Stefano Garzarella - Universita` di Pisa.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
*/
-static int
-bridge_ip6_checkbasic(struct mbuf **mp)
+
+/*
+ * XXX-ste: Maybe this function should be moved into kern/uipc_mbuf.c
+ *
+ * Create a queue of packets/segments which fit the given mss + hdr_len.
+ * m0 points to the mbuf chain to be segmented.
+ * This function splits the payload (m0->m_pkthdr.len - hdr_len)
+ * into segments of at most mss bytes and copies the first hdr_len bytes
+ * from m0 to the top of each segment.
+ * If hdr2_buf is not NULL (hdr2_len is the buffer length), it is copied
+ * into each segment after the first hdr_len bytes.
+ *
+ * Returns the new queue of segments on success, or NULL on failure
+ * (in which case the mbuf queue is freed).
+ * nsegs contains the number of segments generated.
+ */
+
+static struct mbuf *
+m_seg(struct mbuf *m0, int hdr_len, int mss, int *nsegs,
+ char * hdr2_buf, int hdr2_len)
{
- struct mbuf *m = *mp;
- struct ip6_hdr *ip6;
+ int off = 0, n, firstlen;
+ struct mbuf **mnext, *mseg;
+ int total_len = m0->m_pkthdr.len;
/*
- * If the IPv6 header is not aligned, slurp it up into a new
- * mbuf with space for link headers, in the event we forward
- * it. Otherwise, if it is aligned, make sure the entire base
- * IPv6 header is in the first mbuf of the chain.
+ * Segmentation is not needed
*/
- if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
- struct ifnet *inifp = m->m_pkthdr.rcvif;
- /* max_linkhdr is already rounded up to nearest 4-byte */
- if ((m = m_copyup(m, sizeof (struct ip6_hdr),
- max_linkhdr)) == NULL) {
- /* XXXJRT new stat, please */
- ip6stat.ip6s_toosmall++;
- in6_ifstat_inc(inifp, ifs6_in_hdrerr);
- goto bad;
- }
- } else if (__predict_false(m->m_len < sizeof (struct ip6_hdr))) {
- struct ifnet *inifp = m->m_pkthdr.rcvif;
- if ((m = m_pullup(m, sizeof (struct ip6_hdr))) == NULL) {
- ip6stat.ip6s_toosmall++;
- in6_ifstat_inc(inifp, ifs6_in_hdrerr);
- goto bad;
- }
+ if (total_len <= hdr_len + mss) {
+ return m0;
}
- ip6 = mtod(m, struct ip6_hdr *);
-
- if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
- ip6stat.ip6s_badvers++;
- in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
- goto bad;
+ if (hdr2_buf == NULL || hdr2_len <= 0) {
+ hdr2_buf = NULL;
+ hdr2_len = 0;
}
- /* Checks out, proceed */
- *mp = m;
- return (0);
-
-bad:
- *mp = m;
- return (-1);
-}
-#endif /* INET6 */
-
-/*
- * bridge_fragment:
- *
- * Return a fragmented mbuf chain.
- */
-static int
-bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
- int snap, struct llc *llc)
-{
- struct mbuf *m0;
- struct ip *ip;
- int error = -1;
-
- if (m->m_len < sizeof (struct ip) &&
- (m = m_pullup(m, sizeof (struct ip))) == NULL)
- goto out;
- ip = mtod(m, struct ip *);
+ off = hdr_len + mss;
+ firstlen = mss; /* first segment stored in the original mbuf */
- error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
- CSUM_DELAY_IP);
- if (error)
- goto out;
+ mnext = &(m0->m_nextpkt); /* pointer to next packet */
- /* walk the chain and re-add the Ethernet header */
- for (m0 = m; m0; m0 = m0->m_nextpkt) {
- if (error == 0) {
- if (snap) {
- M_PREPEND(m0, sizeof (struct llc), M_DONTWAIT, 0);
- if (m0 == NULL) {
- error = ENOBUFS;
- continue;
- }
- bcopy(llc, mtod(m0, caddr_t),
- sizeof (struct llc));
- }
- M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT, 0);
- if (m0 == NULL) {
- error = ENOBUFS;
- continue;
- }
- bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
+ for (n = 1; off < total_len; off += mss, n++) {
+ struct mbuf *m;
+ /*
+ * Copy the header from the original packet
+ * and create a new mbuf chain
+ */
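+ /*
+ * Use a cluster mbuf when the copied headers do not fit in
+ * the data area of a plain packet-header mbuf (MHLEN bytes).
+ */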
+ if (MHLEN < hdr_len) {
+ m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
} else {
- m_freem(m);
+ m = m_gethdr(M_NOWAIT, MT_DATA);
}
- }
-
- if (error == 0)
- ipstat.ips_fragmented++;
- return (error);
-
-out:
- if (m != NULL)
- m_freem(m);
- return (error);
-}
-#endif /* PFIL_HOOKS */
+ if (m == NULL) {
+#ifdef GSO_DEBUG
+ D("MGETHDR error\n");
+#endif
+ goto err;
+ }
-/*
- * bridge_set_bpf_tap:
- *
- * Sets ups the BPF callbacks.
- */
-static errno_t
-bridge_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, bpf_packet_func bpf_callback)
-{
- struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
+ m_copydata(m0, 0, hdr_len, mtod(m, caddr_t));
- /* TBD locking */
- if (sc == NULL || (sc->sc_flags & SCF_DETACHING)) {
- return (ENODEV);
- }
- ASSERT(bridge_in_bsd_mode(sc));
- switch (mode) {
- case BPF_TAP_DISABLE:
- sc->sc_bpf_input = sc->sc_bpf_output = NULL;
- break;
+ m->m_len = hdr_len;
+ /*
+ * if the optional header is present, copy it
+ */
+ if (hdr2_buf != NULL) {
+ m_copyback(m, hdr_len, hdr2_len, hdr2_buf);
+ }
- case BPF_TAP_INPUT:
- sc->sc_bpf_input = bpf_callback;
- break;
+ m->m_flags |= (m0->m_flags & M_COPYFLAGS);
+ if (off + mss >= total_len) { /* last segment */
+ mss = total_len - off;
+ }
+ /*
+ * Copy the payload from the original packet
+ */
+ mseg = m_copym(m0, off, mss, M_NOWAIT);
+ if (mseg == NULL) {
+ m_freem(m);
+#ifdef GSO_DEBUG
+ D("m_copym error\n");
+#endif
+ goto err;
+ }
+ m_cat(m, mseg);
- case BPF_TAP_OUTPUT:
- sc->sc_bpf_output = bpf_callback;
- break;
+ m->m_pkthdr.len = hdr_len + hdr2_len + mss;
+ m->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
+ /*
+ * Copy the checksum flags and data (in_cksum() needs this)
+ */
+ m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
+ m->m_pkthdr.csum_data = m0->m_pkthdr.csum_data;
+ m->m_pkthdr.tso_segsz = m0->m_pkthdr.tso_segsz;
- case BPF_TAP_INPUT_OUTPUT:
- sc->sc_bpf_input = sc->sc_bpf_output = bpf_callback;
- break;
+ *mnext = m;
+ mnext = &(m->m_nextpkt);
+ }
- default:
- break;
+ /*
+ * Update the first segment.
+ * If the optional header is present, it must be
+ * inserted into the first segment as well.
+ */
+ if (hdr2_buf == NULL) {
+ m_adj(m0, hdr_len + firstlen - total_len);
+ m0->m_pkthdr.len = hdr_len + firstlen;
+ } else {
+ mseg = m_copym(m0, hdr_len, firstlen, M_NOWAIT);
+ if (mseg == NULL) {
+#ifdef GSO_DEBUG
+ D("m_copym error\n");
+#endif
+ goto err;
+ }
+ m_adj(m0, hdr_len - total_len);
+ m_copyback(m0, hdr_len, hdr2_len, hdr2_buf);
+ m_cat(m0, mseg);
+ m0->m_pkthdr.len = hdr_len + hdr2_len + firstlen;
}
- return (0);
+ if (nsegs != NULL) {
+ *nsegs = n;
+ }
+ return m0;
+err:
+ while (m0 != NULL) {
+ mseg = m0->m_nextpkt;
+ m0->m_nextpkt = NULL;
+ m_freem(m0);
+ m0 = mseg;
+ }
+ return NULL;
}
/*
- * bridge_detach:
- *
- * Callback when interface has been detached.
+ * Wrappers around the IPv4 checksum functions
*/
-static void
-bridge_detach(ifnet_t ifp)
+static inline void
+gso_ipv4_data_cksum(struct mbuf *m, struct ip *ip, int mac_hlen)
{
- struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
-
-#if BRIDGESTP
- bstp_detach(&sc->sc_stp);
-#endif /* BRIDGESTP */
+ m->m_data += mac_hlen;
+ m->m_len -= mac_hlen;
+ m->m_pkthdr.len -= mac_hlen;
+#if __FreeBSD_version < 1000000
+ ip->ip_len = ntohs(ip->ip_len); /* needed for in_delayed_cksum() */
+#endif
- if (bridge_in_bsd_mode(sc)) {
- /* Tear down the routing table. */
- bridge_rtable_fini(sc);
- }
+ in_delayed_cksum(m);
- lck_mtx_lock(&bridge_list_mtx);
- LIST_REMOVE(sc, sc_list);
- lck_mtx_unlock(&bridge_list_mtx);
+#if __FreeBSD_version < 1000000
+ ip->ip_len = htons(ip->ip_len);
+#endif
+ m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
+ m->m_len += mac_hlen;
+ m->m_pkthdr.len += mac_hlen;
+ m->m_data -= mac_hlen;
+}
- ifnet_release(ifp);
+static inline void
+gso_ipv4_hdr_cksum(struct mbuf *m, struct ip *ip, int mac_hlen, int ip_hlen)
+{
+ m->m_data += mac_hlen;
- lck_mtx_destroy(&sc->sc_mtx, bridge_lock_grp);
+ ip->ip_sum = in_cksum(m, ip_hlen);
- _FREE(sc, M_DEVBUF);
+ m->m_pkthdr.csum_flags &= ~CSUM_IP;
+ m->m_data -= mac_hlen;
}
/*
- * bridge_bpf_input:
- *
- * Invoke the input BPF callback if enabled
+ * Structure that contains the state during the TCP segmentation
*/
-__private_extern__ errno_t
-bridge_bpf_input(ifnet_t ifp, struct mbuf *m)
-{
- struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
-
- ASSERT(bridge_in_bsd_mode(sc));
- if (sc->sc_bpf_input) {
- if (mbuf_pkthdr_rcvif(m) != ifp) {
- printf("%s: rcvif: 0x%llx != ifp 0x%llx\n", __func__,
- (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m)),
- (uint64_t)VM_KERNEL_ADDRPERM(ifp));
- }
- (*sc->sc_bpf_input)(ifp, m);
- }
- return (0);
-}
+struct gso_ip_tcp_state {
+ void (*update)(struct gso_ip_tcp_state *, struct mbuf *);
+ void (*internal)(struct gso_ip_tcp_state *, struct mbuf *);
+ union {
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ } hdr;
+ struct tcphdr *tcp;
+ int mac_hlen;
+ int ip_hlen;
+ int tcp_hlen;
+ int hlen;
+ int pay_len;
+ int sw_csum;
+ uint32_t tcp_seq;
+ uint16_t ip_id;
+ boolean_t is_tx;
+};
/*
- * bridge_bpf_output:
- *
- * Invoke the output BPF callback if enabled
+ * Update the pointers to TCP and IPv4 headers
*/
-__private_extern__ errno_t
-bridge_bpf_output(ifnet_t ifp, struct mbuf *m)
+static inline void
+gso_ipv4_tcp_update(struct gso_ip_tcp_state *state, struct mbuf *m)
{
- struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
-
- ASSERT(bridge_in_bsd_mode(sc));
- if (sc->sc_bpf_output) {
- (*sc->sc_bpf_output)(ifp, m);
- }
- return (0);
+ state->hdr.ip = (struct ip *)(void *)(mtod(m, uint8_t *) + state->mac_hlen);
+ state->tcp = (struct tcphdr *)(void *)((caddr_t)(state->hdr.ip) + state->ip_hlen);
+ state->pay_len = m->m_pkthdr.len - state->hlen;
}
/*
- * bridge_link_event:
- *
- * Report a data link event on an interface
+ * Set the TCP and IPv4 headers properly
*/
-static void
-bridge_link_event(struct ifnet *ifp, u_int32_t event_code)
+static inline void
+gso_ipv4_tcp_internal(struct gso_ip_tcp_state *state, struct mbuf *m)
{
- struct {
- struct kern_event_msg header;
- u_int32_t unit;
- char if_name[IFNAMSIZ];
- } event;
-
-#if BRIDGE_DEBUG
- if (if_bridge_debug & BR_DBGF_LIFECYCLE)
- printf("%s: %s event_code %u - %s\n", __func__, ifp->if_xname,
- event_code, dlil_kev_dl_code_str(event_code));
-#endif /* BRIDGE_DEBUG */
+ /*
+ * Update IP header
+ */
+ state->hdr.ip->ip_id = htons((state->ip_id)++);
+ state->hdr.ip->ip_len = htons(m->m_pkthdr.len - state->mac_hlen);
+ /*
+ * TCP Checksum
+ */
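+ /*
+ * Seed th_sum with the IPv4 pseudo-header checksum; the TCP
+ * header and payload are folded in below while CSUM_DELAY_DATA
+ * is still pending.
+ */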
+ state->tcp->th_sum = 0;
+ state->tcp->th_sum = in_pseudo(state->hdr.ip->ip_src.s_addr,
+ state->hdr.ip->ip_dst.s_addr,
+ htons(state->tcp_hlen + IPPROTO_TCP + state->pay_len));
+ /*
+ * Checksum HW not supported (TCP)
+ */
+ if (state->sw_csum & CSUM_DELAY_DATA) {
+ gso_ipv4_data_cksum(m, state->hdr.ip, state->mac_hlen);
+ }
- bzero(&event, sizeof (event));
- event.header.total_size = sizeof (event);
- event.header.vendor_code = KEV_VENDOR_APPLE;
- event.header.kev_class = KEV_NETWORK_CLASS;
- event.header.kev_subclass = KEV_DL_SUBCLASS;
- event.header.event_code = event_code;
- event.header.event_data[0] = ifnet_family(ifp);
- event.unit = (u_int32_t)ifnet_unit(ifp);
- strlcpy(event.if_name, ifnet_name(ifp), IFNAMSIZ);
- ifnet_event(ifp, &event.header);
+ state->tcp_seq += state->pay_len;
+ /*
+ * IP Checksum
+ */
+ state->hdr.ip->ip_sum = 0;
+ /*
+ * Checksum HW not supported (IP)
+ */
+ if (state->sw_csum & CSUM_IP) {
+ gso_ipv4_hdr_cksum(m, state->hdr.ip, state->mac_hlen, state->ip_hlen);
+ }
}
-#define BRIDGE_HF_DROP(reason, func, line) { \
- bridge_hostfilter_stats.reason++; \
- if (if_bridge_debug & BR_DBGF_HOSTFILTER) \
- printf("%s.%d" #reason, func, line); \
- error = EINVAL; \
-}
/*
- * Make sure this is a DHCP or Bootp request that match the host filter
+ * Updates the pointers to TCP and IPv6 headers
*/
-static int
-bridge_dhcp_filter(struct bridge_iflist *bif, struct mbuf *m, size_t offset)
+static inline void
+gso_ipv6_tcp_update(struct gso_ip_tcp_state *state, struct mbuf *m)
{
- int error = EINVAL;
- struct dhcp dhcp;
+ state->hdr.ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + state->mac_hlen);
+ state->tcp = (struct tcphdr *)(void *)((caddr_t)(state->hdr.ip6) + state->ip_hlen);
+ state->pay_len = m->m_pkthdr.len - state->hlen;
+}
+/*
+ * Sets the TCP and IPv6 headers properly
+ */
+static inline void
+gso_ipv6_tcp_internal(struct gso_ip_tcp_state *state, struct mbuf *m)
+{
+ state->hdr.ip6->ip6_plen = htons(m->m_pkthdr.len -
+ state->mac_hlen - state->ip_hlen);
/*
- * Note: We use the dhcp structure because bootp structure definition
- * is larger and some vendors do not pad the request
- */
- error = mbuf_copydata(m, offset, sizeof(struct dhcp), &dhcp);
- if (error != 0) {
- BRIDGE_HF_DROP(brhf_dhcp_too_small, __func__, __LINE__);
- goto done;
- }
- if (dhcp.dp_op != BOOTREQUEST) {
- BRIDGE_HF_DROP(brhf_dhcp_bad_op, __func__, __LINE__);
- goto done;
- }
- /*
- * The hardware address must be an exact match
- */
- if (dhcp.dp_htype != ARPHRD_ETHER) {
- BRIDGE_HF_DROP(brhf_dhcp_bad_htype, __func__, __LINE__);
- goto done;
- }
- if (dhcp.dp_hlen != ETHER_ADDR_LEN) {
- BRIDGE_HF_DROP(brhf_dhcp_bad_hlen, __func__, __LINE__);
- goto done;
- }
- if (bcmp(dhcp.dp_chaddr, bif->bif_hf_hwsrc,
- ETHER_ADDR_LEN) != 0) {
- BRIDGE_HF_DROP(brhf_dhcp_bad_chaddr, __func__, __LINE__);
- goto done;
- }
- /*
- * Client address must match the host address or be not specified
+ * TCP Checksum
*/
- if (dhcp.dp_ciaddr.s_addr != bif->bif_hf_ipsrc.s_addr &&
- dhcp.dp_ciaddr.s_addr != INADDR_ANY) {
- BRIDGE_HF_DROP(brhf_dhcp_bad_ciaddr, __func__, __LINE__);
- goto done;
+ state->tcp->th_sum = 0;
+ state->tcp->th_sum = in6_pseudo(&state->hdr.ip6->ip6_src,
+ &state->hdr.ip6->ip6_dst,
+ htonl(state->tcp_hlen + state->pay_len + IPPROTO_TCP));
+ /*
+ * Checksum HW not supported (TCP)
+ */
+ if (state->sw_csum & CSUM_DELAY_IPV6_DATA) {
+ (void)in6_finalize_cksum(m, state->mac_hlen, -1, -1, state->sw_csum);
+ m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IPV6_DATA;
}
- error = 0;
-done:
- return (error);
+ state->tcp_seq += state->pay_len;
+}
+
+/*
+ * Initialize the state for TCP segmentation
+ */
+static inline boolean_t
+gso_ip_tcp_init_state(struct gso_ip_tcp_state *state, struct ifnet *ifp,
+    struct mbuf *m, int mac_hlen, int ip_hlen, boolean_t isipv6)
+{
+#pragma unused(ifp)
+
+ if (isipv6) {
+ state->hdr.ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + mac_hlen);
+ if (state->hdr.ip6->ip6_nxt != IPPROTO_TCP) {
+ printf("%s: Non-TCP (%d) IPv6 frame", __func__, state->hdr.ip6->ip6_nxt);
+ return FALSE;
+ }
+ state->tcp = (struct tcphdr *)(void *)((caddr_t)(state->hdr.ip6) + ip_hlen);
+ state->update = gso_ipv6_tcp_update;
+ state->internal = gso_ipv6_tcp_internal;
+ state->sw_csum = CSUM_DELAY_IPV6_DATA;
+ } else {
+ state->hdr.ip = (struct ip *)(void *)(mtod(m, uint8_t *) + mac_hlen);
+ if (state->hdr.ip->ip_p != IPPROTO_TCP) {
+ printf("%s: Non-TCP (%d) IPv4 frame", __func__, state->hdr.ip->ip_p);
+ return FALSE;
+ }
+ state->ip_id = ntohs(state->hdr.ip->ip_id);
+ state->tcp = (struct tcphdr *)(void *)((caddr_t)(state->hdr.ip) + ip_hlen);
+ state->update = gso_ipv4_tcp_update;
+ state->internal = gso_ipv4_tcp_internal;
+ state->sw_csum = CSUM_DELAY_DATA | CSUM_IP;
+ }
+ state->mac_hlen = mac_hlen;
+ state->ip_hlen = ip_hlen;
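+ /* th_off counts 32-bit words, hence the shift by 2 */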
+ state->tcp_hlen = state->tcp->th_off << 2;
+ state->hlen = mac_hlen + ip_hlen + state->tcp_hlen;
+ state->tcp_seq = ntohl(state->tcp->th_seq);
+ //state->sw_csum = m->m_pkthdr.csum_flags & ~ifp->if_hwassist;
+ return TRUE;
}
+/*
+ * GSO on TCP/IP (v4 or v6)
+ *
+ * If is_tx is TRUE, the segments are transmitted as they are
+ * produced.
+ *
+ * If is_tx is FALSE, the segmented packets are returned as a chain in *mp.
+ */
static int
-bridge_host_filter(struct bridge_iflist *bif, struct mbuf *m)
+gso_ip_tcp(struct ifnet *ifp, struct mbuf **mp, struct gso_ip_tcp_state *state,
+ boolean_t is_tx)
{
- int error = EINVAL;
- struct ether_header *eh;
- static struct in_addr inaddr_any = { .s_addr = INADDR_ANY };
+ struct mbuf *m, *m_tx;
+ int error = 0;
+ int mss = 0;
+ int nsegs = 0;
+ struct mbuf *m0 = *mp;
+#ifdef GSO_STATS
+ int total_len = m0->m_pkthdr.len;
+#endif /* GSO_STATS */
- /*
- * Check the Ethernet header is large enough
- */
- if (mbuf_pkthdr_len(m) < sizeof(struct ether_header)) {
- BRIDGE_HF_DROP(brhf_ether_too_small, __func__, __LINE__);
- goto done;
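+ /*
+ * Size each segment's payload so that the IP header, TCP header
+ * and data fit within the interface MTU.
+ */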
+#if 1
+ mss = ifp->if_mtu - state->ip_hlen - state->tcp_hlen;
+#else
+ if (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) {/* TSO with GSO */
+ mss = ifp->if_hw_tsomax - state->ip_hlen - state->tcp_hlen;
+ } else {
+ mss = m0->m_pkthdr.tso_segsz;
}
- if (mbuf_len(m) < sizeof(struct ether_header) &&
- mbuf_pullup(&m, sizeof(struct ether_header)) != 0) {
- BRIDGE_HF_DROP(brhf_ether_pullup_failed, __func__, __LINE__);
- goto done;
+#endif
+
+ *mp = m0 = m_seg(m0, state->hlen, mss, &nsegs, 0, 0);
+ if (m0 == NULL) {
+ return ENOBUFS; /* XXX ok? */
}
- eh = mtod(m, struct ether_header *);
+#if BRIDGE_DEBUG
+ if (IF_BRIDGE_DEBUG(BR_DBGF_SEGMENTATION)) {
+ printf("%s: %s %s mss %d nsegs %d\n", __func__,
+ ifp->if_xname,
+ is_tx ? "TX" : "RX",
+ mss, nsegs);
+ }
+#endif /* BRIDGE_DEBUG */
+
/*
- * Restrict the source hardware address
+ * XXX-ste: can this happen?
*/
- if ((bif->bif_flags & BIFF_HF_HWSRC) == 0 ||
- bcmp(eh->ether_shost, bif->bif_hf_hwsrc,
- ETHER_ADDR_LEN) != 0) {
- BRIDGE_HF_DROP(brhf_bad_ether_srchw_addr, __func__, __LINE__);
- goto done;
+ if (m0->m_nextpkt == NULL) {
+#ifdef GSO_DEBUG
+ D("only 1 segment");
+#endif
+ if (is_tx) {
+ error = bridge_transmit(ifp, m0);
+ }
+ return error;
}
+#ifdef GSO_STATS
+ GSOSTAT_SET_MAX(tcp.gsos_max_mss, mss);
+ GSOSTAT_SET_MIN(tcp.gsos_min_mss, mss);
+ GSOSTAT_ADD(tcp.gsos_osegments, nsegs);
+#endif /* GSO_STATS */
- /*
- * Restrict Ethernet protocols to ARP and IP
- */
- if (eh->ether_type == htons(ETHERTYPE_ARP)) {
- struct ether_arp *ea;
- size_t minlen = sizeof(struct ether_header) +
- sizeof(struct ether_arp);
+ /* first pkt */
+ m = m0;
- /*
- * Make the Ethernet and ARP headers contiguous
- */
- if (mbuf_pkthdr_len(m) < minlen) {
- BRIDGE_HF_DROP(brhf_arp_too_small, __func__, __LINE__);
- goto done;
- }
- if (mbuf_len(m) < minlen && mbuf_pullup(&m, minlen) != 0) {
- BRIDGE_HF_DROP(brhf_arp_pullup_failed,
- __func__, __LINE__);
- goto done;
- }
- /*
- * Verify this is an ethernet/ip arp
- */
- eh = mtod(m, struct ether_header *);
- ea = (struct ether_arp *)(eh + 1);
- if (ea->arp_hrd != htons(ARPHRD_ETHER)) {
- BRIDGE_HF_DROP(brhf_arp_bad_hw_type,
- __func__, __LINE__);
- goto done;
- }
- if (ea->arp_pro != htons(ETHERTYPE_IP)) {
- BRIDGE_HF_DROP(brhf_arp_bad_pro_type,
- __func__, __LINE__);
- goto done;
- }
- /*
- * Verify the address lengths are correct
- */
- if (ea->arp_hln != ETHER_ADDR_LEN) {
- BRIDGE_HF_DROP(brhf_arp_bad_hw_len, __func__, __LINE__);
- goto done;
- }
- if (ea->arp_pln != sizeof(struct in_addr)) {
- BRIDGE_HF_DROP(brhf_arp_bad_pro_len,
- __func__, __LINE__);
- goto done;
+ state->update(state, m);
+
+ do {
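+ /*
+ * FIN and PSH may only appear on the final segment;
+ * strip them from every earlier segment.
+ */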
+ state->tcp->th_flags &= ~(TH_FIN | TH_PUSH);
+
+ state->internal(state, m);
+ m_tx = m;
+ m = m->m_nextpkt;
+ if (is_tx) {
+ m_tx->m_nextpkt = NULL;
+ if ((error = bridge_transmit(ifp, m_tx)) != 0) {
+ /*
+ * XXX: If a segment cannot be sent, discard the remaining
+ * segments and propagate the error to the upper layers.
+ * That way TCP retransmits the entire original packet.
+ */
+#ifdef GSO_DEBUG
+ D("if_transmit error\n");
+#endif
+ goto err;
+ }
}
+ state->update(state, m);
- /*
- * Allow only ARP request or ARP reply
- */
- if (ea->arp_op != htons(ARPOP_REQUEST) &&
- ea->arp_op != htons(ARPOP_REPLY)) {
- BRIDGE_HF_DROP(brhf_arp_bad_op, __func__, __LINE__);
- goto done;
+ state->tcp->th_flags &= ~TH_CWR;
+ state->tcp->th_seq = htonl(state->tcp_seq);
+ } while (m->m_nextpkt);
+
+ /* last pkt */
+ state->internal(state, m);
+
+ if (is_tx) {
+ error = bridge_transmit(ifp, m);
+#ifdef GSO_DEBUG
+ if (error) {
+ D("last if_transmit error\n");
+ D("error - type = %d \n", error);
}
- /*
- * Verify source hardware address matches
- */
- if (bcmp(ea->arp_sha, bif->bif_hf_hwsrc,
- ETHER_ADDR_LEN) != 0) {
- BRIDGE_HF_DROP(brhf_arp_bad_sha, __func__, __LINE__);
- goto done;
+#endif
+ }
+#ifdef GSO_STATS
+ if (!error) {
+ GSOSTAT_INC(tcp.gsos_segmented);
+ GSOSTAT_SET_MAX(tcp.gsos_maxsegmented, total_len);
+ GSOSTAT_SET_MIN(tcp.gsos_minsegmented, total_len);
+ GSOSTAT_ADD(tcp.gsos_totalbyteseg, total_len);
+ }
+#endif /* GSO_STATS */
+ return error;
+
+err:
+#ifdef GSO_DEBUG
+ D("error - type = %d \n", error);
+#endif
+ while (m != NULL) {
+ m_tx = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+ m_freem(m);
+ m = m_tx;
+ }
+ return error;
+}
+
+/*
+ * GSO on TCP/IPv4
+ */
+static int
+gso_ipv4_tcp(struct ifnet *ifp, struct mbuf **mp, u_int mac_hlen,
+ boolean_t is_tx)
+{
+ struct ip *ip;
+ struct gso_ip_tcp_state state;
+ int hlen;
+ int ip_hlen;
+ struct mbuf *m0 = *mp;
+
+ if (!is_tx && ipforwarding == 0) {
+ /* no need to segment if the packet will not be forwarded */
+ return 0;
+ }
+ hlen = mac_hlen + sizeof(struct ip);
+ if (m0->m_len < hlen) {
+#ifdef GSO_DEBUG
+ D("m_len < hlen - m_len: %d hlen: %d", m0->m_len, hlen);
+#endif
+ *mp = m0 = m_pullup(m0, hlen);
+ if (m0 == NULL) {
+ return ENOBUFS;
}
- /*
- * Verify source protocol address:
- * May be null for an ARP probe
- */
- if (bcmp(ea->arp_spa, &bif->bif_hf_ipsrc.s_addr,
- sizeof(struct in_addr)) != 0 &&
- bcmp(ea->arp_spa, &inaddr_any,
- sizeof(struct in_addr)) != 0) {
- BRIDGE_HF_DROP(brhf_arp_bad_spa, __func__, __LINE__);
- goto done;
+ }
+ ip = (struct ip *)(void *)(mtod(m0, uint8_t *) + mac_hlen);
+ ip_hlen = IP_VHL_HL(ip->ip_vhl) << 2;
+ hlen = mac_hlen + ip_hlen + sizeof(struct tcphdr);
+ if (m0->m_len < hlen) {
+#ifdef GSO_DEBUG
+ D("m_len < hlen - m_len: %d hlen: %d", m0->m_len, hlen);
+#endif
+ *mp = m0 = m_pullup(m0, hlen);
+ if (m0 == NULL) {
+ return ENOBUFS;
}
- /*
- *
- */
- bridge_hostfilter_stats.brhf_arp_ok += 1;
- error = 0;
- } else if (eh->ether_type == htons(ETHERTYPE_IP)) {
- size_t minlen = sizeof(struct ether_header) + sizeof(struct ip);
- struct ip iphdr;
- size_t offset;
+ }
+ if (!is_tx) {
+ /* if the destination is a local IP address, don't segment */
+ struct in_addr dst_ip;
- /*
- * Make the Ethernet and IP headers contiguous
- */
- if (mbuf_pkthdr_len(m) < minlen) {
- BRIDGE_HF_DROP(brhf_ip_too_small, __func__, __LINE__);
- goto done;
- }
- offset = sizeof(struct ether_header);
- error = mbuf_copydata(m, offset, sizeof(struct ip), &iphdr);
- if (error != 0) {
- BRIDGE_HF_DROP(brhf_ip_too_small, __func__, __LINE__);
- goto done;
+ bcopy(&ip->ip_dst, &dst_ip, sizeof(dst_ip));
+ if (in_addr_is_ours(dst_ip)) {
+ return 0;
}
- /*
- * Verify the source IP address
- */
- if (iphdr.ip_p == IPPROTO_UDP) {
- struct udphdr udp;
+ }
- minlen += sizeof(struct udphdr);
- if (mbuf_pkthdr_len(m) < minlen) {
- BRIDGE_HF_DROP(brhf_ip_too_small,
- __func__, __LINE__);
- goto done;
- }
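+ /*
+ * Request a delayed TCP checksum: csum_data records the offset
+ * of th_sum used later by in_delayed_cksum().
+ */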
+ m0->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
+ m0->m_pkthdr.csum_flags = CSUM_DELAY_DATA;
- /*
- * Allow all zero addresses for DHCP requests
- */
- if (iphdr.ip_src.s_addr != bif->bif_hf_ipsrc.s_addr &&
- iphdr.ip_src.s_addr != INADDR_ANY) {
- BRIDGE_HF_DROP(brhf_ip_bad_srcaddr,
- __func__, __LINE__);
- goto done;
- }
- offset = sizeof(struct ether_header) +
- (IP_VHL_HL(iphdr.ip_vhl) << 2);
- error = mbuf_copydata(m, offset,
- sizeof(struct udphdr), &udp);
- if (error != 0) {
- BRIDGE_HF_DROP(brhf_ip_too_small,
- __func__, __LINE__);
- goto done;
- }
- /*
- * Either it's a Bootp/DHCP packet that we like or
- * it's a UDP packet from the host IP as source address
- */
- if (udp.uh_sport == htons(IPPORT_BOOTPC) &&
- udp.uh_dport == htons(IPPORT_BOOTPS)) {
- minlen += sizeof(struct dhcp);
- if (mbuf_pkthdr_len(m) < minlen) {
- BRIDGE_HF_DROP(brhf_ip_too_small,
- __func__, __LINE__);
- goto done;
- }
- offset += sizeof(struct udphdr);
- error = bridge_dhcp_filter(bif, m, offset);
- if (error != 0)
- goto done;
- } else if (iphdr.ip_src.s_addr == INADDR_ANY) {
- BRIDGE_HF_DROP(brhf_ip_bad_srcaddr,
- __func__, __LINE__);
- goto done;
- }
- } else if (iphdr.ip_src.s_addr != bif->bif_hf_ipsrc.s_addr ||
- bif->bif_hf_ipsrc.s_addr == INADDR_ANY) {
+ if (!gso_ip_tcp_init_state(&state, ifp, m0, mac_hlen, ip_hlen, FALSE)) {
+ m_freem(m0);
+ *mp = NULL;
+ return EINVAL;
+ }
- BRIDGE_HF_DROP(brhf_ip_bad_srcaddr, __func__, __LINE__);
- goto done;
- }
- /*
- * Allow only boring IP protocols
- */
- if (iphdr.ip_p != IPPROTO_TCP &&
- iphdr.ip_p != IPPROTO_UDP &&
- iphdr.ip_p != IPPROTO_ICMP &&
- iphdr.ip_p != IPPROTO_ESP &&
- iphdr.ip_p != IPPROTO_AH &&
- iphdr.ip_p != IPPROTO_GRE) {
- BRIDGE_HF_DROP(brhf_ip_bad_proto, __func__, __LINE__);
- goto done;
+ return gso_ip_tcp(ifp, mp, &state, is_tx);
+}
+
+/*
+ * GSO on TCP/IPv6
+ */
+static int
+gso_ipv6_tcp(struct ifnet *ifp, struct mbuf **mp, u_int mac_hlen,
+ boolean_t is_tx)
+{
+ struct ip6_hdr *ip6;
+ struct gso_ip_tcp_state state;
+ int hlen;
+ int ip_hlen;
+ struct mbuf *m0 = *mp;
+
+ if (!is_tx && ip6_forwarding == 0) {
+ /* no need to segment if the packet will not be forwarded */
+ return 0;
+ }
+
+ hlen = mac_hlen + sizeof(struct ip6_hdr);
+ if (m0->m_len < hlen) {
+#ifdef GSO_DEBUG
+ D("m_len < hlen - m_len: %d hlen: %d", m0->m_len, hlen);
+#endif
+ *mp = m0 = m_pullup(m0, hlen);
+ if (m0 == NULL) {
+ return ENOBUFS;
}
- bridge_hostfilter_stats.brhf_ip_ok += 1;
- error = 0;
- } else {
- BRIDGE_HF_DROP(brhf_bad_ether_type, __func__, __LINE__);
- goto done;
}
-done:
- if (error != 0) {
- if (if_bridge_debug & BR_DBGF_HOSTFILTER) {
- if (m) {
- printf_mbuf_data(m, 0,
- sizeof(struct ether_header) +
- sizeof(struct ip));
- }
- printf("\n");
+ ip6 = (struct ip6_hdr *)(mtod(m0, uint8_t *) + mac_hlen);
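+ /* ip6_lasthdr() skips any extension headers to find the start of TCP */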
+ ip_hlen = ip6_lasthdr(m0, mac_hlen, IPPROTO_IPV6, NULL) - mac_hlen;
+ hlen = mac_hlen + ip_hlen + sizeof(struct tcphdr);
+ if (m0->m_len < hlen) {
+#ifdef GSO_DEBUG
+ D("m_len < hlen - m_len: %d hlen: %d", m0->m_len, hlen);
+#endif
+ *mp = m0 = m_pullup(m0, hlen);
+ if (m0 == NULL) {
+ return ENOBUFS;
}
+ }
+ if (!is_tx) {
+ struct in6_addr dst_ip6;
- if (m != NULL)
- m_freem(m);
+ bcopy(&ip6->ip6_dst, &dst_ip6, sizeof(dst_ip6));
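+ /* embed the scope id (KAME convention) so the local-address check matches */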
+ if (IN6_IS_ADDR_LINKLOCAL(&dst_ip6)) {
+ dst_ip6.s6_addr16[1] = htons(ifp->if_index);
+ }
+ if (in6_addr_is_ours(&dst_ip6)) {
+ /* local IP address, no need to segment */
+ return 0;
+ }
}
- return (error);
-}
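+ /*
+ * As in the IPv4 case: request a delayed TCP checksum, which is
+ * finalized per segment via in6_finalize_cksum().
+ */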
+ m0->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
+ m0->m_pkthdr.csum_flags = CSUM_DELAY_IPV6_DATA;
+ if (!gso_ip_tcp_init_state(&state, ifp, m0, mac_hlen, ip_hlen, TRUE)) {
+ m_freem(m0);
+ *mp = NULL;
+ return EINVAL;
+ }
+ return gso_ip_tcp(ifp, mp, &state, is_tx);
+}