+
+/*
+ * Default v1 (header-passing) protocol input handler.  Installed where
+ * no real handler exists; always fails with ENXIO ("device not
+ * configured") so stray packets are rejected rather than dispatched.
+ * NOTE(review): does not free `packet`; presumably the caller owns the
+ * mbuf on error — confirm against the dlil input path.
+ */
+static errno_t
+ifproto_media_input_v1(struct ifnet *ifp, protocol_family_t protocol,
+    struct mbuf *packet, char *header)
+{
+#pragma unused(ifp, protocol, packet, header)
+	return (ENXIO);
+}
+
+/*
+ * Default v2 (no separate header) protocol input handler; see
+ * ifproto_media_input_v1.  Always returns ENXIO.
+ */
+static errno_t
+ifproto_media_input_v2(struct ifnet *ifp, protocol_family_t protocol,
+    struct mbuf *packet)
+{
+#pragma unused(ifp, protocol, packet)
+	return (ENXIO);
+
+}
+
+/*
+ * Default pre-output handler for a protocol with no real pre-output
+ * routine; always fails with ENXIO so nothing is framed or sent.
+ */
+static errno_t
+ifproto_media_preout(struct ifnet *ifp, protocol_family_t protocol,
+    mbuf_t *packet, const struct sockaddr *dest, void *route, char *frame_type,
+    char *link_layer_dest)
+{
+#pragma unused(ifp, protocol, packet, dest, route, frame_type, link_layer_dest)
+	return (ENXIO);
+
+}
+
+/*
+ * Default protocol event handler: silently ignores all kernel events.
+ */
+static void
+ifproto_media_event(struct ifnet *ifp, protocol_family_t protocol,
+    const struct kev_msg *event)
+{
+#pragma unused(ifp, protocol, event)
+}
+
+/*
+ * Default protocol ioctl handler: rejects every command with ENXIO.
+ */
+static errno_t
+ifproto_media_ioctl(struct ifnet *ifp, protocol_family_t protocol,
+    unsigned long command, void *argument)
+{
+#pragma unused(ifp, protocol, command, argument)
+	return (ENXIO);
+}
+
+/*
+ * Default multicast address resolution handler: cannot map a protocol
+ * address to a link-layer address, so always returns ENXIO.
+ */
+static errno_t
+ifproto_media_resolve_multi(ifnet_t ifp, const struct sockaddr *proto_addr,
+    struct sockaddr_dl *out_ll, size_t ll_len)
+{
+#pragma unused(ifp, proto_addr, out_ll, ll_len)
+	return (ENXIO);
+}
+
+/*
+ * Default ARP transmit handler: interfaces without a real ARP
+ * implementation fail every request with ENXIO.
+ */
+static errno_t
+ifproto_media_send_arp(struct ifnet *ifp, u_short arpop,
+    const struct sockaddr_dl *sender_hw, const struct sockaddr *sender_proto,
+    const struct sockaddr_dl *target_hw, const struct sockaddr *target_proto)
+{
+#pragma unused(ifp, arpop, sender_hw, sender_proto, target_hw, target_proto)
+	return (ENXIO);
+}
+
+extern int if_next_index(void);
+
+/*
+ * Attach an ifnet to the system: link it on ifnet_head and into
+ * ifindex2ifnet[], install its (permanent) link-level address,
+ * allocate the protocol hash, optionally spawn a dedicated dlil
+ * input thread (Ethernet/cellular only), reset statistics, then mark
+ * the ifnet IFRF_ATTACHED and post KEV_DL_IF_ATTACHED.
+ *
+ * ll_addr, if non-NULL, supplies the link-layer address; its sdl_alen
+ * must match if_addrlen when the latter is already set.
+ *
+ * Returns 0 on success, or EINVAL/EEXIST/ENODEV/ENOBUFS on failure.
+ * Lock ordering: dlil_if_lock -> ifnet_head lock -> ifnet lock.
+ */
+errno_t
+ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr)
+{
+	struct ifnet *tmp_if;
+	struct ifaddr *ifa;
+	struct if_data_internal if_data_saved;
+	struct dlil_ifnet *dl_if = (struct dlil_ifnet *)ifp;
+
+	if (ifp == NULL)
+		return (EINVAL);
+
+	/*
+	 * Serialize ifnet attach using dlil_ifnet_lock, in order to
+	 * prevent the interface from being configured while it is
+	 * embryonic, as ifnet_head_lock is dropped and reacquired
+	 * below prior to marking the ifnet with IFRF_ATTACHED.
+	 */
+	dlil_if_lock();
+	ifnet_head_lock_exclusive();
+	/* Verify we aren't already on the list */
+	TAILQ_FOREACH(tmp_if, &ifnet_head, if_link) {
+		if (tmp_if == ifp) {
+			ifnet_head_done();
+			dlil_if_unlock();
+			return (EEXIST);
+		}
+	}
+
+	lck_mtx_lock_spin(&ifp->if_ref_lock);
+	if (ifp->if_refflags & IFRF_ATTACHED) {
+		panic("%s: flags mismatch (attached set) ifp=%p",
+		    __func__, ifp);
+		/* NOTREACHED */
+	}
+	lck_mtx_unlock(&ifp->if_ref_lock);
+
+	ifnet_lock_exclusive(ifp);
+
+	/* Sanity check */
+	VERIFY(ifp->if_detaching_link.tqe_next == NULL);
+	VERIFY(ifp->if_detaching_link.tqe_prev == NULL);
+
+	/*
+	 * Adopt the caller-supplied link-layer address length, or
+	 * insist that it agree with the existing one.
+	 */
+	if (ll_addr != NULL) {
+		if (ifp->if_addrlen == 0) {
+			ifp->if_addrlen = ll_addr->sdl_alen;
+		} else if (ll_addr->sdl_alen != ifp->if_addrlen) {
+			ifnet_lock_done(ifp);
+			ifnet_head_done();
+			dlil_if_unlock();
+			return (EINVAL);
+		}
+	}
+
+	/*
+	 * Allow interfaces without protocol families to attach
+	 * only if they have the necessary fields filled out.
+	 */
+	if (ifp->if_add_proto == NULL || ifp->if_del_proto == NULL) {
+		DLIL_PRINTF("%s: Attempt to attach interface without "
+		    "family module - %d\n", __func__, ifp->if_family);
+		ifnet_lock_done(ifp);
+		ifnet_head_done();
+		dlil_if_unlock();
+		return (ENODEV);
+	}
+
+	/* Allocate protocol hash table */
+	VERIFY(ifp->if_proto_hash == NULL);
+	ifp->if_proto_hash = zalloc(dlif_phash_zone);
+	if (ifp->if_proto_hash == NULL) {
+		ifnet_lock_done(ifp);
+		ifnet_head_done();
+		dlil_if_unlock();
+		return (ENOBUFS);
+	}
+	bzero(ifp->if_proto_hash, dlif_phash_size);
+
+	/* Filter list must start out empty and quiescent */
+	lck_mtx_lock_spin(&ifp->if_flt_lock);
+	VERIFY(TAILQ_EMPTY(&ifp->if_flt_head));
+	TAILQ_INIT(&ifp->if_flt_head);
+	VERIFY(ifp->if_flt_busy == 0);
+	VERIFY(ifp->if_flt_waiters == 0);
+	lck_mtx_unlock(&ifp->if_flt_lock);
+
+	VERIFY(TAILQ_EMPTY(&ifp->if_prefixhead));
+	TAILQ_INIT(&ifp->if_prefixhead);
+
+	/*
+	 * A recycled (DLIF_REUSE) ifnet may carry suspended multicast
+	 * memberships over from its previous incarnation; only reset
+	 * the list for a fresh ifnet.
+	 */
+	if (!(dl_if->dl_if_flags & DLIF_REUSE)) {
+		VERIFY(LIST_EMPTY(&ifp->if_multiaddrs));
+		LIST_INIT(&ifp->if_multiaddrs);
+	}
+
+	VERIFY(ifp->if_allhostsinm == NULL);
+	VERIFY(TAILQ_EMPTY(&ifp->if_addrhead));
+	TAILQ_INIT(&ifp->if_addrhead);
+
+	if (ifp->if_snd.ifq_maxlen == 0)
+		ifp->if_snd.ifq_maxlen = ifqmaxlen;
+
+	/* Assign an interface index if the driver didn't pick one */
+	if (ifp->if_index == 0) {
+		int idx = if_next_index();
+
+		if (idx == -1) {
+			ifp->if_index = 0;
+			ifnet_lock_done(ifp);
+			ifnet_head_done();
+			dlil_if_unlock();
+			return (ENOBUFS);
+		}
+		ifp->if_index = idx;
+	}
+	/* There should not be anything occupying this slot */
+	VERIFY(ifindex2ifnet[ifp->if_index] == NULL);
+
+	/* allocate (if needed) and initialize a link address */
+	VERIFY(!(dl_if->dl_if_flags & DLIF_REUSE) || ifp->if_lladdr != NULL);
+	ifa = dlil_alloc_lladdr(ifp, ll_addr);
+	if (ifa == NULL) {
+		ifnet_lock_done(ifp);
+		ifnet_head_done();
+		dlil_if_unlock();
+		return (ENOBUFS);
+	}
+
+	VERIFY(ifnet_addrs[ifp->if_index - 1] == NULL);
+	ifnet_addrs[ifp->if_index - 1] = ifa;
+
+	/* make this address the first on the list */
+	IFA_LOCK(ifa);
+	/* hold a reference for ifnet_addrs[] */
+	IFA_ADDREF_LOCKED(ifa);
+	/* if_attach_link_ifa() holds a reference for ifa_link */
+	if_attach_link_ifa(ifp, ifa);
+	IFA_UNLOCK(ifa);
+
+#if CONFIG_MACF_NET
+	mac_ifnet_label_associate(ifp);
+#endif
+
+	/* Publish the ifnet; it becomes visible to lookups here */
+	TAILQ_INSERT_TAIL(&ifnet_head, ifp, if_link);
+	ifindex2ifnet[ifp->if_index] = ifp;
+
+	/* Hold a reference to the underlying dlil_ifnet */
+	ifnet_reference(ifp);
+
+	/*
+	 * A specific dlil input thread is created per Ethernet/cellular
+	 * interface. pseudo interfaces or other types of interfaces use
+	 * the main ("loopback") thread.
+	 *
+	 * If the sysctl "net.link.generic.system.multi_threaded_input" is set
+	 * to zero, all packets will be handled by the main loopback thread,
+	 * reverting to 10.4.x behaviour.
+	 */
+	if (dlil_multithreaded_input &&
+	    (ifp->if_type == IFT_ETHER || ifp->if_type == IFT_CELLULAR)) {
+		int err;
+
+		ifp->if_input_thread = zalloc(dlif_inp_zone);
+		if (ifp->if_input_thread == NULL) {
+			panic("%s: ifp=%p couldn't alloc threading",
+			    __func__, ifp);
+			/* NOTREACHED */
+		}
+		bzero(ifp->if_input_thread, dlif_inp_size);
+		err = dlil_create_input_thread(ifp, ifp->if_input_thread);
+		if (err != 0) {
+			panic("%s: ifp=%p couldn't get a thread. "
+			    "err=%d", __func__, ifp, err);
+			/* NOTREACHED */
+		}
+#ifdef DLIL_DEBUG
+		printf("%s: dlil thread for ifp=%p if_index=%d\n",
+		    __func__, ifp, ifp->if_index);
+#endif
+	}
+
+	/* Clear stats (save and restore other fields that we care) */
+	if_data_saved = ifp->if_data;
+	bzero(&ifp->if_data, sizeof (ifp->if_data));
+	ifp->if_data.ifi_type = if_data_saved.ifi_type;
+	ifp->if_data.ifi_typelen = if_data_saved.ifi_typelen;
+	ifp->if_data.ifi_physical = if_data_saved.ifi_physical;
+	ifp->if_data.ifi_addrlen = if_data_saved.ifi_addrlen;
+	ifp->if_data.ifi_hdrlen = if_data_saved.ifi_hdrlen;
+	ifp->if_data.ifi_mtu = if_data_saved.ifi_mtu;
+	ifp->if_data.ifi_baudrate = if_data_saved.ifi_baudrate;
+	ifp->if_data.ifi_hwassist = if_data_saved.ifi_hwassist;
+	ifp->if_data.ifi_tso_v4_mtu = if_data_saved.ifi_tso_v4_mtu;
+	ifp->if_data.ifi_tso_v6_mtu = if_data_saved.ifi_tso_v6_mtu;
+	ifnet_touch_lastchange(ifp);
+
+	/* Record attach PC stacktrace */
+	ctrace_record(&((struct dlil_ifnet *)ifp)->dl_if_attach);
+
+	/*
+	 * Count suspended AF_LINK/AF_UNSPEC multicast memberships that
+	 * survived from a previous incarnation of this (recycled) ifnet.
+	 */
+	ifp->if_updatemcasts = 0;
+	if (!LIST_EMPTY(&ifp->if_multiaddrs)) {
+		struct ifmultiaddr *ifma;
+		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+			IFMA_LOCK(ifma);
+			if (ifma->ifma_addr->sa_family == AF_LINK ||
+			    ifma->ifma_addr->sa_family == AF_UNSPEC)
+				ifp->if_updatemcasts++;
+			IFMA_UNLOCK(ifma);
+		}
+
+		printf("%s%d: attached with %d suspended link-layer multicast "
+		    "membership(s)\n", ifp->if_name, ifp->if_unit,
+		    ifp->if_updatemcasts);
+	}
+
+	ifnet_lock_done(ifp);
+	ifnet_head_done();
+
+	lck_mtx_lock(&ifp->if_cached_route_lock);
+	/* Enable forwarding cached route */
+	ifp->if_fwd_cacheok = 1;
+	/* Clean up any existing cached routes */
+	if (ifp->if_fwd_route.ro_rt != NULL)
+		rtfree(ifp->if_fwd_route.ro_rt);
+	bzero(&ifp->if_fwd_route, sizeof (ifp->if_fwd_route));
+	if (ifp->if_src_route.ro_rt != NULL)
+		rtfree(ifp->if_src_route.ro_rt);
+	bzero(&ifp->if_src_route, sizeof (ifp->if_src_route));
+	if (ifp->if_src_route6.ro_rt != NULL)
+		rtfree(ifp->if_src_route6.ro_rt);
+	bzero(&ifp->if_src_route6, sizeof (ifp->if_src_route6));
+	lck_mtx_unlock(&ifp->if_cached_route_lock);
+
+	ifnet_llreach_ifattach(ifp, (dl_if->dl_if_flags & DLIF_REUSE));
+
+	/*
+	 * Allocate and attach IGMPv3/MLDv2 interface specific variables
+	 * and trees; do this before the ifnet is marked as attached.
+	 * The ifnet keeps the reference to the info structures even after
+	 * the ifnet is detached, since the network-layer records still
+	 * refer to the info structures even after that. This also
+	 * makes it possible for them to still function after the ifnet
+	 * is recycled or reattached.
+	 */
+#if INET
+	if (IGMP_IFINFO(ifp) == NULL) {
+		IGMP_IFINFO(ifp) = igmp_domifattach(ifp, M_WAITOK);
+		VERIFY(IGMP_IFINFO(ifp) != NULL);
+	} else {
+		VERIFY(IGMP_IFINFO(ifp)->igi_ifp == ifp);
+		igmp_domifreattach(IGMP_IFINFO(ifp));
+	}
+#endif /* INET */
+#if INET6
+	if (MLD_IFINFO(ifp) == NULL) {
+		MLD_IFINFO(ifp) = mld_domifattach(ifp, M_WAITOK);
+		VERIFY(MLD_IFINFO(ifp) != NULL);
+	} else {
+		VERIFY(MLD_IFINFO(ifp)->mli_ifp == ifp);
+		mld_domifreattach(MLD_IFINFO(ifp));
+	}
+#endif /* INET6 */
+
+	/*
+	 * Finally, mark this ifnet as attached.
+	 */
+	lck_mtx_lock(rnh_lock);
+	ifnet_lock_exclusive(ifp);
+	lck_mtx_lock_spin(&ifp->if_ref_lock);
+	ifp->if_refflags = IFRF_ATTACHED;
+	lck_mtx_unlock(&ifp->if_ref_lock);
+	if (net_rtref) {
+		/* boot-args override; enable idle notification */
+		(void) ifnet_set_idle_flags_locked(ifp, IFRF_IDLE_NOTIFY,
+		    IFRF_IDLE_NOTIFY);
+	} else {
+		/* apply previous request(s) to set the idle flags, if any */
+		(void) ifnet_set_idle_flags_locked(ifp, ifp->if_idle_new_flags,
+		    ifp->if_idle_new_flags_mask);
+
+	}
+	ifnet_lock_done(ifp);
+	lck_mtx_unlock(rnh_lock);
+	dlil_if_unlock();
+
+#if PF
+	/*
+	 * Attach packet filter to this interface, if enabled.
+	 */
+	pf_ifnet_hook(ifp, 1);
+#endif /* PF */
+
+	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, NULL, 0);
+
+	if (dlil_verbose) {
+		printf("%s%d: attached%s\n", ifp->if_name, ifp->if_unit,
+		    (dl_if->dl_if_flags & DLIF_REUSE) ? " (recycled)" : "");
+	}
+
+	return (0);
+}
+
+/*
+ * Prepare the storage for the first/permanent link address, which must
+ * have the same lifetime as the ifnet itself. Although the link
+ * address gets removed from if_addrhead and ifnet_addrs[] at detach time,
+ * its location in memory must never change as it may still be referred
+ * to by some parts of the system afterwards (unfortunate implementation
+ * artifacts inherited from BSD.)
+ *
+ * Caller must hold ifnet lock as writer.
+ */
+static struct ifaddr *
+dlil_alloc_lladdr(struct ifnet *ifp, const struct sockaddr_dl *ll_addr)
+{
+	struct ifaddr *ifa, *oifa;
+	struct sockaddr_dl *asdl, *msdl;
+	char workbuf[IFNAMSIZ*2];
+	int namelen, masklen, socksize;
+	struct dlil_ifnet *dl_if = (struct dlil_ifnet *)ifp;
+
+	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
+	VERIFY(ll_addr == NULL || ll_addr->sdl_alen == ifp->if_addrlen);
+
+	/*
+	 * Compute the sockaddr_dl size needed for "<name><unit>" plus
+	 * the link-layer address, rounded up to a 32-bit multiple.
+	 */
+	namelen = snprintf(workbuf, sizeof (workbuf), "%s%d",
+	    ifp->if_name, ifp->if_unit);
+	masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
+	socksize = masklen + ifp->if_addrlen;
+#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof (u_int32_t) - 1)))
+	if ((u_int32_t)socksize < sizeof (struct sockaddr_dl))
+		socksize = sizeof(struct sockaddr_dl);
+	socksize = ROUNDUP(socksize);
+#undef ROUNDUP
+
+	ifa = ifp->if_lladdr;
+	if (socksize > DLIL_SDLMAXLEN ||
+	    (ifa != NULL && ifa != &dl_if->dl_if_lladdr.ifa)) {
+		/*
+		 * Rare, but in the event that the link address requires
+		 * more storage space than DLIL_SDLMAXLEN, allocate the
+		 * largest possible storages for address and mask, such
+		 * that we can reuse the same space when if_addrlen grows.
+		 * This same space will be used when if_addrlen shrinks.
+		 */
+		if (ifa == NULL || ifa == &dl_if->dl_if_lladdr.ifa) {
+			int ifasize = sizeof (*ifa) + 2 * SOCK_MAXADDRLEN;
+			ifa = _MALLOC(ifasize, M_IFADDR, M_WAITOK | M_ZERO);
+			if (ifa == NULL)
+				return (NULL);
+			ifa_lock_init(ifa);
+			/* Don't set IFD_ALLOC, as this is permanent */
+			ifa->ifa_debug = IFD_LINK;
+		}
+		IFA_LOCK(ifa);
+		/* address and mask sockaddr_dl locations */
+		asdl = (struct sockaddr_dl *)(ifa + 1);
+		bzero(asdl, SOCK_MAXADDRLEN);
+		msdl = (struct sockaddr_dl *)((char *)asdl + SOCK_MAXADDRLEN);
+		bzero(msdl, SOCK_MAXADDRLEN);
+	} else {
+		VERIFY(ifa == NULL || ifa == &dl_if->dl_if_lladdr.ifa);
+		/*
+		 * Use the storage areas for address and mask within the
+		 * dlil_ifnet structure. This is the most common case.
+		 */
+		if (ifa == NULL) {
+			ifa = &dl_if->dl_if_lladdr.ifa;
+			ifa_lock_init(ifa);
+			/* Don't set IFD_ALLOC, as this is permanent */
+			ifa->ifa_debug = IFD_LINK;
+		}
+		IFA_LOCK(ifa);
+		/* address and mask sockaddr_dl locations */
+		asdl = (struct sockaddr_dl *)&dl_if->dl_if_lladdr.asdl;
+		bzero(asdl, sizeof (dl_if->dl_if_lladdr.asdl));
+		msdl = (struct sockaddr_dl *)&dl_if->dl_if_lladdr.msdl;
+		bzero(msdl, sizeof (dl_if->dl_if_lladdr.msdl));
+	}
+
+	/* hold a permanent reference for the ifnet itself */
+	IFA_ADDREF_LOCKED(ifa);
+	oifa = ifp->if_lladdr;
+	ifp->if_lladdr = ifa;
+
+	/* Fill in the address sockaddr_dl (name + index + lladdr) */
+	VERIFY(ifa->ifa_debug == IFD_LINK);
+	ifa->ifa_ifp = ifp;
+	ifa->ifa_rtrequest = link_rtrequest;
+	ifa->ifa_addr = (struct sockaddr *)asdl;
+	asdl->sdl_len = socksize;
+	asdl->sdl_family = AF_LINK;
+	bcopy(workbuf, asdl->sdl_data, namelen);
+	asdl->sdl_nlen = namelen;
+	asdl->sdl_index = ifp->if_index;
+	asdl->sdl_type = ifp->if_type;
+	if (ll_addr != NULL) {
+		asdl->sdl_alen = ll_addr->sdl_alen;
+		bcopy(CONST_LLADDR(ll_addr), LLADDR(asdl), asdl->sdl_alen);
+	} else {
+		asdl->sdl_alen = 0;
+	}
+	/* Netmask: all-ones over the name portion of the sockaddr_dl */
+	ifa->ifa_netmask = (struct sockaddr*)msdl;
+	msdl->sdl_len = masklen;
+	while (namelen != 0)
+		msdl->sdl_data[--namelen] = 0xff;
+	IFA_UNLOCK(ifa);
+
+	/* drop the reference that was held via ifp->if_lladdr, if any */
+	if (oifa != NULL)
+		IFA_REMREF(oifa);
+
+	return (ifa);
+}
+
+/*
+ * Ask each configured network layer (IPv4/IPv6/AppleTalk, as built)
+ * to drop all of its addresses on this interface.
+ */
+static void
+if_purgeaddrs(struct ifnet *ifp)
+{
+#if INET
+	in_purgeaddrs(ifp);
+#endif /* INET */
+#if INET6
+	in6_purgeaddrs(ifp);
+#endif /* INET6 */
+#if NETAT
+	at_purgeaddrs(ifp);
+#endif
+}
+
+/*
+ * Begin detaching an ifnet: transition IFRF_ATTACHED -> IFRF_DETACHING,
+ * unlink it from ifnet_head/ifindex2ifnet[] (making it invisible to
+ * lookups), quiesce BPF, forwarding cache and IGMP/MLD, then hand the
+ * ifnet to the delayed-detach worker thread which performs the final
+ * teardown in ifnet_detach_final().
+ *
+ * Returns 0 on success, EINVAL if not attached, ENXIO if a detach is
+ * already in progress.
+ * Lock ordering: ifnet_head lock -> rnh_lock -> ifnet lock.
+ */
+errno_t
+ifnet_detach(ifnet_t ifp)
+{
+	if (ifp == NULL)
+		return (EINVAL);
+
+	ifnet_head_lock_exclusive();
+	lck_mtx_lock(rnh_lock);
+	ifnet_lock_exclusive(ifp);
+
+	/*
+	 * Check to see if this interface has previously triggered
+	 * aggressive protocol draining; if so, decrement the global
+	 * refcnt and clear PR_AGGDRAIN on the route domain if
+	 * there are no more of such an interface around.
+	 */
+	(void) ifnet_set_idle_flags_locked(ifp, 0, ~0);
+
+	lck_mtx_lock_spin(&ifp->if_ref_lock);
+	if (!(ifp->if_refflags & IFRF_ATTACHED)) {
+		lck_mtx_unlock(&ifp->if_ref_lock);
+		ifnet_lock_done(ifp);
+		ifnet_head_done();
+		lck_mtx_unlock(rnh_lock);
+		return (EINVAL);
+	} else if (ifp->if_refflags & IFRF_DETACHING) {
+		/* Interface has already been detached */
+		lck_mtx_unlock(&ifp->if_ref_lock);
+		ifnet_lock_done(ifp);
+		ifnet_head_done();
+		lck_mtx_unlock(rnh_lock);
+		return (ENXIO);
+	}
+	/* Indicate this interface is being detached */
+	ifp->if_refflags &= ~IFRF_ATTACHED;
+	ifp->if_refflags |= IFRF_DETACHING;
+	lck_mtx_unlock(&ifp->if_ref_lock);
+
+	if (dlil_verbose)
+		printf("%s%d: detaching\n", ifp->if_name, ifp->if_unit);
+
+	/*
+	 * Remove ifnet from the ifnet_head, ifindex2ifnet[]; it will
+	 * no longer be visible during lookups from this point.
+	 */
+	VERIFY(ifindex2ifnet[ifp->if_index] == ifp);
+	TAILQ_REMOVE(&ifnet_head, ifp, if_link);
+	ifp->if_link.tqe_next = NULL;
+	ifp->if_link.tqe_prev = NULL;
+	ifindex2ifnet[ifp->if_index] = NULL;
+
+	/* Record detach PC stacktrace */
+	ctrace_record(&((struct dlil_ifnet *)ifp)->dl_if_detach);
+
+	ifnet_lock_done(ifp);
+	ifnet_head_done();
+	lck_mtx_unlock(rnh_lock);
+
+	/* Let BPF know we're detaching */
+	bpfdetach(ifp);
+
+	/* Mark the interface as DOWN */
+	if_down(ifp);
+
+	/* Disable forwarding cached route */
+	lck_mtx_lock(&ifp->if_cached_route_lock);
+	ifp->if_fwd_cacheok = 0;
+	lck_mtx_unlock(&ifp->if_cached_route_lock);
+
+	/*
+	 * Drain any deferred IGMPv3/MLDv2 query responses, but keep the
+	 * references to the info structures and leave them attached to
+	 * this ifnet.
+	 */
+#if INET
+	igmp_domifdetach(ifp);
+#endif /* INET */
+#if INET6
+	mld_domifdetach(ifp);
+#endif /* INET6 */
+
+	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, NULL, 0);
+
+	/* Let worker thread take care of the rest, to avoid reentrancy */
+	dlil_if_lock();
+	ifnet_detaching_enqueue(ifp);
+	dlil_if_unlock();
+
+	return (0);
+}
+
+/*
+ * Queue an ifnet for final detach and wake the delayed-detach worker
+ * thread.  Caller must hold dlil_if_lock.
+ */
+static void
+ifnet_detaching_enqueue(struct ifnet *ifp)
+{
+	dlil_if_lock_assert();
+
+	++ifnet_detaching_cnt;
+	VERIFY(ifnet_detaching_cnt != 0);
+	TAILQ_INSERT_TAIL(&ifnet_detaching_head, ifp, if_detaching_link);
+	wakeup((caddr_t)&ifnet_delayed_run);
+}
+
+/*
+ * Remove and return the next ifnet awaiting final detach, or NULL if
+ * the queue is empty.  Caller must hold dlil_if_lock.
+ */
+static struct ifnet *
+ifnet_detaching_dequeue(void)
+{
+	struct ifnet *ifp;
+
+	dlil_if_lock_assert();
+
+	ifp = TAILQ_FIRST(&ifnet_detaching_head);
+	VERIFY(ifnet_detaching_cnt != 0 || ifp == NULL);
+	if (ifp != NULL) {
+		VERIFY(ifnet_detaching_cnt != 0);
+		--ifnet_detaching_cnt;
+		TAILQ_REMOVE(&ifnet_detaching_head, ifp, if_detaching_link);
+		/* scrub the link so detach-final sanity checks pass */
+		ifp->if_detaching_link.tqe_next = NULL;
+		ifp->if_detaching_link.tqe_prev = NULL;
+	}
+	return (ifp);
+}
+
+/*
+ * Delayed-detach worker: sleeps until ifnet_detaching_enqueue() posts
+ * work, then finishes each queued detach via ifnet_detach_final().
+ * Runs forever; the queue lock is dropped while detaching.
+ */
+static void
+ifnet_delayed_thread_func(void)
+{
+	struct ifnet *ifp;
+
+	for (;;) {
+		dlil_if_lock();
+		while (ifnet_detaching_cnt == 0) {
+			(void) msleep(&ifnet_delayed_run, &dlil_ifnet_lock,
+			    (PZERO - 1), "ifnet_delayed_thread", NULL);
+		}
+
+		VERIFY(TAILQ_FIRST(&ifnet_detaching_head) != NULL);
+
+		/* Take care of detaching ifnet */
+		ifp = ifnet_detaching_dequeue();
+		dlil_if_unlock();
+		if (ifp != NULL)
+			ifnet_detach_final(ifp);
+	}
+}
+
+/*
+ * Final stage of ifnet detach, run from the delayed-detach thread:
+ * wait for outstanding I/O references to drain, tear down filters,
+ * protocols, addresses and the dedicated input thread, swap in stub
+ * function pointers (the driver may unload), then clear
+ * IFRF_DETACHING and release the attach-time ifnet reference.
+ */
+static void
+ifnet_detach_final(struct ifnet *ifp)
+{
+	struct ifnet_filter *filter, *filter_next;
+	struct ifnet_filter_head fhead;
+	struct dlil_threading_info *inputthread;
+	struct ifaddr *ifa;
+	ifnet_detached_func if_free;
+	int i;
+
+	lck_mtx_lock(&ifp->if_ref_lock);
+	if (!(ifp->if_refflags & IFRF_DETACHING)) {
+		panic("%s: flags mismatch (detaching not set) ifp=%p",
+		    __func__, ifp);
+		/* NOTREACHED */
+	}
+
+	/* Wait until the existing IO references get released
+	 * before we proceed with ifnet_detach
+	 */
+	while (ifp->if_refio > 0) {
+		printf("%s: Waiting for IO references on %s%d interface "
+		    "to be released\n", __func__, ifp->if_name, ifp->if_unit);
+		(void) msleep(&(ifp->if_refio), &ifp->if_ref_lock,
+		    (PZERO - 1), "ifnet_ioref_wait", NULL);
+	}
+	lck_mtx_unlock(&ifp->if_ref_lock);
+
+	/* Detach interface filters */
+	lck_mtx_lock(&ifp->if_flt_lock);
+	if_flt_monitor_enter(ifp);
+
+	lck_mtx_assert(&ifp->if_flt_lock, LCK_MTX_ASSERT_OWNED);
+	fhead = ifp->if_flt_head;
+	TAILQ_INIT(&ifp->if_flt_head);
+
+	/*
+	 * Walk the snapshot of the filter list; the lock is dropped
+	 * around each detach callback, which may block.
+	 */
+	for (filter = TAILQ_FIRST(&fhead); filter; filter = filter_next) {
+		filter_next = TAILQ_NEXT(filter, filt_next);
+		lck_mtx_unlock(&ifp->if_flt_lock);
+
+		dlil_detach_filter_internal(filter, 1);
+		lck_mtx_lock(&ifp->if_flt_lock);
+	}
+	if_flt_monitor_leave(ifp);
+	lck_mtx_unlock(&ifp->if_flt_lock);
+
+	/* Tell upper layers to drop their network addresses */
+	if_purgeaddrs(ifp);
+
+	ifnet_lock_exclusive(ifp);
+
+	/* Unplumb all protocols */
+	for (i = 0; i < PROTO_HASH_SLOTS; i++) {
+		struct if_proto *proto;
+
+		/*
+		 * Re-read the bucket head after each unplumb; the ifnet
+		 * lock is dropped across proto_unplumb() so the chain
+		 * may have changed underneath us.
+		 */
+		proto = SLIST_FIRST(&ifp->if_proto_hash[i]);
+		while (proto != NULL) {
+			protocol_family_t family = proto->protocol_family;
+			ifnet_lock_done(ifp);
+			proto_unplumb(family, ifp);
+			ifnet_lock_exclusive(ifp);
+			proto = SLIST_FIRST(&ifp->if_proto_hash[i]);
+		}
+		/* There should not be any protocols left */
+		VERIFY(SLIST_EMPTY(&ifp->if_proto_hash[i]));
+	}
+	zfree(dlif_phash_zone, ifp->if_proto_hash);
+	ifp->if_proto_hash = NULL;
+
+	/* Detach (permanent) link address from if_addrhead */
+	ifa = TAILQ_FIRST(&ifp->if_addrhead);
+	VERIFY(ifnet_addrs[ifp->if_index - 1] == ifa);
+	IFA_LOCK(ifa);
+	if_detach_link_ifa(ifp, ifa);
+	IFA_UNLOCK(ifa);
+
+	/* Remove (permanent) link address from ifnet_addrs[] */
+	IFA_REMREF(ifa);
+	ifnet_addrs[ifp->if_index - 1] = NULL;
+
+	/* This interface should not be on {ifnet_head,detaching} */
+	VERIFY(ifp->if_link.tqe_next == NULL);
+	VERIFY(ifp->if_link.tqe_prev == NULL);
+	VERIFY(ifp->if_detaching_link.tqe_next == NULL);
+	VERIFY(ifp->if_detaching_link.tqe_prev == NULL);
+
+	/* Prefix list should be empty by now */
+	VERIFY(TAILQ_EMPTY(&ifp->if_prefixhead));
+
+	/* The slot should have been emptied */
+	VERIFY(ifindex2ifnet[ifp->if_index] == NULL);
+
+	/* There should not be any addresses left */
+	VERIFY(TAILQ_EMPTY(&ifp->if_addrhead));
+
+	/*
+	 * If thread affinity was set for the workloop thread, we will need
+	 * to tear down the affinity and release the extra reference count
+	 * taken at attach time;
+	 */
+	if ((inputthread = ifp->if_input_thread) != NULL) {
+		if (inputthread->net_affinity) {
+			struct thread *tp;
+
+			if (inputthread == dlil_lo_thread_ptr) {
+				panic("%s: Thread affinity should not be "
+				    "enabled on the loopback dlil input "
+				    "thread", __func__);
+				/* NOTREACHED */
+			}
+
+			lck_mtx_lock_spin(&inputthread->input_lck);
+			tp = inputthread->workloop_thread;
+			inputthread->workloop_thread = NULL;
+			inputthread->tag = 0;
+			inputthread->net_affinity = FALSE;
+			lck_mtx_unlock(&inputthread->input_lck);
+
+			/* Tear down workloop thread affinity */
+			if (tp != NULL) {
+				(void) dlil_affinity_set(tp,
+				    THREAD_AFFINITY_TAG_NULL);
+				thread_deallocate(tp);
+			}
+
+			/* Tear down dlil input thread affinity */
+			tp = inputthread->input_thread;
+			(void) dlil_affinity_set(tp, THREAD_AFFINITY_TAG_NULL);
+			thread_deallocate(tp);
+		}
+
+		/* cleanup ifp dlil input thread, if any */
+		ifp->if_input_thread = NULL;
+
+		/*
+		 * Ask a dedicated (non-loopback) input thread to
+		 * terminate itself; it frees its own state.
+		 */
+		if (inputthread != dlil_lo_thread_ptr) {
+#ifdef DLIL_DEBUG
+			printf("%s: wakeup thread threadinfo: %p "
+			    "input_thread=%p threads: cur=%d max=%d\n",
+			    __func__, inputthread, inputthread->input_thread,
+			    dlil_multithreaded_input, cur_dlil_input_threads);
+#endif
+			lck_mtx_lock_spin(&inputthread->input_lck);
+
+			inputthread->input_waiting |= DLIL_INPUT_TERMINATE;
+			if (!(inputthread->input_waiting & DLIL_INPUT_RUNNING))
+				wakeup((caddr_t)&inputthread->input_waiting);
+
+			lck_mtx_unlock(&inputthread->input_lck);
+		}
+	}
+
+	/* The driver might unload, so point these to ourselves */
+	if_free = ifp->if_free;
+	ifp->if_output = ifp_if_output;
+	ifp->if_ioctl = ifp_if_ioctl;
+	ifp->if_set_bpf_tap = ifp_if_set_bpf_tap;
+	ifp->if_free = ifp_if_free;
+	ifp->if_demux = ifp_if_demux;
+	ifp->if_event = ifp_if_event;
+	ifp->if_framer = ifp_if_framer;
+	ifp->if_add_proto = ifp_if_add_proto;
+	ifp->if_del_proto = ifp_if_del_proto;
+	ifp->if_check_multi = ifp_if_check_multi;
+
+	ifnet_lock_done(ifp);
+
+#if PF
+	/*
+	 * Detach this interface from packet filter, if enabled.
+	 */
+	pf_ifnet_hook(ifp, 0);
+#endif /* PF */
+
+	/* Filter list should be empty */
+	lck_mtx_lock_spin(&ifp->if_flt_lock);
+	VERIFY(TAILQ_EMPTY(&ifp->if_flt_head));
+	VERIFY(ifp->if_flt_busy == 0);
+	VERIFY(ifp->if_flt_waiters == 0);
+	lck_mtx_unlock(&ifp->if_flt_lock);
+
+	/* Last chance to cleanup any cached route */
+	lck_mtx_lock(&ifp->if_cached_route_lock);
+	VERIFY(!ifp->if_fwd_cacheok);
+	if (ifp->if_fwd_route.ro_rt != NULL)
+		rtfree(ifp->if_fwd_route.ro_rt);
+	bzero(&ifp->if_fwd_route, sizeof (ifp->if_fwd_route));
+	if (ifp->if_src_route.ro_rt != NULL)
+		rtfree(ifp->if_src_route.ro_rt);
+	bzero(&ifp->if_src_route, sizeof (ifp->if_src_route));
+	if (ifp->if_src_route6.ro_rt != NULL)
+		rtfree(ifp->if_src_route6.ro_rt);
+	bzero(&ifp->if_src_route6, sizeof (ifp->if_src_route6));
+	lck_mtx_unlock(&ifp->if_cached_route_lock);
+
+	ifnet_llreach_ifdetach(ifp);
+
+	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, NULL, 0);
+
+	/* notify the driver via the saved callback, if any */
+	if (if_free != NULL)
+		if_free(ifp);
+
+	/*
+	 * Finally, mark this ifnet as detached.
+	 */
+	lck_mtx_lock_spin(&ifp->if_ref_lock);
+	if (!(ifp->if_refflags & IFRF_DETACHING)) {
+		panic("%s: flags mismatch (detaching not set) ifp=%p",
+		    __func__, ifp);
+		/* NOTREACHED */
+	}
+	ifp->if_refflags &= ~IFRF_DETACHING;
+	lck_mtx_unlock(&ifp->if_ref_lock);
+
+	if (dlil_verbose)
+		printf("%s%d: detached\n", ifp->if_name, ifp->if_unit);
+
+	/* Release reference held during ifnet attach */
+	ifnet_release(ifp);
+}
+
+/*
+ * Stub output handler installed on a detached ifnet: drop the packet
+ * and report success, in case the real driver has unloaded.
+ */
+static errno_t
+ifp_if_output(struct ifnet *ifp, struct mbuf *m)
+{
+#pragma unused(ifp)
+	m_freem(m);
+	return (0);
+}
+
+/*
+ * Stub demux handler for a detached ifnet: free the mbuf and return
+ * EJUSTRETURN so the caller does not touch it again.
+ */
+static errno_t
+ifp_if_demux(struct ifnet *ifp, struct mbuf *m, char *fh, protocol_family_t *pf)
+{
+#pragma unused(ifp, fh, pf)
+	m_freem(m);
+	return (EJUSTRETURN);
+}
+
+/* Stub: protocol attach is invalid on a detached ifnet. */
+static errno_t
+ifp_if_add_proto(struct ifnet *ifp, protocol_family_t pf,
+    const struct ifnet_demux_desc *da, u_int32_t dc)
+{
+#pragma unused(ifp, pf, da, dc)
+	return (EINVAL);
+}
+
+/* Stub: protocol detach is invalid on a detached ifnet. */
+static errno_t
+ifp_if_del_proto(struct ifnet *ifp, protocol_family_t pf)
+{
+#pragma unused(ifp, pf)
+	return (EINVAL);
+}
+
+/* Stub: multicast capability checks are unsupported once detached. */
+static errno_t
+ifp_if_check_multi(struct ifnet *ifp, const struct sockaddr *sa)
+{
+#pragma unused(ifp, sa)
+	return (EOPNOTSUPP);
+}
+
+/*
+ * Stub framer for a detached ifnet: consume the packet, NULL the
+ * caller's mbuf pointer, and return EJUSTRETURN to stop the send.
+ */
+static errno_t
+ifp_if_framer(struct ifnet *ifp, struct mbuf **m,
+    const struct sockaddr *sa, const char *ll, const char *t)
+{
+#pragma unused(ifp, m, sa, ll, t)
+	m_freem(*m);
+	*m = NULL;
+	return (EJUSTRETURN);
+}
+
+/* Stub ioctl handler for a detached ifnet: nothing is supported. */
+static errno_t
+ifp_if_ioctl(struct ifnet *ifp, unsigned long cmd, void *arg)
+{
+#pragma unused(ifp, cmd, arg)
+	return (EOPNOTSUPP);
+}
+
+/* Stub BPF-tap handler for a detached ifnet: accept and ignore. */
+static errno_t
+ifp_if_set_bpf_tap(struct ifnet *ifp, bpf_tap_mode tm, bpf_packet_func f)
+{
+#pragma unused(ifp, tm, f)
+	/* XXX not sure what to do here */
+	return (0);
+}
+
+/* Stub if_free callback: detached ifnet storage is zone-managed. */
+static void
+ifp_if_free(struct ifnet *ifp)
+{
+#pragma unused(ifp)
+}
+
+/* Stub event handler for a detached ifnet: events are ignored. */
+static void
+ifp_if_event(struct ifnet *ifp, const struct kev_msg *e)
+{
+#pragma unused(ifp, e)
+}
+
+/*
+ * Obtain a dlil_ifnet for the given family/uniqueid, reusing a
+ * matching not-in-use entry on dlil_ifnet_head when possible and
+ * otherwise allocating a fresh, 64-bit aligned one from dlif_zone.
+ *
+ * uniqueid/uniqueid_len identify the client's interface (may be
+ * NULL/0); *ifp receives the result.  Returns 0 on success, EBUSY if
+ * a matching interface is already in use, ENOMEM on allocation
+ * failure.
+ *
+ * Layout of a fresh allocation: the zone element `buf` holds a saved
+ * copy of itself immediately before `base`, the 64-bit aligned
+ * address actually used as the dlil_ifnet, so dlil_if_free side can
+ * recover the original pointer.
+ */
+__private_extern__
+int dlil_if_acquire(u_int32_t family, const void *uniqueid,
+    size_t uniqueid_len, struct ifnet **ifp)
+{
+	struct ifnet *ifp1 = NULL;
+	struct dlil_ifnet *dlifp1 = NULL;
+	void *buf, *base, **pbuf;
+	int ret = 0;
+
+	dlil_if_lock();
+	TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) {
+		ifp1 = (struct ifnet *)dlifp1;
+
+		if (ifp1->if_family != family)
+			continue;
+
+		lck_mtx_lock(&dlifp1->dl_if_lock);
+		/*
+		 * Same uniqueid and same len, or no unique id specified.
+		 * Skip bcmp() entirely when the length is zero, since
+		 * uniqueid may legitimately be NULL in that case.
+		 */
+		if ((uniqueid_len == dlifp1->dl_if_uniqueid_len) &&
+		    (uniqueid_len == 0 ||
+		    bcmp(uniqueid, dlifp1->dl_if_uniqueid, uniqueid_len) == 0)) {
+			/* check for matching interface in use */
+			if (dlifp1->dl_if_flags & DLIF_INUSE) {
+				if (uniqueid_len) {
+					ret = EBUSY;
+					lck_mtx_unlock(&dlifp1->dl_if_lock);
+					goto end;
+				}
+			} else {
+				/* recycle this entry */
+				dlifp1->dl_if_flags |= (DLIF_INUSE|DLIF_REUSE);
+				lck_mtx_unlock(&dlifp1->dl_if_lock);
+				*ifp = ifp1;
+				goto end;
+			}
+		}
+		lck_mtx_unlock(&dlifp1->dl_if_lock);
+	}
+
+	/* no interface found, allocate a new one */
+	buf = zalloc(dlif_zone);
+	if (buf == NULL) {
+		ret = ENOMEM;
+		goto end;
+	}
+	bzero(buf, dlif_bufsize);
+
+	/* Get the 64-bit aligned base address for this object */
+	base = (void *)P2ROUNDUP((intptr_t)buf + sizeof (u_int64_t),
+	    sizeof (u_int64_t));
+	VERIFY(((intptr_t)base + dlif_size) <= ((intptr_t)buf + dlif_bufsize));
+
+	/*
+	 * Wind back a pointer size from the aligned base and
+	 * save the original address so we can free it later.
+	 */
+	pbuf = (void **)((intptr_t)base - sizeof (void *));
+	*pbuf = buf;
+	dlifp1 = base;
+
+	if (uniqueid_len) {
+		MALLOC(dlifp1->dl_if_uniqueid, void *, uniqueid_len,
+		    M_NKE, M_WAITOK);
+		if (dlifp1->dl_if_uniqueid == NULL) {
+			/*
+			 * Return the *original* zone element address
+			 * (buf), not the aligned interior pointer
+			 * (dlifp1); zfree() must be given the exact
+			 * pointer that zalloc() returned.
+			 */
+			zfree(dlif_zone, buf);
+			dlifp1 = NULL;
+			ret = ENOMEM;
+			goto end;
+		}
+		bcopy(uniqueid, dlifp1->dl_if_uniqueid, uniqueid_len);
+		dlifp1->dl_if_uniqueid_len = uniqueid_len;
+	}
+
+	ifp1 = (struct ifnet *)dlifp1;
+	dlifp1->dl_if_flags = DLIF_INUSE;
+	if (ifnet_debug) {
+		dlifp1->dl_if_flags |= DLIF_DEBUG;
+		dlifp1->dl_if_trace = dlil_if_trace;
+	}
+	ifp1->if_name = dlifp1->dl_if_namestorage;
+#if CONFIG_MACF_NET
+	mac_ifnet_label_init(ifp1);
+#endif
+
+	/* Initialize the per-ifnet locks */
+	lck_mtx_init(&dlifp1->dl_if_lock, ifnet_lock_group, ifnet_lock_attr);
+	lck_rw_init(&ifp1->if_lock, ifnet_lock_group, ifnet_lock_attr);
+	lck_mtx_init(&ifp1->if_ref_lock, ifnet_lock_group, ifnet_lock_attr);
+	lck_mtx_init(&ifp1->if_flt_lock, ifnet_lock_group, ifnet_lock_attr);
+	lck_mtx_init(&ifp1->if_cached_route_lock, ifnet_lock_group,
+	    ifnet_lock_attr);
+	lck_mtx_init(&ifp1->if_addrconfig_lock, ifnet_lock_group,
+	    ifnet_lock_attr);
+	lck_rw_init(&ifp1->if_llreach_lock, ifnet_lock_group, ifnet_lock_attr);
+
+	TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link);
+
+	*ifp = ifp1;
+
+end:
+	dlil_if_unlock();
+
+	VERIFY(dlifp1 == NULL || (IS_P2ALIGNED(dlifp1, sizeof (u_int64_t)) &&
+	    IS_P2ALIGNED(&ifp1->if_data, sizeof (u_int64_t))));
+
+	return (ret);
+}
+
+/*
+ * Return a dlil_ifnet to the not-in-use pool: clear DLIF_INUSE and
+ * point if_name back at the embedded name storage (copying the
+ * current name in first), so the entry can be recycled by a later
+ * dlil_if_acquire() for the same family.
+ */
+__private_extern__ void
+dlil_if_release(ifnet_t ifp)
+{
+	struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp;
+
+	ifnet_lock_exclusive(ifp);
+	lck_mtx_lock(&dlifp->dl_if_lock);
+	dlifp->dl_if_flags &= ~DLIF_INUSE;
+	/* NOTE(review): strncpy does not guarantee NUL-termination if
+	 * if_name is exactly IFNAMSIZ long — presumed shorter here. */
+	strncpy(dlifp->dl_if_namestorage, ifp->if_name, IFNAMSIZ);
+	ifp->if_name = dlifp->dl_if_namestorage;
+	lck_mtx_unlock(&dlifp->dl_if_lock);
+#if CONFIG_MACF_NET
+	/*
+	 * We can either recycle the MAC label here or in dlil_if_acquire().
+	 * It seems logical to do it here but this means that anything that
+	 * still has a handle on ifp will now see it as unlabeled.
+	 * Since the interface is "dead" that may be OK. Revisit later.
+	 */
+	mac_ifnet_label_recycle(ifp);
+#endif
+	ifnet_lock_done(ifp);
+}
+
+/* Acquire the global dlil_ifnet_lock (serializes attach/detach/acquire). */
+__private_extern__ void
+dlil_if_lock(void)
+{
+	lck_mtx_lock(&dlil_ifnet_lock);
+}
+
+/* Release the global dlil_ifnet_lock. */
+__private_extern__ void
+dlil_if_unlock(void)
+{
+	lck_mtx_unlock(&dlil_ifnet_lock);
+}
+
+/* Assert (debug builds) that the caller holds dlil_ifnet_lock. */
+__private_extern__ void
+dlil_if_lock_assert(void)
+{
+	lck_mtx_assert(&dlil_ifnet_lock, LCK_MTX_ASSERT_OWNED);
+}
+
+/*
+ * Explicitly unplumb the well-known protocol families from an ifnet.
+ * Return values are ignored: a family that was never plumbed simply
+ * fails its unplumb, which is harmless here.
+ */
+__private_extern__ void
+dlil_proto_unplumb_all(struct ifnet *ifp)
+{
+	/*
+	 * if_proto_hash[0-3] are for PF_INET, PF_INET6, PF_APPLETALK
+	 * and PF_VLAN, where each bucket contains exactly one entry;
+	 * PF_VLAN does not need an explicit unplumb.
+	 *
+	 * if_proto_hash[4] is for other protocols; we expect anything
+	 * in this bucket to respond to the DETACHING event (which would
+	 * have happened by now) and do the unplumb then.
+	 */
+	(void) proto_unplumb(PF_INET, ifp);
+#if INET6
+	(void) proto_unplumb(PF_INET6, ifp);
+#endif /* INET6 */
+#if NETAT
+	(void) proto_unplumb(PF_APPLETALK, ifp);
+#endif /* NETAT */
+}
+
+/*
+ * Copy the cached IPv4 source route out of the ifnet into *dst.
+ * Taken as a spin lock then converted, since route_copyout() may do
+ * work unsuitable for a spin-held mutex.
+ */
+static void
+ifp_src_route_copyout(struct ifnet *ifp, struct route *dst)
+{
+	lck_mtx_lock_spin(&ifp->if_cached_route_lock);
+	lck_mtx_convert_spin(&ifp->if_cached_route_lock);
+
+	route_copyout(dst, &ifp->if_src_route, sizeof (*dst));
+
+	lck_mtx_unlock(&ifp->if_cached_route_lock);
+}
+
+/*
+ * Store *src back into the ifnet's cached IPv4 source route.  If
+ * caching has been disabled (interface detaching), drop the route
+ * reference instead of caching it.
+ */
+static void
+ifp_src_route_copyin(struct ifnet *ifp, struct route *src)
+{
+	lck_mtx_lock_spin(&ifp->if_cached_route_lock);
+	lck_mtx_convert_spin(&ifp->if_cached_route_lock);
+
+	if (ifp->if_fwd_cacheok) {
+		route_copyin(src, &ifp->if_src_route, sizeof (*src));
+	} else {
+		rtfree(src->ro_rt);
+		src->ro_rt = NULL;
+	}
+	lck_mtx_unlock(&ifp->if_cached_route_lock);
+}
+
+#if INET6
+/* IPv6 counterpart of ifp_src_route_copyout(). */
+static void
+ifp_src_route6_copyout(struct ifnet *ifp, struct route_in6 *dst)
+{
+	lck_mtx_lock_spin(&ifp->if_cached_route_lock);
+	lck_mtx_convert_spin(&ifp->if_cached_route_lock);
+
+	route_copyout((struct route *)dst, (struct route *)&ifp->if_src_route6,
+	    sizeof (*dst));
+
+	lck_mtx_unlock(&ifp->if_cached_route_lock);
+}
+
+/* IPv6 counterpart of ifp_src_route_copyin(). */
+static void
+ifp_src_route6_copyin(struct ifnet *ifp, struct route_in6 *src)
+{
+	lck_mtx_lock_spin(&ifp->if_cached_route_lock);
+	lck_mtx_convert_spin(&ifp->if_cached_route_lock);
+
+	if (ifp->if_fwd_cacheok) {
+		route_copyin((struct route *)src,
+		    (struct route *)&ifp->if_src_route6, sizeof (*src));
+	} else {
+		rtfree(src->ro_rt);
+		src->ro_rt = NULL;
+	}
+	lck_mtx_unlock(&ifp->if_cached_route_lock);
+}
+#endif /* INET6 */
+
+/*
+ * Look up (and cache) the route for IPv4 address src_ip scoped to
+ * this interface.  A cached route is reused if it is still RTF_UP,
+ * matches src_ip, and its generation matches route_generation;
+ * otherwise a fresh scoped lookup is done and its result cached.
+ * Returns the rtentry (caller owns a reference) or NULL.
+ */
+struct rtentry *
+ifnet_cached_rtlookup_inet(struct ifnet *ifp, struct in_addr src_ip)
+{
+	struct route src_rt;
+	struct sockaddr_in *dst = (struct sockaddr_in *)(&src_rt.ro_dst);
+
+	ifp_src_route_copyout(ifp, &src_rt);
+
+	if (src_rt.ro_rt == NULL || !(src_rt.ro_rt->rt_flags & RTF_UP) ||
+	    src_ip.s_addr != dst->sin_addr.s_addr ||
+	    src_rt.ro_rt->generation_id != route_generation) {
+		/* stale cache entry: drop it, or (re)initialize ro_dst */
+		if (src_rt.ro_rt != NULL) {
+			rtfree(src_rt.ro_rt);
+			src_rt.ro_rt = NULL;
+		} else if (dst->sin_family != AF_INET) {
+			bzero(&src_rt.ro_dst, sizeof (src_rt.ro_dst));
+			dst->sin_len = sizeof (src_rt.ro_dst);
+			dst->sin_family = AF_INET;
+		}
+		dst->sin_addr = src_ip;
+
+		if (src_rt.ro_rt == NULL) {
+			src_rt.ro_rt = rtalloc1_scoped((struct sockaddr *)dst,
+			    0, 0, ifp->if_index);
+
+			if (src_rt.ro_rt != NULL) {
+				/* retain a ref, copyin consumes one */
+				struct rtentry *rte = src_rt.ro_rt;
+				RT_ADDREF(rte);
+				ifp_src_route_copyin(ifp, &src_rt);
+				src_rt.ro_rt = rte;
+			}
+		}
+	}
+
+	return (src_rt.ro_rt);
+}
+
+#if INET6
+/*
+ * IPv6 counterpart of ifnet_cached_rtlookup_inet(): look up (and
+ * cache) the route for *src_ip6 scoped to this interface.  Returns
+ * the rtentry (caller owns a reference) or NULL.
+ */
+struct rtentry*
+ifnet_cached_rtlookup_inet6(struct ifnet *ifp, struct in6_addr *src_ip6)
+{
+	struct route_in6 src_rt;
+
+	ifp_src_route6_copyout(ifp, &src_rt);
+
+	if (src_rt.ro_rt == NULL || !(src_rt.ro_rt->rt_flags & RTF_UP) ||
+	    !IN6_ARE_ADDR_EQUAL(src_ip6, &src_rt.ro_dst.sin6_addr) ||
+	    src_rt.ro_rt->generation_id != route_generation) {
+		/* stale cache entry: drop it, or (re)initialize ro_dst */
+		if (src_rt.ro_rt != NULL) {
+			rtfree(src_rt.ro_rt);
+			src_rt.ro_rt = NULL;
+		} else if (src_rt.ro_dst.sin6_family != AF_INET6) {
+			bzero(&src_rt.ro_dst, sizeof (src_rt.ro_dst));
+			src_rt.ro_dst.sin6_len = sizeof (src_rt.ro_dst);
+			src_rt.ro_dst.sin6_family = AF_INET6;
+		}
+		src_rt.ro_dst.sin6_scope_id = in6_addr2scopeid(ifp, src_ip6);
+		src_rt.ro_dst.sin6_addr = *src_ip6;
+
+		if (src_rt.ro_rt == NULL) {
+			src_rt.ro_rt = rtalloc1_scoped(
+			    (struct sockaddr *)&src_rt.ro_dst, 0, 0,
+			    ifp->if_index);
+
+			if (src_rt.ro_rt != NULL) {
+				/* retain a ref, copyin consumes one */
+				struct rtentry *rte = src_rt.ro_rt;
+				RT_ADDREF(rte);
+				ifp_src_route6_copyin(ifp, &src_rt);
+				src_rt.ro_rt = rte;
+			}
+		}
+	}
+
+	return (src_rt.ro_rt);
+}
+#endif /* INET6 */