+
+ case SIOCSIFBRDADDR: /* struct ifreq */
+ IFA_LOCK(&ia->ia_ifa);
+
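+	/*
+	 * Normalize the broadcast sockaddr: AF_INET family, proper length,
+	 * no port, and zeroed sin_zero padding; only sin_addr is taken from
+	 * the request.
+	 */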
+ ia->ia_broadaddr.sin_family = AF_INET;
+ ia->ia_broadaddr.sin_len = sizeof(struct sockaddr_in);
+ ia->ia_broadaddr.sin_port = 0;
+ bcopy(&(SIN(&ifr->ifr_broadaddr)->sin_addr),
+ &ia->ia_broadaddr.sin_addr, sizeof(ia->ia_broadaddr.sin_addr));
+ bzero(&ia->ia_broadaddr.sin_zero, sizeof(ia->ia_broadaddr.sin_zero));
+
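+	/*
+	 * Build and post a KEV_INET_SIFBRDADDR event carrying a snapshot
+	 * of the updated address state.
+	 */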
+ ev_msg.vendor_code = KEV_VENDOR_APPLE;
+ ev_msg.kev_class = KEV_NETWORK_CLASS;
+ ev_msg.kev_subclass = KEV_INET_SUBCLASS;
+
+ ev_msg.event_code = KEV_INET_SIFBRDADDR;
+
+ if (ia->ia_ifa.ifa_dstaddr) {
+ in_event_data.ia_dstaddr = ((struct sockaddr_in *)
+ (void *)ia->ia_ifa.ifa_dstaddr)->sin_addr;
+ } else {
+ in_event_data.ia_dstaddr.s_addr = INADDR_ANY;
+ }
+ in_event_data.ia_addr = ia->ia_addr.sin_addr;
+ in_event_data.ia_net = ia->ia_net;
+ in_event_data.ia_netmask = ia->ia_netmask;
+ in_event_data.ia_subnet = ia->ia_subnet;
+ in_event_data.ia_subnetmask = ia->ia_subnetmask;
+ in_event_data.ia_netbroadcast = ia->ia_netbroadcast;
+ IFA_UNLOCK(&ia->ia_ifa);
+ (void) strlcpy(&in_event_data.link_data.if_name[0],
+ ifp->if_name, IFNAMSIZ);
+ in_event_data.link_data.if_family = ifp->if_family;
+ in_event_data.link_data.if_unit = (u_int32_t)ifp->if_unit;
+
+ ev_msg.dv[0].data_ptr = &in_event_data;
+ ev_msg.dv[0].data_length = sizeof(struct kev_in_data);
+ ev_msg.dv[1].data_length = 0;
+
+ dlil_post_complete_msg(ifp, &ev_msg);
+ break;
+
+ default:
+ VERIFY(0);
+ /* NOTREACHED */
+ }
+
+ return error;
+}
+
+/*
+ * Caller passes in the ioctl data pointer directly via "ifr", with the
+ * expectation that this routine always uses bcopy() or other byte-aligned
+ * memory accesses.
+ */
+static __attribute__((noinline)) int
+inctl_ifnetmask(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd,
+ struct ifreq *ifr)
+{
+ struct kev_in_data in_event_data;
+ struct kev_msg ev_msg;
+ struct sockaddr_in mask;
+ int error = 0;
+
+ VERIFY(ifp != NULL);
+
+ bzero(&in_event_data, sizeof(struct kev_in_data));
+ bzero(&ev_msg, sizeof(struct kev_msg));
+
+ switch (cmd) {
+ case SIOCGIFNETMASK: /* struct ifreq */
+ if (ia == NULL) {
+ error = EADDRNOTAVAIL;
+ break;
+ }
+ IFA_LOCK(&ia->ia_ifa);
+ bcopy(&ia->ia_sockmask, &ifr->ifr_addr, sizeof(mask));
+ IFA_UNLOCK(&ia->ia_ifa);
+ break;
+
+ case SIOCSIFNETMASK: { /* struct ifreq */
+ in_addr_t i;
+
+ bcopy(&ifr->ifr_addr, &mask, sizeof(mask));
+ i = mask.sin_addr.s_addr;
+
+ VERIFY(ia != NULL);
+ IFA_LOCK(&ia->ia_ifa);
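+		/*
+		 * "i" holds the mask in network byte order; store it as-is in
+		 * ia_sockmask and keep a host-order copy in ia_subnetmask.
+		 */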
+ ia->ia_subnetmask = ntohl(ia->ia_sockmask.sin_addr.s_addr = i);
+ ev_msg.vendor_code = KEV_VENDOR_APPLE;
+ ev_msg.kev_class = KEV_NETWORK_CLASS;
+ ev_msg.kev_subclass = KEV_INET_SUBCLASS;
+
+ ev_msg.event_code = KEV_INET_SIFNETMASK;
+
+ if (ia->ia_ifa.ifa_dstaddr) {
+ in_event_data.ia_dstaddr = ((struct sockaddr_in *)
+ (void *)ia->ia_ifa.ifa_dstaddr)->sin_addr;
+ } else {
+ in_event_data.ia_dstaddr.s_addr = INADDR_ANY;
+ }
+ in_event_data.ia_addr = ia->ia_addr.sin_addr;
+ in_event_data.ia_net = ia->ia_net;
+ in_event_data.ia_netmask = ia->ia_netmask;
+ in_event_data.ia_subnet = ia->ia_subnet;
+ in_event_data.ia_subnetmask = ia->ia_subnetmask;
+ in_event_data.ia_netbroadcast = ia->ia_netbroadcast;
+ IFA_UNLOCK(&ia->ia_ifa);
+ (void) strlcpy(&in_event_data.link_data.if_name[0],
+ ifp->if_name, IFNAMSIZ);
+ in_event_data.link_data.if_family = ifp->if_family;
+ in_event_data.link_data.if_unit = (u_int32_t)ifp->if_unit;
+
+ ev_msg.dv[0].data_ptr = &in_event_data;
+ ev_msg.dv[0].data_length = sizeof(struct kev_in_data);
+ ev_msg.dv[1].data_length = 0;
+
+ dlil_post_complete_msg(ifp, &ev_msg);
+ break;
+ }
+
+ default:
+ VERIFY(0);
+ /* NOTREACHED */
+ }
+
+ return error;
+}
+
+/*
+ * Generic INET control operations (ioctl's).
+ *
+ * ifp is NULL if not an interface-specific ioctl.
+ *
+ * Most of the routines called to handle the ioctls would end up being
+ * tail-call optimized, which unfortunately causes this routine to
+ * consume too much stack space; this is the reason for the "noinline"
+ * attribute used on those routines.
+ *
+ * If called directly from within the networking stack (as opposed to via
+ * pru_control), the socket parameter may be NULL.
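+ * (For example, in_purgeaddrs() below passes a NULL socket and kernproc.)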
+ */
+int
+in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp,
+ struct proc *p)
+{
+ struct ifreq *ifr = (struct ifreq *)(void *)data;
+ struct sockaddr_in addr, dstaddr;
+ struct sockaddr_in sin, *sa = NULL;
+ boolean_t privileged = (proc_suser(p) == 0);
+ boolean_t so_unlocked = FALSE;
+ struct in_ifaddr *ia = NULL;
+ struct ifaddr *ifa;
+ int error = 0;
+ int intval;
+
+ /* In case it's NULL, make sure it came from the kernel */
+ VERIFY(so != NULL || p == kernproc);
+
+ /*
+ * ioctls which don't require ifp, but require socket.
+ */
+ switch (cmd) {
+ case SIOCGASSOCIDS32: /* struct so_aidreq32 */
+ case SIOCGASSOCIDS64: /* struct so_aidreq64 */
+ return inctl_associd(so, cmd, data);
+ /* NOTREACHED */
+
+ case SIOCGCONNIDS32: /* struct so_cidreq32 */
+ case SIOCGCONNIDS64: /* struct so_cidreq64 */
+ return inctl_connid(so, cmd, data);
+ /* NOTREACHED */
+
+ case SIOCGCONNINFO32: /* struct so_cinforeq32 */
+ case SIOCGCONNINFO64: /* struct so_cinforeq64 */
+ return inctl_conninfo(so, cmd, data);
+ /* NOTREACHED */
+ }
+
+ /*
+ * The rest of the ioctls require ifp; reject if we don't have one;
+ * return ENXIO to be consistent with ifioctl().
+ */
+ if (ifp == NULL) {
+ return ENXIO;
+ }
+
+ /*
+ * ioctls which require ifp but not interface address.
+ */
+ switch (cmd) {
+ case SIOCAUTOADDR: /* struct ifreq */
+ if (!privileged) {
+ return EPERM;
+ }
+ return inctl_autoaddr(ifp, ifr);
+ /* NOTREACHED */
+
+ case SIOCARPIPLL: /* struct ifreq */
+ if (!privileged) {
+ return EPERM;
+ }
+ return inctl_arpipll(ifp, ifr);
+ /* NOTREACHED */
+
+ case SIOCGETROUTERMODE: /* struct ifreq */
+ intval = (ifp->if_eflags & IFEF_IPV4_ROUTER) != 0 ? 1 : 0;
+ bcopy(&intval, &ifr->ifr_intval, sizeof(intval));
+ return 0;
+ /* NOTREACHED */
+
+ case SIOCSETROUTERMODE: /* struct ifreq */
+ if (!privileged) {
+ return EPERM;
+ }
+ return inctl_setrouter(ifp, ifr);
+ /* NOTREACHED */
+
+ case SIOCPROTOATTACH: /* struct ifreq */
+ if (!privileged) {
+ return EPERM;
+ }
+ return in_domifattach(ifp);
+ /* NOTREACHED */
+
+ case SIOCPROTODETACH: /* struct ifreq */
+ if (!privileged) {
+ return EPERM;
+ }
+
+ /*
+ * If an IPv4 address is still present, refuse to detach.
+ */
+ ifnet_lock_shared(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ IFA_LOCK(ifa);
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ IFA_UNLOCK(ifa);
+ break;
+ }
+ IFA_UNLOCK(ifa);
+ }
+ ifnet_lock_done(ifp);
+ return (ifa == NULL) ? proto_unplumb(PF_INET, ifp) : EBUSY;
+ /* NOTREACHED */
+ }
+
+ /*
+ * ioctls which require interface address; obtain sockaddr_in.
+ */
+ switch (cmd) {
+ case SIOCAIFADDR: /* struct {if,in_}aliasreq */
+ if (!privileged) {
+ return EPERM;
+ }
+ bcopy(&((struct in_aliasreq *)(void *)data)->ifra_addr,
+ &sin, sizeof(sin));
+ sa = &sin;
+ break;
+
+ case SIOCDIFADDR: /* struct ifreq */
+ case SIOCSIFADDR: /* struct ifreq */
+ case SIOCSIFDSTADDR: /* struct ifreq */
+ case SIOCSIFNETMASK: /* struct ifreq */
+ case SIOCSIFBRDADDR: /* struct ifreq */
+ if (!privileged) {
+ return EPERM;
+ }
+ OS_FALLTHROUGH;
+ case SIOCGIFADDR: /* struct ifreq */
+ case SIOCGIFDSTADDR: /* struct ifreq */
+ case SIOCGIFNETMASK: /* struct ifreq */
+ case SIOCGIFBRDADDR: /* struct ifreq */
+ bcopy(&ifr->ifr_addr, &sin, sizeof(sin));
+ sa = &sin;
+ break;
+ }
+
+ /*
+ * Find address for this interface, if it exists.
+ *
+ * If an alias address was specified, find that one instead of
+ * the first one on the interface, if possible.
+ */
+ VERIFY(ia == NULL);
+ if (sa != NULL) {
+ struct in_ifaddr *iap;
+
+ /*
+		 * Any failure from this point on must take into account
+		 * a non-NULL "ia" with an outstanding reference count, and
+		 * therefore requires IFA_REMREF. Jump to the "done" label
+		 * instead of returning if "ia" is valid.
+ */
+ lck_rw_lock_shared(in_ifaddr_rwlock);
+ TAILQ_FOREACH(iap, INADDR_HASH(sa->sin_addr.s_addr), ia_hash) {
+ IFA_LOCK(&iap->ia_ifa);
+ if (iap->ia_ifp == ifp &&
+ iap->ia_addr.sin_addr.s_addr ==
+ sa->sin_addr.s_addr) {
+ ia = iap;
+ IFA_ADDREF_LOCKED(&iap->ia_ifa);
+ IFA_UNLOCK(&iap->ia_ifa);
+ break;
+ }
+ IFA_UNLOCK(&iap->ia_ifa);
+ }
+ lck_rw_done(in_ifaddr_rwlock);
+
+ if (ia == NULL) {
+ ifnet_lock_shared(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ iap = ifatoia(ifa);
+ IFA_LOCK(&iap->ia_ifa);
+ if (iap->ia_addr.sin_family == AF_INET) {
+ ia = iap;
+ IFA_ADDREF_LOCKED(&iap->ia_ifa);
+ IFA_UNLOCK(&iap->ia_ifa);
+ break;
+ }
+ IFA_UNLOCK(&iap->ia_ifa);
+ }
+ ifnet_lock_done(ifp);
+ }
+ }
+
+ /*
+ * Unlock the socket since ifnet_ioctl() may be invoked by
+ * one of the ioctl handlers below. Socket will be re-locked
+ * prior to returning.
+ */
+ if (so != NULL) {
+ socket_unlock(so, 0);
+ so_unlocked = TRUE;
+ }
+
+ switch (cmd) {
+ case SIOCAIFADDR: /* struct {if,in_}aliasreq */
+ case SIOCDIFADDR: /* struct ifreq */
+ if (cmd == SIOCAIFADDR) {
+ bcopy(&((struct in_aliasreq *)(void *)data)->
+ ifra_addr, &addr, sizeof(addr));
+ bcopy(&((struct in_aliasreq *)(void *)data)->
+ ifra_dstaddr, &dstaddr, sizeof(dstaddr));
+ } else {
+ VERIFY(cmd == SIOCDIFADDR);
+ bcopy(&((struct ifreq *)(void *)data)->ifr_addr,
+ &addr, sizeof(addr));
+ bzero(&dstaddr, sizeof(dstaddr));
+ }
+
+ if (addr.sin_family == AF_INET) {
+ struct in_ifaddr *oia;
+
+ lck_rw_lock_shared(in_ifaddr_rwlock);
+ for (oia = ia; ia; ia = ia->ia_link.tqe_next) {
+ IFA_LOCK(&ia->ia_ifa);
+ if (ia->ia_ifp == ifp &&
+ ia->ia_addr.sin_addr.s_addr ==
+ addr.sin_addr.s_addr) {
+ IFA_ADDREF_LOCKED(&ia->ia_ifa);
+ IFA_UNLOCK(&ia->ia_ifa);
+ break;
+ }
+ IFA_UNLOCK(&ia->ia_ifa);
+ }
+ lck_rw_done(in_ifaddr_rwlock);
+ if (oia != NULL) {
+ IFA_REMREF(&oia->ia_ifa);
+ }
+ if ((ifp->if_flags & IFF_POINTOPOINT) &&
+ (cmd == SIOCAIFADDR) &&
+ (dstaddr.sin_addr.s_addr == INADDR_ANY)) {
+ error = EDESTADDRREQ;
+ goto done;
+ }
+ } else if (cmd == SIOCAIFADDR) {
+ error = EINVAL;
+ goto done;
+ }
+ if (cmd == SIOCDIFADDR) {
+ if (ia == NULL) {
+ error = EADDRNOTAVAIL;
+ goto done;
+ }
+
+ IFA_LOCK(&ia->ia_ifa);
+ /*
+ * Avoid the race condition seen when two
+ * threads process SIOCDIFADDR command
+ * at the same time.
+ */
+ while (ia->ia_ifa.ifa_debug & IFD_DETACHING) {
+ os_log(OS_LOG_DEFAULT,
+ "Another thread is already attempting to "
+ "delete IPv4 address: %s on interface %s. "
+ "Go to sleep and check again after the operation is done",
+ inet_ntoa(sa->sin_addr), ia->ia_ifp->if_xname);
+ ia->ia_ifa.ifa_del_waiters++;
+ (void) msleep(ia->ia_ifa.ifa_del_wc, &ia->ia_ifa.ifa_lock, (PZERO - 1),
+ __func__, NULL);
+ IFA_LOCK_ASSERT_HELD(&ia->ia_ifa);
+ }
+
+ if ((ia->ia_ifa.ifa_debug & IFD_ATTACHED) == 0) {
+ error = EADDRNOTAVAIL;
+ IFA_UNLOCK(&ia->ia_ifa);
+ goto done;
+ }
+
+ ia->ia_ifa.ifa_debug |= IFD_DETACHING;
+ IFA_UNLOCK(&ia->ia_ifa);
+ }
+
+ OS_FALLTHROUGH;
+ case SIOCSIFADDR: /* struct ifreq */
+ case SIOCSIFDSTADDR: /* struct ifreq */
+ case SIOCSIFNETMASK: /* struct ifreq */
+ if (cmd == SIOCAIFADDR) {
+ /* fell thru from above; just repeat it */
+ bcopy(&((struct in_aliasreq *)(void *)data)->
+ ifra_addr, &addr, sizeof(addr));
+ } else {
+ VERIFY(cmd == SIOCDIFADDR || cmd == SIOCSIFADDR ||
+ cmd == SIOCSIFNETMASK || cmd == SIOCSIFDSTADDR);
+ bcopy(&((struct ifreq *)(void *)data)->ifr_addr,
+ &addr, sizeof(addr));
+ }
+
+ if (addr.sin_family != AF_INET && cmd == SIOCSIFADDR) {
+ error = EINVAL;
+ goto done;
+ }
+ if (ia == NULL) {
+ ia = in_ifaddr_alloc(M_WAITOK);
+ if (ia == NULL) {
+ error = ENOBUFS;
+ goto done;
+ }
+ ifnet_lock_exclusive(ifp);
+ ifa = &ia->ia_ifa;
+ IFA_LOCK(ifa);
+ /* Hold a reference for this routine */
+ IFA_ADDREF_LOCKED(ifa);
+ IA_HASH_INIT(ia);
+ ifa->ifa_addr = (struct sockaddr *)&ia->ia_addr;
+ ifa->ifa_dstaddr = (struct sockaddr *)&ia->ia_dstaddr;
+ ifa->ifa_netmask = (struct sockaddr *)&ia->ia_sockmask;
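+			/* The mask sockaddr only needs to cover fields up through sin_addr */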
+ ia->ia_sockmask.sin_len = offsetof(struct sockaddr_in, sin_zero);
+ if (ifp->if_flags & IFF_BROADCAST) {
+ ia->ia_broadaddr.sin_len = sizeof(ia->ia_addr);
+ ia->ia_broadaddr.sin_family = AF_INET;
+ }
+ ia->ia_ifp = ifp;
+ if (!(ifp->if_flags & IFF_LOOPBACK)) {
+ in_interfaces++;
+ }
+ /* if_attach_ifa() holds a reference for ifa_link */
+ if_attach_ifa(ifp, ifa);
+ /*
+ * If we have to go through in_ifinit(), make sure
+ * to avoid installing route(s) based on this address
+			 * via the PRC_IFUP event, before the link resolver (ARP)
+ * initializes it.
+ */
+ if (cmd == SIOCAIFADDR || cmd == SIOCSIFADDR) {
+ ifa->ifa_debug |= IFD_NOTREADY;
+ }
+ IFA_UNLOCK(ifa);
+ ifnet_lock_done(ifp);
+ lck_rw_lock_exclusive(in_ifaddr_rwlock);
+ /* Hold a reference for ia_link */
+ IFA_ADDREF(ifa);
+ TAILQ_INSERT_TAIL(&in_ifaddrhead, ia, ia_link);
+ lck_rw_done(in_ifaddr_rwlock);
+ /* discard error */
+ (void) in_domifattach(ifp);
+ error = 0;
+ }
+ break;
+ }
+
+ switch (cmd) {
+ case SIOCGIFDSTADDR: /* struct ifreq */
+ case SIOCSIFDSTADDR: /* struct ifreq */
+ error = inctl_ifdstaddr(ifp, ia, cmd, ifr);
+ break;
+
+ case SIOCGIFBRDADDR: /* struct ifreq */
+ case SIOCSIFBRDADDR: /* struct ifreq */
+ error = inctl_ifbrdaddr(ifp, ia, cmd, ifr);
+ break;
+
+ case SIOCGIFNETMASK: /* struct ifreq */
+ case SIOCSIFNETMASK: /* struct ifreq */
+ error = inctl_ifnetmask(ifp, ia, cmd, ifr);
+ break;
+
+ case SIOCGIFADDR: /* struct ifreq */
+ case SIOCSIFADDR: /* struct ifreq */
+ case SIOCAIFADDR: /* struct {if,in_}aliasreq */
+ case SIOCDIFADDR: /* struct ifreq */
+ error = inctl_ifaddr(ifp, ia, cmd, ifr);
+ break;
+
+ default:
+ error = EOPNOTSUPP;
+ break;
+ }
+
+done:
+ if (ia != NULL) {
+ if (cmd == SIOCDIFADDR) {
+ IFA_LOCK(&ia->ia_ifa);
+ ia->ia_ifa.ifa_debug &= ~IFD_DETACHING;
+ if (ia->ia_ifa.ifa_del_waiters > 0) {
+ ia->ia_ifa.ifa_del_waiters = 0;
+ wakeup(ia->ia_ifa.ifa_del_wc);
+ }
+ IFA_UNLOCK(&ia->ia_ifa);
+ }
+ IFA_REMREF(&ia->ia_ifa);
+ }
+ if (so_unlocked) {
+ socket_lock(so, 0);
+ }
+
+ return error;
+}
+
+/*
+ * Delete any existing route for an interface.
+ */
+void
+in_ifscrub(struct ifnet *ifp, struct in_ifaddr *ia, int locked)
+{
+ IFA_LOCK(&ia->ia_ifa);
+ if ((ia->ia_flags & IFA_ROUTE) == 0) {
+ IFA_UNLOCK(&ia->ia_ifa);
+ return;
+ }
+ IFA_UNLOCK(&ia->ia_ifa);
+ if (!locked) {
+ lck_mtx_lock(rnh_lock);
+ }
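+	/* Loopback/point-to-point addresses were installed as host routes */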
+ if (ifp->if_flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) {
+ rtinit_locked(&(ia->ia_ifa), (int)RTM_DELETE, RTF_HOST);
+ } else {
+ rtinit_locked(&(ia->ia_ifa), (int)RTM_DELETE, 0);
+ }
+ IFA_LOCK(&ia->ia_ifa);
+ ia->ia_flags &= ~IFA_ROUTE;
+ IFA_UNLOCK(&ia->ia_ifa);
+ if (!locked) {
+ lck_mtx_unlock(rnh_lock);
+ }
+}
+
+/*
+ * Caller must hold in_ifaddr_rwlock as writer.
+ */
+static void
+in_iahash_remove(struct in_ifaddr *ia)
+{
+ LCK_RW_ASSERT(in_ifaddr_rwlock, LCK_RW_ASSERT_EXCLUSIVE);
+ IFA_LOCK_ASSERT_HELD(&ia->ia_ifa);
+
+ if (!IA_IS_HASHED(ia)) {
+ panic("attempt to remove wrong ia %p from hash table\n", ia);
+ /* NOTREACHED */
+ }
+ TAILQ_REMOVE(INADDR_HASH(ia->ia_addr.sin_addr.s_addr), ia, ia_hash);
+ IA_HASH_INIT(ia);
+ if (IFA_REMREF_LOCKED(&ia->ia_ifa) == NULL) {
+ panic("%s: unexpected (missing) refcnt ifa=%p", __func__,
+ &ia->ia_ifa);
+ /* NOTREACHED */
+ }
+}
+
+/*
+ * Caller must hold in_ifaddr_rwlock as writer.
+ */
+static void
+in_iahash_insert(struct in_ifaddr *ia)
+{
+ LCK_RW_ASSERT(in_ifaddr_rwlock, LCK_RW_ASSERT_EXCLUSIVE);
+ IFA_LOCK_ASSERT_HELD(&ia->ia_ifa);
+
+ if (ia->ia_addr.sin_family != AF_INET) {
+ panic("attempt to insert wrong ia %p into hash table\n", ia);
+ /* NOTREACHED */
+ } else if (IA_IS_HASHED(ia)) {
+ panic("attempt to double-insert ia %p into hash table\n", ia);
+ /* NOTREACHED */
+ }
+ TAILQ_INSERT_HEAD(INADDR_HASH(ia->ia_addr.sin_addr.s_addr),
+ ia, ia_hash);
+ IFA_ADDREF_LOCKED(&ia->ia_ifa);
+}
+
+/*
+ * Some point-to-point interfaces that are tunnels borrow their address
+ * from an underlying interface (e.g. a VPN server). In order for the
+ * source address selection logic to find the underlying interface first,
+ * we add the address of such a borrowing point-to-point interface at the
+ * end of the list. (see rdar://6733789)
+ *
+ * Caller must hold in_ifaddr_rwlock as writer.
+ */
+static void
+in_iahash_insert_ptp(struct in_ifaddr *ia)
+{
+ struct in_ifaddr *tmp_ifa;
+ struct ifnet *tmp_ifp;
+
+ LCK_RW_ASSERT(in_ifaddr_rwlock, LCK_RW_ASSERT_EXCLUSIVE);
+ IFA_LOCK_ASSERT_HELD(&ia->ia_ifa);
+
+ if (ia->ia_addr.sin_family != AF_INET) {
+ panic("attempt to insert wrong ia %p into hash table\n", ia);
+ /* NOTREACHED */
+ } else if (IA_IS_HASHED(ia)) {
+ panic("attempt to double-insert ia %p into hash table\n", ia);
+ /* NOTREACHED */
+ }
+ IFA_UNLOCK(&ia->ia_ifa);
+ TAILQ_FOREACH(tmp_ifa, INADDR_HASH(ia->ia_addr.sin_addr.s_addr),
+ ia_hash) {
+ IFA_LOCK(&tmp_ifa->ia_ifa);
+ /* ia->ia_addr won't change, so check without lock */
+ if (IA_SIN(tmp_ifa)->sin_addr.s_addr ==
+ ia->ia_addr.sin_addr.s_addr) {
+ IFA_UNLOCK(&tmp_ifa->ia_ifa);
+ break;
+ }
+ IFA_UNLOCK(&tmp_ifa->ia_ifa);
+ }
+ tmp_ifp = (tmp_ifa == NULL) ? NULL : tmp_ifa->ia_ifp;
+
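+	/*
+	 * If the address is already hashed on behalf of another interface
+	 * (i.e. it is being borrowed), insert this entry at the tail so the
+	 * underlying interface is found first; otherwise insert at the head.
+	 */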
+ IFA_LOCK(&ia->ia_ifa);
+ if (tmp_ifp == NULL) {
+ TAILQ_INSERT_HEAD(INADDR_HASH(ia->ia_addr.sin_addr.s_addr),
+ ia, ia_hash);
+ } else {
+ TAILQ_INSERT_TAIL(INADDR_HASH(ia->ia_addr.sin_addr.s_addr),
+ ia, ia_hash);
+ }
+ IFA_ADDREF_LOCKED(&ia->ia_ifa);
+}
+
+/*
+ * Initialize an interface's internet address
+ * and routing table entry.
+ */
+static int
+in_ifinit(struct ifnet *ifp, struct in_ifaddr *ia, struct sockaddr_in *sin,
+ int scrub)
+{
+ u_int32_t i = ntohl(sin->sin_addr.s_addr);
+ struct sockaddr_in oldaddr;
+ int flags = RTF_UP, error;
+ struct ifaddr *ifa0;
+ unsigned int cmd;
+ int oldremoved = 0;
+
+ /* Take an extra reference for this routine */
+ IFA_ADDREF(&ia->ia_ifa);
+
+ lck_rw_lock_exclusive(in_ifaddr_rwlock);
+ IFA_LOCK(&ia->ia_ifa);
+ oldaddr = ia->ia_addr;
+ if (IA_IS_HASHED(ia)) {
+ oldremoved = 1;
+ in_iahash_remove(ia);
+ }
+ ia->ia_addr = *sin;
+ /*
+ * Interface addresses should not contain port or sin_zero information.
+ */
+ SIN(&ia->ia_addr)->sin_family = AF_INET;
+ SIN(&ia->ia_addr)->sin_len = sizeof(struct sockaddr_in);
+ SIN(&ia->ia_addr)->sin_port = 0;
+ bzero(&SIN(&ia->ia_addr)->sin_zero, sizeof(sin->sin_zero));
+ if ((ifp->if_flags & IFF_POINTOPOINT)) {
+ in_iahash_insert_ptp(ia);
+ } else {
+ in_iahash_insert(ia);
+ }
+ IFA_UNLOCK(&ia->ia_ifa);
+ lck_rw_done(in_ifaddr_rwlock);
+
+ /*
+ * Give the interface a chance to initialize if this is its first
+	 * address, and to validate the address if necessary. Send down
+	 * SIOCSIFADDR for the first address, and SIOCAIFADDR for alias(es).
+	 * We find the first IPv4 address assigned to the interface and check
+	 * whether it is the same as the one passed into this routine.
+ */
+ ifa0 = ifa_ifpgetprimary(ifp, AF_INET);
+ cmd = (&ia->ia_ifa == ifa0) ? SIOCSIFADDR : SIOCAIFADDR;
+ error = ifnet_ioctl(ifp, PF_INET, cmd, ia);
+ if (error == EOPNOTSUPP) {
+ error = 0;
+ }
+ /*
+ * If we've just sent down SIOCAIFADDR, send another ioctl down
+	 * for SIOCSIFADDR for the first IPv4 address of the interface,
+	 * because an address change on one of the addresses will result
+	 * in the removal of the previous first IPv4 address. KDP needs to
+	 * be reconfigured with the current primary IPv4 address.
+ */
+ if (error == 0 && cmd == SIOCAIFADDR) {
+ /*
+ * NOTE: SIOCSIFADDR is defined with struct ifreq
+ * as parameter, but here we are sending it down
+ * to the interface with a pointer to struct ifaddr,
+ * for legacy reasons.
+ */
+ error = ifnet_ioctl(ifp, PF_INET, SIOCSIFADDR, ifa0);
+ if (error == EOPNOTSUPP) {
+ error = 0;
+ }
+ }
+
+ /* Release reference from ifa_ifpgetprimary() */
+ IFA_REMREF(ifa0);
+
+ if (error) {
+ lck_rw_lock_exclusive(in_ifaddr_rwlock);
+ IFA_LOCK(&ia->ia_ifa);
+ if (IA_IS_HASHED(ia)) {
+ in_iahash_remove(ia);
+ }
+ ia->ia_addr = oldaddr;
+ if (oldremoved) {
+ if ((ifp->if_flags & IFF_POINTOPOINT)) {
+ in_iahash_insert_ptp(ia);
+ } else {
+ in_iahash_insert(ia);
+ }
+ }
+ IFA_UNLOCK(&ia->ia_ifa);
+ lck_rw_done(in_ifaddr_rwlock);
+ /* Release extra reference taken above */
+ IFA_REMREF(&ia->ia_ifa);
+ return error;
+ }
+ lck_mtx_lock(rnh_lock);
+ IFA_LOCK(&ia->ia_ifa);
+ /*
+ * Address has been initialized by the link resolver (ARP)
+ * via ifnet_ioctl() above; it may now generate route(s).
+ */
+ ia->ia_ifa.ifa_debug &= ~IFD_NOTREADY;
+ if (scrub) {
+ ia->ia_ifa.ifa_addr = (struct sockaddr *)&oldaddr;
+ IFA_UNLOCK(&ia->ia_ifa);
+ in_ifscrub(ifp, ia, 1);
+ IFA_LOCK(&ia->ia_ifa);
+ ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr;
+ }
+ IFA_LOCK_ASSERT_HELD(&ia->ia_ifa);
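+	/* Start from the classful default mask for this address */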
+ if (IN_CLASSA(i)) {
+ ia->ia_netmask = IN_CLASSA_NET;
+ } else if (IN_CLASSB(i)) {
+ ia->ia_netmask = IN_CLASSB_NET;
+ } else {
+ ia->ia_netmask = IN_CLASSC_NET;
+ }
+ /*
+ * The subnet mask usually includes at least the standard network part,
+	 * but may be smaller in the case of supernetting.
+ * If it is set, we believe it.
+ */
+ if (ia->ia_subnetmask == 0) {
+ ia->ia_subnetmask = ia->ia_netmask;
+ ia->ia_sockmask.sin_addr.s_addr = htonl(ia->ia_subnetmask);
+ } else {
+ ia->ia_netmask &= ia->ia_subnetmask;
+ }
+ ia->ia_net = i & ia->ia_netmask;
+ ia->ia_subnet = i & ia->ia_subnetmask;
+ in_socktrim(&ia->ia_sockmask);
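+	/*
+	 * Illustrative example (not taken from any configuration): for the
+	 * address 192.0.2.130 with a configured subnet mask of 255.255.255.192,
+	 * the classful default is IN_CLASSC_NET, so ia_netmask = 0xffffff00,
+	 * ia_subnetmask = 0xffffffc0, ia_net = 192.0.2.0 and
+	 * ia_subnet = 192.0.2.128; on a broadcast-capable interface the block
+	 * below then yields ia_broadaddr = 192.0.2.191 and
+	 * ia_netbroadcast = 192.0.2.255.
+	 */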
+ /*
+ * Add route for the network.
+ */
+ ia->ia_ifa.ifa_metric = ifp->if_metric;
+ if (ifp->if_flags & IFF_BROADCAST) {
+ ia->ia_broadaddr.sin_addr.s_addr =
+ htonl(ia->ia_subnet | ~ia->ia_subnetmask);
+ ia->ia_netbroadcast.s_addr =
+ htonl(ia->ia_net | ~ia->ia_netmask);
+ } else if (ifp->if_flags & IFF_LOOPBACK) {
+ ia->ia_ifa.ifa_dstaddr = ia->ia_ifa.ifa_addr;
+ flags |= RTF_HOST;
+ } else if (ifp->if_flags & IFF_POINTOPOINT) {
+ if (ia->ia_dstaddr.sin_family != AF_INET) {
+ IFA_UNLOCK(&ia->ia_ifa);
+ lck_mtx_unlock(rnh_lock);
+ /* Release extra reference taken above */
+ IFA_REMREF(&ia->ia_ifa);
+ return 0;
+ }
+ ia->ia_dstaddr.sin_len = sizeof(struct sockaddr_in);
+ flags |= RTF_HOST;
+ }
+ IFA_UNLOCK(&ia->ia_ifa);
+
+ if ((error = rtinit_locked(&(ia->ia_ifa), (int)RTM_ADD, flags)) == 0) {
+ IFA_LOCK(&ia->ia_ifa);
+ ia->ia_flags |= IFA_ROUTE;
+ IFA_UNLOCK(&ia->ia_ifa);
+ }
+ lck_mtx_unlock(rnh_lock);
+
+ /* XXX check if the subnet route points to the same interface */
+ if (error == EEXIST) {
+ error = 0;
+ }
+
+ /*
+ * If the interface supports multicast, join the "all hosts"
+ * multicast group on that interface.
+ */
+ if (ifp->if_flags & IFF_MULTICAST) {
+ struct in_addr addr;
+
+ lck_mtx_lock(&ifp->if_addrconfig_lock);
+ addr.s_addr = htonl(INADDR_ALLHOSTS_GROUP);
+ if (ifp->if_allhostsinm == NULL) {
+ struct in_multi *inm;
+ inm = in_addmulti(&addr, ifp);
+
+ if (inm != NULL) {
+ /*
+ * Keep the reference on inm added by
+ * in_addmulti above for storing the
+ * pointer in allhostsinm.
+ */
+ ifp->if_allhostsinm = inm;
+ } else {
+ printf("%s: failed to add membership to "
+ "all-hosts multicast address on %s\n",
+ __func__, if_name(ifp));
+ }
+ }
+ lck_mtx_unlock(&ifp->if_addrconfig_lock);
+ }
+
+ /* Release extra reference taken above */
+ IFA_REMREF(&ia->ia_ifa);
+
+ if (error == 0) {
+ /* invalidate route caches */
+ routegenid_inet_update();
+ }
+
+ return error;
+}
+
+/*
+ * Return TRUE if the address might be a local broadcast address.
+ */
+boolean_t
+in_broadcast(struct in_addr in, struct ifnet *ifp)
+{
+ struct ifaddr *ifa;
+ u_int32_t t;
+
+ if (in.s_addr == INADDR_BROADCAST || in.s_addr == INADDR_ANY) {
+ return TRUE;
+ }
+ if (!(ifp->if_flags & IFF_BROADCAST)) {
+ return FALSE;
+ }
+ t = ntohl(in.s_addr);
+
+ /*
+ * Look through the list of addresses for a match
+ * with a broadcast address.
+ */
+#define ia ((struct in_ifaddr *)ifa)
+ ifnet_lock_shared(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ IFA_LOCK(ifa);
+ if (ifa->ifa_addr->sa_family == AF_INET &&
+ (in.s_addr == ia->ia_broadaddr.sin_addr.s_addr ||
+ in.s_addr == ia->ia_netbroadcast.s_addr ||
+ /*
+ * Check for old-style (host 0) broadcast.
+ */
+ t == ia->ia_subnet || t == ia->ia_net) &&
+ /*
+		    * Check for an all-ones subnet mask. These
+ * only exist when an interface gets a secondary
+ * address.
+ */
+ ia->ia_subnetmask != (u_int32_t)0xffffffff) {
+ IFA_UNLOCK(ifa);
+ ifnet_lock_done(ifp);
+ return TRUE;
+ }
+ IFA_UNLOCK(ifa);
+ }
+ ifnet_lock_done(ifp);
+ return FALSE;
+#undef ia
+}
+
+void
+in_purgeaddrs(struct ifnet *ifp)
+{
+ struct ifaddr **ifap;
+ int err, i;
+
+ VERIFY(ifp != NULL);
+
+ /*
+ * Be nice, and try the civilized way first. If we can't get
+ * rid of them this way, then do it the rough way. We must
+ * only get here during detach time, after the ifnet has been
+ * removed from the global list and arrays.
+ */
+ err = ifnet_get_address_list_family_internal(ifp, &ifap, AF_INET, 1,
+ M_WAITOK, 0);
+ if (err == 0 && ifap != NULL) {
+ struct ifreq ifr;
+
+ bzero(&ifr, sizeof(ifr));
+ (void) snprintf(ifr.ifr_name, sizeof(ifr.ifr_name),
+ "%s", if_name(ifp));
+
+ for (i = 0; ifap[i] != NULL; i++) {
+ struct ifaddr *ifa;
+
+ ifa = ifap[i];
+ IFA_LOCK(ifa);
+ bcopy(ifa->ifa_addr, &ifr.ifr_addr,
+ sizeof(struct sockaddr_in));
+ IFA_UNLOCK(ifa);
+ err = in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
+ kernproc);
+ /* if we lost the race, ignore it */
+ if (err == EADDRNOTAVAIL) {
+ err = 0;
+ }
+ if (err != 0) {
+ char s_addr[MAX_IPv4_STR_LEN];
+ char s_dstaddr[MAX_IPv4_STR_LEN];
+ struct in_addr *s, *d;
+
+ IFA_LOCK(ifa);
+ s = &((struct sockaddr_in *)
+ (void *)ifa->ifa_addr)->sin_addr;
+ d = &((struct sockaddr_in *)
+ (void *)ifa->ifa_dstaddr)->sin_addr;
+ (void) inet_ntop(AF_INET, &s->s_addr, s_addr,
+ sizeof(s_addr));
+ (void) inet_ntop(AF_INET, &d->s_addr, s_dstaddr,
+ sizeof(s_dstaddr));
+ IFA_UNLOCK(ifa);
+
+ printf("%s: SIOCDIFADDR ifp=%s ifa_addr=%s "
+ "ifa_dstaddr=%s (err=%d)\n", __func__,
+ ifp->if_xname, s_addr, s_dstaddr, err);
+ }
+ }
+ ifnet_free_address_list(ifap);
+ } else if (err != 0 && err != ENXIO) {
+ printf("%s: error retrieving list of AF_INET addresses for "
+ "ifp=%s (err=%d)\n", __func__, ifp->if_xname, err);
+ }
+}
+
+/*
+ * Called as part of ip_init
+ */
+void
+in_ifaddr_init(void)
+{
+ in_multi_init();
+
+ PE_parse_boot_argn("ifa_debug", &inifa_debug, sizeof(inifa_debug));
+
+ inifa_size = (inifa_debug == 0) ? sizeof(struct in_ifaddr) :
+ sizeof(struct in_ifaddr_dbg);
+
+ inifa_zone = zone_create(INIFA_ZONE_NAME, inifa_size, ZC_NONE);
+
+ lck_mtx_init(&inifa_trash_lock, ifa_mtx_grp, ifa_mtx_attr);
+ TAILQ_INIT(&inifa_trash_head);
+}
+
+static struct in_ifaddr *
+in_ifaddr_alloc(int how)
+{
+ struct in_ifaddr *inifa;
+
+ inifa = (how == M_WAITOK) ? zalloc(inifa_zone) :
+ zalloc_noblock(inifa_zone);
+ if (inifa != NULL) {
+ bzero(inifa, inifa_size);
+ inifa->ia_ifa.ifa_free = in_ifaddr_free;
+ inifa->ia_ifa.ifa_debug |= IFD_ALLOC;
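+		/*
+		 * ifa_debug doubles as the wait channel used by SIOCDIFADDR
+		 * waiters (see in_control() above).
+		 */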
+ inifa->ia_ifa.ifa_del_wc = &inifa->ia_ifa.ifa_debug;
+ inifa->ia_ifa.ifa_del_waiters = 0;
+ ifa_lock_init(&inifa->ia_ifa);
+ if (inifa_debug != 0) {
+ struct in_ifaddr_dbg *inifa_dbg =
+ (struct in_ifaddr_dbg *)inifa;
+ inifa->ia_ifa.ifa_debug |= IFD_DEBUG;
+ inifa->ia_ifa.ifa_trace = in_ifaddr_trace;
+ inifa->ia_ifa.ifa_attached = in_ifaddr_attached;
+ inifa->ia_ifa.ifa_detached = in_ifaddr_detached;
+ ctrace_record(&inifa_dbg->inifa_alloc);
+ }
+ }
+ return inifa;
+}
+
+static void
+in_ifaddr_free(struct ifaddr *ifa)
+{
+ IFA_LOCK_ASSERT_HELD(ifa);
+
+ if (ifa->ifa_refcnt != 0) {
+ panic("%s: ifa %p bad ref cnt", __func__, ifa);
+ /* NOTREACHED */
+ }
+ if (!(ifa->ifa_debug & IFD_ALLOC)) {
+ panic("%s: ifa %p cannot be freed", __func__, ifa);
+ /* NOTREACHED */
+ }
+ if (ifa->ifa_debug & IFD_DEBUG) {
+ struct in_ifaddr_dbg *inifa_dbg = (struct in_ifaddr_dbg *)ifa;
+ ctrace_record(&inifa_dbg->inifa_free);
+ bcopy(&inifa_dbg->inifa, &inifa_dbg->inifa_old,
+ sizeof(struct in_ifaddr));
+ if (ifa->ifa_debug & IFD_TRASHED) {
+ /* Become a regular mutex, just in case */
+ IFA_CONVERT_LOCK(ifa);
+ lck_mtx_lock(&inifa_trash_lock);
+ TAILQ_REMOVE(&inifa_trash_head, inifa_dbg,
+ inifa_trash_link);
+ lck_mtx_unlock(&inifa_trash_lock);
+ ifa->ifa_debug &= ~IFD_TRASHED;
+ }
+ }
+ IFA_UNLOCK(ifa);
+ ifa_lock_destroy(ifa);
+ bzero(ifa, sizeof(struct in_ifaddr));
+ zfree(inifa_zone, ifa);
+}
+
+static void
+in_ifaddr_attached(struct ifaddr *ifa)
+{
+ struct in_ifaddr_dbg *inifa_dbg = (struct in_ifaddr_dbg *)ifa;
+
+ IFA_LOCK_ASSERT_HELD(ifa);
+
+ if (!(ifa->ifa_debug & IFD_DEBUG)) {
+ panic("%s: ifa %p has no debug structure", __func__, ifa);
+ /* NOTREACHED */
+ }
+ if (ifa->ifa_debug & IFD_TRASHED) {
+ /* Become a regular mutex, just in case */
+ IFA_CONVERT_LOCK(ifa);
+ lck_mtx_lock(&inifa_trash_lock);
+ TAILQ_REMOVE(&inifa_trash_head, inifa_dbg, inifa_trash_link);
+ lck_mtx_unlock(&inifa_trash_lock);
+ ifa->ifa_debug &= ~IFD_TRASHED;
+ }
+}
+
+static void
+in_ifaddr_detached(struct ifaddr *ifa)
+{
+ struct in_ifaddr_dbg *inifa_dbg = (struct in_ifaddr_dbg *)ifa;
+
+ IFA_LOCK_ASSERT_HELD(ifa);
+
+ if (!(ifa->ifa_debug & IFD_DEBUG)) {
+ panic("%s: ifa %p has no debug structure", __func__, ifa);
+ /* NOTREACHED */
+ } else if (ifa->ifa_debug & IFD_TRASHED) {
+ panic("%s: ifa %p is already in trash list", __func__, ifa);
+ /* NOTREACHED */
+ }
+ ifa->ifa_debug |= IFD_TRASHED;
+ /* Become a regular mutex, just in case */
+ IFA_CONVERT_LOCK(ifa);
+ lck_mtx_lock(&inifa_trash_lock);
+ TAILQ_INSERT_TAIL(&inifa_trash_head, inifa_dbg, inifa_trash_link);
+ lck_mtx_unlock(&inifa_trash_lock);
+}
+
+static void
+in_ifaddr_trace(struct ifaddr *ifa, int refhold)
+{
+ struct in_ifaddr_dbg *inifa_dbg = (struct in_ifaddr_dbg *)ifa;
+ ctrace_t *tr;
+ u_int32_t idx;
+ u_int16_t *cnt;
+
+ if (!(ifa->ifa_debug & IFD_DEBUG)) {
+ panic("%s: ifa %p has no debug structure", __func__, ifa);
+ /* NOTREACHED */