+ LCK_RW_ASSERT(in_ifaddr_rwlock, LCK_RW_ASSERT_EXCLUSIVE);
+ IFA_LOCK_ASSERT_HELD(&ia->ia_ifa);
+
+ if (!IA_IS_HASHED(ia)) {
+ panic("attempt to remove wrong ia %p from hash table\n", ia);
+ /* NOTREACHED */
+ }
+ TAILQ_REMOVE(INADDR_HASH(ia->ia_addr.sin_addr.s_addr), ia, ia_hash);
+ IA_HASH_INIT(ia);
+ if (IFA_REMREF_LOCKED(&ia->ia_ifa) == NULL) {
+ panic("%s: unexpected (missing) refcnt ifa=%p", __func__,
+ &ia->ia_ifa);
+ /* NOTREACHED */
+ }
+}
+
+/*
+ * Caller must hold in_ifaddr_rwlock as writer.
+ */
+static void
+in_iahash_insert(struct in_ifaddr *ia)
+{
+ LCK_RW_ASSERT(in_ifaddr_rwlock, LCK_RW_ASSERT_EXCLUSIVE);
+ IFA_LOCK_ASSERT_HELD(&ia->ia_ifa);
+
+ if (ia->ia_addr.sin_family != AF_INET) {
+ panic("attempt to insert wrong ia %p into hash table\n", ia);
+ /* NOTREACHED */
+ } else if (IA_IS_HASHED(ia)) {
+ panic("attempt to double-insert ia %p into hash table\n", ia);
+ /* NOTREACHED */
+ }
+ TAILQ_INSERT_HEAD(INADDR_HASH(ia->ia_addr.sin_addr.s_addr),
+ ia, ia_hash);
+ IFA_ADDREF_LOCKED(&ia->ia_ifa);
+}
+
+/*
+ * Some point to point interfaces that are tunnels borrow the address from
+ * an underlying interface (e.g. VPN server). In order for source address
+ * selection logic to find the underlying interface first, we add the address
+ * of borrowing point to point interfaces at the end of the list.
+ * (see rdar://6733789)
+ *
+ * Caller must hold in_ifaddr_rwlock as writer.
+ */
+static void
+in_iahash_insert_ptp(struct in_ifaddr *ia)
+{
+ struct in_ifaddr *tmp_ifa;
+ struct ifnet *tmp_ifp;
+
+ LCK_RW_ASSERT(in_ifaddr_rwlock, LCK_RW_ASSERT_EXCLUSIVE);
+ IFA_LOCK_ASSERT_HELD(&ia->ia_ifa);
+
+ if (ia->ia_addr.sin_family != AF_INET) {
+ panic("attempt to insert wrong ia %p into hash table\n", ia);
+ /* NOTREACHED */
+ } else if (IA_IS_HASHED(ia)) {
+ panic("attempt to double-insert ia %p into hash table\n", ia);
+ /* NOTREACHED */
+ }
+ IFA_UNLOCK(&ia->ia_ifa);
+ TAILQ_FOREACH(tmp_ifa, INADDR_HASH(ia->ia_addr.sin_addr.s_addr),
+ ia_hash) {
+ IFA_LOCK(&tmp_ifa->ia_ifa);
+ /* ia->ia_addr won't change, so check without lock */
+ if (IA_SIN(tmp_ifa)->sin_addr.s_addr ==
+ ia->ia_addr.sin_addr.s_addr) {
+ IFA_UNLOCK(&tmp_ifa->ia_ifa);
+ break;
+ }
+ IFA_UNLOCK(&tmp_ifa->ia_ifa);
+ }
+ tmp_ifp = (tmp_ifa == NULL) ? NULL : tmp_ifa->ia_ifp;
+
+ IFA_LOCK(&ia->ia_ifa);
+ if (tmp_ifp == NULL) {
+ TAILQ_INSERT_HEAD(INADDR_HASH(ia->ia_addr.sin_addr.s_addr),
+ ia, ia_hash);
+ } else {
+ TAILQ_INSERT_TAIL(INADDR_HASH(ia->ia_addr.sin_addr.s_addr),
+ ia, ia_hash);
+ }
+ IFA_ADDREF_LOCKED(&ia->ia_ifa);
+}
+
+/*
+ * Initialize an interface's internet address
+ * and routing table entry.
+ */
+static int
+in_ifinit(struct ifnet *ifp, struct in_ifaddr *ia, struct sockaddr_in *sin,
+ int scrub)
+{
+ u_int32_t i = ntohl(sin->sin_addr.s_addr);
+ struct sockaddr_in oldaddr;
+ int flags = RTF_UP, error;
+ struct ifaddr *ifa0;
+ unsigned int cmd;
+ int oldremoved = 0;
+
+ /* Take an extra reference for this routine */
+ IFA_ADDREF(&ia->ia_ifa);
+
+ lck_rw_lock_exclusive(in_ifaddr_rwlock);
+ IFA_LOCK(&ia->ia_ifa);
+ oldaddr = ia->ia_addr;
+ if (IA_IS_HASHED(ia)) {
+ oldremoved = 1;
+ in_iahash_remove(ia);
+ }
+ ia->ia_addr = *sin;
+ /*
+ * Interface addresses should not contain port or sin_zero information.
+ */
+ SIN(&ia->ia_addr)->sin_family = AF_INET;
+ SIN(&ia->ia_addr)->sin_len = sizeof(struct sockaddr_in);
+ SIN(&ia->ia_addr)->sin_port = 0;
+ bzero(&SIN(&ia->ia_addr)->sin_zero, sizeof(sin->sin_zero));
+ if ((ifp->if_flags & IFF_POINTOPOINT)) {
+ in_iahash_insert_ptp(ia);
+ } else {
+ in_iahash_insert(ia);
+ }
+ IFA_UNLOCK(&ia->ia_ifa);
+ lck_rw_done(in_ifaddr_rwlock);
+
+ /*
+ * Give the interface a chance to initialize if this is its first
+ * address, and to validate the address if necessary. Send down
+ * SIOCSIFADDR for first address, and SIOCAIFADDR for alias(es).
+ * We find the first IPV4 address assigned to it and check if this
+ * is the same as the one passed into this routine.
+ */
+ ifa0 = ifa_ifpgetprimary(ifp, AF_INET);
+ cmd = (&ia->ia_ifa == ifa0) ? SIOCSIFADDR : SIOCAIFADDR;
+ error = ifnet_ioctl(ifp, PF_INET, cmd, ia);
+ if (error == EOPNOTSUPP) {
+ error = 0;
+ }
+ /*
+ * If we've just sent down SIOCAIFADDR, send another ioctl down
+ * for SIOCSIFADDR for the first IPV4 address of the interface,
+ * because an address change on one of the addresses will result
+ * in the removal of the previous first IPV4 address. KDP needs
+ * be reconfigured with the current primary IPV4 address.
+ */
+ if (error == 0 && cmd == SIOCAIFADDR) {
+ /*
+ * NOTE: SIOCSIFADDR is defined with struct ifreq
+ * as parameter, but here we are sending it down
+ * to the interface with a pointer to struct ifaddr,
+ * for legacy reasons.
+ */
+ error = ifnet_ioctl(ifp, PF_INET, SIOCSIFADDR, ifa0);
+ if (error == EOPNOTSUPP) {
+ error = 0;
+ }
+ }
+
+ /* Release reference from ifa_ifpgetprimary() */
+ IFA_REMREF(ifa0);
+
+ if (error) {
+ lck_rw_lock_exclusive(in_ifaddr_rwlock);
+ IFA_LOCK(&ia->ia_ifa);
+ if (IA_IS_HASHED(ia)) {
+ in_iahash_remove(ia);
+ }
+ ia->ia_addr = oldaddr;
+ if (oldremoved) {
+ if ((ifp->if_flags & IFF_POINTOPOINT)) {
+ in_iahash_insert_ptp(ia);
+ } else {
+ in_iahash_insert(ia);
+ }
+ }
+ IFA_UNLOCK(&ia->ia_ifa);
+ lck_rw_done(in_ifaddr_rwlock);
+ /* Release extra reference taken above */
+ IFA_REMREF(&ia->ia_ifa);
+ return error;
+ }
+ lck_mtx_lock(rnh_lock);
+ IFA_LOCK(&ia->ia_ifa);
+ /*
+ * Address has been initialized by the link resolver (ARP)
+ * via ifnet_ioctl() above; it may now generate route(s).
+ */
+ ia->ia_ifa.ifa_debug &= ~IFD_NOTREADY;
+ if (scrub) {
+ ia->ia_ifa.ifa_addr = (struct sockaddr *)&oldaddr;
+ IFA_UNLOCK(&ia->ia_ifa);
+ in_ifscrub(ifp, ia, 1);
+ IFA_LOCK(&ia->ia_ifa);
+ ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr;
+ }
+ IFA_LOCK_ASSERT_HELD(&ia->ia_ifa);
+ if (IN_CLASSA(i)) {
+ ia->ia_netmask = IN_CLASSA_NET;
+ } else if (IN_CLASSB(i)) {
+ ia->ia_netmask = IN_CLASSB_NET;
+ } else {
+ ia->ia_netmask = IN_CLASSC_NET;
+ }
+ /*
+ * The subnet mask usually includes at least the standard network part,
+ * but may may be smaller in the case of supernetting.
+ * If it is set, we believe it.
+ */
+ if (ia->ia_subnetmask == 0) {
+ ia->ia_subnetmask = ia->ia_netmask;
+ ia->ia_sockmask.sin_addr.s_addr = htonl(ia->ia_subnetmask);
+ } else {
+ ia->ia_netmask &= ia->ia_subnetmask;
+ }
+ ia->ia_net = i & ia->ia_netmask;
+ ia->ia_subnet = i & ia->ia_subnetmask;
+ in_socktrim(&ia->ia_sockmask);
+ /*
+ * Add route for the network.
+ */
+ ia->ia_ifa.ifa_metric = ifp->if_metric;
+ if (ifp->if_flags & IFF_BROADCAST) {
+ ia->ia_broadaddr.sin_addr.s_addr =
+ htonl(ia->ia_subnet | ~ia->ia_subnetmask);
+ ia->ia_netbroadcast.s_addr =
+ htonl(ia->ia_net | ~ia->ia_netmask);
+ } else if (ifp->if_flags & IFF_LOOPBACK) {
+ ia->ia_ifa.ifa_dstaddr = ia->ia_ifa.ifa_addr;
+ flags |= RTF_HOST;
+ } else if (ifp->if_flags & IFF_POINTOPOINT) {
+ if (ia->ia_dstaddr.sin_family != AF_INET) {
+ IFA_UNLOCK(&ia->ia_ifa);
+ lck_mtx_unlock(rnh_lock);
+ /* Release extra reference taken above */
+ IFA_REMREF(&ia->ia_ifa);
+ return 0;
+ }
+ ia->ia_dstaddr.sin_len = sizeof(struct sockaddr_in);
+ flags |= RTF_HOST;
+ }
+ IFA_UNLOCK(&ia->ia_ifa);
+
+ if ((error = rtinit_locked(&(ia->ia_ifa), (int)RTM_ADD, flags)) == 0) {
+ IFA_LOCK(&ia->ia_ifa);
+ ia->ia_flags |= IFA_ROUTE;
+ IFA_UNLOCK(&ia->ia_ifa);
+ }
+ lck_mtx_unlock(rnh_lock);
+
+ /* XXX check if the subnet route points to the same interface */
+ if (error == EEXIST) {
+ error = 0;
+ }
+
+ /*
+ * If the interface supports multicast, join the "all hosts"
+ * multicast group on that interface.
+ */
+ if (ifp->if_flags & IFF_MULTICAST) {
+ struct in_addr addr;
+
+ lck_mtx_lock(&ifp->if_addrconfig_lock);
+ addr.s_addr = htonl(INADDR_ALLHOSTS_GROUP);
+ if (ifp->if_allhostsinm == NULL) {
+ struct in_multi *inm;
+ inm = in_addmulti(&addr, ifp);
+
+ if (inm != NULL) {
+ /*
+ * Keep the reference on inm added by
+ * in_addmulti above for storing the
+ * pointer in allhostsinm.
+ */
+ ifp->if_allhostsinm = inm;
+ } else {
+ printf("%s: failed to add membership to "
+ "all-hosts multicast address on %s\n",
+ __func__, if_name(ifp));
+ }
+ }
+ lck_mtx_unlock(&ifp->if_addrconfig_lock);
+ }
+
+ /* Release extra reference taken above */
+ IFA_REMREF(&ia->ia_ifa);
+
+ if (error == 0) {
+ /* invalidate route caches */
+ routegenid_inet_update();
+ }
+
+ return error;
+}
+
+/*
+ * Return TRUE if the address might be a local broadcast address.
+ */
+boolean_t
+in_broadcast(struct in_addr in, struct ifnet *ifp)
+{
+ struct ifaddr *ifa;
+ u_int32_t t;
+
+ if (in.s_addr == INADDR_BROADCAST || in.s_addr == INADDR_ANY) {
+ return TRUE;
+ }
+ if (!(ifp->if_flags & IFF_BROADCAST)) {
+ return FALSE;
+ }
+ t = ntohl(in.s_addr);
+
+ /*
+ * Look through the list of addresses for a match
+ * with a broadcast address.
+ */
+#define ia ((struct in_ifaddr *)ifa)
+ ifnet_lock_shared(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ IFA_LOCK(ifa);
+ if (ifa->ifa_addr->sa_family == AF_INET &&
+ (in.s_addr == ia->ia_broadaddr.sin_addr.s_addr ||
+ in.s_addr == ia->ia_netbroadcast.s_addr ||
+ /*
+ * Check for old-style (host 0) broadcast.
+ */
+ t == ia->ia_subnet || t == ia->ia_net) &&
+ /*
+ * Check for an all one subnetmask. These
+ * only exist when an interface gets a secondary
+ * address.
+ */
+ ia->ia_subnetmask != (u_int32_t)0xffffffff) {
+ IFA_UNLOCK(ifa);
+ ifnet_lock_done(ifp);
+ return TRUE;
+ }
+ IFA_UNLOCK(ifa);
+ }
+ ifnet_lock_done(ifp);
+ return FALSE;
+#undef ia
+}
+
/*
 * Remove all IPv4 addresses from @ifp by issuing SIOCDIFADDR for each.
 * Called during interface detach, after the ifnet has been removed from
 * the global list; failures (other than losing a removal race) are logged.
 */
void
in_purgeaddrs(struct ifnet *ifp)
{
	struct ifaddr **ifap;
	int err, i;

	VERIFY(ifp != NULL);

	/*
	 * Be nice, and try the civilized way first. If we can't get
	 * rid of them this way, then do it the rough way. We must
	 * only get here during detach time, after the ifnet has been
	 * removed from the global list and arrays.
	 */
	err = ifnet_get_address_list_family_internal(ifp, &ifap, AF_INET, 1,
	    M_WAITOK, 0);
	if (err == 0 && ifap != NULL) {
		struct ifreq ifr;

		bzero(&ifr, sizeof(ifr));
		(void) snprintf(ifr.ifr_name, sizeof(ifr.ifr_name),
		    "%s", if_name(ifp));

		for (i = 0; ifap[i] != NULL; i++) {
			struct ifaddr *ifa;

			ifa = ifap[i];
			IFA_LOCK(ifa);
			/* snapshot the address under the lock for the ioctl */
			bcopy(ifa->ifa_addr, &ifr.ifr_addr,
			    sizeof(struct sockaddr_in));
			IFA_UNLOCK(ifa);
			err = in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
			    kernproc);
			/* if we lost the race, ignore it */
			if (err == EADDRNOTAVAIL) {
				err = 0;
			}
			if (err != 0) {
				char s_addr[MAX_IPv4_STR_LEN];
				char s_dstaddr[MAX_IPv4_STR_LEN];
				struct in_addr *s, *d;

				IFA_LOCK(ifa);
				s = &((struct sockaddr_in *)
				    (void *)ifa->ifa_addr)->sin_addr;
				d = &((struct sockaddr_in *)
				    (void *)ifa->ifa_dstaddr)->sin_addr;
				(void) inet_ntop(AF_INET, &s->s_addr, s_addr,
				    sizeof(s_addr));
				(void) inet_ntop(AF_INET, &d->s_addr, s_dstaddr,
				    sizeof(s_dstaddr));
				IFA_UNLOCK(ifa);

				printf("%s: SIOCDIFADDR ifp=%s ifa_addr=%s "
				    "ifa_dstaddr=%s (err=%d)\n", __func__,
				    ifp->if_xname, s_addr, s_dstaddr, err);
			}
		}
		ifnet_free_address_list(ifap);
	} else if (err != 0 && err != ENXIO) {
		printf("%s: error retrieving list of AF_INET addresses for "
		    "ifp=%s (err=%d)\n", __func__, ifp->if_xname, err);
	}
}
+
+/*
+ * Called as part of ip_init
+ */
+void
+in_ifaddr_init(void)
+{
+ in_multi_init();
+
+ PE_parse_boot_argn("ifa_debug", &inifa_debug, sizeof(inifa_debug));
+
+ inifa_size = (inifa_debug == 0) ? sizeof(struct in_ifaddr) :
+ sizeof(struct in_ifaddr_dbg);
+
+ inifa_zone = zinit(inifa_size, INIFA_ZONE_MAX * inifa_size,
+ 0, INIFA_ZONE_NAME);
+ if (inifa_zone == NULL) {
+ panic("%s: failed allocating %s", __func__, INIFA_ZONE_NAME);
+ /* NOTREACHED */
+ }
+ zone_change(inifa_zone, Z_EXPAND, TRUE);
+ zone_change(inifa_zone, Z_CALLERACCT, FALSE);
+
+ lck_mtx_init(&inifa_trash_lock, ifa_mtx_grp, ifa_mtx_attr);
+ TAILQ_INIT(&inifa_trash_head);
+}
+
/*
 * Allocate and initialize an in_ifaddr from the zone.
 *
 * @param how	M_WAITOK to allow blocking; anything else allocates
 *		without blocking and may return NULL.
 *
 * Returns a zeroed, lock-initialized record (with debug tracing hooks
 * installed when inifa_debug is enabled), or NULL on failure.
 */
static struct in_ifaddr *
in_ifaddr_alloc(int how)
{
	struct in_ifaddr *inifa;

	inifa = (how == M_WAITOK) ? zalloc(inifa_zone) :
	    zalloc_noblock(inifa_zone);
	if (inifa != NULL) {
		bzero(inifa, inifa_size);
		inifa->ia_ifa.ifa_free = in_ifaddr_free;
		inifa->ia_ifa.ifa_debug |= IFD_ALLOC;
		inifa->ia_ifa.ifa_del_wc = &inifa->ia_ifa.ifa_debug;
		inifa->ia_ifa.ifa_del_waiters = 0;
		ifa_lock_init(&inifa->ia_ifa);
		if (inifa_debug != 0) {
			struct in_ifaddr_dbg *inifa_dbg =
			    (struct in_ifaddr_dbg *)inifa;
			inifa->ia_ifa.ifa_debug |= IFD_DEBUG;
			inifa->ia_ifa.ifa_trace = in_ifaddr_trace;
			inifa->ia_ifa.ifa_attached = in_ifaddr_attached;
			inifa->ia_ifa.ifa_detached = in_ifaddr_detached;
			/* record the allocation callsite for leak debugging */
			ctrace_record(&inifa_dbg->inifa_alloc);
		}
	}
	return inifa;
}
+
/*
 * Free an in_ifaddr back to the zone; called via ifa_free when the
 * reference count drops to zero.  Caller must hold the address lock,
 * which is released (and destroyed) here.  In debug mode the contents
 * are preserved in the debug record and the entry is unlinked from
 * the trash list before being freed.
 */
static void
in_ifaddr_free(struct ifaddr *ifa)
{
	IFA_LOCK_ASSERT_HELD(ifa);

	if (ifa->ifa_refcnt != 0) {
		panic("%s: ifa %p bad ref cnt", __func__, ifa);
		/* NOTREACHED */
	}
	if (!(ifa->ifa_debug & IFD_ALLOC)) {
		panic("%s: ifa %p cannot be freed", __func__, ifa);
		/* NOTREACHED */
	}
	if (ifa->ifa_debug & IFD_DEBUG) {
		struct in_ifaddr_dbg *inifa_dbg = (struct in_ifaddr_dbg *)ifa;
		ctrace_record(&inifa_dbg->inifa_free);
		/* keep a copy of the final state for post-mortem inspection */
		bcopy(&inifa_dbg->inifa, &inifa_dbg->inifa_old,
		    sizeof(struct in_ifaddr));
		if (ifa->ifa_debug & IFD_TRASHED) {
			/* Become a regular mutex, just in case */
			IFA_CONVERT_LOCK(ifa);
			lck_mtx_lock(&inifa_trash_lock);
			TAILQ_REMOVE(&inifa_trash_head, inifa_dbg,
			    inifa_trash_link);
			lck_mtx_unlock(&inifa_trash_lock);
			ifa->ifa_debug &= ~IFD_TRASHED;
		}
	}
	IFA_UNLOCK(ifa);
	ifa_lock_destroy(ifa);
	bzero(ifa, sizeof(struct in_ifaddr));
	zfree(inifa_zone, ifa);
}
+
/*
 * Debug hook invoked when an address is (re)attached to an interface;
 * pulls it off the trash list if it had previously been detached.
 * Caller must hold the address lock.
 */
static void
in_ifaddr_attached(struct ifaddr *ifa)
{
	struct in_ifaddr_dbg *inifa_dbg = (struct in_ifaddr_dbg *)ifa;

	IFA_LOCK_ASSERT_HELD(ifa);

	if (!(ifa->ifa_debug & IFD_DEBUG)) {
		panic("%s: ifa %p has no debug structure", __func__, ifa);
		/* NOTREACHED */
	}
	if (ifa->ifa_debug & IFD_TRASHED) {
		/* Become a regular mutex, just in case */
		IFA_CONVERT_LOCK(ifa);
		lck_mtx_lock(&inifa_trash_lock);
		TAILQ_REMOVE(&inifa_trash_head, inifa_dbg, inifa_trash_link);
		lck_mtx_unlock(&inifa_trash_lock);
		ifa->ifa_debug &= ~IFD_TRASHED;
	}
}
+
/*
 * Debug hook invoked when an address is detached from an interface;
 * parks it on the trash list so lingering references can be audited.
 * Caller must hold the address lock.
 */
static void
in_ifaddr_detached(struct ifaddr *ifa)
{
	struct in_ifaddr_dbg *inifa_dbg = (struct in_ifaddr_dbg *)ifa;

	IFA_LOCK_ASSERT_HELD(ifa);

	if (!(ifa->ifa_debug & IFD_DEBUG)) {
		panic("%s: ifa %p has no debug structure", __func__, ifa);
		/* NOTREACHED */
	} else if (ifa->ifa_debug & IFD_TRASHED) {
		panic("%s: ifa %p is already in trash list", __func__, ifa);
		/* NOTREACHED */
	}
	ifa->ifa_debug |= IFD_TRASHED;
	/* Become a regular mutex, just in case */
	IFA_CONVERT_LOCK(ifa);
	lck_mtx_lock(&inifa_trash_lock);
	TAILQ_INSERT_TAIL(&inifa_trash_head, inifa_dbg, inifa_trash_link);
	lck_mtx_unlock(&inifa_trash_lock);
}
+
/*
 * Debug hook that records the callsite of each reference hold/release
 * into a per-address circular history buffer.
 *
 * @param refhold	non-zero for an addref, zero for a remref
 */
static void
in_ifaddr_trace(struct ifaddr *ifa, int refhold)
{
	struct in_ifaddr_dbg *inifa_dbg = (struct in_ifaddr_dbg *)ifa;
	ctrace_t *tr;
	u_int32_t idx;
	u_int16_t *cnt;

	if (!(ifa->ifa_debug & IFD_DEBUG)) {
		panic("%s: ifa %p has no debug structure", __func__, ifa);
		/* NOTREACHED */
	}
	if (refhold) {
		cnt = &inifa_dbg->inifa_refhold_cnt;
		tr = inifa_dbg->inifa_refhold;
	} else {
		cnt = &inifa_dbg->inifa_refrele_cnt;
		tr = inifa_dbg->inifa_refrele;
	}

	/* atomically bump the counter and wrap into the history ring */
	idx = atomic_add_16_ov(cnt, 1) % INIFA_TRACE_HIST_SIZE;
	ctrace_record(&tr[idx]);
}
+
+/*
+ * Handle SIOCGASSOCIDS ioctl for PF_INET domain.
+ */
+static int
+in_getassocids(struct socket *so, uint32_t *cnt, user_addr_t aidp)
+{
+ struct inpcb *inp = sotoinpcb(so);
+ sae_associd_t aid;
+
+ if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) {
+ return EINVAL;
+ }
+
+ /* INPCB has no concept of association */
+ aid = SAE_ASSOCID_ANY;
+ *cnt = 0;
+
+ /* just asking how many there are? */
+ if (aidp == USER_ADDR_NULL) {
+ return 0;
+ }
+
+ return copyout(&aid, aidp, sizeof(aid));
+}
+
+/*
+ * Handle SIOCGCONNIDS ioctl for PF_INET domain.
+ */
+static int
+in_getconnids(struct socket *so, sae_associd_t aid, uint32_t *cnt,
+ user_addr_t cidp)
+{
+ struct inpcb *inp = sotoinpcb(so);
+ sae_connid_t cid;
+
+ if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) {
+ return EINVAL;
+ }
+
+ if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) {
+ return EINVAL;
+ }
+
+ /* if connected, return 1 connection count */
+ *cnt = ((so->so_state & SS_ISCONNECTED) ? 1 : 0);
+
+ /* just asking how many there are? */
+ if (cidp == USER_ADDR_NULL) {
+ return 0;
+ }
+
+ /* if INPCB is connected, assign it connid 1 */
+ cid = ((*cnt != 0) ? 1 : SAE_CONNID_ANY);
+
+ return copyout(&cid, cidp, sizeof(cid));
+}
+
+/*
+ * Handle SIOCGCONNINFO ioctl for PF_INET domain.
+ */
+int
+in_getconninfo(struct socket *so, sae_connid_t cid, uint32_t *flags,
+ uint32_t *ifindex, int32_t *soerror, user_addr_t src, socklen_t *src_len,
+ user_addr_t dst, socklen_t *dst_len, uint32_t *aux_type,
+ user_addr_t aux_data, uint32_t *aux_len)
+{
+ struct inpcb *inp = sotoinpcb(so);
+ struct sockaddr_in sin;
+ struct ifnet *ifp = NULL;
+ int error = 0;
+ u_int32_t copy_len = 0;
+
+ /*
+ * Don't test for INPCB_STATE_DEAD since this may be called
+ * after SOF_PCBCLEARING is set, e.g. after tcp_close().
+ */
+ if (inp == NULL) {
+ error = EINVAL;
+ goto out;
+ }
+
+ if (cid != SAE_CONNID_ANY && cid != SAE_CONNID_ALL && cid != 1) {
+ error = EINVAL;
+ goto out;
+ }
+
+ ifp = inp->inp_last_outifp;
+ *ifindex = ((ifp != NULL) ? ifp->if_index : 0);
+ *soerror = so->so_error;
+ *flags = 0;
+ if (so->so_state & SS_ISCONNECTED) {
+ *flags |= (CIF_CONNECTED | CIF_PREFERRED);
+ }
+ if (inp->inp_flags & INP_BOUND_IF) {
+ *flags |= CIF_BOUND_IF;
+ }
+ if (!(inp->inp_flags & INP_INADDR_ANY)) {
+ *flags |= CIF_BOUND_IP;
+ }
+ if (!(inp->inp_flags & INP_ANONPORT)) {
+ *flags |= CIF_BOUND_PORT;
+ }
+
+ bzero(&sin, sizeof(sin));
+ sin.sin_len = sizeof(sin);
+ sin.sin_family = AF_INET;
+
+ /* source address and port */
+ sin.sin_port = inp->inp_lport;
+ sin.sin_addr.s_addr = inp->inp_laddr.s_addr;
+ if (*src_len == 0) {
+ *src_len = sin.sin_len;
+ } else {
+ if (src != USER_ADDR_NULL) {
+ copy_len = min(*src_len, sizeof(sin));
+ error = copyout(&sin, src, copy_len);
+ if (error != 0) {
+ goto out;
+ }
+ *src_len = copy_len;
+ }
+ }
+
+ /* destination address and port */
+ sin.sin_port = inp->inp_fport;
+ sin.sin_addr.s_addr = inp->inp_faddr.s_addr;
+ if (*dst_len == 0) {
+ *dst_len = sin.sin_len;
+ } else {
+ if (dst != USER_ADDR_NULL) {
+ copy_len = min(*dst_len, sizeof(sin));
+ error = copyout(&sin, dst, copy_len);
+ if (error != 0) {
+ goto out;
+ }
+ *dst_len = copy_len;
+ }
+ }
+
+ if (SOCK_PROTO(so) == IPPROTO_TCP) {
+ struct conninfo_tcp tcp_ci;
+
+ *aux_type = CIAUX_TCP;
+ if (*aux_len == 0) {
+ *aux_len = sizeof(tcp_ci);
+ } else {
+ if (aux_data != USER_ADDR_NULL) {
+ copy_len = min(*aux_len, sizeof(tcp_ci));
+ bzero(&tcp_ci, sizeof(tcp_ci));
+ tcp_getconninfo(so, &tcp_ci);
+ error = copyout(&tcp_ci, aux_data, copy_len);
+ if (error != 0) {
+ goto out;
+ }
+ *aux_len = copy_len;
+ }
+ }
+ } else {
+ *aux_type = 0;
+ *aux_len = 0;
+ }
+
+out:
+ return error;
+}
+
/* IPv4 wrapper around the generic link-layer table entry. */
struct in_llentry {
	struct llentry base;
};

/* default number of hash buckets for the per-interface lltable */
#define IN_LLTBL_DEFAULT_HSIZE 32
/* fold all four octets of the key into the bucket index (h must be a power of 2) */
#define IN_LLTBL_HASH(k, h) \
	((((((((k) >> 8) ^ (k)) >> 8) ^ (k)) >> 8) ^ (k)) & ((h) - 1))
+
+/*
+ * Do actual deallocation of @lle.
+ */
+static void
+in_lltable_destroy_lle_unlocked(struct llentry *lle)
+{
+ LLE_LOCK_DESTROY(lle);
+ LLE_REQ_DESTROY(lle);
+ FREE(lle, M_LLTABLE);
+}
+
+/*
+ * Called by LLE_FREE_LOCKED when number of references
+ * drops to zero.
+ */
+static void
+in_lltable_destroy_lle(struct llentry *lle)
+{
+ LLE_WUNLOCK(lle);
+ in_lltable_destroy_lle_unlocked(lle);
+}
+
/*
 * Allocate a new IPv4 link-layer table entry for @addr4.
 * Returns the embedded generic llentry (refcnt 1, marked expired),
 * or NULL if allocation fails.
 */
static struct llentry *
in_lltable_new(struct in_addr addr4, u_int flags)
{
#pragma unused(flags)
	struct in_llentry *lle;

	MALLOC(lle, struct in_llentry *, sizeof(struct in_llentry), M_LLTABLE, M_NOWAIT | M_ZERO);
	if (lle == NULL) { /* NB: caller generates msg */
		return NULL;
	}

	/*
	 * For IPv4 this will trigger "arpresolve" to generate
	 * an ARP request.
	 */
	lle->base.la_expire = net_uptime(); /* mark expired */
	lle->base.r_l3addr.addr4 = addr4;
	lle->base.lle_refcnt = 1;
	lle->base.lle_free = in_lltable_destroy_lle;

	LLE_LOCK_INIT(&lle->base);
	LLE_REQ_INIT(&lle->base);
	//callout_init(&lle->base.lle_timer, 1);

	return &lle->base;
}
+
/* true when (d) and (a) agree on every bit selected by mask (m) */
#define IN_ARE_MASKED_ADDR_EQUAL(d, a, m)	(	\
	((((d).s_addr ^ (a).s_addr) & (m).s_addr)) == 0 )

/*
 * Decide whether @lle falls within the prefix described by @saddr/@smask
 * and should be purged given @flags.  Returns 1 to delete, 0 to keep.
 *
 * LLE_IFADDR entries are only deleted on an exact address match with
 * LLE_STATIC set; otherwise LLE_STATIC in @flags means "delete both
 * dynamic and static entries".
 */
static int
in_lltable_match_prefix(const struct sockaddr *saddr,
    const struct sockaddr *smask, u_int flags, struct llentry *lle)
{
	struct in_addr addr, mask, lle_addr;

	addr = ((const struct sockaddr_in *)(const void *)saddr)->sin_addr;
	mask = ((const struct sockaddr_in *)(const void *)smask)->sin_addr;
	/*
	 * NOTE(review): the entry's address is byte-swapped here while
	 * addr/mask come straight from the sockaddrs; this mirrors the
	 * upstream implementation — confirm callers pass all three in
	 * consistent byte order before changing.
	 */
	lle_addr.s_addr = ntohl(lle->r_l3addr.addr4.s_addr);

	if (IN_ARE_MASKED_ADDR_EQUAL(lle_addr, addr, mask) == 0) {
		return 0;
	}

	if (lle->la_flags & LLE_IFADDR) {
		/*
		 * Delete LLE_IFADDR records IFF address & flag matches.
		 * Note that addr is the interface address within prefix
		 * being matched.
		 * Note also we should handle 'ifdown' cases without removing
		 * ifaddr macs.
		 */
		if (addr.s_addr == lle_addr.s_addr && (flags & LLE_STATIC) != 0) {
			return 1;
		}
		return 0;
	}

	/* flags & LLE_STATIC means deleting both dynamic and static entries */
	if ((flags & LLE_STATIC) || !(lle->la_flags & LLE_STATIC)) {
		return 1;
	}

	return 0;
}
+
/*
 * Unlink (if necessary) and free a link-layer entry, accounting any
 * packets that were queued on it as dropped.  Caller must hold the
 * entry's write lock; when the entry is still linked, the table's
 * afdata write lock must be held as well.
 */
static void
in_lltable_free_entry(struct lltable *llt, struct llentry *lle)
{
	struct ifnet *ifp;
	size_t pkts_dropped;

	LLE_WLOCK_ASSERT(lle);
	KASSERT(llt != NULL, ("lltable is NULL"));

	/* Unlink entry from table if not already */
	if ((lle->la_flags & LLE_LINKED) != 0) {
		ifp = llt->llt_ifp;
		IF_AFDATA_WLOCK_ASSERT(ifp, llt->llt_af);
		lltable_unlink_entry(llt, lle);
	}

#if 0
	/* cancel timer */
	if (callout_stop(&lle->lle_timer) > 0) {
		LLE_REMREF(lle);
	}
#endif
	/* Drop hold queue */
	pkts_dropped = llentry_free(lle);
	arpstat.dropped += pkts_dropped;
}
+
+
/*
 * Verify that @l3addr is directly reachable on @ifp: there must be a
 * non-gateway route for it whose outgoing interface is @ifp.  Returns
 * 0 when the check passes, EINVAL otherwise.
 */
static int
in_lltable_rtcheck(struct ifnet *ifp, u_int flags, const struct sockaddr *l3addr)
{
#pragma unused(flags)
	struct rtentry *rt;

	KASSERT(l3addr->sa_family == AF_INET,
	    ("sin_family %d", l3addr->sa_family));

	/* XXX rtalloc1 should take a const param */
	rt = rtalloc1(__DECONST(struct sockaddr *, l3addr), 0, 0);
	if (rt == NULL || (rt->rt_flags & RTF_GATEWAY) || rt->rt_ifp != ifp) {
		log(LOG_INFO, "IPv4 address: \"%s\" is not on the network\n",
		    inet_ntoa(((const struct sockaddr_in *)(const void *)l3addr)->sin_addr));
		if (rt != NULL) {
			rtfree_locked(rt);
		}
		return EINVAL;
	}
	rtfree_locked(rt);
	return 0;
}
+
+static inline uint32_t
+in_lltable_hash_dst(const struct in_addr dst, uint32_t hsize)
+{
+ return IN_LLTBL_HASH(dst.s_addr, hsize);
+}
+
/* lltable hash callback: bucket index for an existing entry's address. */
static uint32_t
in_lltable_hash(const struct llentry *lle, uint32_t hsize)
{
	return in_lltable_hash_dst(lle->r_l3addr.addr4, hsize);
}
+
+
+static void
+in_lltable_fill_sa_entry(const struct llentry *lle, struct sockaddr *sa)
+{
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)(void *)sa;
+ bzero(sin, sizeof(*sin));
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof(*sin);
+ sin->sin_addr = lle->r_l3addr.addr4;
+}
+
/*
 * Look up the live (not LLE_DELETED) entry for @dst in @llt's hash
 * chain.  Returns the entry unlocked and without a reference, or NULL
 * if not found; caller must hold the table lock to keep it stable.
 */
static inline struct llentry *
in_lltable_find_dst(struct lltable *llt, struct in_addr dst)
{
	struct llentry *lle;
	struct llentries *lleh;
	u_int hashidx;

	hashidx = in_lltable_hash_dst(dst, llt->llt_hsize);
	lleh = &llt->lle_head[hashidx];
	LIST_FOREACH(lle, lleh, lle_next) {
		if (lle->la_flags & LLE_DELETED) {
			continue;
		}
		if (lle->r_l3addr.addr4.s_addr == dst.s_addr) {
			break;
		}
	}

	return lle;
}
+
/*
 * lltable callback: mark @lle deleted (so lookups skip it) and release
 * it via llentry_free().
 */
static void
in_lltable_delete_entry(struct lltable *llt, struct llentry *lle)
{
#pragma unused(llt)
	lle->la_flags |= LLE_DELETED;
	//EVENTHANDLER_INVOKE(lle_event, lle, LLENTRY_DELETED);
#ifdef DIAGNOSTIC
	log(LOG_INFO, "ifaddr cache = %p is deleted\n", lle);
#endif
	llentry_free(lle);
}
+
/*
 * lltable callback: allocate an entry for @l3addr.  Unless the entry
 * represents an interface's own address (LLE_IFADDR), the address must
 * first pass the direct-reachability route check.  IFADDR entries are
 * seeded with the interface's own link-layer address and pinned static.
 * Returns NULL on check or allocation failure.
 */
static struct llentry *
in_lltable_alloc(struct lltable *llt, u_int flags, const struct sockaddr *l3addr)
{
	const struct sockaddr_in *sin = (const struct sockaddr_in *) (const void *)l3addr;
	struct ifnet *ifp = llt->llt_ifp;
	struct llentry *lle;

	KASSERT(l3addr->sa_family == AF_INET,
	    ("sin_family %d", l3addr->sa_family));

	/*
	 * A route that covers the given address must have
	 * been installed 1st because we are doing a resolution,
	 * verify this.
	 */
	if (!(flags & LLE_IFADDR) &&
	    in_lltable_rtcheck(ifp, flags, l3addr) != 0) {
		return NULL;
	}

	lle = in_lltable_new(sin->sin_addr, flags);
	if (lle == NULL) {
		log(LOG_INFO, "lla_lookup: new lle malloc failed\n");
		return NULL;
	}
	lle->la_flags = flags & ~LLE_CREATE;
	if (flags & LLE_STATIC) {
		lle->r_flags |= RLLE_VALID;
	}
	if ((flags & LLE_IFADDR) == LLE_IFADDR) {
		/* our own address: fill in this interface's lladdr and pin it */
		lltable_set_entry_addr(ifp, lle, LLADDR(SDL(ifp->if_lladdr->ifa_addr)));
		lle->la_flags |= LLE_STATIC;
		lle->r_flags |= (RLLE_VALID | RLLE_IFADDR);
	}
	return lle;
}
+
+/*
+ * Return NULL if not found or marked for deletion.
+ * If found return lle read locked.
+ */
+static struct llentry *
+in_lltable_lookup(struct lltable *llt, u_int flags, const struct sockaddr *l3addr)
+{
+ const struct sockaddr_in *sin = (const struct sockaddr_in *)(const void *)l3addr;
+ struct llentry *lle;
+
+ IF_AFDATA_WLOCK_ASSERT(llt->llt_ifp, llt->llt_af);
+
+ KASSERT(l3addr->sa_family == AF_INET,
+ ("sin_family %d", l3addr->sa_family));
+ lle = in_lltable_find_dst(llt, sin->sin_addr);
+
+ if (lle == NULL) {
+ return NULL;
+ }
+
+ KASSERT((flags & (LLE_UNLOCKED | LLE_EXCLUSIVE)) !=
+ (LLE_UNLOCKED | LLE_EXCLUSIVE), ("wrong lle request flags: 0x%X",
+ flags));
+
+ if (flags & LLE_UNLOCKED) {
+ return lle;
+ }
+
+ if (flags & LLE_EXCLUSIVE) {
+ LLE_WLOCK(lle);
+ } else {
+ LLE_RLOCK(lle);
+ }
+
+ return lle;
+}
+
/*
 * lltable callback: emit one ARP-table entry to a sysctl request as a
 * routing message (rt_msghdr + sockaddr_in + sockaddr_dl).  Deleted
 * entries are silently skipped.
 */
static int
in_lltable_dump_entry(struct lltable *llt, struct llentry *lle,
    struct sysctl_req *wr)
{
	struct ifnet *ifp = llt->llt_ifp;
	/* XXX stack use */
	struct {
		struct rt_msghdr	rtm;
		struct sockaddr_in	sin;
		struct sockaddr_dl	sdl;
	} arpc;
	struct sockaddr_dl *sdl;
	int error;

	bzero(&arpc, sizeof(arpc));
	/* skip deleted entries */
	if ((lle->la_flags & LLE_DELETED) == LLE_DELETED) {
		return 0;
	}
	/* Skip if jailed and not a valid IP of the prison. */
	lltable_fill_sa_entry(lle, (struct sockaddr *)&arpc.sin);
	/*
	 * produce a msg made of:
	 *  struct rt_msghdr;
	 *  struct sockaddr_in; (IPv4)
	 *  struct sockaddr_dl;
	 */
	arpc.rtm.rtm_msglen = sizeof(arpc);
	arpc.rtm.rtm_version = RTM_VERSION;
	arpc.rtm.rtm_type = RTM_GET;
	arpc.rtm.rtm_flags = RTF_UP;
	arpc.rtm.rtm_addrs = RTA_DST | RTA_GATEWAY;

	/* publish */
	if (lle->la_flags & LLE_PUB) {
		arpc.rtm.rtm_flags |= RTF_ANNOUNCE;
	}

	sdl = &arpc.sdl;
	sdl->sdl_family = AF_LINK;
	sdl->sdl_len = sizeof(*sdl);
	sdl->sdl_index = ifp->if_index;
	sdl->sdl_type = ifp->if_type;
	if ((lle->la_flags & LLE_VALID) == LLE_VALID) {
		/* resolved: include the cached link-layer address */
		sdl->sdl_alen = ifp->if_addrlen;
		bcopy(&lle->ll_addr, LLADDR(sdl), ifp->if_addrlen);
	} else {
		sdl->sdl_alen = 0;
		bzero(LLADDR(sdl), ifp->if_addrlen);
	}

	/* static entries never expire */
	arpc.rtm.rtm_rmx.rmx_expire =
	    lle->la_flags & LLE_STATIC ? 0 : lle->la_expire;
	arpc.rtm.rtm_flags |= (RTF_HOST | RTF_LLDATA);
	if (lle->la_flags & LLE_STATIC) {
		arpc.rtm.rtm_flags |= RTF_STATIC;
	}
	if (lle->la_flags & LLE_IFADDR) {
		arpc.rtm.rtm_flags |= RTF_PINNED;
	}
	/*
	 * NOTE(review): RTF_PINNED is also set unconditionally here, which
	 * makes the LLE_IFADDR check above redundant — intentional in the
	 * upstream source; verify before removing either.
	 */
	arpc.rtm.rtm_flags |= RTF_PINNED;
	arpc.rtm.rtm_index = ifp->if_index;
	error = SYSCTL_OUT(wr, &arpc, sizeof(arpc));

	return error;
}
+
/*
 * Create, wire up, and register the IPv4 link-layer (ARP) table for
 * @ifp, installing all the AF_INET-specific callbacks.
 */
static struct lltable *
in_lltattach(struct ifnet *ifp)
{
	struct lltable *llt;

	llt = lltable_allocate_htbl(IN_LLTBL_DEFAULT_HSIZE);
	llt->llt_af = AF_INET;
	llt->llt_ifp = ifp;

	llt->llt_lookup = in_lltable_lookup;
	llt->llt_alloc_entry = in_lltable_alloc;
	llt->llt_delete_entry = in_lltable_delete_entry;
	llt->llt_dump_entry = in_lltable_dump_entry;
	llt->llt_hash = in_lltable_hash;
	llt->llt_fill_sa_entry = in_lltable_fill_sa_entry;
	llt->llt_free_entry = in_lltable_free_entry;
	llt->llt_match_prefix = in_lltable_match_prefix;
	lltable_link(llt);

	return llt;
}
+
/*
 * Return the first IPv4 address on @ifp whose ia_flags contain all the
 * bits in @flag, with a reference held for the caller; NULL if none.
 * Caller is responsible for releasing the reference.
 */
struct in_ifaddr*
inifa_ifpwithflag(struct ifnet * ifp, uint32_t flag)
{
	struct ifaddr *ifa;

	ifnet_lock_shared(ifp);
	TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_link)
	{
		IFA_LOCK_SPIN(ifa);
		if (ifa->ifa_addr->sa_family != AF_INET) {
			IFA_UNLOCK(ifa);
			continue;
		}
		if ((((struct in_ifaddr *)ifa)->ia_flags & flag) == flag) {
			/* take a reference before the lock is dropped */
			IFA_ADDREF_LOCKED(ifa);
			IFA_UNLOCK(ifa);
			break;
		}
		IFA_UNLOCK(ifa);
	}
	ifnet_lock_done(ifp);

	/* ifa is NULL here when the loop ran to completion */
	return (struct in_ifaddr *)ifa;
}
+
+struct in_ifaddr *
+inifa_ifpclatv4(struct ifnet * ifp)
+{
+ struct ifaddr *ifa;
+
+ ifnet_lock_shared(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_link)
+ {
+ uint32_t addr = 0;
+ IFA_LOCK_SPIN(ifa);
+ if (ifa->ifa_addr->sa_family != AF_INET) {
+ IFA_UNLOCK(ifa);
+ continue;
+ }
+
+ addr = ntohl(SIN(ifa->ifa_addr)->sin_addr.s_addr);
+ if (!IN_LINKLOCAL(addr) &&
+ !IN_LOOPBACK(addr)) {
+ IFA_ADDREF_LOCKED(ifa);
+ IFA_UNLOCK(ifa);
+ break;
+ }
+ IFA_UNLOCK(ifa);
+ }
+ ifnet_lock_done(ifp);
+
+ return (struct in_ifaddr *)ifa;