+
+ return (0);
+}
+
+/*
+ * Posts in6_event_data message kernel events.
+ *
+ * To get the same size of kev_in6_data between ILP32 and LP64 data models
+ * we use a special version of the in6_addrlifetime structure that
+ * uses only 32-bit fields, to be compatible with Leopard, and that
+ * is large enough to span 68 years.
+ */
+void
+in6_post_msg(struct ifnet *ifp, u_int32_t event_code, struct in6_ifaddr *ifa,
+    uint8_t *mac)
+{
+	struct kev_msg ev_msg;
+	struct kev_in6_data in6_event_data;
+	struct in6_addrlifetime ia6_lt;
+
+	/* Zero both structures so any fields left unset are deterministic. */
+	bzero(&in6_event_data, sizeof (struct kev_in6_data));
+	bzero(&ev_msg, sizeof (struct kev_msg));
+	ev_msg.vendor_code = KEV_VENDOR_APPLE;
+	ev_msg.kev_class = KEV_NETWORK_CLASS;
+	ev_msg.kev_subclass = KEV_INET6_SUBCLASS;
+	ev_msg.event_code = event_code;
+
+	/* Snapshot the address fields while holding the ifaddr lock. */
+	IFA_LOCK(&ifa->ia_ifa);
+	in6_event_data.ia_addr = ifa->ia_addr;
+	in6_event_data.ia_net = ifa->ia_net;
+	in6_event_data.ia_dstaddr = ifa->ia_dstaddr;
+	in6_event_data.ia_prefixmask = ifa->ia_prefixmask;
+	in6_event_data.ia_plen = ifa->ia_plen;
+	in6_event_data.ia6_flags = (u_int32_t)ifa->ia6_flags;
+
+	/* retrieve time as calendar time (last arg is 1) */
+	in6ifa_getlifetime(ifa, &ia6_lt, 1);
+	/* Copy field by field: the event carries the 32-bit lifetime layout. */
+	in6_event_data.ia_lifetime.ia6t_expire = ia6_lt.ia6t_expire;
+	in6_event_data.ia_lifetime.ia6t_preferred = ia6_lt.ia6t_preferred;
+	in6_event_data.ia_lifetime.ia6t_vltime = ia6_lt.ia6t_vltime;
+	in6_event_data.ia_lifetime.ia6t_pltime = ia6_lt.ia6t_pltime;
+	IFA_UNLOCK(&ifa->ia_ifa);
+
+	if (ifp != NULL) {
+		/* Identify the interface the event pertains to. */
+		(void) strlcpy(&in6_event_data.link_data.if_name[0],
+		    ifp->if_name, IFNAMSIZ);
+		in6_event_data.link_data.if_family = ifp->if_family;
+		in6_event_data.link_data.if_unit = (u_int32_t)ifp->if_unit;
+	}
+
+	if (mac != NULL)
+		memcpy(&in6_event_data.ia_mac, mac,
+		    sizeof(in6_event_data.ia_mac));
+
+	/* Single data vector; a zero length terminates the vector list. */
+	ev_msg.dv[0].data_ptr = &in6_event_data;
+	ev_msg.dv[0].data_length = sizeof (in6_event_data);
+	ev_msg.dv[1].data_length = 0;
+
+	kev_post_msg(&ev_msg);
+}
+
+/*
+ * Called as part of ip6_init.
+ *
+ * Sets up the zone that backs struct in6_ifaddr allocations and the
+ * trash list/lock used by the debug callbacks below.
+ */
+void
+in6_ifaddr_init(void)
+{
+	in6_cga_init();
+	in6_multi_init();
+
+	/* The "ifa_debug" boot-arg selects the larger debug variant below. */
+	PE_parse_boot_argn("ifa_debug", &in6ifa_debug, sizeof (in6ifa_debug));
+
+	in6ifa_size = (in6ifa_debug == 0) ? sizeof (struct in6_ifaddr) :
+	    sizeof (struct in6_ifaddr_dbg);
+
+	in6ifa_zone = zinit(in6ifa_size, IN6IFA_ZONE_MAX * in6ifa_size,
+	    0, IN6IFA_ZONE_NAME);
+	if (in6ifa_zone == NULL) {
+		panic("%s: failed allocating %s", __func__, IN6IFA_ZONE_NAME);
+		/* NOTREACHED */
+	}
+	zone_change(in6ifa_zone, Z_EXPAND, TRUE);
+	zone_change(in6ifa_zone, Z_CALLERACCT, FALSE);
+
+	lck_mtx_init(&in6ifa_trash_lock, ifa_mtx_grp, ifa_mtx_attr);
+	TAILQ_INIT(&in6ifa_trash_head);
+}
+
+/*
+ * Allocate and minimally initialize an in6_ifaddr from its zone.
+ *
+ * With M_WAITOK the allocation may block; otherwise a non-blocking
+ * attempt is made and NULL may be returned.  When "ifa_debug" is
+ * enabled, the debug callbacks are wired up and the allocation
+ * backtrace is recorded.
+ */
+static struct in6_ifaddr *
+in6_ifaddr_alloc(int how)
+{
+	struct in6_ifaddr *in6ifa;
+
+	in6ifa = (how == M_WAITOK) ? zalloc(in6ifa_zone) :
+	    zalloc_noblock(in6ifa_zone);
+	if (in6ifa != NULL) {
+		/* in6ifa_size covers the debug variant when enabled. */
+		bzero(in6ifa, in6ifa_size);
+		in6ifa->ia_ifa.ifa_free = in6_ifaddr_free;
+		in6ifa->ia_ifa.ifa_debug |= IFD_ALLOC;
+		ifa_lock_init(&in6ifa->ia_ifa);
+		if (in6ifa_debug != 0) {
+			struct in6_ifaddr_dbg *in6ifa_dbg =
+			    (struct in6_ifaddr_dbg *)in6ifa;
+			in6ifa->ia_ifa.ifa_debug |= IFD_DEBUG;
+			in6ifa->ia_ifa.ifa_trace = in6_ifaddr_trace;
+			in6ifa->ia_ifa.ifa_attached = in6_ifaddr_attached;
+			in6ifa->ia_ifa.ifa_detached = in6_ifaddr_detached;
+			ctrace_record(&in6ifa_dbg->in6ifa_alloc);
+		}
+	}
+
+	return (in6ifa);
+}
+
+/*
+ * ifa_free callback: final teardown of an in6_ifaddr once its reference
+ * count has dropped to zero.  Entered with the ifaddr lock held; the
+ * lock is released and destroyed here and the memory returned to the
+ * zone.
+ */
+static void
+in6_ifaddr_free(struct ifaddr *ifa)
+{
+	IFA_LOCK_ASSERT_HELD(ifa);
+
+	/* Freeing with live references, or a non-zone-allocated ifa, is fatal. */
+	if (ifa->ifa_refcnt != 0) {
+		panic("%s: ifa %p bad ref cnt", __func__, ifa);
+		/* NOTREACHED */
+	} else if (!(ifa->ifa_debug & IFD_ALLOC)) {
+		panic("%s: ifa %p cannot be freed", __func__, ifa);
+		/* NOTREACHED */
+	}
+	if (ifa->ifa_debug & IFD_DEBUG) {
+		struct in6_ifaddr_dbg *in6ifa_dbg =
+		    (struct in6_ifaddr_dbg *)ifa;
+		/* Record the free backtrace and preserve the final contents. */
+		ctrace_record(&in6ifa_dbg->in6ifa_free);
+		bcopy(&in6ifa_dbg->in6ifa, &in6ifa_dbg->in6ifa_old,
+		    sizeof (struct in6_ifaddr));
+		if (ifa->ifa_debug & IFD_TRASHED) {
+			/* Become a regular mutex, just in case */
+			IFA_CONVERT_LOCK(ifa);
+			lck_mtx_lock(&in6ifa_trash_lock);
+			TAILQ_REMOVE(&in6ifa_trash_head, in6ifa_dbg,
+			    in6ifa_trash_link);
+			lck_mtx_unlock(&in6ifa_trash_lock);
+			ifa->ifa_debug &= ~IFD_TRASHED;
+		}
+	}
+	IFA_UNLOCK(ifa);
+	ifa_lock_destroy(ifa);
+	bzero(ifa, sizeof (struct in6_ifaddr));
+	zfree(in6ifa_zone, ifa);
+}
+
+/*
+ * ifa_attached debug callback (IFD_DEBUG only): the address is being
+ * attached again, so pull it off the trash list if it was previously
+ * detached.  Called with the ifaddr lock held.
+ */
+static void
+in6_ifaddr_attached(struct ifaddr *ifa)
+{
+	struct in6_ifaddr_dbg *in6ifa_dbg = (struct in6_ifaddr_dbg *)ifa;
+
+	IFA_LOCK_ASSERT_HELD(ifa);
+
+	if (!(ifa->ifa_debug & IFD_DEBUG)) {
+		panic("%s: ifa %p has no debug structure", __func__, ifa);
+		/* NOTREACHED */
+	}
+	if (ifa->ifa_debug & IFD_TRASHED) {
+		/* Become a regular mutex, just in case */
+		IFA_CONVERT_LOCK(ifa);
+		lck_mtx_lock(&in6ifa_trash_lock);
+		TAILQ_REMOVE(&in6ifa_trash_head, in6ifa_dbg, in6ifa_trash_link);
+		lck_mtx_unlock(&in6ifa_trash_lock);
+		ifa->ifa_debug &= ~IFD_TRASHED;
+	}
+}
+
+/*
+ * ifa_detached debug callback (IFD_DEBUG only): the address has been
+ * detached but may still be referenced, so park it on the trash list
+ * where leak hunting tools can find it.  Called with the ifaddr lock
+ * held; double-detach is fatal.
+ */
+static void
+in6_ifaddr_detached(struct ifaddr *ifa)
+{
+	struct in6_ifaddr_dbg *in6ifa_dbg = (struct in6_ifaddr_dbg *)ifa;
+
+	IFA_LOCK_ASSERT_HELD(ifa);
+
+	if (!(ifa->ifa_debug & IFD_DEBUG)) {
+		panic("%s: ifa %p has no debug structure", __func__, ifa);
+		/* NOTREACHED */
+	} else if (ifa->ifa_debug & IFD_TRASHED) {
+		panic("%s: ifa %p is already in trash list", __func__, ifa);
+		/* NOTREACHED */
+	}
+	ifa->ifa_debug |= IFD_TRASHED;
+	/* Become a regular mutex, just in case */
+	IFA_CONVERT_LOCK(ifa);
+	lck_mtx_lock(&in6ifa_trash_lock);
+	TAILQ_INSERT_TAIL(&in6ifa_trash_head, in6ifa_dbg, in6ifa_trash_link);
+	lck_mtx_unlock(&in6ifa_trash_lock);
+}
+
+/*
+ * ifa_trace debug callback: record a backtrace for a reference hold
+ * (refhold != 0) or release into the corresponding history buffer.
+ */
+static void
+in6_ifaddr_trace(struct ifaddr *ifa, int refhold)
+{
+	struct in6_ifaddr_dbg *in6ifa_dbg = (struct in6_ifaddr_dbg *)ifa;
+	ctrace_t *tr;
+	u_int32_t idx;
+	u_int16_t *cnt;
+
+	if (!(ifa->ifa_debug & IFD_DEBUG)) {
+		panic("%s: ifa %p has no debug structure", __func__, ifa);
+		/* NOTREACHED */
+	}
+	/* Select the hold or release history depending on the operation. */
+	if (refhold) {
+		cnt = &in6ifa_dbg->in6ifa_refhold_cnt;
+		tr = in6ifa_dbg->in6ifa_refhold;
+	} else {
+		cnt = &in6ifa_dbg->in6ifa_refrele_cnt;
+		tr = in6ifa_dbg->in6ifa_refrele;
+	}
+
+	/* Atomically bump the counter; entries wrap around the ring buffer. */
+	idx = atomic_add_16_ov(cnt, 1) % IN6IFA_TRACE_HIST_SIZE;
+	ctrace_record(&tr[idx]);
+}
+
+/*
+ * Choose the initial DAD (Duplicate Address Detection) state for a newly
+ * configured address: IN6_IFF_TENTATIVE by default, or IN6_IFF_OPTIMISTIC
+ * (optimistic DAD, RFC 4429) when the nd6_optimistic_dad knob enables it
+ * for this address's category.
+ */
+static void
+in6_ifaddr_set_dadprogress(struct in6_ifaddr *ia)
+{
+	struct ifnet* ifp = ia->ia_ifp;
+	uint32_t flags = IN6_IFF_TENTATIVE;
+	uint32_t optdad = nd6_optimistic_dad;
+
+	if (optdad) {
+		/* Never use optimistic DAD on routers or replicated links. */
+		if ((ifp->if_eflags & IFEF_IPV6_ROUTER) != 0) {
+			optdad = 0;
+		} else {
+			struct nd_ifinfo *ndi = NULL;
+
+			ndi = ND_IFINFO(ifp);
+			VERIFY (ndi != NULL && ndi->initialized);
+			lck_mtx_lock(&ndi->lock);
+			if ((ndi->flags & ND6_IFF_REPLICATED) != 0) {
+				optdad = 0;
+			}
+			lck_mtx_unlock(&ndi->lock);
+		}
+	}
+
+	if (optdad) {
+		/* Match the address category against the enabled knob bits. */
+		if ((optdad & ND6_OPTIMISTIC_DAD_LINKLOCAL) &&
+		    IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr))
+			flags = IN6_IFF_OPTIMISTIC;
+		else if ((optdad & ND6_OPTIMISTIC_DAD_AUTOCONF) &&
+		    (ia->ia6_flags & IN6_IFF_AUTOCONF)) {
+			if (ia->ia6_flags & IN6_IFF_TEMPORARY) {
+				if (optdad & ND6_OPTIMISTIC_DAD_TEMPORARY)
+					flags = IN6_IFF_OPTIMISTIC;
+			} else if (ia->ia6_flags & IN6_IFF_SECURED) {
+				if (optdad & ND6_OPTIMISTIC_DAD_SECURED)
+					flags = IN6_IFF_OPTIMISTIC;
+			} else {
+				/*
+				 * Keeping the behavior for temp and CGA
+				 * SLAAC addresses to have a knob for optimistic
+				 * DAD.
+				 * Other than that if ND6_OPTIMISTIC_DAD_AUTOCONF
+				 * is set, we should default to optimistic
+				 * DAD.
+				 * For now this means SLAAC addresses with interface
+				 * identifier derived from modified EUI-64 bit
+				 * identifiers.
+				 */
+				flags = IN6_IFF_OPTIMISTIC;
+			}
+		} else if ((optdad & ND6_OPTIMISTIC_DAD_DYNAMIC) &&
+		    (ia->ia6_flags & IN6_IFF_DYNAMIC)) {
+			if (ia->ia6_flags & IN6_IFF_TEMPORARY) {
+				if (optdad & ND6_OPTIMISTIC_DAD_TEMPORARY)
+					flags = IN6_IFF_OPTIMISTIC;
+			} else {
+				flags = IN6_IFF_OPTIMISTIC;
+			}
+		} else if ((optdad & ND6_OPTIMISTIC_DAD_MANUAL) &&
+		    (ia->ia6_flags & IN6_IFF_OPTIMISTIC)) {
+			/*
+			 * rdar://17483438
+			 * Bypass tentative for address assignments
+			 * not covered above (e.g. manual) upon request
+			 */
+			if (!IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr) &&
+			    !(ia->ia6_flags & IN6_IFF_AUTOCONF) &&
+			    !(ia->ia6_flags & IN6_IFF_DYNAMIC))
+				flags = IN6_IFF_OPTIMISTIC;
+		}
+	}
+
+	/* Replace any previous DAD state with the one computed above. */
+	ia->ia6_flags &= ~(IN6_IFF_DUPLICATED | IN6_IFF_DADPROGRESS);
+	ia->ia6_flags |= flags;
+
+	nd6log2((LOG_DEBUG, "%s - %s ifp %s ia6_flags 0x%x\n",
+	    __func__,
+	    ip6_sprintf(&ia->ia_addr.sin6_addr),
+	    if_name(ia->ia_ifp),
+	    ia->ia6_flags));
+}
+
+/*
+ * Handle SIOCGASSOCIDS ioctl for PF_INET6 domain.
+ *
+ * An IN6PCB has no notion of associations, so the reported count is
+ * always zero; SAE_ASSOCID_ANY is copied out when a buffer is supplied.
+ */
+static int
+in6_getassocids(struct socket *so, uint32_t *cnt, user_addr_t aidp)
+{
+	struct in6pcb *pcb;
+	sae_associd_t aid = SAE_ASSOCID_ANY;
+
+	pcb = sotoin6pcb(so);
+	if (pcb == NULL)
+		return (EINVAL);
+	if (pcb->inp_state == INPCB_STATE_DEAD)
+		return (EINVAL);
+
+	/* IN6PCB has no concept of association */
+	*cnt = 0;
+
+	/* Caller may only be probing for the count. */
+	if (aidp == USER_ADDR_NULL)
+		return (0);
+
+	return (copyout(&aid, aidp, sizeof (aid)));
+}
+
+/*
+ * Handle SIOCGCONNIDS ioctl for PF_INET6 domain.
+ *
+ * Reports one connection (connid 1) when the socket is connected,
+ * otherwise zero; the connid is copied out when a buffer is supplied.
+ */
+static int
+in6_getconnids(struct socket *so, sae_associd_t aid, uint32_t *cnt,
+    user_addr_t cidp)
+{
+	struct in6pcb *pcb = sotoin6pcb(so);
+	sae_connid_t cid;
+	int connected;
+
+	if (pcb == NULL)
+		return (EINVAL);
+	if (pcb->inp_state == INPCB_STATE_DEAD)
+		return (EINVAL);
+	if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL)
+		return (EINVAL);
+
+	/* A connected socket counts as exactly one connection. */
+	connected = ((so->so_state & SS_ISCONNECTED) != 0);
+	*cnt = connected ? 1 : 0;
+
+	/* Caller may only be probing for the count. */
+	if (cidp == USER_ADDR_NULL)
+		return (0);
+
+	/* A connected IN6PCB is assigned connid 1. */
+	cid = connected ? 1 : SAE_CONNID_ANY;
+
+	return (copyout(&cid, cidp, sizeof (cid)));
+}
+
+/*
+ * Handle SIOCGCONNINFO ioctl for PF_INET6 domain.
+ *
+ * Reports connection state flags, the outbound interface index, the
+ * local and peer sockaddr_in6, and (for TCP) auxiliary connection info.
+ * src_len/dst_len/aux_len are in-out: zero on entry means a size probe.
+ */
+static int
+in6_getconninfo(struct socket *so, sae_connid_t cid, uint32_t *flags,
+    uint32_t *ifindex, int32_t *soerror, user_addr_t src, socklen_t *src_len,
+    user_addr_t dst, socklen_t *dst_len, uint32_t *aux_type,
+    user_addr_t aux_data, uint32_t *aux_len)
+{
+#pragma unused(aux_data)
+	struct in6pcb *in6p = sotoin6pcb(so);
+	struct sockaddr_in6 sin6;
+	struct ifnet *ifp = NULL;
+	int error = 0;
+	u_int32_t copy_len = 0;
+
+	/*
+	 * Don't test for INPCB_STATE_DEAD since this may be called
+	 * after SOF_PCBCLEARING is set, e.g. after tcp_close().
+	 */
+	if (in6p == NULL) {
+		error = EINVAL;
+		goto out;
+	}
+
+	/* Only connid 1 (or a wildcard) is valid; see in6_getconnids(). */
+	if (cid != SAE_CONNID_ANY && cid != SAE_CONNID_ALL && cid != 1) {
+		error = EINVAL;
+		goto out;
+	}
+
+	ifp = in6p->in6p_last_outifp;
+	*ifindex = ((ifp != NULL) ? ifp->if_index : 0);
+	*soerror = so->so_error;
+	/* Translate pcb/socket state into connection-info flags. */
+	*flags = 0;
+	if (so->so_state & SS_ISCONNECTED)
+		*flags |= (CIF_CONNECTED | CIF_PREFERRED);
+	if (in6p->in6p_flags & INP_BOUND_IF)
+		*flags |= CIF_BOUND_IF;
+	if (!(in6p->in6p_flags & INP_IN6ADDR_ANY))
+		*flags |= CIF_BOUND_IP;
+	if (!(in6p->in6p_flags & INP_ANONPORT))
+		*flags |= CIF_BOUND_PORT;
+
+	bzero(&sin6, sizeof (sin6));
+	sin6.sin6_len = sizeof (sin6);
+	sin6.sin6_family = AF_INET6;
+
+	/* source address and port */
+	sin6.sin6_port = in6p->in6p_lport;
+	in6_recoverscope(&sin6, &in6p->in6p_laddr, NULL);
+	/* Zero *src_len is a size probe; otherwise copy out (may truncate). */
+	if (*src_len == 0) {
+		*src_len = sin6.sin6_len;
+	} else {
+		if (src != USER_ADDR_NULL) {
+			copy_len = min(*src_len, sizeof (sin6));
+			error = copyout(&sin6, src, copy_len);
+			if (error != 0)
+				goto out;
+			*src_len = copy_len;
+		}
+	}
+
+	/* destination address and port */
+	sin6.sin6_port = in6p->in6p_fport;
+	in6_recoverscope(&sin6, &in6p->in6p_faddr, NULL);
+	if (*dst_len == 0) {
+		*dst_len = sin6.sin6_len;
+	} else {
+		if (dst != USER_ADDR_NULL) {
+			copy_len = min(*dst_len, sizeof (sin6));
+			error = copyout(&sin6, dst, copy_len);
+			if (error != 0)
+				goto out;
+			*dst_len = copy_len;
+		}
+	}
+
+	*aux_type = 0;
+	*aux_len = 0;
+	if (SOCK_PROTO(so) == IPPROTO_TCP) {
+		struct conninfo_tcp tcp_ci;
+
+		*aux_type = CIAUX_TCP;
+		/*
+		 * NOTE(review): *aux_len was zeroed just above, so this test
+		 * is always true and the copyout branch below looks
+		 * unreachable -- confirm whether the caller-supplied *aux_len
+		 * was meant to be consulted here instead.
+		 */
+		if (*aux_len == 0) {
+			*aux_len = sizeof (tcp_ci);
+		} else {
+			if (aux_data != USER_ADDR_NULL) {
+				copy_len = min(*aux_len, sizeof (tcp_ci));
+				bzero(&tcp_ci, sizeof (tcp_ci));
+				tcp_getconninfo(so, &tcp_ci);
+				error = copyout(&tcp_ci, aux_data, copy_len);
+				if (error != 0)
+					goto out;
+				*aux_len = copy_len;
+			}
+		}
+	}
+
+out:
+	return (error);