/*
- * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*-
SLIST_HEAD(mld_in6m_relhead, in6_multi);
-static void mli_initvar(struct mld_ifinfo *, struct ifnet *, int);
+static void mli_initvar(struct mld_ifinfo *, struct ifnet *, int);
static struct mld_ifinfo *mli_alloc(int);
-static void mli_free(struct mld_ifinfo *);
-static void mli_delete(const struct ifnet *, struct mld_in6m_relhead *);
-static void mld_dispatch_packet(struct mbuf *);
-static void mld_final_leave(struct in6_multi *, struct mld_ifinfo *,
- struct mld_tparams *);
-static int mld_handle_state_change(struct in6_multi *, struct mld_ifinfo *,
- struct mld_tparams *);
-static int mld_initial_join(struct in6_multi *, struct mld_ifinfo *,
- struct mld_tparams *, const int);
+static void mli_free(struct mld_ifinfo *);
+static void mli_delete(const struct ifnet *, struct mld_in6m_relhead *);
+static void mld_dispatch_packet(struct mbuf *);
+static void mld_final_leave(struct in6_multi *, struct mld_ifinfo *,
+ struct mld_tparams *);
+static int mld_handle_state_change(struct in6_multi *, struct mld_ifinfo *,
+ struct mld_tparams *);
+static int mld_initial_join(struct in6_multi *, struct mld_ifinfo *,
+ struct mld_tparams *, const int);
#ifdef MLD_DEBUG
-static const char * mld_rec_type_to_str(const int);
+static const char * mld_rec_type_to_str(const int);
#endif
-static uint32_t mld_set_version(struct mld_ifinfo *, const int);
-static void mld_flush_relq(struct mld_ifinfo *, struct mld_in6m_relhead *);
-static void mld_dispatch_queue(struct mld_ifinfo *, struct ifqueue *, int);
-static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
- /*const*/ struct mld_hdr *);
-static int mld_v1_input_report(struct ifnet *, struct mbuf *,
- const struct ip6_hdr *, /*const*/ struct mld_hdr *);
-static void mld_v1_process_group_timer(struct in6_multi *, const int);
-static void mld_v1_process_querier_timers(struct mld_ifinfo *);
-static int mld_v1_transmit_report(struct in6_multi *, const int);
-static uint32_t mld_v1_update_group(struct in6_multi *, const int);
-static void mld_v2_cancel_link_timers(struct mld_ifinfo *);
-static uint32_t mld_v2_dispatch_general_query(struct mld_ifinfo *);
+static uint32_t mld_set_version(struct mld_ifinfo *, const int);
+static void mld_flush_relq(struct mld_ifinfo *, struct mld_in6m_relhead *);
+static void mld_dispatch_queue_locked(struct mld_ifinfo *, struct ifqueue *, int);
+static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
+ /*const*/ struct mld_hdr *);
+static int mld_v1_input_report(struct ifnet *, struct mbuf *,
+ const struct ip6_hdr *, /*const*/ struct mld_hdr *);
+static void mld_v1_process_group_timer(struct in6_multi *, const int);
+static void mld_v1_process_querier_timers(struct mld_ifinfo *);
+static int mld_v1_transmit_report(struct in6_multi *, const int);
+static uint32_t mld_v1_update_group(struct in6_multi *, const int);
+static void mld_v2_cancel_link_timers(struct mld_ifinfo *);
+static uint32_t mld_v2_dispatch_general_query(struct mld_ifinfo *);
static struct mbuf *
- mld_v2_encap_report(struct ifnet *, struct mbuf *);
-static int mld_v2_enqueue_filter_change(struct ifqueue *,
- struct in6_multi *);
-static int mld_v2_enqueue_group_record(struct ifqueue *,
- struct in6_multi *, const int, const int, const int,
- const int);
-static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
- struct mbuf *, const int, const int);
-static int mld_v2_merge_state_changes(struct in6_multi *,
- struct ifqueue *);
-static void mld_v2_process_group_timers(struct mld_ifinfo *,
- struct ifqueue *, struct ifqueue *,
- struct in6_multi *, const int);
-static int mld_v2_process_group_query(struct in6_multi *,
- int, struct mbuf *, const int);
-static int sysctl_mld_gsr SYSCTL_HANDLER_ARGS;
-static int sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS;
-static int sysctl_mld_v2enable SYSCTL_HANDLER_ARGS;
-
-static int mld_timeout_run; /* MLD timer is scheduled to run */
+mld_v2_encap_report(struct ifnet *, struct mbuf *);
+static int mld_v2_enqueue_filter_change(struct ifqueue *,
+ struct in6_multi *);
+static int mld_v2_enqueue_group_record(struct ifqueue *,
+ struct in6_multi *, const int, const int, const int,
+ const int);
+static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
+ struct mbuf *, const int, const int);
+static int mld_v2_merge_state_changes(struct in6_multi *,
+ struct ifqueue *);
+static void mld_v2_process_group_timers(struct mld_ifinfo *,
+ struct ifqueue *, struct ifqueue *,
+ struct in6_multi *, const int);
+static int mld_v2_process_group_query(struct in6_multi *,
+ int, struct mbuf *, const int);
+static int sysctl_mld_gsr SYSCTL_HANDLER_ARGS;
+static int sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS;
+static int sysctl_mld_v2enable SYSCTL_HANDLER_ARGS;
+
+static int mld_timeout_run; /* MLD timer is scheduled to run */
static void mld_timeout(void *);
static void mld_sched_timeout(void);
static int state_change_timers_running6;
static int current_state_timers_running6;
+static unsigned int mld_mli_list_genid;
/*
* Subsystem lock macros.
*/
-#define MLD_LOCK() \
+#define MLD_LOCK() \
lck_mtx_lock(&mld_mtx)
-#define MLD_LOCK_ASSERT_HELD() \
- lck_mtx_assert(&mld_mtx, LCK_MTX_ASSERT_OWNED)
-#define MLD_LOCK_ASSERT_NOTHELD() \
- lck_mtx_assert(&mld_mtx, LCK_MTX_ASSERT_NOTOWNED)
-#define MLD_UNLOCK() \
+#define MLD_LOCK_ASSERT_HELD() \
+ LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_OWNED)
+#define MLD_LOCK_ASSERT_NOTHELD() \
+ LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_NOTOWNED)
+#define MLD_UNLOCK() \
lck_mtx_unlock(&mld_mtx)
-#define MLD_ADD_DETACHED_IN6M(_head, _in6m) { \
- SLIST_INSERT_HEAD(_head, _in6m, in6m_dtle); \
+#define MLD_ADD_DETACHED_IN6M(_head, _in6m) { \
+ SLIST_INSERT_HEAD(_head, _in6m, in6m_dtle); \
}
-#define MLD_REMOVE_DETACHED_IN6M(_head) { \
- struct in6_multi *_in6m, *_inm_tmp; \
- SLIST_FOREACH_SAFE(_in6m, _head, in6m_dtle, _inm_tmp) { \
- SLIST_REMOVE(_head, _in6m, in6_multi, in6m_dtle); \
- IN6M_REMREF(_in6m); \
- } \
- VERIFY(SLIST_EMPTY(_head)); \
+#define MLD_REMOVE_DETACHED_IN6M(_head) { \
+ struct in6_multi *_in6m, *_inm_tmp; \
+ SLIST_FOREACH_SAFE(_in6m, _head, in6m_dtle, _inm_tmp) { \
+ SLIST_REMOVE(_head, _in6m, in6_multi, in6m_dtle); \
+ IN6M_REMREF(_in6m); \
+ } \
+ VERIFY(SLIST_EMPTY(_head)); \
}
-#define MLI_ZONE_MAX 64 /* maximum elements in zone */
-#define MLI_ZONE_NAME "mld_ifinfo" /* zone name */
+#define MLI_ZONE_MAX 64 /* maximum elements in zone */
+#define MLI_ZONE_NAME "mld_ifinfo" /* zone name */
-static unsigned int mli_size; /* size of zone element */
-static struct zone *mli_zone; /* zone for mld_ifinfo */
+static unsigned int mli_size; /* size of zone element */
+static struct zone *mli_zone; /* zone for mld_ifinfo */
-SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */
+SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */
SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
"IPv6 Multicast Listener Discovery");
"Rate limit for MLDv2 Group-and-Source queries in seconds");
SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_LOCKED,
- sysctl_mld_ifinfo, "Per-interface MLDv2 state");
+ sysctl_mld_ifinfo, "Per-interface MLDv2 state");
-static int mld_v1enable = 1;
+static int mld_v1enable = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RW | CTLFLAG_LOCKED,
&mld_v1enable, 0, "Enable fallback to MLDv1");
-static int mld_v2enable = 1;
+static int mld_v2enable = 1;
SYSCTL_PROC(_net_inet6_mld, OID_AUTO, v2enable,
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
&mld_v2enable, 0, sysctl_mld_v2enable, "I",
"Enable MLDv2 (debug purposes only)");
-static int mld_use_allow = 1;
+static int mld_use_allow = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RW | CTLFLAG_LOCKED,
&mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
#ifdef MLD_DEBUG
int mld_debug = 0;
SYSCTL_INT(_net_inet6_mld, OID_AUTO,
- debug, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_debug, 0, "");
+ debug, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_debug, 0, "");
#endif
/*
* Packed Router Alert option structure declaration.
*/
struct mld_raopt {
- struct ip6_hbh hbh;
- struct ip6_opt pad;
- struct ip6_opt_router ra;
+ struct ip6_hbh hbh;
+ struct ip6_opt pad;
+ struct ip6_opt_router ra;
} __packed;
/*
.hbh = { 0, 0 },
.pad = { .ip6o_type = IP6OPT_PADN, 0 },
.ra = {
- .ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT,
- .ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2),
- .ip6or_value = {((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
- (IP6OPT_RTALERT_MLD & 0xFF) }
+ .ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT,
+ .ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2),
+ .ip6or_value = {((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
+ (IP6OPT_RTALERT_MLD & 0xFF) }
}
};
static struct ip6_pktopts mld_po;
/* Store MLDv2 record count in the module private scratch space */
-#define vt_nrecs pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0]
+#define vt_nrecs pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0]
static __inline void
mld_save_context(struct mbuf *m, struct ifnet *ifp)
static __inline struct ifnet *
mld_restore_context(struct mbuf *m)
{
+	/*
+	 * Return the ifnet pointer carried in the mbuf pkthdr
+	 * (presumably stashed there earlier by mld_save_context()
+	 * -- see its declaration above; body not visible here).
+	 */
- return (m->m_pkthdr.rcvif);
+ return m->m_pkthdr.rcvif;
}
/*
i = mld_gsrdelay.tv_sec;
error = sysctl_handle_int(oidp, &i, 0, req);
- if (error || !req->newptr)
+ if (error || !req->newptr) {
goto out_locked;
+ }
if (i < -1 || i >= 60) {
error = EINVAL;
out_locked:
MLD_UNLOCK();
- return (error);
+ return error;
}
/*
* Expose struct mld_ifinfo to userland, keyed by ifindex.
sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
- int *name;
- int error;
- u_int namelen;
- struct ifnet *ifp;
- struct mld_ifinfo *mli;
- struct mld_ifinfo_u mli_u;
+ int *name;
+ int error;
+ u_int namelen;
+ struct ifnet *ifp;
+ struct mld_ifinfo *mli;
+ struct mld_ifinfo_u mli_u;
name = (int *)arg1;
namelen = arg2;
- if (req->newptr != USER_ADDR_NULL)
- return (EPERM);
+ if (req->newptr != USER_ADDR_NULL) {
+ return EPERM;
+ }
- if (namelen != 1)
- return (EINVAL);
+ if (namelen != 1) {
+ return EINVAL;
+ }
MLD_LOCK();
ifnet_head_lock_shared();
ifp = ifindex2ifnet[name[0]];
ifnet_head_done();
- if (ifp == NULL)
+ if (ifp == NULL) {
goto out_locked;
+ }
- bzero(&mli_u, sizeof (mli_u));
+ bzero(&mli_u, sizeof(mli_u));
LIST_FOREACH(mli, &mli_head, mli_link) {
MLI_LOCK(mli);
mli_u.mli_uri = mli->mli_uri;
MLI_UNLOCK(mli);
- error = SYSCTL_OUT(req, &mli_u, sizeof (mli_u));
+ error = SYSCTL_OUT(req, &mli_u, sizeof(mli_u));
break;
}
out_locked:
MLD_UNLOCK();
- return (error);
+ return error;
}
static int
i = mld_v2enable;
error = sysctl_handle_int(oidp, &i, 0, req);
- if (error || !req->newptr)
+ if (error || !req->newptr) {
goto out_locked;
+ }
if (i < 0 || i > 1) {
error = EINVAL;
* the MLD version back to v2. Otherwise, we have to explicitly
* downgrade. Note that this functionality is to be used for debugging.
*/
- if (mld_v2enable == 1)
+ if (mld_v2enable == 1) {
goto out_locked;
+ }
LIST_FOREACH(mli, &mli_head, mli_link) {
MLI_LOCK(mli);
- if (mld_set_version(mli, MLD_VERSION_1) > 0)
+ if (mld_set_version(mli, MLD_VERSION_1) > 0) {
mtp.qpt = 1;
+ }
MLI_UNLOCK(mli);
}
mld_set_timeout(&mtp);
- return (error);
+ return error;
}
/*
* Dispatch an entire queue of pending packet chains.
*
* Must not be called with in6m_lock held.
+ * XXX This routine unlocks MLD global lock and also mli locks.
+ * Make sure that the calling routine takes reference on the mli
+ * before calling this routine.
+ * Also if we are traversing mli_head, remember to check for
+ * mli list generation count and restart the loop if generation count
+ * has changed.
*/
static void
-mld_dispatch_queue(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit)
+mld_dispatch_queue_locked(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit)
{
struct mbuf *m;
+	/*
+	 * Drain packet chains from 'ifq' and hand each one to
+	 * mld_dispatch_packet(), stopping after 'limit' packets
+	 * (limit == 0 effectively means "no limit", since the counter
+	 * only hits zero after wrapping).
+	 *
+	 * Renamed with a _locked suffix because the caller must now hold
+	 * the global MLD lock on entry; both that lock and the per-interface
+	 * 'mli' lock (when mli != NULL) are dropped around each dispatch and
+	 * reacquired afterwards, so callers must hold a reference on 'mli'
+	 * and be prepared for list state to change across this call.
+	 */
- if (mli != NULL)
+ MLD_LOCK_ASSERT_HELD();
+
+ if (mli != NULL) {
MLI_LOCK_ASSERT_HELD(mli);
+ }
for (;;) {
IF_DEQUEUE(ifq, m);
- if (m == NULL)
+ if (m == NULL) {
break;
+ }
MLD_PRINTF(("%s: dispatch 0x%llx from 0x%llx\n", __func__,
(uint64_t)VM_KERNEL_ADDRPERM(ifq),
(uint64_t)VM_KERNEL_ADDRPERM(m)));
- if (mli != NULL)
+
+ if (mli != NULL) {
MLI_UNLOCK(mli);
+ }
+	/* Drop the global lock across the actual transmit path. */
+ MLD_UNLOCK();
+
mld_dispatch_packet(m);
+
+ MLD_LOCK();
+ if (mli != NULL) {
MLI_LOCK(mli);
- if (--limit == 0)
+ }
+
+ if (--limit == 0) {
break;
+ }
}
- if (mli != NULL)
+ if (mli != NULL) {
MLI_LOCK_ASSERT_HELD(mli);
+ }
}
/*
static __inline__ int
mld_is_addr_reported(const struct in6_addr *addr)
{
-
+	/*
+	 * Return 1 if listener reports should be sent for this multicast
+	 * address, 0 otherwise.  Suppresses reports for node-local scope
+	 * groups and for the link-local all-nodes group (compared with the
+	 * embedded scope ID cleared).
+	 */
VERIFY(IN6_IS_ADDR_MULTICAST(addr));
- if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL)
- return (0);
+ if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL) {
+ return 0;
+ }
if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) {
struct in6_addr tmp = *addr;
in6_clearscope(&tmp);
- if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes))
- return (0);
+ if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes)) {
+ return 0;
+ }
}
- return (1);
+ return 1;
}
/*
(uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
mli = mli_alloc(how);
- if (mli == NULL)
- return (NULL);
+ if (mli == NULL) {
+ return NULL;
+ }
MLD_LOCK();
ifnet_lock_done(ifp);
LIST_INSERT_HEAD(&mli_head, mli, mli_link);
+ mld_mli_list_genid++;
MLD_UNLOCK();
MLD_PRINTF(("%s: allocate mld_ifinfo for ifp 0x%llx(%s)\n",
__func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
- return (mli);
+ return mli;
}
/*
ifnet_lock_done(ifp);
LIST_INSERT_HEAD(&mli_head, mli, mli_link);
+ mld_mli_list_genid++;
MLD_UNLOCK();
void
mld_domifdetach(struct ifnet *ifp)
{
- SLIST_HEAD(, in6_multi) in6m_dthead;
+ SLIST_HEAD(, in6_multi) in6m_dthead;
SLIST_INIT(&in6m_dthead);
LIST_REMOVE(mli, mli_link);
MLI_REMREF(mli); /* release mli_head reference */
+ mld_mli_list_genid++;
return;
}
MLI_UNLOCK(mli);
MLI_LOCK_ASSERT_NOTHELD(mli);
MLI_LOCK(mli);
if (!(ifp->if_flags & IFF_MULTICAST) &&
- (ifp->if_eflags & (IFEF_IPV6_ND6ALT|IFEF_LOCALNET_PRIVATE)))
+ (ifp->if_eflags & (IFEF_IPV6_ND6ALT | IFEF_LOCALNET_PRIVATE))) {
mli->mli_flags |= MLIF_SILENT;
- else
+ } else {
mli->mli_flags &= ~MLIF_SILENT;
+ }
MLI_UNLOCK(mli);
}
MLI_LOCK_ASSERT_HELD(mli);
mli->mli_ifp = ifp;
- if (mld_v2enable)
+ if (mld_v2enable) {
mli->mli_version = MLD_VERSION_2;
- else
+ } else {
mli->mli_version = MLD_VERSION_1;
+ }
mli->mli_flags = 0;
mli->mli_rv = MLD_RV_INIT;
mli->mli_qi = MLD_QI_INIT;
mli->mli_qri = MLD_QRI_INIT;
mli->mli_uri = MLD_URI_INIT;
- if (mld_use_allow)
+ if (mld_use_allow) {
mli->mli_flags |= MLIF_USEALLOW;
- if (!reattach)
+ }
+ if (!reattach) {
SLIST_INIT(&mli->mli_relinmhead);
+ }
/*
* Responses to general queries are subject to bounds.
lck_mtx_init(&mli->mli_lock, mld_mtx_grp, mld_mtx_attr);
mli->mli_debug |= IFD_ALLOC;
}
- return (mli);
+ return mli;
}
static void
void
mli_addref(struct mld_ifinfo *mli, int locked)
{
+	/*
+	 * Take an additional reference on 'mli'.  'locked' indicates
+	 * whether the caller already holds the mli lock; if not, it is
+	 * taken (spin) for the increment and released afterwards.
+	 * Panics if the refcount wraps around to zero.
+	 */
- if (!locked)
+ if (!locked) {
MLI_LOCK_SPIN(mli);
- else
+ } else {
MLI_LOCK_ASSERT_HELD(mli);
+ }
if (++mli->mli_refcnt == 0) {
panic("%s: mli=%p wraparound refcnt", __func__, mli);
/* NOTREACHED */
}
- if (!locked)
+ if (!locked) {
MLI_UNLOCK(mli);
+ }
}
void
mli_remref(struct mld_ifinfo *mli)
{
- SLIST_HEAD(, in6_multi) in6m_dthead;
+ SLIST_HEAD(, in6_multi) in6m_dthead;
struct ifnet *ifp;
MLI_LOCK_SPIN(mli);
mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
/*const*/ struct mld_hdr *mld)
{
- struct mld_ifinfo *mli;
- struct in6_multi *inm;
- int err = 0, is_general_query;
- uint16_t timer;
- struct mld_tparams mtp = { 0, 0, 0, 0 };
+ struct mld_ifinfo *mli;
+ struct in6_multi *inm;
+ int err = 0, is_general_query;
+ uint16_t timer;
+ struct mld_tparams mtp = { 0, 0, 0, 0 };
MLD_LOCK_ASSERT_NOTHELD();
* MLDv1 General Query.
* If this was not sent to the all-nodes group, ignore it.
*/
- struct in6_addr dst;
+ struct in6_addr dst;
dst = ip6->ip6_dst;
in6_clearscope(&dst);
MLI_UNLOCK(mli);
timer = ntohs(mld->mld_maxdelay) / MLD_TIMER_SCALE;
- if (timer == 0)
+ if (timer == 0) {
timer = 1;
+ }
if (is_general_query) {
struct in6_multistep step;
IN6_FIRST_MULTI(step, inm);
while (inm != NULL) {
IN6M_LOCK(inm);
- if (inm->in6m_ifp == ifp)
+ if (inm->in6m_ifp == ifp) {
mtp.cst += mld_v1_update_group(inm, timer);
+ }
IN6M_UNLOCK(inm);
IN6_NEXT_MULTI(step, inm);
}
done:
mld_set_timeout(&mtp);
- return (err);
+ return err;
}
/*
* We may be updating the group for the first time since we switched
* to MLDv2. If we are, then we must clear any recorded source lists,
* and transition to REPORTING state; the group timer is overloaded
- * for group and group-source query responses.
+ * for group and group-source query responses.
*
* Unlike MLDv2, the delay per group should be jittered
* to avoid bursts of MLDv1 reports.
"skipping.\n", __func__));
break;
}
- /* FALLTHROUGH */
+ /* FALLTHROUGH */
case MLD_SG_QUERY_PENDING_MEMBER:
case MLD_G_QUERY_PENDING_MEMBER:
case MLD_IDLE_MEMBER:
break;
}
- return (inm->in6m_timer);
+ return inm->in6m_timer;
}
/*
mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
struct mbuf *m, const int off, const int icmp6len)
{
- struct mld_ifinfo *mli;
- struct mldv2_query *mld;
- struct in6_multi *inm;
- uint32_t maxdelay, nsrc, qqi;
- int err = 0, is_general_query;
- uint16_t timer;
- uint8_t qrv;
- struct mld_tparams mtp = { 0, 0, 0, 0 };
+ struct mld_ifinfo *mli;
+ struct mldv2_query *mld;
+ struct in6_multi *inm;
+ uint32_t maxdelay, nsrc, qqi;
+ int err = 0, is_general_query;
+ uint16_t timer;
+ uint8_t qrv;
+ struct mld_tparams mtp = { 0, 0, 0, 0 };
MLD_LOCK_ASSERT_NOTHELD();
mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off);
- maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */
+ maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */
if (maxdelay >= 32768) {
maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) <<
- (MLD_MRC_EXP(maxdelay) + 3);
+ (MLD_MRC_EXP(maxdelay) + 3);
}
timer = maxdelay / MLD_TIMER_SCALE;
- if (timer == 0)
+ if (timer == 0) {
timer = 1;
+ }
qrv = MLD_QRV(mld->mld_misc);
if (qrv < 2) {
qqi = mld->mld_qqi;
if (qqi >= 128) {
qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
- (MLD_QQIC_EXP(mld->mld_qqi) + 3);
+ (MLD_QQIC_EXP(mld->mld_qqi) + 3);
}
nsrc = ntohs(mld->mld_numsrc);
in6_multihead_lock_shared();
IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
in6_multihead_lock_done();
- if (inm == NULL)
+ if (inm == NULL) {
goto done;
+ }
IN6M_LOCK(inm);
if (nsrc > 0) {
}
mld_set_timeout(&mtp);
- return (err);
+ return err;
}
/*
mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0,
const int off)
{
- struct mldv2_query *mld;
- int retval;
- uint16_t nsrc;
+ struct mldv2_query *mld;
+ int retval;
+ uint16_t nsrc;
IN6M_LOCK_ASSERT_HELD(inm);
case MLD_AWAKENING_MEMBER:
case MLD_IDLE_MEMBER:
case MLD_LEAVING_MEMBER:
- return (retval);
- break;
+ return retval;
case MLD_REPORTING_MEMBER:
case MLD_G_QUERY_PENDING_MEMBER:
case MLD_SG_QUERY_PENDING_MEMBER:
}
inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
inm->in6m_timer = MLD_RANDOM_DELAY(timer);
- return (retval);
+ return retval;
}
/*
if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
timer = min(inm->in6m_timer, timer);
inm->in6m_timer = MLD_RANDOM_DELAY(timer);
- return (retval);
+ return retval;
}
/*
* report for those sources.
*/
if (inm->in6m_nsrc > 0) {
- struct mbuf *m;
- uint8_t *sp;
- int i, nrecorded;
- int soff;
+ struct mbuf *m;
+ uint8_t *sp;
+ int i, nrecorded;
+ int soff;
m = m0;
soff = off + sizeof(struct mldv2_query);
sp = mtod(m, uint8_t *) + soff;
retval = in6m_record_source(inm,
(const struct in6_addr *)(void *)sp);
- if (retval < 0)
+ if (retval < 0) {
break;
+ }
nrecorded += retval;
soff += sizeof(struct in6_addr);
if (soff >= m->m_len) {
soff = soff - m->m_len;
m = m->m_next;
- if (m == NULL)
+ if (m == NULL) {
break;
+ }
}
}
if (nrecorded > 0) {
- MLD_PRINTF(( "%s: schedule response to SG query\n",
+ MLD_PRINTF(("%s: schedule response to SG query\n",
__func__));
inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
inm->in6m_timer = MLD_RANDOM_DELAY(timer);
}
}
- return (retval);
+ return retval;
}
/*
mld_v1_input_report(struct ifnet *ifp, struct mbuf *m,
const struct ip6_hdr *ip6, /*const*/ struct mld_hdr *mld)
{
- struct in6_addr src, dst;
- struct in6_ifaddr *ia;
- struct in6_multi *inm;
+ struct in6_addr src, dst;
+ struct in6_ifaddr *ia;
+ struct in6_multi *inm;
if (!mld_v1enable) {
MLD_PRINTF(("%s: ignore v1 report %s on ifp 0x%llx(%s)\n",
__func__, ip6_sprintf(&mld->mld_addr),
(uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
- return (0);
+ return 0;
}
if ((ifp->if_flags & IFF_LOOPBACK) ||
- (m->m_pkthdr.pkt_flags & PKTF_LOOP))
- return (0);
+ (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
+ return 0;
+ }
/*
* MLDv1 reports must originate from a host's link-local address,
MLD_PRINTF(("%s: ignore v1 query src %s on ifp 0x%llx(%s)\n",
__func__, ip6_sprintf(&ip6->ip6_src),
(uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
- return (EINVAL);
+ return EINVAL;
}
/*
MLD_PRINTF(("%s: ignore v1 query dst %s on ifp 0x%llx(%s)\n",
__func__, ip6_sprintf(&ip6->ip6_dst),
(uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
- return (EINVAL);
+ return EINVAL;
}
/*
* returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
* performed for the on-wire address.
*/
- ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
+ ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
if (ia != NULL) {
IFA_LOCK(&ia->ia_ifa);
- if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia)))){
+ if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia)))) {
IFA_UNLOCK(&ia->ia_ifa);
IFA_REMREF(&ia->ia_ifa);
- return (0);
+ return 0;
}
IFA_UNLOCK(&ia->ia_ifa);
IFA_REMREF(&ia->ia_ifa);
} else if (IN6_IS_ADDR_UNSPECIFIED(&src)) {
- return (0);
+ return 0;
}
MLD_PRINTF(("%s: process v1 report %s on ifp 0x%llx(%s)\n",
* Embed scope ID of receiving interface in MLD query for lookup
* whilst we don't hold other locks (due to KAME locking lameness).
*/
- if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
+ if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
in6_setscope(&mld->mld_addr, ifp, NULL);
+ }
/*
* MLDv1 report suppression.
/* XXX Clear embedded scope ID as userland won't expect it. */
in6_clearscope(&mld->mld_addr);
- return (0);
+ return 0;
}
/*
int
mld_input(struct mbuf *m, int off, int icmp6len)
{
- struct ifnet *ifp;
- struct ip6_hdr *ip6;
- struct mld_hdr *mld;
- int mldlen;
+ struct ifnet *ifp;
+ struct ip6_hdr *ip6;
+ struct mld_hdr *mld;
+ int mldlen;
MLD_PRINTF(("%s: called w/mbuf (0x%llx,%d)\n", __func__,
(uint64_t)VM_KERNEL_ADDRPERM(m), off));
} else {
mldlen = sizeof(struct mld_hdr);
}
+ // check if mldv2_query/mld_hdr fits in the first mbuf
+ IP6_EXTHDR_CHECK(m, off, mldlen, return IPPROTO_DONE);
IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen);
if (mld == NULL) {
icmp6stat.icp6s_badlen++;
- return (IPPROTO_DONE);
+ return IPPROTO_DONE;
}
/*
case MLD_LISTENER_QUERY:
icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
if (icmp6len == sizeof(struct mld_hdr)) {
- if (mld_v1_input_query(ifp, ip6, mld) != 0)
- return (0);
+ if (mld_v1_input_query(ifp, ip6, mld) != 0) {
+ return 0;
+ }
} else if (icmp6len >= sizeof(struct mldv2_query)) {
if (mld_v2_input_query(ifp, ip6, m, off,
- icmp6len) != 0)
- return (0);
+ icmp6len) != 0) {
+ return 0;
+ }
}
break;
case MLD_LISTENER_REPORT:
icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
- if (mld_v1_input_report(ifp, m, ip6, mld) != 0)
- return (0);
+ if (mld_v1_input_report(ifp, m, ip6, mld) != 0) {
+ return 0;
+ }
break;
case MLDV2_LISTENER_REPORT:
icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
break;
}
- return (0);
+ return 0;
}
/*
if (mtp->qpt != 0 || mtp->it != 0 || mtp->cst != 0 || mtp->sct != 0) {
MLD_LOCK();
- if (mtp->qpt != 0)
+ if (mtp->qpt != 0) {
querier_present_timers_running6 = 1;
- if (mtp->it != 0)
+ }
+ if (mtp->it != 0) {
interface_timers_running6 = 1;
- if (mtp->cst != 0)
+ }
+ if (mtp->cst != 0) {
current_state_timers_running6 = 1;
- if (mtp->sct != 0)
+ }
+ if (mtp->sct != 0) {
state_change_timers_running6 = 1;
+ }
mld_sched_timeout();
MLD_UNLOCK();
}
mld_timeout(void *arg)
{
#pragma unused(arg)
- struct ifqueue scq; /* State-change packets */
- struct ifqueue qrq; /* Query response packets */
- struct ifnet *ifp;
- struct mld_ifinfo *mli;
- struct in6_multi *inm;
- int uri_sec = 0;
- SLIST_HEAD(, in6_multi) in6m_dthead;
+ struct ifqueue scq; /* State-change packets */
+ struct ifqueue qrq; /* Query response packets */
+ struct ifnet *ifp;
+ struct mld_ifinfo *mli;
+ struct in6_multi *inm;
+ int uri_sec = 0;
+ unsigned int genid = mld_mli_list_genid;
+
+ SLIST_HEAD(, in6_multi) in6m_dthead;
SLIST_INIT(&in6m_dthead);
LIST_FOREACH(mli, &mli_head, mli_link) {
MLI_LOCK(mli);
mld_v1_process_querier_timers(mli);
- if (mli->mli_v1_timer > 0)
+ if (mli->mli_v1_timer > 0) {
querier_present_timers_running6 = 1;
+ }
MLI_UNLOCK(mli);
}
}
if (interface_timers_running6) {
MLD_PRINTF(("%s: interface timers running\n", __func__));
interface_timers_running6 = 0;
- LIST_FOREACH(mli, &mli_head, mli_link) {
+ mli = LIST_FIRST(&mli_head);
+
+ while (mli != NULL) {
+ if (mli->mli_flags & MLIF_PROCESSED) {
+ mli = LIST_NEXT(mli, mli_link);
+ continue;
+ }
+
MLI_LOCK(mli);
if (mli->mli_version != MLD_VERSION_2) {
MLI_UNLOCK(mli);
+ mli = LIST_NEXT(mli, mli_link);
continue;
}
+ /*
+ * XXX The logic below ends up calling
+ * mld_dispatch_packet which can unlock mli
+ * and the global MLD lock.
+ * Therefore grab a reference on MLI and also
+ * check for generation count to see if we should
+ * iterate the list again.
+ */
+ MLI_ADDREF_LOCKED(mli);
+
if (mli->mli_v2_timer == 0) {
/* Do nothing. */
} else if (--mli->mli_v2_timer == 0) {
- if (mld_v2_dispatch_general_query(mli) > 0)
+ if (mld_v2_dispatch_general_query(mli) > 0) {
interface_timers_running6 = 1;
+ }
} else {
interface_timers_running6 = 1;
}
+ mli->mli_flags |= MLIF_PROCESSED;
MLI_UNLOCK(mli);
+ MLI_REMREF(mli);
+
+ if (genid != mld_mli_list_genid) {
+ MLD_PRINTF(("%s: MLD information list changed "
+ "in the middle of iteration! Restart iteration.\n",
+ __func__));
+ mli = LIST_FIRST(&mli_head);
+ genid = mld_mli_list_genid;
+ } else {
+ mli = LIST_NEXT(mli, mli_link);
+ }
}
+
+ LIST_FOREACH(mli, &mli_head, mli_link)
+ mli->mli_flags &= ~MLIF_PROCESSED;
}
+
+
if (!current_state_timers_running6 &&
- !state_change_timers_running6)
+ !state_change_timers_running6) {
goto out_locked;
+ }
current_state_timers_running6 = 0;
state_change_timers_running6 = 0;
* MLD host report and state-change timer processing.
* Note: Processing a v2 group timer may remove a node.
*/
- LIST_FOREACH(mli, &mli_head, mli_link) {
+ mli = LIST_FIRST(&mli_head);
+
+ while (mli != NULL) {
struct in6_multistep step;
+ if (mli->mli_flags & MLIF_PROCESSED) {
+ mli = LIST_NEXT(mli, mli_link);
+ continue;
+ }
+
MLI_LOCK(mli);
ifp = mli->mli_ifp;
uri_sec = MLD_RANDOM_DELAY(mli->mli_uri);
IN6_FIRST_MULTI(step, inm);
while (inm != NULL) {
IN6M_LOCK(inm);
- if (inm->in6m_ifp != ifp)
+ if (inm->in6m_ifp != ifp) {
goto next;
+ }
MLI_LOCK(mli);
switch (mli->mli_version) {
}
in6_multihead_lock_done();
+ /*
+ * XXX The logic below ends up calling
+ * mld_dispatch_packet which can unlock mli
+ * and the global MLD lock.
+ * Therefore grab a reference on MLI and also
+ * check for generation count to see if we should
+ * iterate the list again.
+ */
MLI_LOCK(mli);
+ MLI_ADDREF_LOCKED(mli);
if (mli->mli_version == MLD_VERSION_1) {
- mld_dispatch_queue(mli, &mli->mli_v1q, 0);
+ mld_dispatch_queue_locked(mli, &mli->mli_v1q, 0);
} else if (mli->mli_version == MLD_VERSION_2) {
MLI_UNLOCK(mli);
- mld_dispatch_queue(NULL, &qrq, 0);
- mld_dispatch_queue(NULL, &scq, 0);
+ mld_dispatch_queue_locked(NULL, &qrq, 0);
+ mld_dispatch_queue_locked(NULL, &scq, 0);
VERIFY(qrq.ifq_len == 0);
VERIFY(scq.ifq_len == 0);
MLI_LOCK(mli);
*/
mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead);
VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
+ mli->mli_flags |= MLIF_PROCESSED;
MLI_UNLOCK(mli);
+ MLI_REMREF(mli);
IF_DRAIN(&qrq);
IF_DRAIN(&scq);
+
+ if (genid != mld_mli_list_genid) {
+ MLD_PRINTF(("%s: MLD information list changed "
+ "in the middle of iteration! Restart iteration.\n",
+ __func__));
+ mli = LIST_FIRST(&mli_head);
+ genid = mld_mli_list_genid;
+ } else {
+ mli = LIST_NEXT(mli, mli_link);
+ }
}
+ LIST_FOREACH(mli, &mli_head, mli_link)
+ mli->mli_flags &= ~MLIF_PROCESSED;
+
out_locked:
/* re-arm the timer if there's work to do */
mld_timeout_run = 0;
if (report_timer_expired) {
inm->in6m_state = MLD_IDLE_MEMBER;
(void) mld_v1_transmit_report(inm,
- MLD_LISTENER_REPORT);
+ MLD_LISTENER_REPORT);
IN6M_LOCK_ASSERT_HELD(inm);
MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
}
/* We are in timer callback, so be quick about it. */
if (!state_change_retransmit_timer_expired &&
- !query_response_timer_expired)
+ !query_response_timer_expired) {
return;
+ }
switch (inm->in6m_state) {
case MLD_NOT_MEMBER:
inm->in6m_state = MLD_REPORTING_MEMBER;
in6m_clear_recorded(inm);
}
- /* FALLTHROUGH */
+ /* FALLTHROUGH */
case MLD_REPORTING_MEMBER:
case MLD_LEAVING_MEMBER:
if (state_change_retransmit_timer_expired) {
MLI_LOCK_ASSERT_HELD(mli);
- return (mli->mli_v1_timer);
+ return mli->mli_v1_timer;
}
/*
static void
mld_v2_cancel_link_timers(struct mld_ifinfo *mli)
{
- struct ifnet *ifp;
- struct in6_multi *inm;
- struct in6_multistep step;
+ struct ifnet *ifp;
+ struct in6_multi *inm;
+ struct in6_multistep step;
MLI_LOCK_ASSERT_HELD(mli);
IN6_FIRST_MULTI(step, inm);
while (inm != NULL) {
IN6M_LOCK(inm);
- if (inm->in6m_ifp != ifp)
+ if (inm->in6m_ifp != ifp) {
goto next;
+ }
switch (inm->in6m_state) {
case MLD_NOT_MEMBER:
SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
in6m_nrele);
MLI_UNLOCK(mli);
- /* FALLTHROUGH */
+ /* FALLTHROUGH */
case MLD_G_QUERY_PENDING_MEMBER:
case MLD_SG_QUERY_PENDING_MEMBER:
in6m_clear_recorded(inm);
- /* FALLTHROUGH */
+ /* FALLTHROUGH */
case MLD_REPORTING_MEMBER:
inm->in6m_state = MLD_REPORTING_MEMBER;
break;
static int
mld_v1_transmit_report(struct in6_multi *in6m, const int type)
{
- struct ifnet *ifp;
- struct in6_ifaddr *ia;
- struct ip6_hdr *ip6;
- struct mbuf *mh, *md;
- struct mld_hdr *mld;
- int error = 0;
+ struct ifnet *ifp;
+ struct in6_ifaddr *ia;
+ struct ip6_hdr *ip6;
+ struct mbuf *mh, *md;
+ struct mld_hdr *mld;
+ int error = 0;
IN6M_LOCK_ASSERT_HELD(in6m);
MLI_LOCK_ASSERT_HELD(in6m->in6m_mli);
ifp = in6m->in6m_ifp;
/* ia may be NULL if link-local address is tentative. */
- ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
+ ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
MGETHDR(mh, M_DONTWAIT, MT_HEADER);
if (mh == NULL) {
- if (ia != NULL)
+ if (ia != NULL) {
IFA_REMREF(&ia->ia_ifa);
- return (ENOMEM);
+ }
+ return ENOMEM;
}
MGET(md, M_DONTWAIT, MT_DATA);
if (md == NULL) {
m_free(mh);
- if (ia != NULL)
+ if (ia != NULL) {
IFA_REMREF(&ia->ia_ifa);
- return (ENOMEM);
+ }
+ return ENOMEM;
}
mh->m_next = md;
ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
ip6->ip6_vfc |= IPV6_VERSION;
ip6->ip6_nxt = IPPROTO_ICMPV6;
- if (ia != NULL)
+ if (ia != NULL) {
IFA_LOCK(&ia->ia_ifa);
+ }
ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
if (ia != NULL) {
IFA_UNLOCK(&ia->ia_ifa);
* Instead we defer the work to the mld_timeout() thread, thus
* avoiding unlocking in_multihead_lock here.
*/
- if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) {
- MLD_PRINTF(("%s: v1 outbound queue full\n", __func__));
- error = ENOMEM;
- m_freem(mh);
- } else {
- IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh);
+ if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) {
+ MLD_PRINTF(("%s: v1 outbound queue full\n", __func__));
+ error = ENOMEM;
+ m_freem(mh);
+ } else {
+ IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh);
VERIFY(error == 0);
}
- return (error);
+ return error;
}
/*
int error = 0;
VERIFY(mtp != NULL);
- bzero(mtp, sizeof (*mtp));
+ bzero(mtp, sizeof(*mtp));
IN6M_LOCK_ASSERT_HELD(inm);
VERIFY(inm->in6m_mli != NULL);
error = mld_handle_state_change(inm, mli, mtp);
out:
- return (error);
+ return error;
}
/*
mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli,
struct mld_tparams *mtp, const int delay)
{
- struct ifnet *ifp;
- struct ifqueue *ifq;
- int error, retval, syncstates;
- int odelay;
+ struct ifnet *ifp;
+ struct ifqueue *ifq;
+ int error, retval, syncstates;
+ int odelay;
IN6M_LOCK_ASSERT_HELD(inm);
MLI_LOCK_ASSERT_NOTHELD(mli);
if ((ifp->if_flags & IFF_LOOPBACK) ||
(mli->mli_flags & MLIF_SILENT) ||
(IFNET_IS_CELLULAR(ifp) &&
- IN6_IS_ADDR_MC_LINKLOCAL(&inm->in6m_addr)) ||
+ IN6_IS_ADDR_MC_LINKLOCAL(&inm->in6m_addr)) ||
!mld_is_addr_reported(&inm->in6m_addr)) {
MLD_PRINTF(("%s: not kicking state machine for silent group\n",
__func__));
} else {
inm->in6m_state = MLD_IDLE_MEMBER;
error = mld_v1_transmit_report(inm,
- MLD_LISTENER_REPORT);
+ MLD_LISTENER_REPORT);
IN6M_LOCK_ASSERT_HELD(inm);
MLI_LOCK_ASSERT_HELD(mli);
if (inm->in6m_sctimer > 1) {
inm->in6m_sctimer =
min(inm->in6m_sctimer, delay);
- } else
+ } else {
inm->in6m_sctimer = delay;
+ }
} else {
inm->in6m_sctimer = 1;
}
if_name(inm->in6m_ifp)));
}
- return (error);
+ return error;
}
/*
mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli,
struct mld_tparams *mtp)
{
- struct ifnet *ifp;
- int retval = 0;
+ struct ifnet *ifp;
+ int retval = 0;
IN6M_LOCK_ASSERT_HELD(inm);
MLI_LOCK_ASSERT_NOTHELD(mli);
MLI_UNLOCK(mli);
done:
- return (retval);
+ return retval;
}
/*
VERIFY(inm->in6m_nrelecnt != 0);
retval = mld_v2_enqueue_group_record(
- &inm->in6m_scq, inm, 1, 0, 0,
- (mli->mli_flags & MLIF_USEALLOW));
+ &inm->in6m_scq, inm, 1, 0, 0,
+ (mli->mli_flags & MLIF_USEALLOW));
mtp->cst = (inm->in6m_scq.ifq_len > 0);
KASSERT(retval != 0,
("%s: enqueue record = %d\n", __func__,
- retval));
+ retval));
inm->in6m_state = MLD_LEAVING_MEMBER;
inm->in6m_sctimer = 1;
const int is_state_change, const int is_group_query,
const int is_source_query, const int use_block_allow)
{
- struct mldv2_record mr;
- struct mldv2_record *pmr;
- struct ifnet *ifp;
- struct ip6_msource *ims, *nims;
- struct mbuf *m0, *m, *md;
- int error, is_filter_list_change;
- int minrec0len, m0srcs, msrcs, nbytes, off;
- int record_has_sources;
- int now;
- int type;
- uint8_t mode;
+ struct mldv2_record mr;
+ struct mldv2_record *pmr;
+ struct ifnet *ifp;
+ struct ip6_msource *ims, *nims;
+ struct mbuf *m0, *m, *md;
+ int error, is_filter_list_change;
+ int minrec0len, m0srcs, msrcs, nbytes, off;
+ int record_has_sources;
+ int now;
+ int type;
+ uint8_t mode;
IN6M_LOCK_ASSERT_HELD(inm);
MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
* the generation of source records.
*/
if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
- inm->in6m_nsrc == 0)
+ inm->in6m_nsrc == 0) {
record_has_sources = 0;
+ }
if (is_state_change) {
/*
}
} else {
type = MLD_CHANGE_TO_INCLUDE_MODE;
- if (mode == MCAST_UNDEFINED)
+ if (mode == MCAST_UNDEFINED) {
record_has_sources = 0;
+ }
}
}
} else {
/*
* Generate the filter list changes using a separate function.
*/
- if (is_filter_list_change)
- return (mld_v2_enqueue_filter_change(ifq, inm));
+ if (is_filter_list_change) {
+ return mld_v2_enqueue_filter_change(ifq, inm);
+ }
if (type == MLD_DO_NOTHING) {
MLD_PRINTF(("%s: nothing to do for %s/%s\n",
__func__, ip6_sprintf(&inm->in6m_addr),
if_name(inm->in6m_ifp)));
- return (0);
+ return 0;
}
/*
* ideally more.
*/
minrec0len = sizeof(struct mldv2_record);
- if (record_has_sources)
+ if (record_has_sources) {
minrec0len += sizeof(struct in6_addr);
+ }
MLD_PRINTF(("%s: queueing %s for %s/%s\n", __func__,
mld_rec_type_to_str(type),
ip6_sprintf(&inm->in6m_addr),
m0 != NULL &&
(m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
(m0->m_pkthdr.len + minrec0len) <
- (ifp->if_mtu - MLD_MTUSPACE)) {
+ (ifp->if_mtu - MLD_MTUSPACE)) {
m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
- sizeof(struct mldv2_record)) /
- sizeof(struct in6_addr);
+ sizeof(struct mldv2_record)) /
+ sizeof(struct in6_addr);
m = m0;
MLD_PRINTF(("%s: use existing packet\n", __func__));
} else {
if (IF_QFULL(ifq)) {
MLD_PRINTF(("%s: outbound queue full\n", __func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
m = NULL;
m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
- if (!is_state_change && !is_group_query)
+ if (!is_state_change && !is_group_query) {
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
- if (m == NULL)
+ }
+ if (m == NULL) {
m = m_gethdr(M_DONTWAIT, MT_DATA);
- if (m == NULL)
- return (-ENOMEM);
+ }
+ if (m == NULL) {
+ return -ENOMEM;
+ }
mld_save_context(m, ifp);
mr.mr_addr = inm->in6m_addr;
in6_clearscope(&mr.mr_addr);
if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
- if (m != m0)
+ if (m != m0) {
m_freem(m);
+ }
MLD_PRINTF(("%s: m_append() failed.\n", __func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
nbytes += sizeof(struct mldv2_record);
MLD_PRINTF(("%s: node is %d\n", __func__, now));
if ((now != mode) ||
(now == mode &&
- (!use_block_allow && mode == MCAST_UNDEFINED))) {
+ (!use_block_allow && mode == MCAST_UNDEFINED))) {
MLD_PRINTF(("%s: skip node\n", __func__));
continue;
}
MLD_PRINTF(("%s: append node\n", __func__));
if (!m_append(m, sizeof(struct in6_addr),
(void *)&ims->im6s_addr)) {
- if (m != m0)
+ if (m != m0) {
m_freem(m);
+ }
MLD_PRINTF(("%s: m_append() failed.\n",
__func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
nbytes += sizeof(struct in6_addr);
++msrcs;
- if (msrcs == m0srcs)
+ if (msrcs == m0srcs) {
break;
+ }
}
MLD_PRINTF(("%s: msrcs is %d this packet\n", __func__,
msrcs));
if (is_source_query && msrcs == 0) {
MLD_PRINTF(("%s: no recorded sources to report\n", __func__));
- if (m != m0)
+ if (m != m0) {
m_freem(m);
- return (0);
+ }
+ return 0;
}
/*
/*
* No further work needed if no source list in packet(s).
*/
- if (!record_has_sources)
- return (nbytes);
+ if (!record_has_sources) {
+ return nbytes;
+ }
/*
* Whilst sources remain to be announced, we need to allocate
while (nims != NULL) {
if (IF_QFULL(ifq)) {
MLD_PRINTF(("%s: outbound queue full\n", __func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
- if (m == NULL)
+ if (m == NULL) {
m = m_gethdr(M_DONTWAIT, MT_DATA);
- if (m == NULL)
- return (-ENOMEM);
+ }
+ if (m == NULL) {
+ return -ENOMEM;
+ }
mld_save_context(m, ifp);
md = m_getptr(m, 0, &off);
pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
MLD_PRINTF(("%s: allocated next packet\n", __func__));
if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
- if (m != m0)
+ if (m != m0) {
m_freem(m);
+ }
MLD_PRINTF(("%s: m_append() failed.\n", __func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
m->m_pkthdr.vt_nrecs = 1;
nbytes += sizeof(struct mldv2_record);
now = im6s_get_mode(inm, ims, 1);
if ((now != mode) ||
(now == mode &&
- (!use_block_allow && mode == MCAST_UNDEFINED))) {
+ (!use_block_allow && mode == MCAST_UNDEFINED))) {
MLD_PRINTF(("%s: skip node\n", __func__));
continue;
}
MLD_PRINTF(("%s: append node\n", __func__));
if (!m_append(m, sizeof(struct in6_addr),
(void *)&ims->im6s_addr)) {
- if (m != m0)
+ if (m != m0) {
m_freem(m);
+ }
MLD_PRINTF(("%s: m_append() failed.\n",
__func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
++msrcs;
- if (msrcs == m0srcs)
+ if (msrcs == m0srcs) {
break;
+ }
}
pmr->mr_numsrc = htons(msrcs);
nbytes += (msrcs * sizeof(struct in6_addr));
IF_ENQUEUE(ifq, m);
}
- return (nbytes);
+ return nbytes;
}
/*
* current filter modes on each ip_msource node.
*/
typedef enum {
- REC_NONE = 0x00, /* MCAST_UNDEFINED */
- REC_ALLOW = 0x01, /* MCAST_INCLUDE */
- REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
+ REC_NONE = 0x00, /* MCAST_UNDEFINED */
+ REC_ALLOW = 0x01, /* MCAST_INCLUDE */
+ REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
REC_FULL = REC_ALLOW | REC_BLOCK
} rectype_t;
{
static const int MINRECLEN =
sizeof(struct mldv2_record) + sizeof(struct in6_addr);
- struct ifnet *ifp;
- struct mldv2_record mr;
- struct mldv2_record *pmr;
- struct ip6_msource *ims, *nims;
- struct mbuf *m, *m0, *md;
- int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
- int nallow, nblock;
- uint8_t mode, now, then;
- rectype_t crt, drt, nrt;
+ struct ifnet *ifp;
+ struct mldv2_record mr;
+ struct mldv2_record *pmr;
+ struct ip6_msource *ims, *nims;
+ struct mbuf *m, *m0, *md;
+ int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
+ int nallow, nblock;
+ uint8_t mode, now, then;
+ rectype_t crt, drt, nrt;
IN6M_LOCK_ASSERT_HELD(inm);
if (inm->in6m_nsrc == 0 ||
- (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
- return (0);
-
- ifp = inm->in6m_ifp; /* interface */
- mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */
- crt = REC_NONE; /* current group record type */
- drt = REC_NONE; /* mask of completed group record types */
- nrt = REC_NONE; /* record type for current node */
- m0srcs = 0; /* # source which will fit in current mbuf chain */
- npbytes = 0; /* # of bytes appended this packet */
- nbytes = 0; /* # of bytes appended to group's state-change queue */
- rsrcs = 0; /* # sources encoded in current record */
- schanged = 0; /* # nodes encoded in overall filter change */
- nallow = 0; /* # of source entries in ALLOW_NEW */
- nblock = 0; /* # of source entries in BLOCK_OLD */
- nims = NULL; /* next tree node pointer */
+ (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0)) {
+ return 0;
+ }
+
+ ifp = inm->in6m_ifp; /* interface */
+ mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */
+ crt = REC_NONE; /* current group record type */
+ drt = REC_NONE; /* mask of completed group record types */
+ nrt = REC_NONE; /* record type for current node */
+ m0srcs = 0; /* # source which will fit in current mbuf chain */
+ npbytes = 0; /* # of bytes appended this packet */
+ nbytes = 0; /* # of bytes appended to group's state-change queue */
+ rsrcs = 0; /* # sources encoded in current record */
+ schanged = 0; /* # nodes encoded in overall filter change */
+ nallow = 0; /* # of source entries in ALLOW_NEW */
+ nblock = 0; /* # of source entries in BLOCK_OLD */
+ nims = NULL; /* next tree node pointer */
/*
* For each possible filter record mode.
m0 = ifq->ifq_tail;
if (m0 != NULL &&
(m0->m_pkthdr.vt_nrecs + 1 <=
- MLD_V2_REPORT_MAXRECS) &&
+ MLD_V2_REPORT_MAXRECS) &&
(m0->m_pkthdr.len + MINRECLEN) <
- (ifp->if_mtu - MLD_MTUSPACE)) {
+ (ifp->if_mtu - MLD_MTUSPACE)) {
m = m0;
m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
- sizeof(struct mldv2_record)) /
- sizeof(struct in6_addr);
+ sizeof(struct mldv2_record)) /
+ sizeof(struct in6_addr);
MLD_PRINTF(("%s: use previous packet\n",
__func__));
} else {
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
- if (m == NULL)
+ if (m == NULL) {
m = m_gethdr(M_DONTWAIT, MT_DATA);
+ }
if (m == NULL) {
MLD_PRINTF(("%s: m_get*() failed\n",
__func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
m->m_pkthdr.vt_nrecs = 0;
mld_save_context(m, ifp);
mr.mr_addr = inm->in6m_addr;
in6_clearscope(&mr.mr_addr);
if (!m_append(m, sizeof(mr), (void *)&mr)) {
- if (m != m0)
+ if (m != m0) {
m_freem(m);
+ }
MLD_PRINTF(("%s: m_append() failed\n",
__func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
npbytes += sizeof(struct mldv2_record);
if (m != m0) {
continue;
}
nrt = (rectype_t)now;
- if (nrt == REC_NONE)
+ if (nrt == REC_NONE) {
nrt = (rectype_t)(~mode & REC_FULL);
+ }
if (schanged++ == 0) {
crt = nrt;
- } else if (crt != nrt)
+ } else if (crt != nrt) {
continue;
+ }
if (!m_append(m, sizeof(struct in6_addr),
(void *)&ims->im6s_addr)) {
- if (m != m0)
+ if (m != m0) {
m_freem(m);
+ }
MLD_PRINTF(("%s: m_append() failed\n",
__func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
nallow += !!(crt == REC_ALLOW);
nblock += !!(crt == REC_BLOCK);
- if (++rsrcs == m0srcs)
+ if (++rsrcs == m0srcs) {
break;
+ }
}
/*
* If we did not append any tree nodes on this
MLD_PRINTF(("%s: m_adj(m, -mr)\n",
__func__));
m_adj(m, -((int)sizeof(
- struct mldv2_record)));
+ struct mldv2_record)));
}
continue;
}
npbytes += (rsrcs * sizeof(struct in6_addr));
- if (crt == REC_ALLOW)
+ if (crt == REC_ALLOW) {
pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
- else if (crt == REC_BLOCK)
+ } else if (crt == REC_BLOCK) {
pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
+ }
pmr->mr_numsrc = htons(rsrcs);
/*
* Count the new group record, and enqueue this
* packet if it wasn't already queued.
*/
m->m_pkthdr.vt_nrecs++;
- if (m != m0)
+ if (m != m0) {
IF_ENQUEUE(ifq, m);
+ }
nbytes += npbytes;
} while (nims != NULL);
drt |= crt;
MLD_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__,
nallow, nblock));
- return (nbytes);
+ return nbytes;
}
static int
mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
{
- struct ifqueue *gq;
- struct mbuf *m; /* pending state-change */
- struct mbuf *m0; /* copy of pending state-change */
- struct mbuf *mt; /* last state-change in packet */
- struct mbuf *n;
- int docopy, domerge;
- u_int recslen;
+ struct ifqueue *gq;
+ struct mbuf *m; /* pending state-change */
+ struct mbuf *m0; /* copy of pending state-change */
+ struct mbuf *mt; /* last state-change in packet */
+ struct mbuf *n;
+ int docopy, domerge;
+ u_int recslen;
IN6M_LOCK_ASSERT_HELD(inm);
* If there are further pending retransmissions, make a writable
* copy of each queued state-change message before merging.
*/
- if (inm->in6m_scrv > 0)
+ if (inm->in6m_scrv > 0) {
docopy = 1;
+ }
gq = &inm->in6m_scq;
#ifdef MLD_DEBUG
m->m_pkthdr.vt_nrecs <=
MLD_V2_REPORT_MAXRECS) &&
(mt->m_pkthdr.len + recslen <=
- (inm->in6m_ifp->if_mtu - MLD_MTUSPACE)))
+ (inm->in6m_ifp->if_mtu - MLD_MTUSPACE))) {
domerge = 1;
+ }
}
if (!domerge && IF_QFULL(gq)) {
MLD_PRINTF(("%s: copying 0x%llx\n", __func__,
(uint64_t)VM_KERNEL_ADDRPERM(m)));
m0 = m_dup(m, M_NOWAIT);
- if (m0 == NULL)
- return (ENOMEM);
+ if (m0 == NULL) {
+ return ENOMEM;
+ }
m0->m_nextpkt = NULL;
m = m->m_nextpkt;
}
(uint64_t)VM_KERNEL_ADDRPERM(ifscq)));
IF_ENQUEUE(ifscq, m0);
} else {
- struct mbuf *mtl; /* last mbuf of packet mt */
+ struct mbuf *mtl; /* last mbuf of packet mt */
MLD_PRINTF(("%s: merging 0x%llx with ifscq tail "
"0x%llx)\n", __func__,
}
}
- return (0);
+ return 0;
}
/*
static uint32_t
mld_v2_dispatch_general_query(struct mld_ifinfo *mli)
{
- struct ifnet *ifp;
- struct in6_multi *inm;
- struct in6_multistep step;
- int retval;
+ struct ifnet *ifp;
+ struct in6_multi *inm;
+ struct in6_multistep step;
+ int retval;
MLI_LOCK_ASSERT_HELD(mli);
IN6_FIRST_MULTI(step, inm);
while (inm != NULL) {
IN6M_LOCK(inm);
- if (inm->in6m_ifp != ifp)
+ if (inm->in6m_ifp != ifp) {
goto next;
+ }
switch (inm->in6m_state) {
case MLD_NOT_MEMBER:
in6_multihead_lock_done();
MLI_LOCK(mli);
- mld_dispatch_queue(mli, &mli->mli_gq, MLD_MAX_RESPONSE_BURST);
+ mld_dispatch_queue_locked(mli, &mli->mli_gq, MLD_MAX_RESPONSE_BURST);
MLI_LOCK_ASSERT_HELD(mli);
/*
*/
if (mli->mli_gq.ifq_head != NULL) {
mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
- MLD_RESPONSE_BURST_INTERVAL);
+ MLD_RESPONSE_BURST_INTERVAL);
}
- return (mli->mli_v2_timer);
+ return mli->mli_v2_timer;
}
/*
static void
mld_dispatch_packet(struct mbuf *m)
{
- struct ip6_moptions *im6o;
- struct ifnet *ifp;
- struct ifnet *oifp = NULL;
- struct mbuf *m0;
- struct mbuf *md;
- struct ip6_hdr *ip6;
- struct mld_hdr *mld;
- int error;
- int off;
- int type;
+ struct ip6_moptions *im6o;
+ struct ifnet *ifp;
+ struct ifnet *oifp = NULL;
+ struct mbuf *m0;
+ struct mbuf *md;
+ struct ip6_hdr *ip6;
+ struct mld_hdr *mld;
+ int error;
+ int off;
+ int type;
MLD_PRINTF(("%s: transmit 0x%llx\n", __func__,
(uint64_t)VM_KERNEL_ADDRPERM(m)));
type = mld->mld_type;
if (ifp->if_eflags & IFEF_TXSTART) {
- /*
- * Use control service class if the outgoing
+ /*
+ * Use control service class if the outgoing
* interface supports transmit-start model.
*/
(void) m_set_service_class(m0, MBUF_SC_CTL);
if (error) {
MLD_PRINTF(("%s: ip6_output(0x%llx) = %d\n", __func__,
(uint64_t)VM_KERNEL_ADDRPERM(m0), error));
- if (oifp != NULL)
+ if (oifp != NULL) {
ifnet_release(oifp);
+ }
return;
}
static struct mbuf *
mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
{
- struct mbuf *mh;
- struct mldv2_report *mld;
- struct ip6_hdr *ip6;
- struct in6_ifaddr *ia;
- int mldreclen;
+ struct mbuf *mh;
+ struct mldv2_report *mld;
+ struct ip6_hdr *ip6;
+ struct in6_ifaddr *ia;
+ int mldreclen;
VERIFY(m->m_flags & M_PKTHDR);
/*
* RFC3590: OK to send as :: or tentative during DAD.
*/
- ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
- if (ia == NULL)
+ ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
+ if (ia == NULL) {
MLD_PRINTF(("%s: warning: ia is NULL\n", __func__));
+ }
MGETHDR(mh, M_DONTWAIT, MT_HEADER);
if (mh == NULL) {
- if (ia != NULL)
+ if (ia != NULL) {
IFA_REMREF(&ia->ia_ifa);
+ }
m_freem(m);
- return (NULL);
+ return NULL;
}
MH_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));
ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
ip6->ip6_vfc |= IPV6_VERSION;
ip6->ip6_nxt = IPPROTO_ICMPV6;
- if (ia != NULL)
+ if (ia != NULL) {
IFA_LOCK(&ia->ia_ifa);
+ }
ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
if (ia != NULL) {
IFA_UNLOCK(&ia->ia_ifa);
mh->m_next = m;
mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
- return (mh);
+ return mh;
}
#ifdef MLD_DEBUG
mld_rec_type_to_str(const int type)
{
switch (type) {
- case MLD_CHANGE_TO_EXCLUDE_MODE:
- return "TO_EX";
- break;
- case MLD_CHANGE_TO_INCLUDE_MODE:
- return "TO_IN";
- break;
- case MLD_MODE_IS_EXCLUDE:
- return "MODE_EX";
- break;
- case MLD_MODE_IS_INCLUDE:
- return "MODE_IN";
- break;
- case MLD_ALLOW_NEW_SOURCES:
- return "ALLOW_NEW";
- break;
- case MLD_BLOCK_OLD_SOURCES:
- return "BLOCK_OLD";
- break;
- default:
- break;
+ case MLD_CHANGE_TO_EXCLUDE_MODE:
+ return "TO_EX";
+ case MLD_CHANGE_TO_INCLUDE_MODE:
+ return "TO_IN";
+ case MLD_MODE_IS_EXCLUDE:
+ return "MODE_EX";
+ case MLD_MODE_IS_INCLUDE:
+ return "MODE_IN";
+ case MLD_ALLOW_NEW_SOURCES:
+ return "ALLOW_NEW";
+ case MLD_BLOCK_OLD_SOURCES:
+ return "BLOCK_OLD";
+ default:
+ break;
}
return "unknown";
}
void
mld_init(void)
{
-
MLD_PRINTF(("%s: initializing\n", __func__));
- /* Setup lock group and attribute for mld_mtx */
- mld_mtx_grp_attr = lck_grp_attr_alloc_init();
- mld_mtx_grp = lck_grp_alloc_init("mld_mtx\n", mld_mtx_grp_attr);
- mld_mtx_attr = lck_attr_alloc_init();
- lck_mtx_init(&mld_mtx, mld_mtx_grp, mld_mtx_attr);
+ /* Setup lock group and attribute for mld_mtx */
+ mld_mtx_grp_attr = lck_grp_attr_alloc_init();
+ mld_mtx_grp = lck_grp_alloc_init("mld_mtx\n", mld_mtx_grp_attr);
+ mld_mtx_attr = lck_attr_alloc_init();
+ lck_mtx_init(&mld_mtx, mld_mtx_grp, mld_mtx_attr);
ip6_initpktopts(&mld_po);
mld_po.ip6po_hlim = 1;
mld_po.ip6po_flags = IP6PO_DONTFRAG;
LIST_INIT(&mli_head);
- mli_size = sizeof (struct mld_ifinfo);
+ mli_size = sizeof(struct mld_ifinfo);
mli_zone = zinit(mli_size, MLI_ZONE_MAX * mli_size,
0, MLI_ZONE_NAME);
if (mli_zone == NULL) {