/*
- * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*-
#include <sys/malloc.h>
#include <sys/mcache.h>
+#include <dev/random/randomdev.h>
+
#include <kern/zalloc.h>
#include <net/if.h>
#include <netinet6/mld6.h>
#include <netinet6/mld6_var.h>
-/* Lock group and attribute for mld6_mtx */
+/* Lock group and attribute for mld_mtx */
static lck_attr_t *mld_mtx_attr;
static lck_grp_t *mld_mtx_grp;
static lck_grp_attr_t *mld_mtx_grp_attr;
* the state changes to MLD_LEAVING_MEMBER, and later dropped in the timeout
* handler. Each in6_multi holds a reference to the underlying mld_ifinfo.
*
- * Thus, the permitted lock oder is:
+ * Thus, the permitted lock order is:
*
* mld_mtx, in6_multihead_lock, inm6_lock, mli_lock
*
SLIST_HEAD(mld_in6m_relhead, in6_multi);
-static void mli_initvar(struct mld_ifinfo *, struct ifnet *, int);
+static void mli_initvar(struct mld_ifinfo *, struct ifnet *, int);
static struct mld_ifinfo *mli_alloc(int);
-static void mli_free(struct mld_ifinfo *);
-static void mli_delete(const struct ifnet *, struct mld_in6m_relhead *);
-static void mld_dispatch_packet(struct mbuf *);
-static void mld_final_leave(struct in6_multi *, struct mld_ifinfo *);
-static int mld_handle_state_change(struct in6_multi *,
- struct mld_ifinfo *);
-static int mld_initial_join(struct in6_multi *, struct mld_ifinfo *,
- const int);
+static void mli_free(struct mld_ifinfo *);
+static void mli_delete(const struct ifnet *, struct mld_in6m_relhead *);
+static void mld_dispatch_packet(struct mbuf *);
+static void mld_final_leave(struct in6_multi *, struct mld_ifinfo *,
+ struct mld_tparams *);
+static int mld_handle_state_change(struct in6_multi *, struct mld_ifinfo *,
+ struct mld_tparams *);
+static int mld_initial_join(struct in6_multi *, struct mld_ifinfo *,
+ struct mld_tparams *, const int);
#ifdef MLD_DEBUG
-static const char * mld_rec_type_to_str(const int);
+static const char * mld_rec_type_to_str(const int);
#endif
-static void mld_set_version(struct mld_ifinfo *, const int);
-static void mld_flush_relq(struct mld_ifinfo *, struct mld_in6m_relhead *);
-static void mld_dispatch_queue(struct mld_ifinfo *, struct ifqueue *, int);
-static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
- /*const*/ struct mld_hdr *);
-static int mld_v1_input_report(struct ifnet *, const struct ip6_hdr *,
- /*const*/ struct mld_hdr *);
-static void mld_v1_process_group_timer(struct in6_multi *, const int);
-static void mld_v1_process_querier_timers(struct mld_ifinfo *);
-static int mld_v1_transmit_report(struct in6_multi *, const int);
-static void mld_v1_update_group(struct in6_multi *, const int);
-static void mld_v2_cancel_link_timers(struct mld_ifinfo *);
-static void mld_v2_dispatch_general_query(struct mld_ifinfo *);
+static uint32_t mld_set_version(struct mld_ifinfo *, const int);
+static void mld_flush_relq(struct mld_ifinfo *, struct mld_in6m_relhead *);
+static void mld_dispatch_queue_locked(struct mld_ifinfo *, struct ifqueue *, int);
+static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
+ /*const*/ struct mld_hdr *);
+static int mld_v1_input_report(struct ifnet *, struct mbuf *,
+ const struct ip6_hdr *, /*const*/ struct mld_hdr *);
+static void mld_v1_process_group_timer(struct in6_multi *, const int);
+static void mld_v1_process_querier_timers(struct mld_ifinfo *);
+static int mld_v1_transmit_report(struct in6_multi *, const int);
+static uint32_t mld_v1_update_group(struct in6_multi *, const int);
+static void mld_v2_cancel_link_timers(struct mld_ifinfo *);
+static uint32_t mld_v2_dispatch_general_query(struct mld_ifinfo *);
static struct mbuf *
- mld_v2_encap_report(struct ifnet *, struct mbuf *);
-static int mld_v2_enqueue_filter_change(struct ifqueue *,
- struct in6_multi *);
-static int mld_v2_enqueue_group_record(struct ifqueue *,
- struct in6_multi *, const int, const int, const int,
- const int);
-static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
- struct mbuf *, const int, const int);
-static int mld_v2_merge_state_changes(struct in6_multi *,
- struct ifqueue *);
-static void mld_v2_process_group_timers(struct mld_ifinfo *,
- struct ifqueue *, struct ifqueue *,
- struct in6_multi *, const int);
-static int mld_v2_process_group_query(struct in6_multi *,
- int, struct mbuf *, const int);
-static int sysctl_mld_gsr SYSCTL_HANDLER_ARGS;
-static int sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS;
+mld_v2_encap_report(struct ifnet *, struct mbuf *);
+static int mld_v2_enqueue_filter_change(struct ifqueue *,
+ struct in6_multi *);
+static int mld_v2_enqueue_group_record(struct ifqueue *,
+ struct in6_multi *, const int, const int, const int,
+ const int);
+static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
+ struct mbuf *, const int, const int);
+static int mld_v2_merge_state_changes(struct in6_multi *,
+ struct ifqueue *);
+static void mld_v2_process_group_timers(struct mld_ifinfo *,
+ struct ifqueue *, struct ifqueue *,
+ struct in6_multi *, const int);
+static int mld_v2_process_group_query(struct in6_multi *,
+ int, struct mbuf *, const int);
+static int sysctl_mld_gsr SYSCTL_HANDLER_ARGS;
+static int sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS;
+static int sysctl_mld_v2enable SYSCTL_HANDLER_ARGS;
+
+static int mld_timeout_run; /* MLD timer is scheduled to run */
+static void mld_timeout(void *);
+static void mld_sched_timeout(void);
/*
* Normative references: RFC 2710, RFC 3590, RFC 3810.
- *
- * XXX LOR PREVENTION
- * A special case for IPv6 is the in6_setscope() routine. ip6_output()
- * will not accept an ifp; it wants an embedded scope ID, unlike
- * ip_output(), which happily takes the ifp given to it. The embedded
- * scope ID is only used by MLD to select the outgoing interface.
- *
- * As such, we exploit the fact that the scope ID is just the interface
- * index, and embed it in the IPv6 destination address accordingly.
- * This is potentially NOT VALID for MLDv1 reports, as they
- * are always sent to the multicast group itself; as MLDv2
- * reports are always sent to ff02::16, this is not an issue
- * when MLDv2 is in use.
*/
-
-#define MLD_EMBEDSCOPE(pin6, zoneid) \
- (pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF)
-
static struct timeval mld_gsrdelay = {10, 0};
static LIST_HEAD(, mld_ifinfo) mli_head;
+static int querier_present_timers_running6;
static int interface_timers_running6;
static int state_change_timers_running6;
static int current_state_timers_running6;
-static decl_lck_mtx_data(, mld6_mtx);
-
-#define MLD_LOCK() \
- lck_mtx_lock(&mld6_mtx)
-#define MLD_LOCK_ASSERT_HELD() \
- lck_mtx_assert(&mld6_mtx, LCK_MTX_ASSERT_OWNED)
-#define MLD_LOCK_ASSERT_NOTHELD() \
- lck_mtx_assert(&mld6_mtx, LCK_MTX_ASSERT_NOTOWNED)
-#define MLD_UNLOCK() \
- lck_mtx_unlock(&mld6_mtx)
-
-#define MLD_ADD_DETACHED_IN6M(_head, _in6m) { \
- SLIST_INSERT_HEAD(_head, _in6m, in6m_dtle); \
+static unsigned int mld_mli_list_genid;
+/*
+ * Subsystem lock macros.
+ */
+#define MLD_LOCK() \
+ lck_mtx_lock(&mld_mtx)
+#define MLD_LOCK_ASSERT_HELD() \
+ LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_OWNED)
+#define MLD_LOCK_ASSERT_NOTHELD() \
+ LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_NOTOWNED)
+#define MLD_UNLOCK() \
+ lck_mtx_unlock(&mld_mtx)
+
+#define MLD_ADD_DETACHED_IN6M(_head, _in6m) { \
+ SLIST_INSERT_HEAD(_head, _in6m, in6m_dtle); \
}
-#define MLD_REMOVE_DETACHED_IN6M(_head) { \
- struct in6_multi *_in6m, *_inm_tmp; \
- SLIST_FOREACH_SAFE(_in6m, _head, in6m_dtle, _inm_tmp) { \
- SLIST_REMOVE(_head, _in6m, in6_multi, in6m_dtle); \
- IN6M_REMREF(_in6m); \
- } \
- VERIFY(SLIST_EMPTY(_head)); \
+#define MLD_REMOVE_DETACHED_IN6M(_head) { \
+ struct in6_multi *_in6m, *_inm_tmp; \
+ SLIST_FOREACH_SAFE(_in6m, _head, in6m_dtle, _inm_tmp) { \
+ SLIST_REMOVE(_head, _in6m, in6_multi, in6m_dtle); \
+ IN6M_REMREF(_in6m); \
+ } \
+ VERIFY(SLIST_EMPTY(_head)); \
}
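+
+/*
+ * Note: these two macros implement a deferred-release pattern.  Detached
+ * in6_multi records are collected on a caller-local SLIST while MLD/MLI
+ * locks are held, and their references are only dropped via
+ * MLD_REMOVE_DETACHED_IN6M once every lock has been released (see the
+ * "release detached records" steps below), avoiding lock-order reversals
+ * against in6_multihead_lock.
+ */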
-#define MLI_ZONE_MAX 64 /* maximum elements in zone */
-#define MLI_ZONE_NAME "mld_ifinfo" /* zone name */
+#define MLI_ZONE_MAX 64 /* maximum elements in zone */
+#define MLI_ZONE_NAME "mld_ifinfo" /* zone name */
-static unsigned int mli_size; /* size of zone element */
-static struct zone *mli_zone; /* zone for mld_ifinfo */
+static unsigned int mli_size; /* size of zone element */
+static struct zone *mli_zone; /* zone for mld_ifinfo */
-SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */
+SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */
SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
"IPv6 Multicast Listener Discovery");
"Rate limit for MLDv2 Group-and-Source queries in seconds");
SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_LOCKED,
- sysctl_mld_ifinfo, "Per-interface MLDv2 state");
+ sysctl_mld_ifinfo, "Per-interface MLDv2 state");
-static int mld_v1enable = 1;
+static int mld_v1enable = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RW | CTLFLAG_LOCKED,
&mld_v1enable, 0, "Enable fallback to MLDv1");
-static int mld_use_allow = 1;
+static int mld_v2enable = 1;
+SYSCTL_PROC(_net_inet6_mld, OID_AUTO, v2enable,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &mld_v2enable, 0, sysctl_mld_v2enable, "I",
+ "Enable MLDv2 (debug purposes only)");
+
+static int mld_use_allow = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RW | CTLFLAG_LOCKED,
&mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
#ifdef MLD_DEBUG
int mld_debug = 0;
SYSCTL_INT(_net_inet6_mld, OID_AUTO,
- debug, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_debug, 0, "");
+ debug, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_debug, 0, "");
#endif
/*
* Packed Router Alert option structure declaration.
*/
struct mld_raopt {
- struct ip6_hbh hbh;
- struct ip6_opt pad;
- struct ip6_opt_router ra;
+ struct ip6_hbh hbh;
+ struct ip6_opt pad;
+ struct ip6_opt_router ra;
} __packed;
/*
.hbh = { 0, 0 },
.pad = { .ip6o_type = IP6OPT_PADN, 0 },
.ra = {
- .ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT,
- .ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2),
- .ip6or_value = {((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
- (IP6OPT_RTALERT_MLD & 0xFF) }
+ .ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT,
+ .ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2),
+ .ip6or_value = {((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
+ (IP6OPT_RTALERT_MLD & 0xFF) }
}
};
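+
+/*
+ * For reference, assuming the standard RFC 2711 option constants
+ * (IP6OPT_PADN == 0x01, IP6OPT_ROUTER_ALERT == 0x05, IP6OPT_RTALERT_MLD
+ * == 0), the initializer above lays out the Hop-by-Hop payload as a PadN
+ * option (0x01 0x00) followed by the Router Alert option
+ * (0x05 0x02 0x00 0x00); the hbh next-header/length fields are left zero
+ * here and are expected to be completed when the option is attached to
+ * mld_po at init time (not shown in this hunk).
+ */
+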
static struct ip6_pktopts mld_po;
+/* Store MLDv2 record count in the module private scratch space */
+#define vt_nrecs pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0]
+
+static __inline void
+mld_save_context(struct mbuf *m, struct ifnet *ifp)
+{
+ m->m_pkthdr.rcvif = ifp;
+}
+
+static __inline void
+mld_scrub_context(struct mbuf *m)
+{
+ m->m_pkthdr.rcvif = NULL;
+}
+
+/*
+ * Restore context from a queued output chain.
+ * Return saved ifp.
+ */
+static __inline struct ifnet *
+mld_restore_context(struct mbuf *m)
+{
+ return m->m_pkthdr.rcvif;
+}
+
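+/*
+ * Together, mld_save_context()/mld_restore_context()/mld_scrub_context()
+ * borrow m_pkthdr.rcvif to remember the outgoing ifp while a report sits
+ * on a deferred output queue; the dispatch path is expected to restore
+ * the ifp and then scrub the field before handing the mbuf to
+ * ip6_output(), so the stashed pointer never leaks downstream.
+ */
+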
/*
* Retrieve or set threshold between group-source queries in seconds.
*/
i = mld_gsrdelay.tv_sec;
error = sysctl_handle_int(oidp, &i, 0, req);
- if (error || !req->newptr)
+ if (error || !req->newptr) {
goto out_locked;
+ }
if (i < -1 || i >= 60) {
error = EINVAL;
out_locked:
MLD_UNLOCK();
- return (error);
+ return error;
}
/*
* Expose struct mld_ifinfo to userland, keyed by ifindex.
sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
- int *name;
- int error;
- u_int namelen;
- struct ifnet *ifp;
- struct mld_ifinfo *mli;
- struct mld_ifinfo_u mli_u;
+ int *name;
+ int error;
+ u_int namelen;
+ struct ifnet *ifp;
+ struct mld_ifinfo *mli;
+ struct mld_ifinfo_u mli_u;
name = (int *)arg1;
namelen = arg2;
- if (req->newptr != USER_ADDR_NULL)
- return (EPERM);
+ if (req->newptr != USER_ADDR_NULL) {
+ return EPERM;
+ }
- if (namelen != 1)
- return (EINVAL);
+ if (namelen != 1) {
+ return EINVAL;
+ }
MLD_LOCK();
ifnet_head_lock_shared();
ifp = ifindex2ifnet[name[0]];
ifnet_head_done();
- if (ifp == NULL)
+ if (ifp == NULL) {
goto out_locked;
+ }
- bzero(&mli_u, sizeof (mli_u));
+ bzero(&mli_u, sizeof(mli_u));
LIST_FOREACH(mli, &mli_head, mli_link) {
MLI_LOCK(mli);
mli_u.mli_uri = mli->mli_uri;
MLI_UNLOCK(mli);
- error = SYSCTL_OUT(req, &mli_u, sizeof (mli_u));
+ error = SYSCTL_OUT(req, &mli_u, sizeof(mli_u));
break;
}
out_locked:
MLD_UNLOCK();
- return (error);
+ return error;
+}
+
+static int
+sysctl_mld_v2enable SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int error;
+ int i;
+ struct mld_ifinfo *mli;
+ struct mld_tparams mtp = { 0, 0, 0, 0 };
+
+ MLD_LOCK();
+
+ i = mld_v2enable;
+
+ error = sysctl_handle_int(oidp, &i, 0, req);
+ if (error || !req->newptr) {
+ goto out_locked;
+ }
+
+ if (i < 0 || i > 1) {
+ error = EINVAL;
+ goto out_locked;
+ }
+
+ mld_v2enable = i;
+ /*
+ * If we enabled v2, the state transition will take care of upgrading
+ * the MLD version back to v2. Otherwise, we have to explicitly
+ * downgrade. Note that this functionality is to be used for debugging.
+ */
+ if (mld_v2enable == 1) {
+ goto out_locked;
+ }
+
+ LIST_FOREACH(mli, &mli_head, mli_link) {
+ MLI_LOCK(mli);
+ if (mld_set_version(mli, MLD_VERSION_1) > 0) {
+ mtp.qpt = 1;
+ }
+ MLI_UNLOCK(mli);
+ }
+
+out_locked:
+ MLD_UNLOCK();
+
+ mld_set_timeout(&mtp);
+
+ return error;
}
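+
+/*
+ * Usage sketch (debug only): "sysctl -w net.inet6.mld.v2enable=0" walks
+ * mli_head above and downgrades every interface to MLDv1; mld_set_timeout()
+ * is then called with qpt set so that the v1 querier-present timers started
+ * by mld_set_version() are actually serviced by mld_timeout().
+ */
+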
/*
* Dispatch an entire queue of pending packet chains.
*
* Must not be called with in6m_lock held.
+ * XXX This routine unlocks the MLD global lock and also the mli lock.
+ * Make sure that the calling routine takes a reference on the mli
+ * before calling this routine.
+ * Also, when traversing mli_head, remember to check the mli list
+ * generation count and restart the loop if it has changed.
*/
static void
-mld_dispatch_queue(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit)
+mld_dispatch_queue_locked(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit)
{
struct mbuf *m;
- if (mli != NULL)
+ MLD_LOCK_ASSERT_HELD();
+
+ if (mli != NULL) {
MLI_LOCK_ASSERT_HELD(mli);
+ }
for (;;) {
IF_DEQUEUE(ifq, m);
- if (m == NULL)
+ if (m == NULL) {
break;
- MLD_PRINTF(("%s: dispatch %p from %p\n", __func__, ifq, m));
- if (mli != NULL)
+ }
+ MLD_PRINTF(("%s: dispatch 0x%llx from 0x%llx\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifq),
+ (uint64_t)VM_KERNEL_ADDRPERM(m)));
+
+ if (mli != NULL) {
MLI_UNLOCK(mli);
+ }
+ MLD_UNLOCK();
+
mld_dispatch_packet(m);
- if (mli != NULL)
+
+ MLD_LOCK();
+ if (mli != NULL) {
MLI_LOCK(mli);
- if (--limit == 0)
+ }
+
+ if (--limit == 0) {
break;
+ }
}
- if (mli != NULL)
+ if (mli != NULL) {
MLI_LOCK_ASSERT_HELD(mli);
+ }
}
/*
static __inline__ int
mld_is_addr_reported(const struct in6_addr *addr)
{
-
VERIFY(IN6_IS_ADDR_MULTICAST(addr));
- if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL)
- return (0);
+ if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL) {
+ return 0;
+ }
if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) {
struct in6_addr tmp = *addr;
in6_clearscope(&tmp);
- if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes))
- return (0);
+ if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes)) {
+ return 0;
+ }
}
- return (1);
+ return 1;
}
/*
{
struct mld_ifinfo *mli;
- MLD_PRINTF(("%s: called for ifp %p(%s%d)\n",
- __func__, ifp, ifp->if_name, ifp->if_unit));
+ MLD_PRINTF(("%s: called for ifp 0x%llx(%s)\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
mli = mli_alloc(how);
- if (mli == NULL)
- return (NULL);
+ if (mli == NULL) {
+ return NULL;
+ }
MLD_LOCK();
MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
MLI_ADDREF_LOCKED(mli); /* hold a reference for caller */
MLI_UNLOCK(mli);
+ ifnet_lock_shared(ifp);
+ mld6_initsilent(ifp, mli);
+ ifnet_lock_done(ifp);
LIST_INSERT_HEAD(&mli_head, mli, mli_link);
+ mld_mli_list_genid++;
MLD_UNLOCK();
- MLD_PRINTF(("allocate mld_ifinfo for ifp %p(%s%d)\n",
- ifp, ifp->if_name, ifp->if_unit));
+ MLD_PRINTF(("%s: allocate mld_ifinfo for ifp 0x%llx(%s)\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
- return (mli);
+ return mli;
}
/*
mli->mli_debug |= IFD_ATTACHED;
MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
MLI_UNLOCK(mli);
+ ifnet_lock_shared(ifp);
+ mld6_initsilent(ifp, mli);
+ ifnet_lock_done(ifp);
LIST_INSERT_HEAD(&mli_head, mli, mli_link);
+ mld_mli_list_genid++;
MLD_UNLOCK();
- MLD_PRINTF(("reattached mld_ifinfo for ifp %p(%s%d)\n",
- ifp, ifp->if_name, ifp->if_unit));
+ MLD_PRINTF(("%s: reattached mld_ifinfo for ifp 0x%llx(%s)\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
}
/*
void
mld_domifdetach(struct ifnet *ifp)
{
- SLIST_HEAD(, in6_multi) in6m_dthead;
+ SLIST_HEAD(, in6_multi) in6m_dthead;
SLIST_INIT(&in6m_dthead);
- MLD_PRINTF(("%s: called for ifp %p(%s%d)\n",
- __func__, ifp, ifp->if_name, ifp->if_unit));
+ MLD_PRINTF(("%s: called for ifp 0x%llx(%s)\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
MLD_LOCK();
mli_delete(ifp, (struct mld_in6m_relhead *)&in6m_dthead);
LIST_REMOVE(mli, mli_link);
MLI_REMREF(mli); /* release mli_head reference */
+ mld_mli_list_genid++;
return;
}
MLI_UNLOCK(mli);
}
- panic("%s: mld_ifinfo not found for ifp %p\n", __func__, ifp);
+ panic("%s: mld_ifinfo not found for ifp %p(%s)\n", __func__,
+ ifp, ifp->if_xname);
+}
+
+__private_extern__ void
+mld6_initsilent(struct ifnet *ifp, struct mld_ifinfo *mli)
+{
+ ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_OWNED);
+
+ MLI_LOCK_ASSERT_NOTHELD(mli);
+ MLI_LOCK(mli);
+ if (!(ifp->if_flags & IFF_MULTICAST) &&
+ (ifp->if_eflags & (IFEF_IPV6_ND6ALT | IFEF_LOCALNET_PRIVATE))) {
+ mli->mli_flags |= MLIF_SILENT;
+ } else {
+ mli->mli_flags &= ~MLIF_SILENT;
+ }
+ MLI_UNLOCK(mli);
}
static void
MLI_LOCK_ASSERT_HELD(mli);
mli->mli_ifp = ifp;
- mli->mli_version = MLD_VERSION_2;
+ if (mld_v2enable) {
+ mli->mli_version = MLD_VERSION_2;
+ } else {
+ mli->mli_version = MLD_VERSION_1;
+ }
mli->mli_flags = 0;
mli->mli_rv = MLD_RV_INIT;
mli->mli_qi = MLD_QI_INIT;
mli->mli_qri = MLD_QRI_INIT;
mli->mli_uri = MLD_URI_INIT;
- /* ifnet is not yet attached; no need to hold ifnet lock */
- if (!(ifp->if_flags & IFF_MULTICAST))
- mli->mli_flags |= MLIF_SILENT;
- if (mld_use_allow)
+ if (mld_use_allow) {
mli->mli_flags |= MLIF_USEALLOW;
- if (!reattach)
+ }
+ if (!reattach) {
SLIST_INIT(&mli->mli_relinmhead);
+ }
/*
* Responses to general queries are subject to bounds.
lck_mtx_init(&mli->mli_lock, mld_mtx_grp, mld_mtx_attr);
mli->mli_debug |= IFD_ALLOC;
}
- return (mli);
+ return mli;
}
static void
void
mli_addref(struct mld_ifinfo *mli, int locked)
{
- if (!locked)
+ if (!locked) {
MLI_LOCK_SPIN(mli);
- else
+ } else {
MLI_LOCK_ASSERT_HELD(mli);
+ }
if (++mli->mli_refcnt == 0) {
panic("%s: mli=%p wraparound refcnt", __func__, mli);
/* NOTREACHED */
}
- if (!locked)
+ if (!locked) {
MLI_UNLOCK(mli);
+ }
}
void
mli_remref(struct mld_ifinfo *mli)
{
- SLIST_HEAD(, in6_multi) in6m_dthead;
+ SLIST_HEAD(, in6_multi) in6m_dthead;
struct ifnet *ifp;
MLI_LOCK_SPIN(mli);
/* Now that we're dropped all locks, release detached records */
MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
- MLD_PRINTF(("%s: freeing mld_ifinfo for ifp %p(%s%d)\n",
- __func__, ifp, ifp->if_name, ifp->if_unit));
+ MLD_PRINTF(("%s: freeing mld_ifinfo for ifp 0x%llx(%s)\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
mli_free(mli);
}
mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
/*const*/ struct mld_hdr *mld)
{
- struct mld_ifinfo *mli;
- struct in6_multi *inm;
- int is_general_query;
- uint16_t timer;
+ struct mld_ifinfo *mli;
+ struct in6_multi *inm;
+ int err = 0, is_general_query;
+ uint16_t timer;
+ struct mld_tparams mtp = { 0, 0, 0, 0 };
+
+ MLD_LOCK_ASSERT_NOTHELD();
is_general_query = 0;
if (!mld_v1enable) {
- MLD_PRINTF(("ignore v1 query %s on ifp %p(%s%d)\n",
- ip6_sprintf(&mld->mld_addr),
- ifp, ifp->if_name, ifp->if_unit));
- return (0);
+ MLD_PRINTF(("%s: ignore v1 query %s on ifp 0x%llx(%s)\n",
+ __func__, ip6_sprintf(&mld->mld_addr),
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
+ goto done;
}
/*
* a router's link-local address.
*/
if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
- MLD_PRINTF(("ignore v1 query src %s on ifp %p(%s%d)\n",
- ip6_sprintf(&ip6->ip6_src),
- ifp, ifp->if_name, ifp->if_unit));
- return (0);
+ MLD_PRINTF(("%s: ignore v1 query src %s on ifp 0x%llx(%s)\n",
+ __func__, ip6_sprintf(&ip6->ip6_src),
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
+ goto done;
}
/*
* MLDv1 General Query.
* If this was not sent to the all-nodes group, ignore it.
*/
- struct in6_addr dst;
+ struct in6_addr dst;
dst = ip6->ip6_dst;
in6_clearscope(&dst);
- if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes))
- return (EINVAL);
+ if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes)) {
+ err = EINVAL;
+ goto done;
+ }
is_general_query = 1;
} else {
/*
VERIFY(mli != NULL);
MLI_LOCK(mli);
- mld_set_version(mli, MLD_VERSION_1);
+ mtp.qpt = mld_set_version(mli, MLD_VERSION_1);
MLI_UNLOCK(mli);
- timer = (ntohs(mld->mld_maxdelay) * PR_SLOWHZ) / MLD_TIMER_SCALE;
- if (timer == 0)
+ timer = ntohs(mld->mld_maxdelay) / MLD_TIMER_SCALE;
+ if (timer == 0) {
timer = 1;
+ }
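+	/*
+	 * mld_maxdelay is the v1 Maximum Response Delay in milliseconds
+	 * (RFC 2710); MLD_TIMER_SCALE (assumed here to be 1000) converts it
+	 * to the one-second units used by mld_timeout(), with a minimum of
+	 * one tick so a zero delay still schedules a report.
+	 */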
if (is_general_query) {
struct in6_multistep step;
- MLD_PRINTF(("process v1 general query on ifp %p(%s%d)\n",
- ifp, ifp->if_name, ifp->if_unit));
+ MLD_PRINTF(("%s: process v1 general query on ifp 0x%llx(%s)\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
/*
* For each reporting group joined on this
* interface, kick the report timer.
IN6_FIRST_MULTI(step, inm);
while (inm != NULL) {
IN6M_LOCK(inm);
- if (inm->in6m_ifp == ifp)
- mld_v1_update_group(inm, timer);
+ if (inm->in6m_ifp == ifp) {
+ mtp.cst += mld_v1_update_group(inm, timer);
+ }
IN6M_UNLOCK(inm);
IN6_NEXT_MULTI(step, inm);
}
if (inm != NULL) {
IN6M_LOCK(inm);
- MLD_PRINTF(("process v1 query %s on ifp %p(%s%d)\n",
+ MLD_PRINTF(("%s: process v1 query %s on "
+ "ifp 0x%llx(%s)\n", __func__,
ip6_sprintf(&mld->mld_addr),
- ifp, ifp->if_name, ifp->if_unit));
- mld_v1_update_group(inm, timer);
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
+ mtp.cst = mld_v1_update_group(inm, timer);
IN6M_UNLOCK(inm);
IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
}
/* XXX Clear embedded scope ID as userland won't expect it. */
in6_clearscope(&mld->mld_addr);
}
+done:
+ mld_set_timeout(&mtp);
- return (0);
+ return err;
}
/*
* We may be updating the group for the first time since we switched
* to MLDv2. If we are, then we must clear any recorded source lists,
* and transition to REPORTING state; the group timer is overloaded
- * for group and group-source query responses.
+ * for group and group-source query responses.
*
* Unlike MLDv2, the delay per group should be jittered
* to avoid bursts of MLDv1 reports.
*/
-static void
+static uint32_t
mld_v1_update_group(struct in6_multi *inm, const int timer)
{
IN6M_LOCK_ASSERT_HELD(inm);
- MLD_PRINTF(("%s: %s/%s%d timer=%d\n", __func__,
+ MLD_PRINTF(("%s: %s/%s timer=%d\n", __func__,
ip6_sprintf(&inm->in6m_addr),
- inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit, timer));
+ if_name(inm->in6m_ifp), timer));
switch (inm->in6m_state) {
case MLD_NOT_MEMBER:
"skipping.\n", __func__));
break;
}
- /* FALLTHROUGH */
+ /* FALLTHROUGH */
case MLD_SG_QUERY_PENDING_MEMBER:
case MLD_G_QUERY_PENDING_MEMBER:
case MLD_IDLE_MEMBER:
MLD_PRINTF(("%s: ->REPORTING\n", __func__));
inm->in6m_state = MLD_REPORTING_MEMBER;
inm->in6m_timer = MLD_RANDOM_DELAY(timer);
- current_state_timers_running6 = 1;
break;
case MLD_SLEEPING_MEMBER:
MLD_PRINTF(("%s: ->AWAKENING\n", __func__));
case MLD_LEAVING_MEMBER:
break;
}
+
+ return inm->in6m_timer;
}
/*
mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
struct mbuf *m, const int off, const int icmp6len)
{
- struct mld_ifinfo *mli;
- struct mldv2_query *mld;
- struct in6_multi *inm;
- uint32_t maxdelay, nsrc, qqi;
- int is_general_query;
- uint16_t timer;
- uint8_t qrv;
+ struct mld_ifinfo *mli;
+ struct mldv2_query *mld;
+ struct in6_multi *inm;
+ uint32_t maxdelay, nsrc, qqi;
+ int err = 0, is_general_query;
+ uint16_t timer;
+ uint8_t qrv;
+ struct mld_tparams mtp = { 0, 0, 0, 0 };
+
+ MLD_LOCK_ASSERT_NOTHELD();
is_general_query = 0;
+ if (!mld_v2enable) {
+ MLD_PRINTF(("%s: ignore v2 query %s on ifp 0x%llx(%s)\n",
+ __func__, ip6_sprintf(&ip6->ip6_src),
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
+ goto done;
+ }
+
/*
* RFC3810 Section 6.2: MLD queries must originate from
* a router's link-local address.
*/
if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
- MLD_PRINTF(("ignore v1 query src %s on ifp %p(%s%d)\n",
- ip6_sprintf(&ip6->ip6_src),
- ifp, ifp->if_name, ifp->if_unit));
- return (0);
+ MLD_PRINTF(("%s: ignore v1 query src %s on ifp 0x%llx(%s)\n",
+ __func__, ip6_sprintf(&ip6->ip6_src),
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
+ goto done;
}
- MLD_PRINTF(("input v2 query on ifp %p(%s%d)\n", ifp, ifp->if_name,
- ifp->if_unit));
+ MLD_PRINTF(("%s: input v2 query on ifp 0x%llx(%s)\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off);
- maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */
- if (maxdelay >= 32678) {
+	maxdelay = ntohs(mld->mld_maxdelay);	/* in milliseconds */
+ if (maxdelay >= 32768) {
maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) <<
- (MLD_MRC_EXP(maxdelay) + 3);
+ (MLD_MRC_EXP(maxdelay) + 3);
}
- timer = (maxdelay * PR_SLOWHZ) / MLD_TIMER_SCALE;
- if (timer == 0)
+ timer = maxdelay / MLD_TIMER_SCALE;
+ if (timer == 0) {
timer = 1;
+ }
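+	/*
+	 * Decoding sketch for the RFC 3810 Maximum Response Code, assuming
+	 * MLD_MRC_MANT/MLD_MRC_EXP extract the low 12 bits and bits 12-14:
+	 * codes >= 32768 are floating point, MRD = (mant | 0x1000) << (exp + 3).
+	 * E.g. a code of 0x8000 (mant 0, exp 0) decodes to 4096 << 3 =
+	 * 32768 ms, i.e. a ~32-second response window.
+	 */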
qrv = MLD_QRV(mld->mld_misc);
if (qrv < 2) {
qqi = mld->mld_qqi;
if (qqi >= 128) {
qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
- (MLD_QQIC_EXP(mld->mld_qqi) + 3);
+ (MLD_QQIC_EXP(mld->mld_qqi) + 3);
}
nsrc = ntohs(mld->mld_numsrc);
- if (nsrc > MLD_MAX_GS_SOURCES)
- return (EMSGSIZE);
+ if (nsrc > MLD_MAX_GS_SOURCES) {
+ err = EMSGSIZE;
+ goto done;
+ }
if (icmp6len < sizeof(struct mldv2_query) +
- (nsrc * sizeof(struct in6_addr)))
- return (EMSGSIZE);
+ (nsrc * sizeof(struct in6_addr))) {
+ err = EMSGSIZE;
+ goto done;
+ }
/*
* Do further input validation upfront to avoid resetting timers
*/
if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
/*
- * General Queries SHOULD be directed to ff02::1.
* A general query with a source list has undefined
* behaviour; discard it.
*/
- struct in6_addr dst;
-
- dst = ip6->ip6_dst;
- in6_clearscope(&dst);
- if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes) ||
- nsrc > 0)
- return (EINVAL);
+ if (nsrc > 0) {
+ err = EINVAL;
+ goto done;
+ }
is_general_query = 1;
} else {
/*
*/
if (mli->mli_version != MLD_VERSION_2) {
MLI_UNLOCK(mli);
- return (0);
+ goto done;
}
- mld_set_version(mli, MLD_VERSION_2);
+ mtp.qpt = mld_set_version(mli, MLD_VERSION_2);
mli->mli_rv = qrv;
mli->mli_qi = qqi;
- mli->mli_qri = maxdelay;
+ mli->mli_qri = MAX(timer, MLD_QRI_MIN);
- MLD_PRINTF(("%s: qrv %d qi %d maxdelay %d\n", __func__, qrv, qqi,
- maxdelay));
+ MLD_PRINTF(("%s: qrv %d qi %d qri %d\n", __func__, mli->mli_rv,
+ mli->mli_qi, mli->mli_qri));
if (is_general_query) {
/*
* not schedule any other reports.
* Otherwise, reset the interface timer.
*/
- MLD_PRINTF(("process v2 general query on ifp %p(%s%d)\n",
- ifp, ifp->if_name, ifp->if_unit));
+ MLD_PRINTF(("%s: process v2 general query on ifp 0x%llx(%s)\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
- mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
- interface_timers_running6 = 1;
+ mtp.it = mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
}
MLI_UNLOCK(mli);
} else {
in6_multihead_lock_shared();
IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
in6_multihead_lock_done();
- if (inm == NULL)
- return (0);
+ if (inm == NULL) {
+ goto done;
+ }
IN6M_LOCK(inm);
-#ifndef __APPLE__
- /* TODO: need ratecheck equivalent */
if (nsrc > 0) {
if (!ratecheck(&inm->in6m_lastgsrtv,
&mld_gsrdelay)) {
__func__));
IN6M_UNLOCK(inm);
IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
- return (0);
+ goto done;
}
}
-#endif
- MLD_PRINTF(("process v2 group query on ifp %p(%s%d)\n",
- ifp, ifp->if_name, ifp->if_unit));
+ MLD_PRINTF(("%s: process v2 group query on ifp 0x%llx(%s)\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
/*
* If there is a pending General Query response
* scheduled sooner than the selected delay, no
* group-specific or group-and-source query.
*/
MLI_LOCK(mli);
- if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
- MLI_UNLOCK(mli);
- mld_v2_process_group_query(inm, timer, m, off);
- } else {
- MLI_UNLOCK(mli);
+ mtp.it = mli->mli_v2_timer;
+ MLI_UNLOCK(mli);
+ if (mtp.it == 0 || mtp.it >= timer) {
+ (void) mld_v2_process_group_query(inm, timer, m, off);
+ mtp.cst = inm->in6m_timer;
}
IN6M_UNLOCK(inm);
IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
/* XXX Clear embedded scope ID as userland won't expect it. */
in6_clearscope(&mld->mld_addr);
}
+done:
+ if (mtp.it > 0) {
+ MLD_PRINTF(("%s: v2 general query response scheduled in "
+ "T+%d seconds on ifp 0x%llx(%s)\n", __func__, mtp.it,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
+ }
+ mld_set_timeout(&mtp);
- return (0);
+ return err;
}
/*
mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0,
const int off)
{
- struct mldv2_query *mld;
- int retval;
- uint16_t nsrc;
+ struct mldv2_query *mld;
+ int retval;
+ uint16_t nsrc;
IN6M_LOCK_ASSERT_HELD(inm);
case MLD_AWAKENING_MEMBER:
case MLD_IDLE_MEMBER:
case MLD_LEAVING_MEMBER:
- return (retval);
- break;
+ return retval;
case MLD_REPORTING_MEMBER:
case MLD_G_QUERY_PENDING_MEMBER:
case MLD_SG_QUERY_PENDING_MEMBER:
}
inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
inm->in6m_timer = MLD_RANDOM_DELAY(timer);
- current_state_timers_running6 = 1;
- return (retval);
+ return retval;
}
/*
if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
timer = min(inm->in6m_timer, timer);
inm->in6m_timer = MLD_RANDOM_DELAY(timer);
- current_state_timers_running6 = 1;
- return (retval);
+ return retval;
}
/*
* report for those sources.
*/
if (inm->in6m_nsrc > 0) {
- struct mbuf *m;
- uint8_t *sp;
- int i, nrecorded;
- int soff;
+ struct mbuf *m;
+ uint8_t *sp;
+ int i, nrecorded;
+ int soff;
m = m0;
soff = off + sizeof(struct mldv2_query);
for (i = 0; i < nsrc; i++) {
sp = mtod(m, uint8_t *) + soff;
retval = in6m_record_source(inm,
- (const struct in6_addr *)sp);
- if (retval < 0)
+ (const struct in6_addr *)(void *)sp);
+ if (retval < 0) {
break;
+ }
nrecorded += retval;
soff += sizeof(struct in6_addr);
if (soff >= m->m_len) {
soff = soff - m->m_len;
m = m->m_next;
- if (m == NULL)
+ if (m == NULL) {
break;
+ }
}
}
if (nrecorded > 0) {
- MLD_PRINTF(( "%s: schedule response to SG query\n",
+ MLD_PRINTF(("%s: schedule response to SG query\n",
__func__));
inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
inm->in6m_timer = MLD_RANDOM_DELAY(timer);
- current_state_timers_running6 = 1;
}
}
- return (retval);
+ return retval;
}
/*
* mld_addr. This is OK as we own the mbuf chain.
*/
static int
-mld_v1_input_report(struct ifnet *ifp, const struct ip6_hdr *ip6,
- /*const*/ struct mld_hdr *mld)
+mld_v1_input_report(struct ifnet *ifp, struct mbuf *m,
+ const struct ip6_hdr *ip6, /*const*/ struct mld_hdr *mld)
{
- struct in6_addr src, dst;
- struct in6_ifaddr *ia;
- struct in6_multi *inm;
+ struct in6_addr src, dst;
+ struct in6_ifaddr *ia;
+ struct in6_multi *inm;
if (!mld_v1enable) {
- MLD_PRINTF(("ignore v1 report %s on ifp %p(%s%d)\n",
- ip6_sprintf(&mld->mld_addr),
- ifp, ifp->if_name, ifp->if_unit));
- return (0);
+ MLD_PRINTF(("%s: ignore v1 report %s on ifp 0x%llx(%s)\n",
+ __func__, ip6_sprintf(&mld->mld_addr),
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
+ return 0;
}
- if (ifp->if_flags & IFF_LOOPBACK)
- return (0);
+ if ((ifp->if_flags & IFF_LOOPBACK) ||
+ (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
+ return 0;
+ }
/*
* MLDv1 reports must originate from a host's link-local address,
src = ip6->ip6_src;
in6_clearscope(&src);
if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
- MLD_PRINTF(("ignore v1 query src %s on ifp %p(%s%d)\n",
- ip6_sprintf(&ip6->ip6_src),
- ifp, ifp->if_name, ifp->if_unit));
- return (EINVAL);
+ MLD_PRINTF(("%s: ignore v1 query src %s on ifp 0x%llx(%s)\n",
+ __func__, ip6_sprintf(&ip6->ip6_src),
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
+ return EINVAL;
}
/*
in6_clearscope(&dst);
if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
!IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
- MLD_PRINTF(("ignore v1 query dst %s on ifp %p(%s%d)\n",
- ip6_sprintf(&ip6->ip6_dst),
- ifp, ifp->if_name, ifp->if_unit));
- return (EINVAL);
+ MLD_PRINTF(("%s: ignore v1 query dst %s on ifp 0x%llx(%s)\n",
+ __func__, ip6_sprintf(&ip6->ip6_dst),
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
+ return EINVAL;
}
/*
* returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
* performed for the on-wire address.
*/
- ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
+ ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
if (ia != NULL) {
IFA_LOCK(&ia->ia_ifa);
- if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia)))){
+ if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia)))) {
IFA_UNLOCK(&ia->ia_ifa);
IFA_REMREF(&ia->ia_ifa);
- return (0);
+ return 0;
}
IFA_UNLOCK(&ia->ia_ifa);
IFA_REMREF(&ia->ia_ifa);
} else if (IN6_IS_ADDR_UNSPECIFIED(&src)) {
- return (0);
+ return 0;
}
- MLD_PRINTF(("process v1 report %s on ifp %p(%s%d)\n",
- ip6_sprintf(&mld->mld_addr), ifp, ifp->if_name, ifp->if_unit));
+ MLD_PRINTF(("%s: process v1 report %s on ifp 0x%llx(%s)\n",
+ __func__, ip6_sprintf(&mld->mld_addr),
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
/*
* Embed scope ID of receiving interface in MLD query for lookup
* whilst we don't hold other locks (due to KAME locking lameness).
*/
- if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
+ if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
in6_setscope(&mld->mld_addr, ifp, NULL);
+ }
/*
* MLDv1 report suppression.
case MLD_REPORTING_MEMBER:
case MLD_IDLE_MEMBER:
case MLD_AWAKENING_MEMBER:
- MLD_PRINTF(("report suppressed for %s on ifp %p(%s%d)\n",
+ MLD_PRINTF(("%s: report suppressed for %s on "
+ "ifp 0x%llx(%s)\n", __func__,
ip6_sprintf(&mld->mld_addr),
- ifp, ifp->if_name, ifp->if_unit));
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
case MLD_LAZY_MEMBER:
inm->in6m_state = MLD_LAZY_MEMBER;
break;
/* XXX Clear embedded scope ID as userland won't expect it. */
in6_clearscope(&mld->mld_addr);
- return (0);
+ return 0;
}
/*
int
mld_input(struct mbuf *m, int off, int icmp6len)
{
- struct ifnet *ifp;
- struct ip6_hdr *ip6;
- struct mld_hdr *mld;
- int mldlen;
+ struct ifnet *ifp;
+ struct ip6_hdr *ip6;
+ struct mld_hdr *mld;
+ int mldlen;
- MLD_PRINTF(("%s: called w/mbuf (%p,%d)\n", __func__, m, off));
+ MLD_PRINTF(("%s: called w/mbuf (0x%llx,%d)\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(m), off));
ifp = m->m_pkthdr.rcvif;
} else {
mldlen = sizeof(struct mld_hdr);
}
+	/* check if mldv2_query/mld_hdr fits in the first mbuf */
+ IP6_EXTHDR_CHECK(m, off, mldlen, return IPPROTO_DONE);
IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen);
if (mld == NULL) {
icmp6stat.icp6s_badlen++;
- return (IPPROTO_DONE);
+ return IPPROTO_DONE;
}
/*
case MLD_LISTENER_QUERY:
icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
if (icmp6len == sizeof(struct mld_hdr)) {
- if (mld_v1_input_query(ifp, ip6, mld) != 0)
- return (0);
+ if (mld_v1_input_query(ifp, ip6, mld) != 0) {
+ return 0;
+ }
} else if (icmp6len >= sizeof(struct mldv2_query)) {
if (mld_v2_input_query(ifp, ip6, m, off,
- icmp6len) != 0)
- return (0);
+ icmp6len) != 0) {
+ return 0;
+ }
}
break;
case MLD_LISTENER_REPORT:
icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
- if (mld_v1_input_report(ifp, ip6, mld) != 0)
- return (0);
+ if (mld_v1_input_report(ifp, m, ip6, mld) != 0) {
+ return 0;
+ }
break;
case MLDV2_LISTENER_REPORT:
icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
break;
}
- return (0);
+ return 0;
}
/*
- * MLD6 slowtimo handler.
- * Combiles both the slow and fast timer into one. We loose some responsivness but
- * allows the system to avoid having a pr_fasttimo, thus allowing for power savings.
+ * Schedule the MLD timer based on various parameters; the caller must
+ * ensure that lock ordering is maintained, as this routine acquires the
+ * MLD global lock.
*/
void
-mld_slowtimo(void)
+mld_set_timeout(struct mld_tparams *mtp)
{
- struct ifqueue scq; /* State-change packets */
- struct ifqueue qrq; /* Query response packets */
- struct ifnet *ifp;
- struct mld_ifinfo *mli;
- struct in6_multi *inm;
- int uri_fasthz = 0;
- SLIST_HEAD(, in6_multi) in6m_dthead;
+ MLD_LOCK_ASSERT_NOTHELD();
+ VERIFY(mtp != NULL);
+
+ if (mtp->qpt != 0 || mtp->it != 0 || mtp->cst != 0 || mtp->sct != 0) {
+ MLD_LOCK();
+ if (mtp->qpt != 0) {
+ querier_present_timers_running6 = 1;
+ }
+ if (mtp->it != 0) {
+ interface_timers_running6 = 1;
+ }
+ if (mtp->cst != 0) {
+ current_state_timers_running6 = 1;
+ }
+ if (mtp->sct != 0) {
+ state_change_timers_running6 = 1;
+ }
+ mld_sched_timeout();
+ MLD_UNLOCK();
+ }
+}
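+
+/*
+ * The four mld_tparams fields map one-to-one onto the timer classes
+ * above: qpt -> querier-present (v1 compatibility), it -> interface
+ * (v2 general query response), cst -> current-state (pending reports),
+ * sct -> state-change (retransmissions).  Callers fill in whichever
+ * classes they touched and invoke mld_set_timeout() once, after all
+ * MLD locks have been dropped.
+ */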
+
+/*
+ * MLD6 timer handler (per 1 second).
+ */
+static void
+mld_timeout(void *arg)
+{
+#pragma unused(arg)
+ struct ifqueue scq; /* State-change packets */
+ struct ifqueue qrq; /* Query response packets */
+ struct ifnet *ifp;
+ struct mld_ifinfo *mli;
+ struct in6_multi *inm;
+ int uri_sec = 0;
+ unsigned int genid = mld_mli_list_genid;
+
+ SLIST_HEAD(, in6_multi) in6m_dthead;
SLIST_INIT(&in6m_dthead);
+ /*
+ * Update coarse-grained networking timestamp (in sec.); the idea
+ * is to piggy-back on the timeout callout to update the counter
+ * returnable via net_uptime().
+ */
+ net_update_uptime();
+
MLD_LOCK();
- LIST_FOREACH(mli, &mli_head, mli_link) {
- MLI_LOCK(mli);
- mld_v1_process_querier_timers(mli);
- MLI_UNLOCK(mli);
- }
+ MLD_PRINTF(("%s: qpt %d, it %d, cst %d, sct %d\n", __func__,
+ querier_present_timers_running6, interface_timers_running6,
+ current_state_timers_running6, state_change_timers_running6));
/*
- * Quick check to see if any work needs to be done, in order to
- * minimize the overhead of fasttimo processing.
+ * MLDv1 querier present timer processing.
*/
- if (!current_state_timers_running6 &&
- !interface_timers_running6 &&
- !state_change_timers_running6) {
- MLD_UNLOCK();
- return;
+ if (querier_present_timers_running6) {
+ querier_present_timers_running6 = 0;
+ LIST_FOREACH(mli, &mli_head, mli_link) {
+ MLI_LOCK(mli);
+ mld_v1_process_querier_timers(mli);
+ if (mli->mli_v1_timer > 0) {
+ querier_present_timers_running6 = 1;
+ }
+ MLI_UNLOCK(mli);
+ }
}
/*
* MLDv2 General Query response timer processing.
*/
if (interface_timers_running6) {
-#if 0
MLD_PRINTF(("%s: interface timers running\n", __func__));
-#endif
interface_timers_running6 = 0;
- LIST_FOREACH(mli, &mli_head, mli_link) {
+ mli = LIST_FIRST(&mli_head);
+
+ while (mli != NULL) {
+ if (mli->mli_flags & MLIF_PROCESSED) {
+ mli = LIST_NEXT(mli, mli_link);
+ continue;
+ }
+
MLI_LOCK(mli);
+ if (mli->mli_version != MLD_VERSION_2) {
+ MLI_UNLOCK(mli);
+ mli = LIST_NEXT(mli, mli_link);
+ continue;
+ }
+ /*
+ * XXX The logic below ends up calling
+ * mld_dispatch_packet which can unlock mli
+ * and the global MLD lock.
+ * Therefore grab a reference on MLI and also
+ * check for generation count to see if we should
+ * iterate the list again.
+ */
+ MLI_ADDREF_LOCKED(mli);
+
if (mli->mli_v2_timer == 0) {
/* Do nothing. */
} else if (--mli->mli_v2_timer == 0) {
- mld_v2_dispatch_general_query(mli);
+ if (mld_v2_dispatch_general_query(mli) > 0) {
+ interface_timers_running6 = 1;
+ }
} else {
interface_timers_running6 = 1;
}
+ mli->mli_flags |= MLIF_PROCESSED;
MLI_UNLOCK(mli);
+ MLI_REMREF(mli);
+
+ if (genid != mld_mli_list_genid) {
+ MLD_PRINTF(("%s: MLD information list changed "
+ "in the middle of iteration! Restart iteration.\n",
+ __func__));
+ mli = LIST_FIRST(&mli_head);
+ genid = mld_mli_list_genid;
+ } else {
+ mli = LIST_NEXT(mli, mli_link);
+ }
}
+
+ LIST_FOREACH(mli, &mli_head, mli_link)
+ mli->mli_flags &= ~MLIF_PROCESSED;
}
+
+
if (!current_state_timers_running6 &&
- !state_change_timers_running6)
+ !state_change_timers_running6) {
goto out_locked;
+ }
current_state_timers_running6 = 0;
state_change_timers_running6 = 0;
-#if 0
+
MLD_PRINTF(("%s: state change timers running\n", __func__));
-#endif
memset(&qrq, 0, sizeof(struct ifqueue));
qrq.ifq_maxlen = MLD_MAX_G_GS_PACKETS;
* MLD host report and state-change timer processing.
* Note: Processing a v2 group timer may remove a node.
*/
- LIST_FOREACH(mli, &mli_head, mli_link) {
+ mli = LIST_FIRST(&mli_head);
+
+ while (mli != NULL) {
struct in6_multistep step;
+ if (mli->mli_flags & MLIF_PROCESSED) {
+ mli = LIST_NEXT(mli, mli_link);
+ continue;
+ }
+
MLI_LOCK(mli);
ifp = mli->mli_ifp;
- uri_fasthz = MLD_RANDOM_DELAY(mli->mli_uri * PR_SLOWHZ);
+ uri_sec = MLD_RANDOM_DELAY(mli->mli_uri);
MLI_UNLOCK(mli);
in6_multihead_lock_shared();
IN6_FIRST_MULTI(step, inm);
while (inm != NULL) {
IN6M_LOCK(inm);
- if (inm->in6m_ifp != ifp)
+ if (inm->in6m_ifp != ifp) {
goto next;
+ }
MLI_LOCK(mli);
switch (mli->mli_version) {
break;
case MLD_VERSION_2:
mld_v2_process_group_timers(mli, &qrq,
- &scq, inm, uri_fasthz);
+ &scq, inm, uri_sec);
break;
}
MLI_UNLOCK(mli);
}
in6_multihead_lock_done();
+ /*
+ * XXX The logic below ends up calling
+ * mld_dispatch_packet which can unlock mli
+ * and the global MLD lock.
+ * Therefore grab a reference on MLI and also
+ * check for generation count to see if we should
+ * iterate the list again.
+ */
MLI_LOCK(mli);
+ MLI_ADDREF_LOCKED(mli);
if (mli->mli_version == MLD_VERSION_1) {
- mld_dispatch_queue(mli, &mli->mli_v1q, 0);
+ mld_dispatch_queue_locked(mli, &mli->mli_v1q, 0);
} else if (mli->mli_version == MLD_VERSION_2) {
MLI_UNLOCK(mli);
- mld_dispatch_queue(NULL, &qrq, 0);
- mld_dispatch_queue(NULL, &scq, 0);
+ mld_dispatch_queue_locked(NULL, &qrq, 0);
+ mld_dispatch_queue_locked(NULL, &scq, 0);
VERIFY(qrq.ifq_len == 0);
VERIFY(scq.ifq_len == 0);
MLI_LOCK(mli);
*/
mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead);
VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
+ mli->mli_flags |= MLIF_PROCESSED;
MLI_UNLOCK(mli);
+ MLI_REMREF(mli);
IF_DRAIN(&qrq);
IF_DRAIN(&scq);
+
+ if (genid != mld_mli_list_genid) {
+ MLD_PRINTF(("%s: MLD information list changed "
+ "in the middle of iteration! Restart iteration.\n",
+ __func__));
+ mli = LIST_FIRST(&mli_head);
+ genid = mld_mli_list_genid;
+ } else {
+ mli = LIST_NEXT(mli, mli_link);
+ }
}
+ LIST_FOREACH(mli, &mli_head, mli_link)
+ mli->mli_flags &= ~MLIF_PROCESSED;
+
out_locked:
+ /* re-arm the timer if there's work to do */
+ mld_timeout_run = 0;
+ mld_sched_timeout();
MLD_UNLOCK();
/* Now that we're dropped all locks, release detached records */
MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
}
+static void
+mld_sched_timeout(void)
+{
+ MLD_LOCK_ASSERT_HELD();
+
+ if (!mld_timeout_run &&
+ (querier_present_timers_running6 || current_state_timers_running6 ||
+ interface_timers_running6 || state_change_timers_running6)) {
+ mld_timeout_run = 1;
+ timeout(mld_timeout, NULL, hz);
+ }
+}
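+
+/*
+ * Scheduling works as a self-rearming one-shot: mld_set_timeout() or a
+ * state change raises a timer-class flag and calls mld_sched_timeout(),
+ * which arms a single callout one second (hz) out unless one is already
+ * pending; mld_timeout() clears mld_timeout_run in its tail and re-arms
+ * itself only while work remains.
+ */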
+
/*
* Free the in6_multi reference(s) for this MLD lifecycle.
*
#pragma unused(mld_version)
int report_timer_expired;
+ MLD_LOCK_ASSERT_HELD();
IN6M_LOCK_ASSERT_HELD(inm);
MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
report_timer_expired = 1;
} else {
current_state_timers_running6 = 1;
+ /* caller will schedule timer */
return;
}
if (report_timer_expired) {
inm->in6m_state = MLD_IDLE_MEMBER;
(void) mld_v1_transmit_report(inm,
- MLD_LISTENER_REPORT);
+ MLD_LISTENER_REPORT);
IN6M_LOCK_ASSERT_HELD(inm);
MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
}
static void
mld_v2_process_group_timers(struct mld_ifinfo *mli,
struct ifqueue *qrq, struct ifqueue *scq,
- struct in6_multi *inm, const int uri_fasthz)
+ struct in6_multi *inm, const int uri_sec)
{
int query_response_timer_expired;
int state_change_retransmit_timer_expired;
+ MLD_LOCK_ASSERT_HELD();
IN6M_LOCK_ASSERT_HELD(inm);
MLI_LOCK_ASSERT_HELD(mli);
VERIFY(mli == inm->in6m_mli);
* During a transition from compatibility mode back to MLDv2,
* a group record in REPORTING state may still have its group
* timer active. This is a no-op in this function; it is easier
- * to deal with it here than to complicate the slow-timeout path.
+ * to deal with it here than to complicate the timeout path.
*/
if (inm->in6m_timer == 0) {
query_response_timer_expired = 0;
query_response_timer_expired = 1;
} else {
current_state_timers_running6 = 1;
+ /* caller will schedule timer */
}
if (inm->in6m_sctimer == 0) {
state_change_retransmit_timer_expired = 1;
} else {
state_change_timers_running6 = 1;
+ /* caller will schedule timer */
}
- /* We are in fasttimo, so be quick about it. */
+ /* We are in timer callback, so be quick about it. */
if (!state_change_retransmit_timer_expired &&
- !query_response_timer_expired)
+ !query_response_timer_expired) {
return;
+ }
switch (inm->in6m_state) {
case MLD_NOT_MEMBER:
inm->in6m_state = MLD_REPORTING_MEMBER;
in6m_clear_recorded(inm);
}
- /* FALLTHROUGH */
+ /* FALLTHROUGH */
case MLD_REPORTING_MEMBER:
case MLD_LEAVING_MEMBER:
if (state_change_retransmit_timer_expired) {
* reset the timer.
*/
if (--inm->in6m_scrv > 0) {
- inm->in6m_sctimer = uri_fasthz;
+ inm->in6m_sctimer = uri_sec;
state_change_timers_running6 = 1;
+ /* caller will schedule timer */
}
/*
* Retransmit the previously computed state-change
(void) mld_v2_merge_state_changes(inm, scq);
in6m_commit(inm);
- MLD_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__,
+ MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
ip6_sprintf(&inm->in6m_addr),
- inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
+ if_name(inm->in6m_ifp)));
/*
* If we are leaving the group for good, make sure
* Switch to a different version on the given interface,
* as per Section 9.12.
*/
-static void
+static uint32_t
mld_set_version(struct mld_ifinfo *mli, const int mld_version)
{
int old_version_timer;
MLI_LOCK_ASSERT_HELD(mli);
- MLD_PRINTF(("%s: switching to v%d on ifp %p(%s%d)\n", __func__,
- mld_version, mli->mli_ifp, mli->mli_ifp->if_name,
- mli->mli_ifp->if_unit));
+ MLD_PRINTF(("%s: switching to v%d on ifp 0x%llx(%s)\n", __func__,
+ mld_version, (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp),
+ if_name(mli->mli_ifp)));
if (mld_version == MLD_VERSION_1) {
/*
* Compute the "Older Version Querier Present" timer as per
- * Section 9.12.
+ * Section 9.12, in seconds.
*/
old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
- old_version_timer *= PR_SLOWHZ;
mli->mli_v1_timer = old_version_timer;
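+		/*
+		 * Worked example with the RFC 3810 defaults (RV = 2,
+		 * QI = 125s, QRI = 10s): the Older Version Querier
+		 * Present timeout is 2 * 125 + 10 = 260 seconds.
+		 */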
}
}
MLI_LOCK_ASSERT_HELD(mli);
+
+ return mli->mli_v1_timer;
}
/*
* Cancel pending MLDv2 timers for the given link and all groups
* joined on it; state-change, general-query, and group-query timers.
+ *
+ * Only ever called on a transition from v2 to Compatibility mode. Kill
+ * the timers stone dead (this may be expensive for large N groups); they
+ * will be restarted if Compatibility Mode deems that they must be, due
+ * to query processing.
*/
static void
mld_v2_cancel_link_timers(struct mld_ifinfo *mli)
{
- struct ifnet *ifp;
- struct in6_multi *inm;
- struct in6_multistep step;
+ struct ifnet *ifp;
+ struct in6_multi *inm;
+ struct in6_multistep step;
MLI_LOCK_ASSERT_HELD(mli);
- MLD_PRINTF(("%s: cancel v2 timers on ifp %p(%s%d)\n", __func__,
- mli->mli_ifp, mli->mli_ifp->if_name, mli->mli_ifp->if_unit));
+ MLD_PRINTF(("%s: cancel v2 timers on ifp 0x%llx(%s)\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp), if_name(mli->mli_ifp)));
/*
- * Fast-track this potentially expensive operation
- * by checking all the global 'timer pending' flags.
+ * Stop the v2 General Query Response on this link stone dead.
+ * If timer is woken up due to interface_timers_running6,
+ * the flag will be cleared if there are no pending link timers.
*/
- if (!interface_timers_running6 &&
- !state_change_timers_running6 &&
- !current_state_timers_running6)
- return;
-
mli->mli_v2_timer = 0;
+
+ /*
+ * Now clear the current-state and state-change report timers
+ * for all memberships scoped to this link.
+ */
ifp = mli->mli_ifp;
MLI_UNLOCK(mli);
IN6_FIRST_MULTI(step, inm);
while (inm != NULL) {
IN6M_LOCK(inm);
- if (inm->in6m_ifp != ifp)
+ if (inm->in6m_ifp != ifp) {
goto next;
+ }
switch (inm->in6m_state) {
case MLD_NOT_MEMBER:
case MLD_LAZY_MEMBER:
case MLD_SLEEPING_MEMBER:
case MLD_AWAKENING_MEMBER:
+ /*
+ * These states are either not relevant in v2 mode,
+ * or are unreported. Do nothing.
+ */
break;
case MLD_LEAVING_MEMBER:
/*
SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
in6m_nrele);
MLI_UNLOCK(mli);
- /* FALLTHROUGH */
+ /* FALLTHROUGH */
case MLD_G_QUERY_PENDING_MEMBER:
case MLD_SG_QUERY_PENDING_MEMBER:
in6m_clear_recorded(inm);
- /* FALLTHROUGH */
+ /* FALLTHROUGH */
case MLD_REPORTING_MEMBER:
- inm->in6m_sctimer = 0;
- inm->in6m_timer = 0;
inm->in6m_state = MLD_REPORTING_MEMBER;
- /*
- * Free any pending MLDv2 state-change records.
- */
- IF_DRAIN(&inm->in6m_scq);
break;
}
+ /*
+ * Always clear state-change and group report timers.
+ * Free any pending MLDv2 state-change records.
+ */
+ inm->in6m_sctimer = 0;
+ inm->in6m_timer = 0;
+ IF_DRAIN(&inm->in6m_scq);
next:
IN6M_UNLOCK(inm);
IN6_NEXT_MULTI(step, inm);
{
MLI_LOCK_ASSERT_HELD(mli);
- if (mli->mli_version != MLD_VERSION_2 && --mli->mli_v1_timer == 0) {
+ if (mld_v2enable && mli->mli_version != MLD_VERSION_2 &&
+ --mli->mli_v1_timer == 0) {
/*
* MLDv1 Querier Present timer expired; revert to MLDv2.
*/
- MLD_PRINTF(("%s: transition from v%d -> v%d on %p(%s%d)\n",
+ MLD_PRINTF(("%s: transition from v%d -> v%d on 0x%llx(%s)\n",
__func__, mli->mli_version, MLD_VERSION_2,
- mli->mli_ifp, mli->mli_ifp->if_name, mli->mli_ifp->if_unit));
+ (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp),
+ if_name(mli->mli_ifp)));
mli->mli_version = MLD_VERSION_2;
}
}
static int
mld_v1_transmit_report(struct in6_multi *in6m, const int type)
{
- struct ifnet *ifp;
- struct in6_ifaddr *ia;
- struct ip6_hdr *ip6;
- struct mbuf *mh, *md;
- struct mld_hdr *mld;
- int error = 0;
+ struct ifnet *ifp;
+ struct in6_ifaddr *ia;
+ struct ip6_hdr *ip6;
+ struct mbuf *mh, *md;
+ struct mld_hdr *mld;
+ int error = 0;
IN6M_LOCK_ASSERT_HELD(in6m);
MLI_LOCK_ASSERT_HELD(in6m->in6m_mli);
ifp = in6m->in6m_ifp;
/* ia may be NULL if link-local address is tentative. */
- ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
+ ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
MGETHDR(mh, M_DONTWAIT, MT_HEADER);
if (mh == NULL) {
- if (ia != NULL)
+ if (ia != NULL) {
IFA_REMREF(&ia->ia_ifa);
- return (ENOMEM);
+ }
+ return ENOMEM;
}
MGET(md, M_DONTWAIT, MT_DATA);
if (md == NULL) {
m_free(mh);
- if (ia != NULL)
+ if (ia != NULL) {
IFA_REMREF(&ia->ia_ifa);
- return (ENOMEM);
+ }
+ return ENOMEM;
}
mh->m_next = md;
ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
ip6->ip6_vfc |= IPV6_VERSION;
ip6->ip6_nxt = IPPROTO_ICMPV6;
- if (ia != NULL)
+ if (ia != NULL) {
IFA_LOCK(&ia->ia_ifa);
+ }
ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
if (ia != NULL) {
IFA_UNLOCK(&ia->ia_ifa);
mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
sizeof(struct ip6_hdr), sizeof(struct mld_hdr));
+ mld_save_context(mh, ifp);
mh->m_flags |= M_MLDV1;
-
/*
* Due to the fact that at this point we are possibly holding
* in6_multihead_lock in shared or exclusive mode, we can't call
* mld_dispatch_packet() here since that will eventually call
* ip6_output(), which will try to lock in6_multihead_lock and cause
* a deadlock.
- * Instead we defer the work to the mld_slowtimo() thread, thus
+ * Instead we defer the work to the mld_timeout() thread, thus
 * avoiding unlocking in6_multihead_lock here.
*/
- if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) {
- MLD_PRINTF(("%s: v1 outbound queue full\n", __func__));
- error = ENOMEM;
- m_freem(mh);
- } else
- IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh);
-
- return (error);
+ if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) {
+ MLD_PRINTF(("%s: v1 outbound queue full\n", __func__));
+ error = ENOMEM;
+ m_freem(mh);
+ } else {
+ IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh);
+ VERIFY(error == 0);
+ }
+
+ return error;
}
/*
*
* If delay is non-zero, and the state change is an initial multicast
* join, the state change report will be delayed by 'delay' ticks
- * in units of PR_FASTHZ if MLDv1 is active on the link; otherwise
+ * in units of seconds if MLDv1 is active on the link; otherwise
* the initial MLDv2 state change report will be delayed by whichever
* is sooner, a pending state-change timer or delay itself.
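+ *
+ * Caller pattern (sketch; names as used elsewhere in this file):
+ *
+ *	struct mld_tparams mtp;
+ *	IN6M_LOCK(inm);
+ *	error = mld_change_state(inm, &mtp, 0);
+ *	IN6M_UNLOCK(inm);
+ *	mld_set_timeout(&mtp);	/* arm any timer classes flagged above */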
*/
int
-mld_change_state(struct in6_multi *inm, const int delay)
+mld_change_state(struct in6_multi *inm, struct mld_tparams *mtp,
+ const int delay)
{
struct mld_ifinfo *mli;
struct ifnet *ifp;
int error = 0;
+ VERIFY(mtp != NULL);
+ bzero(mtp, sizeof(*mtp));
+
IN6M_LOCK_ASSERT_HELD(inm);
VERIFY(inm->in6m_mli != NULL);
MLI_LOCK_ASSERT_NOTHELD(inm->in6m_mli);
inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode));
if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
MLD_PRINTF(("%s: initial join\n", __func__));
- error = mld_initial_join(inm, mli, delay);
+ error = mld_initial_join(inm, mli, mtp, delay);
goto out;
} else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
MLD_PRINTF(("%s: final leave\n", __func__));
- mld_final_leave(inm, mli);
+ mld_final_leave(inm, mli, mtp);
goto out;
}
} else {
MLD_PRINTF(("%s: filter set change\n", __func__));
}
- error = mld_handle_state_change(inm, mli);
-
+ error = mld_handle_state_change(inm, mli, mtp);
out:
- return (error);
+ return error;
}
/*
* initial state of the membership.
*
* If the delay argument is non-zero, then we must delay sending the
- * initial state change for delay ticks (in units of PR_FASTHZ).
+ * initial state change for delay ticks (in units of seconds).
*/
static int
mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli,
- const int delay)
+ struct mld_tparams *mtp, const int delay)
{
- struct ifnet *ifp;
- struct ifqueue *ifq;
- int error, retval, syncstates;
- int odelay;
+ struct ifnet *ifp;
+ struct ifqueue *ifq;
+ int error, retval, syncstates;
+ int odelay;
IN6M_LOCK_ASSERT_HELD(inm);
MLI_LOCK_ASSERT_NOTHELD(mli);
+ VERIFY(mtp != NULL);
- MLD_PRINTF(("%s: initial join %s on ifp %p(%s%d)\n",
+ MLD_PRINTF(("%s: initial join %s on ifp 0x%llx(%s)\n",
__func__, ip6_sprintf(&inm->in6m_addr),
- inm->in6m_ifp, inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
+ (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
+ if_name(inm->in6m_ifp)));
error = 0;
syncstates = 1;
VERIFY(mli->mli_ifp == ifp);
/*
- * Groups joined on loopback or marked as 'not reported',
- * enter the MLD_SILENT_MEMBER state and
- * are never reported in any protocol exchanges.
+	 * Avoid MLD if the group is:
+	 * 1. Joined on loopback, OR
+	 * 2. On a link that is marked MLIF_SILENT, OR
+	 * 3. Link-local scoped and on a cellular interface
+	 *    (rdar://problem/19227650), OR
+	 * 4. Of a type that should not be reported (node-local
+	 *    or all-nodes link-local multicast).
* All other groups enter the appropriate state machine
* for the version in use on this link.
- * A link marked as MLIF_SILENT causes MLD to be completely
- * disabled for the link.
*/
if ((ifp->if_flags & IFF_LOOPBACK) ||
(mli->mli_flags & MLIF_SILENT) ||
+ (IFNET_IS_CELLULAR(ifp) &&
+ IN6_IS_ADDR_MC_LINKLOCAL(&inm->in6m_addr)) ||
!mld_is_addr_reported(&inm->in6m_addr)) {
MLD_PRINTF(("%s: not kicking state machine for silent group\n",
__func__));
* and delay sending the initial MLDv1 report
* by not transitioning to the IDLE state.
*/
- odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI * PR_SLOWHZ);
+ odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI);
if (delay) {
inm->in6m_timer = max(delay, odelay);
- current_state_timers_running6 = 1;
+ mtp->cst = 1;
} else {
inm->in6m_state = MLD_IDLE_MEMBER;
error = mld_v1_transmit_report(inm,
- MLD_LISTENER_REPORT);
+ MLD_LISTENER_REPORT);
IN6M_LOCK_ASSERT_HELD(inm);
MLI_LOCK_ASSERT_HELD(mli);
if (error == 0) {
inm->in6m_timer = odelay;
- current_state_timers_running6 = 1;
+ mtp->cst = 1;
}
}
break;
IF_DRAIN(ifq);
retval = mld_v2_enqueue_group_record(ifq, inm, 1,
0, 0, (mli->mli_flags & MLIF_USEALLOW));
+ mtp->cst = (ifq->ifq_len > 0);
MLD_PRINTF(("%s: enqueue record = %d\n",
__func__, retval));
if (retval <= 0) {
/*
* Schedule transmission of pending state-change
* report up to RV times for this link. The timer
- * will fire at the next mld_fasttimo (~200ms),
+		 * will fire at the next mld_timeout (1 second),
* giving us an opportunity to merge the reports.
*
* If a delay was provided to this function, only
if (inm->in6m_sctimer > 1) {
inm->in6m_sctimer =
min(inm->in6m_sctimer, delay);
- } else
+ } else {
inm->in6m_sctimer = delay;
- } else
+ }
+ } else {
inm->in6m_sctimer = 1;
- state_change_timers_running6 = 1;
-
+ }
+ mtp->sct = 1;
error = 0;
break;
}
*/
if (syncstates) {
in6m_commit(inm);
- MLD_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__,
+ MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
ip6_sprintf(&inm->in6m_addr),
- inm->in6m_ifp->if_name, ifp->if_unit));
+ if_name(inm->in6m_ifp)));
}
- return (error);
+ return error;
}
/*
* Issue an intermediate state change during the life-cycle.
*/
static int
-mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli)
+mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli,
+ struct mld_tparams *mtp)
{
- struct ifnet *ifp;
- int retval;
+ struct ifnet *ifp;
+ int retval = 0;
IN6M_LOCK_ASSERT_HELD(inm);
MLI_LOCK_ASSERT_NOTHELD(mli);
+ VERIFY(mtp != NULL);
- MLD_PRINTF(("%s: state change for %s on ifp %p(%s%d)\n",
+ MLD_PRINTF(("%s: state change for %s on ifp 0x%llx(%s)\n",
__func__, ip6_sprintf(&inm->in6m_addr),
- inm->in6m_ifp, inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
+ (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
+ if_name(inm->in6m_ifp)));
ifp = inm->in6m_ifp;
}
MLD_PRINTF(("%s: nothing to do\n", __func__));
in6m_commit(inm);
- MLD_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__,
+ MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
ip6_sprintf(&inm->in6m_addr),
- inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
- return (0);
+ if_name(inm->in6m_ifp)));
+ goto done;
}
IF_DRAIN(&inm->in6m_scq);
retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
(mli->mli_flags & MLIF_USEALLOW));
+ mtp->cst = (inm->in6m_scq.ifq_len > 0);
MLD_PRINTF(("%s: enqueue record = %d\n", __func__, retval));
if (retval <= 0) {
MLI_UNLOCK(mli);
- return (-retval);
+ retval *= -1;
+ goto done;
+ } else {
+ retval = 0;
}
+
/*
* If record(s) were enqueued, start the state-change
* report timer for this group.
*/
inm->in6m_scrv = mli->mli_rv;
inm->in6m_sctimer = 1;
- state_change_timers_running6 = 1;
+ mtp->sct = 1;
MLI_UNLOCK(mli);
- return (0);
+done:
+ return retval;
}
/*
* to INCLUDE {} for immediate transmission.
*/
static void
-mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli)
+mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli,
+ struct mld_tparams *mtp)
{
int syncstates = 1;
IN6M_LOCK_ASSERT_HELD(inm);
MLI_LOCK_ASSERT_NOTHELD(mli);
+ VERIFY(mtp != NULL);
- MLD_PRINTF(("%s: final leave %s on ifp %p(%s%d)\n",
+ MLD_PRINTF(("%s: final leave %s on ifp 0x%llx(%s)\n",
__func__, ip6_sprintf(&inm->in6m_addr),
- inm->in6m_ifp, inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
+ (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
+ if_name(inm->in6m_ifp)));
switch (inm->in6m_state) {
case MLD_NOT_MEMBER:
"mode\n", __func__);
/* NOTREACHED */
}
- mld_v1_transmit_report(inm, MLD_LISTENER_DONE);
+		/* schedule timer if the enqueue was successful */
+ mtp->cst = (mld_v1_transmit_report(inm,
+ MLD_LISTENER_DONE) == 0);
IN6M_LOCK_ASSERT_HELD(inm);
MLI_LOCK_ASSERT_HELD(mli);
/*
* Stop group timer and all pending reports.
* Immediately enqueue a state-change report
- * TO_IN {} to be sent on the next fast timeout,
+ * TO_IN {} to be sent on the next timeout,
* giving us an opportunity to merge reports.
*/
IF_DRAIN(&inm->in6m_scq);
inm->in6m_timer = 0;
inm->in6m_scrv = mli->mli_rv;
- MLD_PRINTF(("%s: Leaving %s/%s%d with %d "
+ MLD_PRINTF(("%s: Leaving %s/%s with %d "
"pending retransmissions.\n", __func__,
ip6_sprintf(&inm->in6m_addr),
- inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit,
+ if_name(inm->in6m_ifp),
inm->in6m_scrv));
if (inm->in6m_scrv == 0) {
inm->in6m_state = MLD_NOT_MEMBER;
VERIFY(inm->in6m_nrelecnt != 0);
retval = mld_v2_enqueue_group_record(
- &inm->in6m_scq, inm, 1, 0, 0,
- (mli->mli_flags & MLIF_USEALLOW));
+ &inm->in6m_scq, inm, 1, 0, 0,
+ (mli->mli_flags & MLIF_USEALLOW));
+ mtp->cst = (inm->in6m_scq.ifq_len > 0);
KASSERT(retval != 0,
("%s: enqueue record = %d\n", __func__,
- retval));
+ retval));
inm->in6m_state = MLD_LEAVING_MEMBER;
inm->in6m_sctimer = 1;
- state_change_timers_running6 = 1;
+ mtp->sct = 1;
syncstates = 0;
}
}
if (syncstates) {
in6m_commit(inm);
- MLD_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__,
+ MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
ip6_sprintf(&inm->in6m_addr),
- inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
+ if_name(inm->in6m_ifp)));
inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
- MLD_PRINTF(("%s: T1 now MCAST_UNDEFINED for %p/%s%d\n",
- __func__, &inm->in6m_addr, inm->in6m_ifp->if_name,
- inm->in6m_ifp->if_unit));
+ MLD_PRINTF(("%s: T1 now MCAST_UNDEFINED for 0x%llx/%s\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(&inm->in6m_addr),
+ if_name(inm->in6m_ifp)));
}
}
const int is_state_change, const int is_group_query,
const int is_source_query, const int use_block_allow)
{
- struct mldv2_record mr;
- struct mldv2_record *pmr;
- struct ifnet *ifp;
- struct ip6_msource *ims, *nims;
- struct mbuf *m0, *m, *md;
- int error, is_filter_list_change;
- int minrec0len, m0srcs, msrcs, nbytes, off;
- int record_has_sources;
- int now;
- int type;
- uint8_t mode;
+ struct mldv2_record mr;
+ struct mldv2_record *pmr;
+ struct ifnet *ifp;
+ struct ip6_msource *ims, *nims;
+ struct mbuf *m0, *m, *md;
+ int error, is_filter_list_change;
+ int minrec0len, m0srcs, msrcs, nbytes, off;
+ int record_has_sources;
+ int now;
+ int type;
+ uint8_t mode;
IN6M_LOCK_ASSERT_HELD(inm);
MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
* the generation of source records.
*/
if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
- inm->in6m_nsrc == 0)
+ inm->in6m_nsrc == 0) {
record_has_sources = 0;
+ }
if (is_state_change) {
/*
}
} else {
type = MLD_CHANGE_TO_INCLUDE_MODE;
- if (mode == MCAST_UNDEFINED)
+ if (mode == MCAST_UNDEFINED) {
record_has_sources = 0;
+ }
}
}
} else {
/*
* Generate the filter list changes using a separate function.
*/
- if (is_filter_list_change)
- return (mld_v2_enqueue_filter_change(ifq, inm));
+ if (is_filter_list_change) {
+ return mld_v2_enqueue_filter_change(ifq, inm);
+ }
if (type == MLD_DO_NOTHING) {
- MLD_PRINTF(("%s: nothing to do for %s/%s%d\n",
+ MLD_PRINTF(("%s: nothing to do for %s/%s\n",
__func__, ip6_sprintf(&inm->in6m_addr),
- inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
- return (0);
+ if_name(inm->in6m_ifp)));
+ return 0;
}
/*
* ideally more.
*/
minrec0len = sizeof(struct mldv2_record);
- if (record_has_sources)
+ if (record_has_sources) {
minrec0len += sizeof(struct in6_addr);
- MLD_PRINTF(("%s: queueing %s for %s/%s%d\n", __func__,
+ }
+ MLD_PRINTF(("%s: queueing %s for %s/%s\n", __func__,
mld_rec_type_to_str(type),
ip6_sprintf(&inm->in6m_addr),
- inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
+ if_name(inm->in6m_ifp)));
/*
* Check if we have a packet in the tail of the queue for this
m0 != NULL &&
(m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
(m0->m_pkthdr.len + minrec0len) <
- (ifp->if_mtu - MLD_MTUSPACE)) {
+ (ifp->if_mtu - MLD_MTUSPACE)) {
m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
- sizeof(struct mldv2_record)) /
- sizeof(struct in6_addr);
+ sizeof(struct mldv2_record)) /
+ sizeof(struct in6_addr);
m = m0;
MLD_PRINTF(("%s: use existing packet\n", __func__));
} else {
if (IF_QFULL(ifq)) {
MLD_PRINTF(("%s: outbound queue full\n", __func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
m = NULL;
m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
- if (!is_state_change && !is_group_query)
+ if (!is_state_change && !is_group_query) {
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
- if (m == NULL)
+ }
+ if (m == NULL) {
m = m_gethdr(M_DONTWAIT, MT_DATA);
- if (m == NULL)
- return (-ENOMEM);
+ }
+ if (m == NULL) {
+ return -ENOMEM;
+ }
+
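+		/*
+		 * Stash the transmit interface on the mbuf so that
+		 * mld_dispatch_packet() can recover it later via
+		 * mld_restore_context().
+		 */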
+ mld_save_context(m, ifp);
MLD_PRINTF(("%s: allocated first packet\n", __func__));
}
mr.mr_addr = inm->in6m_addr;
in6_clearscope(&mr.mr_addr);
if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
- if (m != m0)
+ if (m != m0) {
m_freem(m);
+ }
MLD_PRINTF(("%s: m_append() failed.\n", __func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
nbytes += sizeof(struct mldv2_record);
MLD_PRINTF(("%s: node is %d\n", __func__, now));
if ((now != mode) ||
(now == mode &&
- (!use_block_allow && mode == MCAST_UNDEFINED))) {
+ (!use_block_allow && mode == MCAST_UNDEFINED))) {
MLD_PRINTF(("%s: skip node\n", __func__));
continue;
}
MLD_PRINTF(("%s: append node\n", __func__));
if (!m_append(m, sizeof(struct in6_addr),
(void *)&ims->im6s_addr)) {
- if (m != m0)
+ if (m != m0) {
m_freem(m);
+ }
MLD_PRINTF(("%s: m_append() failed.\n",
__func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
nbytes += sizeof(struct in6_addr);
++msrcs;
- if (msrcs == m0srcs)
+ if (msrcs == m0srcs) {
break;
+ }
}
MLD_PRINTF(("%s: msrcs is %d this packet\n", __func__,
msrcs));
if (is_source_query && msrcs == 0) {
MLD_PRINTF(("%s: no recorded sources to report\n", __func__));
- if (m != m0)
+ if (m != m0) {
m_freem(m);
- return (0);
+ }
+ return 0;
}
/*
if (m != m0) {
MLD_PRINTF(("%s: enqueueing first packet\n", __func__));
m->m_pkthdr.vt_nrecs = 1;
- m->m_pkthdr.rcvif = ifp;
IF_ENQUEUE(ifq, m);
} else {
m->m_pkthdr.vt_nrecs++;
/*
* No further work needed if no source list in packet(s).
*/
- if (!record_has_sources)
- return (nbytes);
+ if (!record_has_sources) {
+ return nbytes;
+ }
/*
* Whilst sources remain to be announced, we need to allocate
while (nims != NULL) {
if (IF_QFULL(ifq)) {
MLD_PRINTF(("%s: outbound queue full\n", __func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
- if (m == NULL)
+ if (m == NULL) {
m = m_gethdr(M_DONTWAIT, MT_DATA);
- if (m == NULL)
- return (-ENOMEM);
+ }
+ if (m == NULL) {
+ return -ENOMEM;
+ }
+ mld_save_context(m, ifp);
md = m_getptr(m, 0, &off);
pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
MLD_PRINTF(("%s: allocated next packet\n", __func__));
if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
- if (m != m0)
+ if (m != m0) {
m_freem(m);
+ }
MLD_PRINTF(("%s: m_append() failed.\n", __func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
m->m_pkthdr.vt_nrecs = 1;
nbytes += sizeof(struct mldv2_record);
now = im6s_get_mode(inm, ims, 1);
if ((now != mode) ||
(now == mode &&
- (!use_block_allow && mode == MCAST_UNDEFINED))) {
+ (!use_block_allow && mode == MCAST_UNDEFINED))) {
MLD_PRINTF(("%s: skip node\n", __func__));
continue;
}
MLD_PRINTF(("%s: append node\n", __func__));
if (!m_append(m, sizeof(struct in6_addr),
(void *)&ims->im6s_addr)) {
- if (m != m0)
+ if (m != m0) {
m_freem(m);
+ }
MLD_PRINTF(("%s: m_append() failed.\n",
__func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
++msrcs;
- if (msrcs == m0srcs)
+ if (msrcs == m0srcs) {
break;
+ }
}
pmr->mr_numsrc = htons(msrcs);
nbytes += (msrcs * sizeof(struct in6_addr));
MLD_PRINTF(("%s: enqueueing next packet\n", __func__));
- m->m_pkthdr.rcvif = ifp;
IF_ENQUEUE(ifq, m);
}
- return (nbytes);
+ return nbytes;
}
/*
* current filter modes on each ip_msource node.
*/
typedef enum {
- REC_NONE = 0x00, /* MCAST_UNDEFINED */
- REC_ALLOW = 0x01, /* MCAST_INCLUDE */
- REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
+ REC_NONE = 0x00, /* MCAST_UNDEFINED */
+ REC_ALLOW = 0x01, /* MCAST_INCLUDE */
+ REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
REC_FULL = REC_ALLOW | REC_BLOCK
} rectype_t;
{
static const int MINRECLEN =
sizeof(struct mldv2_record) + sizeof(struct in6_addr);
- struct ifnet *ifp;
- struct mldv2_record mr;
- struct mldv2_record *pmr;
- struct ip6_msource *ims, *nims;
- struct mbuf *m, *m0, *md;
- int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
- int nallow, nblock;
- uint8_t mode, now, then;
- rectype_t crt, drt, nrt;
+ struct ifnet *ifp;
+ struct mldv2_record mr;
+ struct mldv2_record *pmr;
+ struct ip6_msource *ims, *nims;
+ struct mbuf *m, *m0, *md;
+ int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
+ int nallow, nblock;
+ uint8_t mode, now, then;
+ rectype_t crt, drt, nrt;
IN6M_LOCK_ASSERT_HELD(inm);
if (inm->in6m_nsrc == 0 ||
- (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
- return (0);
-
- ifp = inm->in6m_ifp; /* interface */
- mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */
- crt = REC_NONE; /* current group record type */
- drt = REC_NONE; /* mask of completed group record types */
- nrt = REC_NONE; /* record type for current node */
- m0srcs = 0; /* # source which will fit in current mbuf chain */
- npbytes = 0; /* # of bytes appended this packet */
- nbytes = 0; /* # of bytes appended to group's state-change queue */
- rsrcs = 0; /* # sources encoded in current record */
- schanged = 0; /* # nodes encoded in overall filter change */
- nallow = 0; /* # of source entries in ALLOW_NEW */
- nblock = 0; /* # of source entries in BLOCK_OLD */
- nims = NULL; /* next tree node pointer */
+ (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0)) {
+ return 0;
+ }
+
+ ifp = inm->in6m_ifp; /* interface */
+ mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */
+ crt = REC_NONE; /* current group record type */
+ drt = REC_NONE; /* mask of completed group record types */
+ nrt = REC_NONE; /* record type for current node */
+	m0srcs = 0;	/* # sources which will fit in current mbuf chain */
+ npbytes = 0; /* # of bytes appended this packet */
+ nbytes = 0; /* # of bytes appended to group's state-change queue */
+ rsrcs = 0; /* # sources encoded in current record */
+ schanged = 0; /* # nodes encoded in overall filter change */
+ nallow = 0; /* # of source entries in ALLOW_NEW */
+ nblock = 0; /* # of source entries in BLOCK_OLD */
+ nims = NULL; /* next tree node pointer */
/*
* For each possible filter record mode.
m0 = ifq->ifq_tail;
if (m0 != NULL &&
(m0->m_pkthdr.vt_nrecs + 1 <=
- MLD_V2_REPORT_MAXRECS) &&
+ MLD_V2_REPORT_MAXRECS) &&
(m0->m_pkthdr.len + MINRECLEN) <
- (ifp->if_mtu - MLD_MTUSPACE)) {
+ (ifp->if_mtu - MLD_MTUSPACE)) {
m = m0;
m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
- sizeof(struct mldv2_record)) /
- sizeof(struct in6_addr);
+ sizeof(struct mldv2_record)) /
+ sizeof(struct in6_addr);
MLD_PRINTF(("%s: use previous packet\n",
__func__));
} else {
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
- if (m == NULL)
+ if (m == NULL) {
m = m_gethdr(M_DONTWAIT, MT_DATA);
+ }
if (m == NULL) {
MLD_PRINTF(("%s: m_get*() failed\n",
__func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
m->m_pkthdr.vt_nrecs = 0;
+ mld_save_context(m, ifp);
m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
sizeof(struct mldv2_record)) /
sizeof(struct in6_addr);
mr.mr_addr = inm->in6m_addr;
in6_clearscope(&mr.mr_addr);
if (!m_append(m, sizeof(mr), (void *)&mr)) {
- if (m != m0)
+ if (m != m0) {
m_freem(m);
+ }
MLD_PRINTF(("%s: m_append() failed\n",
__func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
npbytes += sizeof(struct mldv2_record);
if (m != m0) {
continue;
}
nrt = (rectype_t)now;
- if (nrt == REC_NONE)
+ if (nrt == REC_NONE) {
nrt = (rectype_t)(~mode & REC_FULL);
+ }
if (schanged++ == 0) {
crt = nrt;
- } else if (crt != nrt)
+ } else if (crt != nrt) {
continue;
+ }
if (!m_append(m, sizeof(struct in6_addr),
(void *)&ims->im6s_addr)) {
- if (m != m0)
+ if (m != m0) {
m_freem(m);
+ }
MLD_PRINTF(("%s: m_append() failed\n",
__func__));
- return (-ENOMEM);
+ return -ENOMEM;
}
nallow += !!(crt == REC_ALLOW);
nblock += !!(crt == REC_BLOCK);
- if (++rsrcs == m0srcs)
+ if (++rsrcs == m0srcs) {
break;
+ }
}
/*
* If we did not append any tree nodes on this
MLD_PRINTF(("%s: m_adj(m, -mr)\n",
__func__));
m_adj(m, -((int)sizeof(
- struct mldv2_record)));
+ struct mldv2_record)));
}
continue;
}
npbytes += (rsrcs * sizeof(struct in6_addr));
- if (crt == REC_ALLOW)
+ if (crt == REC_ALLOW) {
pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
- else if (crt == REC_BLOCK)
+ } else if (crt == REC_BLOCK) {
pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
+ }
pmr->mr_numsrc = htons(rsrcs);
/*
* Count the new group record, and enqueue this
* packet if it wasn't already queued.
*/
m->m_pkthdr.vt_nrecs++;
- m->m_pkthdr.rcvif = ifp;
- if (m != m0)
+ if (m != m0) {
IF_ENQUEUE(ifq, m);
+ }
nbytes += npbytes;
} while (nims != NULL);
drt |= crt;
MLD_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__,
nallow, nblock));
- return (nbytes);
+ return nbytes;
}
static int
mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
{
- struct ifqueue *gq;
- struct mbuf *m; /* pending state-change */
- struct mbuf *m0; /* copy of pending state-change */
- struct mbuf *mt; /* last state-change in packet */
- struct mbuf *n;
- int docopy, domerge;
- u_int recslen;
+ struct ifqueue *gq;
+ struct mbuf *m; /* pending state-change */
+ struct mbuf *m0; /* copy of pending state-change */
+ struct mbuf *mt; /* last state-change in packet */
+ struct mbuf *n;
+ int docopy, domerge;
+ u_int recslen;
IN6M_LOCK_ASSERT_HELD(inm);
* If there are further pending retransmissions, make a writable
* copy of each queued state-change message before merging.
*/
- if (inm->in6m_scrv > 0)
+ if (inm->in6m_scrv > 0) {
docopy = 1;
+ }
gq = &inm->in6m_scq;
#ifdef MLD_DEBUG
if (gq->ifq_head == NULL) {
- MLD_PRINTF(("%s: WARNING: queue for inm %p is empty\n",
- __func__, inm));
+ MLD_PRINTF(("%s: WARNING: queue for inm 0x%llx is empty\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm)));
}
#endif
m->m_pkthdr.vt_nrecs <=
MLD_V2_REPORT_MAXRECS) &&
(mt->m_pkthdr.len + recslen <=
- (inm->in6m_ifp->if_mtu - MLD_MTUSPACE)))
+ (inm->in6m_ifp->if_mtu - MLD_MTUSPACE))) {
domerge = 1;
+ }
}
if (!domerge && IF_QFULL(gq)) {
MLD_PRINTF(("%s: outbound queue full, skipping whole "
- "packet %p\n", __func__, m));
+ "packet 0x%llx\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(m)));
n = m->m_nextpkt;
if (!docopy) {
IF_REMQUEUE(gq, m);
}
if (!docopy) {
- MLD_PRINTF(("%s: dequeueing %p\n", __func__, m));
+ MLD_PRINTF(("%s: dequeueing 0x%llx\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(m)));
n = m->m_nextpkt;
IF_REMQUEUE(gq, m);
m0 = m;
m = n;
} else {
- MLD_PRINTF(("%s: copying %p\n", __func__, m));
+ MLD_PRINTF(("%s: copying 0x%llx\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(m)));
m0 = m_dup(m, M_NOWAIT);
- if (m0 == NULL)
- return (ENOMEM);
+ if (m0 == NULL) {
+ return ENOMEM;
+ }
m0->m_nextpkt = NULL;
m = m->m_nextpkt;
}
if (!domerge) {
- MLD_PRINTF(("%s: queueing %p to ifscq %p)\n",
- __func__, m0, ifscq));
- m0->m_pkthdr.rcvif = inm->in6m_ifp;
+ MLD_PRINTF(("%s: queueing 0x%llx to ifscq 0x%llx)\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(m0),
+ (uint64_t)VM_KERNEL_ADDRPERM(ifscq)));
IF_ENQUEUE(ifscq, m0);
} else {
- struct mbuf *mtl; /* last mbuf of packet mt */
+ struct mbuf *mtl; /* last mbuf of packet mt */
- MLD_PRINTF(("%s: merging %p with ifscq tail %p)\n",
- __func__, m0, mt));
+ MLD_PRINTF(("%s: merging 0x%llx with ifscq tail "
+ "0x%llx)\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(m0),
+ (uint64_t)VM_KERNEL_ADDRPERM(mt)));
mtl = m_last(mt);
m0->m_flags &= ~M_PKTHDR;
}
}
- return (0);
+ return 0;
}
/*
* Respond to a pending MLDv2 General Query.
*/
-static void
+static uint32_t
mld_v2_dispatch_general_query(struct mld_ifinfo *mli)
{
- struct ifnet *ifp;
- struct in6_multi *inm;
- struct in6_multistep step;
- int retval;
+ struct ifnet *ifp;
+ struct in6_multi *inm;
+ struct in6_multistep step;
+ int retval;
MLI_LOCK_ASSERT_HELD(mli);
IN6_FIRST_MULTI(step, inm);
while (inm != NULL) {
IN6M_LOCK(inm);
- if (inm->in6m_ifp != ifp)
+ if (inm->in6m_ifp != ifp) {
goto next;
+ }
switch (inm->in6m_state) {
case MLD_NOT_MEMBER:
in6_multihead_lock_done();
MLI_LOCK(mli);
- mld_dispatch_queue(mli, &mli->mli_gq, MLD_MAX_RESPONSE_BURST);
+ mld_dispatch_queue_locked(mli, &mli->mli_gq, MLD_MAX_RESPONSE_BURST);
MLI_LOCK_ASSERT_HELD(mli);
/*
- * Slew transmission of bursts over 500ms intervals.
+	 * Slew transmission of bursts over one-second intervals.
*/
if (mli->mli_gq.ifq_head != NULL) {
mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
- MLD_RESPONSE_BURST_INTERVAL);
- interface_timers_running6 = 1;
+ MLD_RESPONSE_BURST_INTERVAL);
}
+
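+	/*
+	 * Return the interface timer value; the caller is responsible
+	 * for arming the global MLDv2 interface timer with it.
+	 */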
+ return mli->mli_v2_timer;
}
/*
static void
mld_dispatch_packet(struct mbuf *m)
{
- struct ip6_moptions *im6o;
- struct ifnet *ifp;
- struct ifnet *oifp = NULL;
- struct mbuf *m0;
- struct mbuf *md;
- struct ip6_hdr *ip6;
- struct mld_hdr *mld;
- int error;
- int off;
- int type;
-
- MLD_PRINTF(("%s: transmit %p\n", __func__, m));
+ struct ip6_moptions *im6o;
+ struct ifnet *ifp;
+ struct ifnet *oifp = NULL;
+ struct mbuf *m0;
+ struct mbuf *md;
+ struct ip6_hdr *ip6;
+ struct mld_hdr *mld;
+ int error;
+ int off;
+ int type;
+
+ MLD_PRINTF(("%s: transmit 0x%llx\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(m)));
/*
* Check if the ifnet is still attached.
*/
- ifp = m->m_pkthdr.rcvif;
+ ifp = mld_restore_context(m);
if (ifp == NULL || !ifnet_is_attached(ifp, 0)) {
- MLD_PRINTF(("%s: dropped %p as ifindex %u went away.\n",
- __func__, m, (u_int)if_index));
+ MLD_PRINTF(("%s: dropped 0x%llx as ifindex %u went away.\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(m),
+ (u_int)if_index));
m_freem(m);
ip6stat.ip6s_noroute++;
return;
}
im6o->im6o_multicast_hlim = 1;
-#if MROUTING
- im6o->im6o_multicast_loop = (ip6_mrouter != NULL);
-#else
im6o->im6o_multicast_loop = 0;
-#endif
im6o->im6o_multicast_ifp = ifp;
if (m->m_flags & M_MLDV1) {
} else {
m0 = mld_v2_encap_report(ifp, m);
if (m0 == NULL) {
- MLD_PRINTF(("%s: dropped %p\n", __func__, m));
+ MLD_PRINTF(("%s: dropped 0x%llx\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(m)));
/*
* mld_v2_encap_report() has already freed our mbuf.
*/
}
}
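+	/*
+	 * The saved interface context is no longer needed; scrub it
+	 * before the mbuf leaves MLD.
+	 */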
+ mld_scrub_context(m0);
m->m_flags &= ~(M_PROTOFLAGS);
m0->m_pkthdr.rcvif = lo_ifp;
ip6 = mtod(m0, struct ip6_hdr *);
-#if 0
- (void) in6_setscope(&ip6->ip6_dst, ifp, NULL); /* XXX LOR */
-#else
- /*
- * XXX XXX Break some KPI rules to prevent an LOR which would
- * occur if we called in6_setscope() at transmission.
- * See comments at top of file.
- */
- MLD_EMBEDSCOPE(&ip6->ip6_dst, ifp->if_index);
-#endif
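+	/*
+	 * It is now safe to set the destination scope at transmission
+	 * time; see the lock ordering comments at the top of the file.
+	 */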
+ (void) in6_setscope(&ip6->ip6_dst, ifp, NULL);
/*
* Retrieve the ICMPv6 type before handoff to ip6_output(),
mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off);
type = mld->mld_type;
+ if (ifp->if_eflags & IFEF_TXSTART) {
+ /*
+ * Use control service class if the outgoing
+ * interface supports transmit-start model.
+ */
+ (void) m_set_service_class(m0, MBUF_SC_CTL);
+ }
+
error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, im6o,
&oifp, NULL);
IM6O_REMREF(im6o);
if (error) {
- MLD_PRINTF(("%s: ip6_output(%p) = %d\n", __func__, m0, error));
- if (oifp != NULL)
+ MLD_PRINTF(("%s: ip6_output(0x%llx) = %d\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(m0), error));
+ if (oifp != NULL) {
ifnet_release(oifp);
+ }
return;
}
static struct mbuf *
mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
{
- struct mbuf *mh;
- struct mldv2_report *mld;
- struct ip6_hdr *ip6;
- struct in6_ifaddr *ia;
- int mldreclen;
+ struct mbuf *mh;
+ struct mldv2_report *mld;
+ struct ip6_hdr *ip6;
+ struct in6_ifaddr *ia;
+ int mldreclen;
VERIFY(m->m_flags & M_PKTHDR);
/*
* RFC3590: OK to send as :: or tentative during DAD.
*/
- ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
- if (ia == NULL)
+ ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
+ if (ia == NULL) {
MLD_PRINTF(("%s: warning: ia is NULL\n", __func__));
+ }
MGETHDR(mh, M_DONTWAIT, MT_HEADER);
if (mh == NULL) {
- if (ia != NULL)
+ if (ia != NULL) {
IFA_REMREF(&ia->ia_ifa);
+ }
m_freem(m);
- return (NULL);
+ return NULL;
}
MH_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));
ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
ip6->ip6_vfc |= IPV6_VERSION;
ip6->ip6_nxt = IPPROTO_ICMPV6;
- if (ia != NULL)
+ if (ia != NULL) {
IFA_LOCK(&ia->ia_ifa);
+ }
ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
if (ia != NULL) {
IFA_UNLOCK(&ia->ia_ifa);
mh->m_next = m;
mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
- return (mh);
+ return mh;
}
#ifdef MLD_DEBUG
mld_rec_type_to_str(const int type)
{
switch (type) {
- case MLD_CHANGE_TO_EXCLUDE_MODE:
- return "TO_EX";
- break;
- case MLD_CHANGE_TO_INCLUDE_MODE:
- return "TO_IN";
- break;
- case MLD_MODE_IS_EXCLUDE:
- return "MODE_EX";
- break;
- case MLD_MODE_IS_INCLUDE:
- return "MODE_IN";
- break;
- case MLD_ALLOW_NEW_SOURCES:
- return "ALLOW_NEW";
- break;
- case MLD_BLOCK_OLD_SOURCES:
- return "BLOCK_OLD";
- break;
- default:
- break;
+ case MLD_CHANGE_TO_EXCLUDE_MODE:
+ return "TO_EX";
+ case MLD_CHANGE_TO_INCLUDE_MODE:
+ return "TO_IN";
+ case MLD_MODE_IS_EXCLUDE:
+ return "MODE_EX";
+ case MLD_MODE_IS_INCLUDE:
+ return "MODE_IN";
+ case MLD_ALLOW_NEW_SOURCES:
+ return "ALLOW_NEW";
+ case MLD_BLOCK_OLD_SOURCES:
+ return "BLOCK_OLD";
+ default:
+ break;
}
return "unknown";
}
void
mld_init(void)
{
-
MLD_PRINTF(("%s: initializing\n", __func__));
- /* Setup lock group and attribute for mld6_mtx */
- mld_mtx_grp_attr = lck_grp_attr_alloc_init();
- mld_mtx_grp = lck_grp_alloc_init("mld_mtx\n", mld_mtx_grp_attr);
- mld_mtx_attr = lck_attr_alloc_init();
- lck_mtx_init(&mld_mtx, mld_mtx_grp, mld_mtx_attr);
+ /* Setup lock group and attribute for mld_mtx */
+ mld_mtx_grp_attr = lck_grp_attr_alloc_init();
+	mld_mtx_grp = lck_grp_alloc_init("mld_mtx", mld_mtx_grp_attr);
+ mld_mtx_attr = lck_attr_alloc_init();
+ lck_mtx_init(&mld_mtx, mld_mtx_grp, mld_mtx_attr);
ip6_initpktopts(&mld_po);
mld_po.ip6po_hlim = 1;
mld_po.ip6po_flags = IP6PO_DONTFRAG;
LIST_INIT(&mli_head);
- mli_size = sizeof (struct mld_ifinfo);
+ mli_size = sizeof(struct mld_ifinfo);
mli_zone = zinit(mli_size, MLI_ZONE_MAX * mli_size,
0, MLI_ZONE_NAME);
if (mli_zone == NULL) {