/*
- * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <netinet/igmp_var.h>
#include <netinet/kpi_ipfilter_var.h>
-#ifdef IGMP_DEBUG
-__inline__ char *
-inet_ntoa(struct in_addr ina)
-{
- static char buf[4*sizeof "123"];
- unsigned char *ucp = (unsigned char *)&ina;
-
- snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
- ucp[0] & 0xff,
- ucp[1] & 0xff,
- ucp[2] & 0xff,
- ucp[3] & 0xff);
- return buf;
-}
-#endif
-
SLIST_HEAD(igmp_inm_relhead, in_multi);
static void igi_initvar(struct igmp_ifinfo *, struct ifnet *, int);
static void igi_free(struct igmp_ifinfo *);
static void igi_delete(const struct ifnet *, struct igmp_inm_relhead *);
static void igmp_dispatch_queue(struct igmp_ifinfo *, struct ifqueue *,
- int, const int, struct ifnet *);
-static void igmp_final_leave(struct in_multi *, struct igmp_ifinfo *);
+ int, const int);
+static void igmp_final_leave(struct in_multi *, struct igmp_ifinfo *,
+ struct igmp_tparams *);
static int igmp_handle_state_change(struct in_multi *,
- struct igmp_ifinfo *);
-static int igmp_initial_join(struct in_multi *, struct igmp_ifinfo *);
+ struct igmp_ifinfo *, struct igmp_tparams *);
+static int igmp_initial_join(struct in_multi *, struct igmp_ifinfo *,
+ struct igmp_tparams *);
static int igmp_input_v1_query(struct ifnet *, const struct ip *,
const struct igmp *);
static int igmp_input_v2_query(struct ifnet *, const struct ip *,
const struct igmp *);
static int igmp_input_v3_query(struct ifnet *, const struct ip *,
/*const*/ struct igmpv3 *);
-static int igmp_input_v3_group_query(struct in_multi *,
+static int igmp_input_v3_group_query(struct in_multi *,
int, /*const*/ struct igmpv3 *);
-static int igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *,
- /*const*/ struct igmp *);
-static int igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *,
- /*const*/ struct igmp *);
-void igmp_sendpkt(struct mbuf *, struct ifnet *);
+static int igmp_input_v1_report(struct ifnet *, struct mbuf *,
+ /*const*/ struct ip *, /*const*/ struct igmp *);
+static int igmp_input_v2_report(struct ifnet *, struct mbuf *,
+ /*const*/ struct ip *, /*const*/ struct igmp *);
+static void igmp_sendpkt(struct mbuf *);
static __inline__ int igmp_isgroupreported(const struct in_addr);
-static struct mbuf *
- igmp_ra_alloc(void);
+static struct mbuf *igmp_ra_alloc(void);
#ifdef IGMP_DEBUG
-static const char * igmp_rec_type_to_str(const int);
+static const char *igmp_rec_type_to_str(const int);
#endif
-static void igmp_set_version(struct igmp_ifinfo *, const int);
+static uint32_t igmp_set_version(struct igmp_ifinfo *, const int);
static void igmp_flush_relq(struct igmp_ifinfo *,
struct igmp_inm_relhead *);
static int igmp_v1v2_queue_report(struct in_multi *, const int);
static void igmp_v1v2_process_group_timer(struct in_multi *, const int);
static void igmp_v1v2_process_querier_timers(struct igmp_ifinfo *);
-static void igmp_v2_update_group(struct in_multi *, const int);
+static uint32_t igmp_v2_update_group(struct in_multi *, const int);
static void igmp_v3_cancel_link_timers(struct igmp_ifinfo *);
-static void igmp_v3_dispatch_general_query(struct igmp_ifinfo *);
+static uint32_t igmp_v3_dispatch_general_query(struct igmp_ifinfo *);
static struct mbuf *
igmp_v3_encap_report(struct ifnet *, struct mbuf *);
static int igmp_v3_enqueue_group_record(struct ifqueue *,
static int sysctl_igmp_gsr SYSCTL_HANDLER_ARGS;
static int sysctl_igmp_default_version SYSCTL_HANDLER_ARGS;
-struct mbuf *m_raopt; /* Router Alert option */
+static int igmp_timeout_run; /* IGMP timer is scheduled to run */
+static void igmp_timeout(void *);
+static void igmp_sched_timeout(void);
+
+static struct mbuf *m_raopt; /* Router Alert option */
+static int querier_present_timers_running; /* IGMPv1/v2 older version
+ * querier present */
static int interface_timers_running; /* IGMPv3 general
- * query response */
+ * query response */
static int state_change_timers_running; /* IGMPv3 state-change
- * retransmit */
+ * retransmit */
static int current_state_timers_running; /* IGMPv1/v2 host
* report; IGMPv3 g/sg
* query response */
+/*
+ * Subsystem lock macros.
+ *
+ * The IGMP global lock (igmp_mtx) serializes access to the igi_head
+ * interface-info list and the module-wide *_timers_running flags; the
+ * ASSERT variants verify held/not-held ownership via lck_mtx_assert().
+ */
+#define IGMP_LOCK() \
+ lck_mtx_lock(&igmp_mtx)
+#define IGMP_LOCK_ASSERT_HELD() \
+ lck_mtx_assert(&igmp_mtx, LCK_MTX_ASSERT_OWNED)
+#define IGMP_LOCK_ASSERT_NOTHELD() \
+ lck_mtx_assert(&igmp_mtx, LCK_MTX_ASSERT_NOTOWNED)
+#define IGMP_UNLOCK() \
+ lck_mtx_unlock(&igmp_mtx)
+
static LIST_HEAD(, igmp_ifinfo) igi_head;
static struct igmpstat_v3 igmpstat_v3 = {
.igps_version = IGPS_VERSION_3,
static unsigned int igi_size; /* size of zone element */
static struct zone *igi_zone; /* zone for igmp_ifinfo */
+/* Store IGMPv3 record count in the module private scratch space */
+#define vt_nrecs pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0]
+
+/*
+ * Stash the interface pointer in the mbuf's m_pkthdr.rcvif field so it
+ * can be recovered later with igmp_restore_context() once the packet is
+ * dequeued for transmission.
+ */
+static __inline void
+igmp_save_context(struct mbuf *m, struct ifnet *ifp)
+{
+ m->m_pkthdr.rcvif = ifp;
+}
+
+/*
+ * Clear the saved context (m_pkthdr.rcvif) from an mbuf; undoes
+ * igmp_save_context() before the packet leaves the IGMP layer.
+ */
+static __inline void
+igmp_scrub_context(struct mbuf *m)
+{
+ m->m_pkthdr.rcvif = NULL;
+}
+
#ifdef IGMP_DEBUG
+/*
+ * Debug helper: format an IPv4 address given in host byte order into the
+ * caller-supplied buffer using inet_ntop(); returns buf on success or
+ * NULL on failure, per inet_ntop() semantics.
+ */
-static __inline char *
-inet_ntoa_haddr(in_addr_t haddr)
+static __inline const char *
+inet_ntop_haddr(in_addr_t haddr, char *buf, socklen_t size)
{
struct in_addr ia;
ia.s_addr = htonl(haddr);
- return (inet_ntoa(ia));
+ return (inet_ntop(AF_INET, &ia, buf, size));
}
#endif
+
+/*
+ * Restore context from a queued IGMP output chain.
+ * Return saved ifp.
+ *
+ * Counterpart of igmp_save_context(); the ifp was stashed in
+ * m_pkthdr.rcvif before the mbuf was queued.
+ */
+static __inline struct ifnet *
+igmp_restore_context(struct mbuf *m)
+{
+ return (m->m_pkthdr.rcvif);
+}
+
/*
* Retrieve or set default IGMP version.
*/
int error;
int new;
- lck_mtx_lock(&igmp_mtx);
+ IGMP_LOCK();
error = SYSCTL_OUT(req, arg1, sizeof(int));
if (error || !req->newptr)
goto out_locked;
}
- IGMP_PRINTF(("change igmp_default_version from %d to %d\n",
- igmp_default_version, new));
+ IGMP_PRINTF(("%s: change igmp_default_version from %d to %d\n",
+ __func__, igmp_default_version, new));
igmp_default_version = new;
out_locked:
- lck_mtx_unlock(&igmp_mtx);
+ IGMP_UNLOCK();
return (error);
}
int error;
int i;
- lck_mtx_lock(&igmp_mtx);
+ IGMP_LOCK();
i = igmp_gsrdelay.tv_sec;
igmp_gsrdelay.tv_sec = i;
out_locked:
- lck_mtx_unlock(&igmp_mtx);
+ IGMP_UNLOCK();
return (error);
}
if (namelen != 1)
return (EINVAL);
- lck_mtx_lock(&igmp_mtx);
+ IGMP_LOCK();
if (name[0] <= 0 || name[0] > (u_int)if_index) {
error = ENOENT;
}
out_locked:
- lck_mtx_unlock(&igmp_mtx);
+ IGMP_UNLOCK();
return (error);
}
*/
static void
igmp_dispatch_queue(struct igmp_ifinfo *igi, struct ifqueue *ifq, int limit,
- const int loop, struct ifnet *ifp)
+ const int loop)
{
struct mbuf *m;
struct ip *ip;
IF_DEQUEUE(ifq, m);
if (m == NULL)
break;
- IGMP_PRINTF(("%s: dispatch %p from %p\n", __func__, ifq, m));
+ IGMP_PRINTF(("%s: dispatch 0x%llx from 0x%llx\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifq),
+ (uint64_t)VM_KERNEL_ADDRPERM(m)));
ip = mtod(m, struct ip *);
if (loop)
m->m_flags |= M_IGMP_LOOP;
if (igi != NULL)
IGI_UNLOCK(igi);
- igmp_sendpkt(m, ifp);
+ igmp_sendpkt(m);
if (igi != NULL)
IGI_LOCK(igi);
if (--limit == 0)
{
struct igmp_ifinfo *igi;
- IGMP_PRINTF(("%s: called for ifp %p(%s)\n",
- __func__, ifp, ifp->if_name));
+ IGMP_PRINTF(("%s: called for ifp 0x%llx(%s)\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), ifp->if_name));
igi = igi_alloc(how);
if (igi == NULL)
return (NULL);
- lck_mtx_lock(&igmp_mtx);
+ IGMP_LOCK();
IGI_LOCK(igi);
igi_initvar(igi, ifp, 0);
IGI_ADDREF_LOCKED(igi); /* hold a reference for igi_head */
IGI_ADDREF_LOCKED(igi); /* hold a reference for caller */
IGI_UNLOCK(igi);
+ ifnet_lock_shared(ifp);
+ igmp_initsilent(ifp, igi);
+ ifnet_lock_done(ifp);
LIST_INSERT_HEAD(&igi_head, igi, igi_link);
- lck_mtx_unlock(&igmp_mtx);
+ IGMP_UNLOCK();
- IGMP_PRINTF(("allocate igmp_ifinfo for ifp %p(%s)\n",
- ifp, ifp->if_name));
+ IGMP_PRINTF(("%s: allocate igmp_ifinfo for ifp 0x%llx(%s)\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), ifp->if_name));
return (igi);
}
{
struct ifnet *ifp;
- lck_mtx_lock(&igmp_mtx);
+ IGMP_LOCK();
IGI_LOCK(igi);
VERIFY(!(igi->igi_debug & IFD_ATTACHED));
igi->igi_debug |= IFD_ATTACHED;
IGI_ADDREF_LOCKED(igi); /* hold a reference for igi_head */
IGI_UNLOCK(igi);
+ ifnet_lock_shared(ifp);
+ igmp_initsilent(ifp, igi);
+ ifnet_lock_done(ifp);
LIST_INSERT_HEAD(&igi_head, igi, igi_link);
- lck_mtx_unlock(&igmp_mtx);
+ IGMP_UNLOCK();
- IGMP_PRINTF(("reattached igmp_ifinfo for ifp %p(%s)\n",
- ifp, ifp->if_name));
+ IGMP_PRINTF(("%s: reattached igmp_ifinfo for ifp 0x%llx(%s)\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), ifp->if_name));
}
/*
SLIST_INIT(&inm_dthead);
- IGMP_PRINTF(("%s: called for ifp %p(%s%d)\n",
- __func__, ifp, ifp->if_name, ifp->if_unit));
+ IGMP_PRINTF(("%s: called for ifp 0x%llx(%s%d)\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), ifp->if_name, ifp->if_unit));
- lck_mtx_lock(&igmp_mtx);
+ IGMP_LOCK();
igi_delete(ifp, (struct igmp_inm_relhead *)&inm_dthead);
- lck_mtx_unlock(&igmp_mtx);
+ IGMP_UNLOCK();
/* Now that we're dropped all locks, release detached records */
IGMP_REMOVE_DETACHED_INM(&inm_dthead);
{
struct igmp_ifinfo *igi, *tigi;
- lck_mtx_assert(&igmp_mtx, LCK_MTX_ASSERT_OWNED);
+ IGMP_LOCK_ASSERT_HELD();
LIST_FOREACH_SAFE(igi, &igi_head, igi_link, tigi) {
IGI_LOCK(igi);
}
IGI_UNLOCK(igi);
}
- panic("%s: igmp_ifinfo not found for ifp %p\n", __func__, ifp);
+ panic("%s: igmp_ifinfo not found for ifp %p(%s)\n", __func__,
+ ifp, ifp->if_xname);
+}
+
+/*
+ * Set or clear IGIF_SILENT on the interface's igmp_ifinfo according to
+ * whether the interface is multicast-capable (IFF_MULTICAST).
+ *
+ * Locking: caller must hold the ifnet lock (asserted below); the igi
+ * lock must NOT be held on entry — it is taken and dropped here.
+ */
+__private_extern__ void
+igmp_initsilent(struct ifnet *ifp, struct igmp_ifinfo *igi)
+{
+ ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_OWNED);
+
+ IGI_LOCK_ASSERT_NOTHELD(igi);
+ IGI_LOCK(igi);
+ if (!(ifp->if_flags & IFF_MULTICAST))
+ igi->igi_flags |= IGIF_SILENT;
+ else
+ igi->igi_flags &= ~IGIF_SILENT;
+ IGI_UNLOCK(igi);
+}
static void
igi->igi_qri = IGMP_QRI_INIT;
igi->igi_uri = IGMP_URI_INIT;
- /* ifnet is not yet attached; no need to hold ifnet lock */
- if (!(ifp->if_flags & IFF_MULTICAST))
- igi->igi_flags |= IGIF_SILENT;
-
if (!reattach)
SLIST_INIT(&igi->igi_relinmhead);
/* Now that we're dropped all locks, release detached records */
IGMP_REMOVE_DETACHED_INM(&inm_dthead);
- IGMP_PRINTF(("%s: freeing igmp_ifinfo for ifp %p(%s%d)\n",
- __func__, ifp, ifp->if_name, ifp->if_unit));
+ IGMP_PRINTF(("%s: freeing igmp_ifinfo for ifp 0x%llx(%s)\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
igi_free(igi);
}
struct igmp_ifinfo *igi;
struct in_multi *inm;
struct in_multistep step;
+ struct igmp_tparams itp = { 0, 0, 0, 0 };
+
+ IGMP_LOCK_ASSERT_NOTHELD();
/*
* IGMPv1 Host Membership Queries SHOULD always be addressed to
if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) {
IGMPSTAT_INC(igps_rcv_badqueries);
OIGMPSTAT_INC(igps_rcv_badqueries);
- return (0);
+ goto done;
}
IGMPSTAT_INC(igps_rcv_gen_queries);
IGI_LOCK(igi);
if (igi->igi_flags & IGIF_LOOPBACK) {
- IGMP_PRINTF(("ignore v1 query on IGIF_LOOPBACK ifp %p(%s%d)\n",
- ifp, ifp->if_name, ifp->if_unit));
+ IGMP_PRINTF(("%s: ignore v1 query on IGIF_LOOPBACK "
+ "ifp 0x%llx(%s)\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
IGI_UNLOCK(igi);
- return (0);
+ goto done;
}
/*
* Switch to IGMPv1 host compatibility mode.
*/
- igmp_set_version(igi, IGMP_VERSION_1);
+ itp.qpt = igmp_set_version(igi, IGMP_VERSION_1);
IGI_UNLOCK(igi);
- IGMP_PRINTF(("process v1 query on ifp %p(%s%d)\n", ifp, ifp->if_name,
- ifp->if_unit));
+ IGMP_PRINTF(("%s: process v1 query on ifp 0x%llx(%s)\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
/*
* Start the timers in all of our group records
IN_FIRST_MULTI(step, inm);
while (inm != NULL) {
INM_LOCK(inm);
- if (inm->inm_ifp != ifp)
- goto next;
- if (inm->inm_timer != 0)
+ if (inm->inm_ifp != ifp || inm->inm_timer != 0)
goto next;
switch (inm->inm_state) {
case IGMP_SLEEPING_MEMBER:
case IGMP_AWAKENING_MEMBER:
inm->inm_state = IGMP_REPORTING_MEMBER;
- inm->inm_timer = IGMP_RANDOM_DELAY(
- IGMP_V1V2_MAX_RI * PR_SLOWHZ);
- current_state_timers_running = 1;
+ inm->inm_timer = IGMP_RANDOM_DELAY(IGMP_V1V2_MAX_RI);
+ itp.cst = 1;
break;
case IGMP_LEAVING_MEMBER:
break;
IN_NEXT_MULTI(step, inm);
}
in_multihead_lock_done();
+done:
+ igmp_set_timeout(&itp);
return (0);
}
struct in_multi *inm;
int is_general_query;
uint16_t timer;
+ struct igmp_tparams itp = { 0, 0, 0, 0 };
+
+ IGMP_LOCK_ASSERT_NOTHELD();
is_general_query = 0;
* If this was not sent to the all-hosts group, ignore it.
*/
if (!in_allhosts(ip->ip_dst))
- return (0);
+ goto done;
IGMPSTAT_INC(igps_rcv_gen_queries);
is_general_query = 1;
} else {
IGI_LOCK(igi);
if (igi->igi_flags & IGIF_LOOPBACK) {
- IGMP_PRINTF(("ignore v2 query on IGIF_LOOPBACK ifp %p(%s%d)\n",
- ifp, ifp->if_name, ifp->if_unit));
+ IGMP_PRINTF(("%s: ignore v2 query on IGIF_LOOPBACK "
+ "ifp 0x%llx(%s)\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
IGI_UNLOCK(igi);
- return(0);
+ goto done;
}
/*
* Ignore v2 query if in v1 Compatibility Mode.
*/
if (igi->igi_version == IGMP_VERSION_1) {
IGI_UNLOCK(igi);
- return (0);
+ goto done;
}
- igmp_set_version(igi, IGMP_VERSION_2);
+ itp.qpt = igmp_set_version(igi, IGMP_VERSION_2);
IGI_UNLOCK(igi);
- timer = igmp->igmp_code * PR_SLOWHZ / IGMP_TIMER_SCALE;
+ timer = igmp->igmp_code / IGMP_TIMER_SCALE;
if (timer == 0)
timer = 1;
if (is_general_query) {
struct in_multistep step;
- IGMP_PRINTF(("process v2 general query on ifp %p(%s%d)\n",
- ifp, ifp->if_name, ifp->if_unit));
+ IGMP_PRINTF(("%s: process v2 general query on ifp 0x%llx(%s)\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
/*
* For each reporting group joined on this
* interface, kick the report timer.
while (inm != NULL) {
INM_LOCK(inm);
if (inm->inm_ifp == ifp)
- igmp_v2_update_group(inm, timer);
+ itp.cst += igmp_v2_update_group(inm, timer);
INM_UNLOCK(inm);
IN_NEXT_MULTI(step, inm);
}
in_multihead_lock_done();
if (inm != NULL) {
INM_LOCK(inm);
- IGMP_PRINTF(("process v2 query %s on ifp %p(%s%d)\n",
- inet_ntoa(igmp->igmp_group), ifp, ifp->if_name,
- ifp->if_unit));
- igmp_v2_update_group(inm, timer);
+ IGMP_INET_PRINTF(igmp->igmp_group,
+ ("process v2 query %s on ifp 0x%llx(%s)\n",
+ _igmp_inet_buf,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
+ itp.cst = igmp_v2_update_group(inm, timer);
INM_UNLOCK(inm);
INM_REMREF(inm); /* from IN_LOOKUP_MULTI */
}
}
+done:
+ igmp_set_timeout(&itp);
return (0);
}
* Unlike IGMPv3, the delay per group should be jittered
* to avoid bursts of IGMPv2 reports.
*/
-static void
+static uint32_t
igmp_v2_update_group(struct in_multi *inm, const int timer)
{
- IGMP_PRINTF(("%s: %s/%s%d timer=%d\n", __func__,
- inet_ntoa(inm->inm_addr), inm->inm_ifp->if_name,
- inm->inm_ifp->if_unit, timer));
+ IGMP_INET_PRINTF(inm->inm_addr, ("%s: %s/%s timer=%d\n",
+ __func__, _igmp_inet_buf, if_name(inm->inm_ifp),
+ timer));
INM_LOCK_ASSERT_HELD(inm);
IGMP_PRINTF(("%s: ->REPORTING\n", __func__));
inm->inm_state = IGMP_REPORTING_MEMBER;
inm->inm_timer = IGMP_RANDOM_DELAY(timer);
- current_state_timers_running = 1;
break;
case IGMP_SLEEPING_MEMBER:
IGMP_PRINTF(("%s: ->AWAKENING\n", __func__));
case IGMP_LEAVING_MEMBER:
break;
}
+
+ return (inm->inm_timer);
}
/*
uint32_t maxresp, nsrc, qqi;
uint16_t timer;
uint8_t qrv;
+ struct igmp_tparams itp = { 0, 0, 0, 0 };
+
+ IGMP_LOCK_ASSERT_NOTHELD();
is_general_query = 0;
- IGMP_PRINTF(("process v3 query on ifp %p(%s%d)\n", ifp, ifp->if_name,
- ifp->if_unit));
+ IGMP_PRINTF(("%s: process v3 query on ifp 0x%llx(%s)\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
maxresp = igmpv3->igmp_code; /* in 1/10ths of a second */
if (maxresp >= 128) {
(IGMP_EXP(igmpv3->igmp_qqi) + 3);
}
- timer = maxresp * PR_SLOWHZ / IGMP_TIMER_SCALE;
+ timer = maxresp / IGMP_TIMER_SCALE;
if (timer == 0)
timer = 1;
if (!in_allhosts(ip->ip_dst) || nsrc > 0) {
IGMPSTAT_INC(igps_rcv_badqueries);
OIGMPSTAT_INC(igps_rcv_badqueries);
- return (0);
+ goto done;
}
is_general_query = 1;
} else {
IGI_LOCK(igi);
if (igi->igi_flags & IGIF_LOOPBACK) {
- IGMP_PRINTF(("ignore v3 query on IGIF_LOOPBACK ifp %p(%s%d)\n",
- ifp, ifp->if_name, ifp->if_unit));
+ IGMP_PRINTF(("%s: ignore v3 query on IGIF_LOOPBACK "
+ "ifp 0x%llx(%s)\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
IGI_UNLOCK(igi);
- return (0);
+ goto done;
}
/*
* timer expires.
*/
if (igi->igi_version != IGMP_VERSION_3) {
- IGMP_PRINTF(("ignore v3 query in v%d mode on ifp %p(%s%d)\n",
- igi->igi_version, ifp, ifp->if_name, ifp->if_unit));
+ IGMP_PRINTF(("%s: ignore v3 query in v%d mode on "
+ "ifp 0x%llx(%s)\n", __func__, igi->igi_version,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
IGI_UNLOCK(igi);
- return (0);
+ goto done;
}
- igmp_set_version(igi, IGMP_VERSION_3);
+ itp.qpt = igmp_set_version(igi, IGMP_VERSION_3);
igi->igi_rv = qrv;
igi->igi_qi = qqi;
- igi->igi_qri = maxresp;
-
+ igi->igi_qri = MAX(timer, IGMP_QRI_MIN);
- IGMP_PRINTF(("%s: qrv %d qi %d qri %d\n", __func__, qrv, qqi,
- maxresp));
+ IGMP_PRINTF(("%s: qrv %d qi %d qri %d\n", __func__, igi->igi_rv,
+ igi->igi_qi, igi->igi_qri));
if (is_general_query) {
/*
* not schedule any other reports.
* Otherwise, reset the interface timer.
*/
- IGMP_PRINTF(("process v3 general query on ifp %p(%s%d)\n",
- ifp, ifp->if_name, ifp->if_unit));
+ IGMP_PRINTF(("%s: process v3 general query on ifp 0x%llx(%s)\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
- igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
- interface_timers_running = 1;
+ itp.it = igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
}
IGI_UNLOCK(igi);
} else {
IN_LOOKUP_MULTI(&igmpv3->igmp_group, ifp, inm);
in_multihead_lock_done();
if (inm == NULL)
- return (0);
+ goto done;
INM_LOCK(inm);
-#ifndef __APPLE__
- /* TODO: need ratecheck equivalent */
if (nsrc > 0) {
if (!ratecheck(&inm->inm_lastgsrtv,
&igmp_gsrdelay)) {
IGMPSTAT_INC(igps_drop_gsr_queries);
INM_UNLOCK(inm);
INM_REMREF(inm); /* from IN_LOOKUP_MULTI */
- return (0);
+ goto done;
}
}
-#endif
- IGMP_PRINTF(("process v3 %s query on ifp %p(%s%d)\n",
- inet_ntoa(igmpv3->igmp_group), ifp, ifp->if_name,
- ifp->if_unit));
+ IGMP_INET_PRINTF(igmpv3->igmp_group,
+ ("process v3 %s query on ifp 0x%llx(%s)\n", _igmp_inet_buf,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
/*
* If there is a pending General Query response
* scheduled sooner than the selected delay, no
* group-specific or group-and-source query.
*/
IGI_LOCK(igi);
- if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
- IGI_UNLOCK(igi);
- igmp_input_v3_group_query(inm, timer, igmpv3);
- } else {
- IGI_UNLOCK(igi);
+ itp.it = igi->igi_v3_timer;
+ IGI_UNLOCK(igi);
+ if (itp.it == 0 || itp.it >= timer) {
+ (void) igmp_input_v3_group_query(inm, timer, igmpv3);
+ itp.cst = inm->inm_timer;
}
INM_UNLOCK(inm);
INM_REMREF(inm); /* from IN_LOOKUP_MULTI */
}
+done:
+ if (itp.it > 0) {
+ IGMP_PRINTF(("%s: v3 general query response scheduled in "
+ "T+%d seconds on ifp 0x%llx(%s)\n", __func__, itp.it,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
+ }
+ igmp_set_timeout(&itp);
return (0);
}
* Return <0 if any error occured. Currently this is ignored.
*/
static int
-igmp_input_v3_group_query(struct in_multi *inm,
+igmp_input_v3_group_query(struct in_multi *inm,
int timer, /*const*/ struct igmpv3 *igmpv3)
{
int retval;
}
inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
inm->inm_timer = IGMP_RANDOM_DELAY(timer);
- current_state_timers_running = 1;
return (retval);
}
if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
timer = min(inm->inm_timer, timer);
inm->inm_timer = IGMP_RANDOM_DELAY(timer);
- current_state_timers_running = 1;
return (retval);
}
__func__));
inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
inm->inm_timer = IGMP_RANDOM_DELAY(timer);
- current_state_timers_running = 1;
}
}
* NOTE: 0.0.0.0 workaround breaks const correctness.
*/
static int
-igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
+igmp_input_v1_report(struct ifnet *ifp, struct mbuf *m, /*const*/ struct ip *ip,
/*const*/ struct igmp *igmp)
{
struct in_ifaddr *ia;
IGMPSTAT_INC(igps_rcv_reports);
OIGMPSTAT_INC(igps_rcv_reports);
- if (ifp->if_flags & IFF_LOOPBACK)
+ if ((ifp->if_flags & IFF_LOOPBACK) ||
+ (m->m_pkthdr.pkt_flags & PKTF_LOOP))
return (0);
if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr) ||
}
}
- IGMP_PRINTF(("process v1 report %s on ifp %p(%s%d)\n",
- inet_ntoa(igmp->igmp_group), ifp, ifp->if_name, ifp->if_unit));
+ IGMP_INET_PRINTF(igmp->igmp_group,
+ ("process v1 report %s on ifp 0x%llx(%s)\n", _igmp_inet_buf,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
/*
* IGMPv1 report suppression.
case IGMP_IDLE_MEMBER:
case IGMP_LAZY_MEMBER:
case IGMP_AWAKENING_MEMBER:
- IGMP_PRINTF(("report suppressed for %s on ifp %p(%s%d)\n",
- inet_ntoa(igmp->igmp_group), ifp, ifp->if_name,
- ifp->if_unit));
+ IGMP_INET_PRINTF(igmp->igmp_group,
+ ("report suppressed for %s on ifp 0x%llx(%s)\n",
+ _igmp_inet_buf,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
case IGMP_SLEEPING_MEMBER:
inm->inm_state = IGMP_SLEEPING_MEMBER;
break;
case IGMP_REPORTING_MEMBER:
- IGMP_PRINTF(("report suppressed for %s on ifp %p(%s%d)\n",
- inet_ntoa(igmp->igmp_group), ifp, ifp->if_name,
- ifp->if_unit));
+ IGMP_INET_PRINTF(igmp->igmp_group,
+ ("report suppressed for %s on ifp 0x%llx(%s)\n",
+ _igmp_inet_buf,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
if (igi->igi_version == IGMP_VERSION_1)
inm->inm_state = IGMP_LAZY_MEMBER;
else if (igi->igi_version == IGMP_VERSION_2)
* NOTE: 0.0.0.0 workaround breaks const correctness.
*/
static int
-igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip,
+igmp_input_v2_report(struct ifnet *ifp, struct mbuf *m, /*const*/ struct ip *ip,
/*const*/ struct igmp *igmp)
{
struct in_ifaddr *ia;
IGMPSTAT_INC(igps_rcv_reports);
OIGMPSTAT_INC(igps_rcv_reports);
- if (ifp->if_flags & IFF_LOOPBACK) {
+ if ((ifp->if_flags & IFF_LOOPBACK) ||
+ (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
if (ia != NULL)
IFA_REMREF(&ia->ia_ifa);
return (0);
if (ia != NULL)
IFA_REMREF(&ia->ia_ifa);
- IGMP_PRINTF(("process v2 report %s on ifp %p(%s%d)\n",
- inet_ntoa(igmp->igmp_group), ifp, ifp->if_name, ifp->if_unit));
+ IGMP_INET_PRINTF(igmp->igmp_group,
+ ("process v2 report %s on ifp 0x%llx(%s)\n", _igmp_inet_buf,
+ (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
/*
* IGMPv2 report suppression.
case IGMP_REPORTING_MEMBER:
case IGMP_IDLE_MEMBER:
case IGMP_AWAKENING_MEMBER:
- IGMP_PRINTF(("report suppressed for %s on ifp %p(%s%d)\n",
- inet_ntoa(igmp->igmp_group), ifp, ifp->if_name,
- ifp->if_unit));
+ IGMP_INET_PRINTF(igmp->igmp_group,
+ ("report suppressed for %s on ifp 0x%llx(%s)\n",
+ _igmp_inet_buf, (uint64_t)VM_KERNEL_ADDRPERM(ifp),
+ if_name(ifp)));
case IGMP_LAZY_MEMBER:
inm->inm_state = IGMP_LAZY_MEMBER;
break;
int minlen;
int queryver;
- IGMP_PRINTF(("%s: called w/mbuf (%p,%d)\n", __func__, m, off));
+ IGMP_PRINTF(("%s: called w/mbuf (0x%llx,%d)\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(m), off));
ifp = m->m_pkthdr.rcvif;
IGMPSTAT_INC(igps_rcv_total);
OIGMPSTAT_INC(igps_rcv_total);
+ /* Expect 32-bit aligned data pointer on strict-align platforms */
+ MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
+
ip = mtod(m, struct ip *);
iphlen = off;
else
minlen = IGMP_MINLEN;
- M_STRUCT_GET(igmp, struct igmp *, m, off, minlen);
+ /* A bit more expensive than M_STRUCT_GET, but ensures alignment */
+ M_STRUCT_GET0(igmp, struct igmp *, m, off, minlen);
if (igmp == NULL) {
IGMPSTAT_INC(igps_rcv_tooshort);
OIGMPSTAT_INC(igps_rcv_tooshort);
return;
}
+ /* N.B.: we assume the packet was correctly aligned in ip_input. */
/*
* Validate checksum.
return;
}
igmpv3len = IGMP_V3_QUERY_MINLEN + srclen;
- M_STRUCT_GET(igmpv3, struct igmpv3 *, m,
+ /*
+ * A bit more expensive than M_STRUCT_GET,
+ * but ensures alignment.
+ */
+ M_STRUCT_GET0(igmpv3, struct igmpv3 *, m,
off, igmpv3len);
if (igmpv3 == NULL) {
IGMPSTAT_INC(igps_rcv_tooshort);
OIGMPSTAT_INC(igps_rcv_tooshort);
return;
}
+ /*
+ * N.B.: we assume the packet was correctly
+ * aligned in ip_input.
+ */
if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) {
m_freem(m);
return;
case IGMP_v1_HOST_MEMBERSHIP_REPORT:
if (!igmp_v1enable)
break;
- if (igmp_input_v1_report(ifp, ip, igmp) != 0) {
+ if (igmp_input_v1_report(ifp, m, ip, igmp) != 0) {
m_freem(m);
return;
}
case IGMP_v2_HOST_MEMBERSHIP_REPORT:
if (!igmp_v2enable)
break;
-#ifndef __APPLE__
if (!ip_checkrouteralert(m))
IGMPSTAT_INC(igps_rcv_nora);
-#endif
- if (igmp_input_v2_report(ifp, ip, igmp) != 0) {
+ if (igmp_input_v2_report(ifp, m, ip, igmp) != 0) {
m_freem(m);
return;
}
* Hosts do not need to process IGMPv3 membership reports,
* as report suppression is no longer required.
*/
-#ifndef __APPLE__
if (!ip_checkrouteralert(m))
IGMPSTAT_INC(igps_rcv_nora);
-#endif
break;
default:
break;
}
- lck_mtx_assert(&igmp_mtx, LCK_MTX_ASSERT_NOTOWNED);
+ IGMP_LOCK_ASSERT_NOTHELD();
/*
* Pass all valid IGMP packets up to any process(es) listening on a
* raw IGMP socket.
rip_input(m, off);
}
-
/*
- * IGMP slowtimo handler.
- * Combiles both the slow and fast timer into one. We loose some responsivness but
- * allows the system to avoid having a pr_fasttimo, thus allowing for power savings.
- *
+ * Schedule IGMP timer based on various parameters; caller must ensure that
+ * lock ordering is maintained as this routine acquires IGMP global lock.
+ *
+ * itp carries pending-timer indications per timer class: qpt (querier
+ * present), it (interface/general query), cst (current state), and sct
+ * (state change).  Any nonzero field sets the corresponding global
+ * *_timers_running flag, then the timeout is armed via
+ * igmp_sched_timeout() under the IGMP global lock.
*/
void
-igmp_slowtimo(void)
+igmp_set_timeout(struct igmp_tparams *itp)
{
+ IGMP_LOCK_ASSERT_NOTHELD();
+ VERIFY(itp != NULL);
+
+ if (itp->qpt != 0 || itp->it != 0 || itp->cst != 0 || itp->sct != 0) {
+ IGMP_LOCK();
+ if (itp->qpt != 0)
+ querier_present_timers_running = 1;
+ if (itp->it != 0)
+ interface_timers_running = 1;
+ if (itp->cst != 0)
+ current_state_timers_running = 1;
+ if (itp->sct != 0)
+ state_change_timers_running = 1;
+ igmp_sched_timeout();
+ IGMP_UNLOCK();
+ }
+}
+
+/*
+ * IGMP timer handler (per 1 second).
+ */
+static void
+igmp_timeout(void *arg)
+{
+#pragma unused(arg)
struct ifqueue scq; /* State-change packets */
struct ifqueue qrq; /* Query response packets */
struct ifnet *ifp;
struct igmp_ifinfo *igi;
struct in_multi *inm;
- int loop = 0, uri_fasthz = 0;
+ int loop = 0, uri_sec = 0;
SLIST_HEAD(, in_multi) inm_dthead;
SLIST_INIT(&inm_dthead);
- lck_mtx_lock(&igmp_mtx);
+ /*
+ * Update coarse-grained networking timestamp (in sec.); the idea
+ * is to piggy-back on the timeout callout to update the counter
+ * returnable via net_uptime().
+ */
+ net_update_uptime();
- LIST_FOREACH(igi, &igi_head, igi_link) {
- IGI_LOCK(igi);
- igmp_v1v2_process_querier_timers(igi);
- IGI_UNLOCK(igi);
- }
+ IGMP_LOCK();
+
+ IGMP_PRINTF(("%s: qpt %d, it %d, cst %d, sct %d\n", __func__,
+ querier_present_timers_running, interface_timers_running,
+ current_state_timers_running, state_change_timers_running));
/*
- * NOTE: previously handled by fasttimo
- *
- * Quick check to see if any work needs to be done, in order to
- * minimize the overhead of fasttimo processing.
+ * IGMPv1/v2 querier present timer processing.
*/
- if (!current_state_timers_running &&
- !interface_timers_running &&
- !state_change_timers_running) {
- lck_mtx_unlock(&igmp_mtx);
- return;
+ if (querier_present_timers_running) {
+ querier_present_timers_running = 0;
+ LIST_FOREACH(igi, &igi_head, igi_link) {
+ IGI_LOCK(igi);
+ igmp_v1v2_process_querier_timers(igi);
+ if (igi->igi_v1_timer > 0 || igi->igi_v2_timer > 0)
+ querier_present_timers_running = 1;
+ IGI_UNLOCK(igi);
+ }
}
/*
* IGMPv3 General Query response timer processing.
*/
if (interface_timers_running) {
+ IGMP_PRINTF(("%s: interface timers running\n", __func__));
interface_timers_running = 0;
LIST_FOREACH(igi, &igi_head, igi_link) {
IGI_LOCK(igi);
+ if (igi->igi_version != IGMP_VERSION_3) {
+ IGI_UNLOCK(igi);
+ continue;
+ }
if (igi->igi_v3_timer == 0) {
/* Do nothing. */
} else if (--igi->igi_v3_timer == 0) {
- igmp_v3_dispatch_general_query(igi);
+ if (igmp_v3_dispatch_general_query(igi) > 0)
+ interface_timers_running = 1;
} else {
interface_timers_running = 1;
}
memset(&scq, 0, sizeof(struct ifqueue));
scq.ifq_maxlen = IGMP_MAX_STATE_CHANGE_PACKETS;
+ IGMP_PRINTF(("%s: state change timers running\n", __func__));
+
/*
* IGMPv1/v2/v3 host report and state-change timer processing.
* Note: Processing a v3 group timer may remove a node.
IGI_LOCK(igi);
ifp = igi->igi_ifp;
loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
- uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri * PR_SLOWHZ);
+ uri_sec = IGMP_RANDOM_DELAY(igi->igi_uri);
IGI_UNLOCK(igi);
in_multihead_lock_shared();
break;
case IGMP_VERSION_3:
igmp_v3_process_group_timers(igi, &qrq,
- &scq, inm, uri_fasthz);
+ &scq, inm, uri_sec);
break;
}
IGI_UNLOCK(igi);
IGI_LOCK(igi);
if (igi->igi_version == IGMP_VERSION_1 ||
igi->igi_version == IGMP_VERSION_2) {
- igmp_dispatch_queue(igi, &igi->igi_v2q, 0, loop, ifp);
+ igmp_dispatch_queue(igi, &igi->igi_v2q, 0, loop);
} else if (igi->igi_version == IGMP_VERSION_3) {
IGI_UNLOCK(igi);
- igmp_dispatch_queue(NULL, &qrq, 0, loop, ifp);
- igmp_dispatch_queue(NULL, &scq, 0, loop, ifp);
+ igmp_dispatch_queue(NULL, &qrq, 0, loop);
+ igmp_dispatch_queue(NULL, &scq, 0, loop);
VERIFY(qrq.ifq_len == 0);
VERIFY(scq.ifq_len == 0);
IGI_LOCK(igi);
}
out_locked:
- lck_mtx_unlock(&igmp_mtx);
+ /* re-arm the timer if there's work to do */
+ igmp_timeout_run = 0;
+ igmp_sched_timeout();
+ IGMP_UNLOCK();
/* Now that we're dropped all locks, release detached records */
IGMP_REMOVE_DETACHED_INM(&inm_dthead);
}
+/*
+ * Arm the one-second IGMP timeout (igmp_timeout) if it is not already
+ * scheduled (igmp_timeout_run) and at least one class of IGMP timers
+ * has pending work.  Caller must hold the IGMP global lock.
+ */
+static void
+igmp_sched_timeout(void)
+{
+ IGMP_LOCK_ASSERT_HELD();
+
+ if (!igmp_timeout_run &&
+ (querier_present_timers_running || current_state_timers_running ||
+ interface_timers_running || state_change_timers_running)) {
+ igmp_timeout_run = 1;
+ timeout(igmp_timeout, NULL, hz);
+ }
+}
+
/*
* Free the in_multi reference(s) for this IGMP lifecycle.
*
{
int report_timer_expired;
+ IGMP_LOCK_ASSERT_HELD();
INM_LOCK_ASSERT_HELD(inm);
IGI_LOCK_ASSERT_HELD(inm->inm_igi);
report_timer_expired = 1;
} else {
current_state_timers_running = 1;
+ /* caller will schedule timer */
return;
}
static void
igmp_v3_process_group_timers(struct igmp_ifinfo *igi,
struct ifqueue *qrq, struct ifqueue *scq,
- struct in_multi *inm, const int uri_fasthz)
+ struct in_multi *inm, const int uri_sec)
{
int query_response_timer_expired;
int state_change_retransmit_timer_expired;
+ IGMP_LOCK_ASSERT_HELD();
INM_LOCK_ASSERT_HELD(inm);
IGI_LOCK_ASSERT_HELD(igi);
VERIFY(igi == inm->inm_igi);
* During a transition from v1/v2 compatibility mode back to v3,
* a group record in REPORTING state may still have its group
* timer active. This is a no-op in this function; it is easier
- * to deal with it here than to complicate the slow-timeout path.
+ * to deal with it here than to complicate the timeout path.
*/
if (inm->inm_timer == 0) {
query_response_timer_expired = 0;
query_response_timer_expired = 1;
} else {
current_state_timers_running = 1;
+ /* caller will schedule timer */
}
if (inm->inm_sctimer == 0) {
state_change_retransmit_timer_expired = 1;
} else {
state_change_timers_running = 1;
+ /* caller will schedule timer */
}
- /* We are in fasttimo, so be quick about it. */
+ /* We are in timer callback, so be quick about it. */
if (!state_change_retransmit_timer_expired &&
!query_response_timer_expired)
return;
* reset the timer.
*/
if (--inm->inm_scrv > 0) {
- inm->inm_sctimer = uri_fasthz;
+ inm->inm_sctimer = uri_sec;
state_change_timers_running = 1;
+ /* caller will schedule timer */
}
/*
* Retransmit the previously computed state-change
(void) igmp_v3_merge_state_changes(inm, scq);
inm_commit(inm);
- IGMP_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__,
- inet_ntoa(inm->inm_addr), inm->inm_ifp->if_name,
- inm->inm_ifp->if_unit));
+ IGMP_INET_PRINTF(inm->inm_addr,
+ ("%s: T1 -> T0 for %s/%s\n", __func__,
+ _igmp_inet_buf, if_name(inm->inm_ifp)));
/*
* If we are leaving the group for good, make sure
* Switch to a different IGMP version on the given interface,
* as per Section 7.2.1.
*/
-static void
+static uint32_t
igmp_set_version(struct igmp_ifinfo *igi, const int igmp_version)
{
int old_version_timer;
IGI_LOCK_ASSERT_HELD(igi);
- IGMP_PRINTF(("%s: switching to v%d on ifp %p(%s%d)\n", __func__,
- igmp_version, igi->igi_ifp, igi->igi_ifp->if_name,
- igi->igi_ifp->if_unit));
+ IGMP_PRINTF(("%s: switching to v%d on ifp 0x%llx(%s)\n", __func__,
+ igmp_version, (uint64_t)VM_KERNEL_ADDRPERM(igi->igi_ifp),
+ if_name(igi->igi_ifp)));
if (igmp_version == IGMP_VERSION_1 || igmp_version == IGMP_VERSION_2) {
/*
* Compute the "Older Version Querier Present" timer as per
- * Section 8.12.
+ * Section 8.12, in seconds.
*/
old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri;
- old_version_timer *= PR_SLOWHZ;
if (igmp_version == IGMP_VERSION_1) {
igi->igi_v1_timer = old_version_timer;
}
IGI_LOCK_ASSERT_HELD(igi);
+
+ return (MAX(igi->igi_v1_timer, igi->igi_v2_timer));
}
/*
IGI_LOCK_ASSERT_HELD(igi);
- IGMP_PRINTF(("%s: cancel v3 timers on ifp %p(%s%d)\n", __func__,
- igi->igi_ifp, igi->igi_ifp->if_name, igi->igi_ifp->if_unit));
+ IGMP_PRINTF(("%s: cancel v3 timers on ifp 0x%llx(%s)\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(igi->igi_ifp), if_name(igi->igi_ifp)));
/*
* Stop the v3 General Query Response on this link stone dead.
- * If fasttimo is woken up due to interface_timers_running,
+ * If timer is woken up due to interface_timers_running,
* the flag will be cleared if there are no pending link timers.
*/
igi->igi_v3_timer = 0;
* Revert to IGMPv3.
*/
if (igi->igi_version != IGMP_VERSION_3) {
- IGMP_PRINTF(("%s: transition from v%d -> v%d on %p(%s%d)\n",
- __func__, igi->igi_version, IGMP_VERSION_3,
- igi->igi_ifp, igi->igi_ifp->if_name,
- igi->igi_ifp->if_unit));
+ IGMP_PRINTF(("%s: transition from v%d -> v%d "
+ "on 0x%llx(%s)\n", __func__,
+ igi->igi_version, IGMP_VERSION_3,
+ (uint64_t)VM_KERNEL_ADDRPERM(igi->igi_ifp),
+ if_name(igi->igi_ifp)));
igi->igi_version = IGMP_VERSION_3;
IF_DRAIN(&igi->igi_v2q);
}
* If IGMPv2 is enabled, revert to IGMPv2.
*/
if (!igmp_v2enable) {
- IGMP_PRINTF(("%s: transition from v%d -> v%d on %p(%s%d)\n",
- __func__, igi->igi_version, IGMP_VERSION_3,
- igi->igi_ifp, igi->igi_ifp->if_name,
- igi->igi_ifp->if_unit));
+ IGMP_PRINTF(("%s: transition from v%d -> v%d "
+ "on 0x%llx(%s%d)\n", __func__,
+ igi->igi_version, IGMP_VERSION_3,
+ (uint64_t)VM_KERNEL_ADDRPERM(igi->igi_ifp),
+ igi->igi_ifp->if_name, igi->igi_ifp->if_unit));
igi->igi_v2_timer = 0;
igi->igi_version = IGMP_VERSION_3;
IF_DRAIN(&igi->igi_v2q);
} else {
--igi->igi_v2_timer;
if (igi->igi_version != IGMP_VERSION_2) {
- IGMP_PRINTF(("%s: transition from v%d -> v%d on %p(%s%d)\n",
- __func__, igi->igi_version, IGMP_VERSION_2,
- igi->igi_ifp, igi->igi_ifp->if_name,
- igi->igi_ifp->if_unit));
+ IGMP_PRINTF(("%s: transition from v%d -> v%d "
+ "on 0x%llx(%s)\n", __func__,
+ igi->igi_version, IGMP_VERSION_2,
+ (uint64_t)VM_KERNEL_ADDRPERM(igi->igi_ifp),
+ if_name(igi->igi_ifp)));
igi->igi_version = IGMP_VERSION_2;
IF_DRAIN(&igi->igi_gq);
+ igmp_v3_cancel_link_timers(igi);
}
}
} else if (igi->igi_v1_timer > 0) {
* If IGMPv1 is enabled, reset IGMPv2 timer if running.
*/
if (!igmp_v1enable) {
- IGMP_PRINTF(("%s: transition from v%d -> v%d on %p(%s%d)\n",
- __func__, igi->igi_version, IGMP_VERSION_3,
- igi->igi_ifp, igi->igi_ifp->if_name,
- igi->igi_ifp->if_unit));
+ IGMP_PRINTF(("%s: transition from v%d -> v%d "
+ "on 0x%llx(%s%d)\n", __func__,
+ igi->igi_version, IGMP_VERSION_3,
+ (uint64_t)VM_KERNEL_ADDRPERM(igi->igi_ifp),
+ igi->igi_ifp->if_name, igi->igi_ifp->if_unit));
igi->igi_v1_timer = 0;
igi->igi_version = IGMP_VERSION_3;
IF_DRAIN(&igi->igi_v2q);
--igi->igi_v1_timer;
}
if (igi->igi_v2_timer > 0) {
- IGMP_PRINTF(("%s: cancel v2 timer on %p(%s%d)\n",
- __func__, igi->igi_ifp, igi->igi_ifp->if_name,
- igi->igi_ifp->if_unit));
+ IGMP_PRINTF(("%s: cancel v2 timer on 0x%llx(%s%d)\n",
+ __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(igi->igi_ifp),
+ igi->igi_ifp->if_name, igi->igi_ifp->if_unit));
igi->igi_v2_timer = 0;
}
}
else
ip->ip_dst = inm->inm_addr;
+ igmp_save_context(m, ifp);
+
m->m_flags |= M_IGMPV2;
if (inm->inm_igi->igi_flags & IGIF_LOOPBACK)
m->m_flags |= M_IGMP_LOOP;
* in_multihead_lock in shared or exclusive mode, we can't call
* igmp_sendpkt() here since that will eventually call ip_output(),
* which will try to lock in_multihead_lock and cause a deadlock.
- * Instead we defer the work to the igmp_slowtimo() thread, thus
+ * Instead we defer the work to the igmp_timeout() thread, thus
* avoiding unlocking in_multihead_lock here.
*/
if (IF_QFULL(&inm->inm_igi->igi_v2q)) {
IGMP_PRINTF(("%s: v1/v2 outbound queue full\n", __func__));
error = ENOMEM;
m_freem(m);
- } else
+ } else {
IF_ENQUEUE(&inm->inm_igi->igi_v2q, m);
-
+ VERIFY(error == 0);
+ }
return (error);
}
* compute source filter lists.
*/
int
-igmp_change_state(struct in_multi *inm)
+igmp_change_state(struct in_multi *inm, struct igmp_tparams *itp)
{
struct igmp_ifinfo *igi;
struct ifnet *ifp;
int error = 0;
+ VERIFY(itp != NULL);
+ bzero(itp, sizeof (*itp));
+
INM_LOCK_ASSERT_HELD(inm);
VERIFY(inm->inm_igi != NULL);
IGI_LOCK_ASSERT_NOTHELD(inm->inm_igi);
inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode));
if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) {
IGMP_PRINTF(("%s: initial join\n", __func__));
- error = igmp_initial_join(inm, igi);
+ error = igmp_initial_join(inm, igi, itp);
goto out;
} else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) {
IGMP_PRINTF(("%s: final leave\n", __func__));
- igmp_final_leave(inm, igi);
+ igmp_final_leave(inm, igi, itp);
goto out;
}
} else {
IGMP_PRINTF(("%s: filter set change\n", __func__));
}
- error = igmp_handle_state_change(inm, igi);
+ error = igmp_handle_state_change(inm, igi, itp);
out:
return (error);
}
* initial state of the membership.
*/
static int
-igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi)
+igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi,
+ struct igmp_tparams *itp)
{
struct ifnet *ifp;
struct ifqueue *ifq;
INM_LOCK_ASSERT_HELD(inm);
IGI_LOCK_ASSERT_NOTHELD(igi);
+ VERIFY(itp != NULL);
- IGMP_PRINTF(("%s: initial join %s on ifp %p(%s%d)\n",
- __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
- inm->inm_ifp->if_name, inm->inm_ifp->if_unit));
+ IGMP_INET_PRINTF(inm->inm_addr,
+ ("%s: initial join %s on ifp 0x%llx(%s)\n", __func__,
+ _igmp_inet_buf, (uint64_t)VM_KERNEL_ADDRPERM(inm->inm_ifp),
+ if_name(inm->inm_ifp)));
error = 0;
syncstates = 1;
IGI_LOCK_ASSERT_HELD(igi);
if (error == 0) {
- inm->inm_timer = IGMP_RANDOM_DELAY(
- IGMP_V1V2_MAX_RI * PR_SLOWHZ);
- current_state_timers_running = 1;
+ inm->inm_timer =
+ IGMP_RANDOM_DELAY(IGMP_V1V2_MAX_RI);
+ itp->cst = 1;
}
break;
IF_DRAIN(ifq);
retval = igmp_v3_enqueue_group_record(ifq, inm, 1,
0, 0);
+ itp->cst = (ifq->ifq_len > 0);
IGMP_PRINTF(("%s: enqueue record = %d\n",
__func__, retval));
if (retval <= 0) {
/*
* Schedule transmission of pending state-change
* report up to RV times for this link. The timer
- * will fire at the next igmp_fasttimo (~200ms),
+ * will fire at the next igmp_timeout (1 second),
* giving us an opportunity to merge the reports.
*/
if (igi->igi_flags & IGIF_LOOPBACK) {
inm->inm_scrv = igi->igi_rv;
}
inm->inm_sctimer = 1;
- state_change_timers_running = 1;
+ itp->sct = 1;
error = 0;
break;
*/
if (syncstates) {
inm_commit(inm);
- IGMP_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__,
- inet_ntoa(inm->inm_addr), inm->inm_ifp->if_name,
- inm->inm_ifp->if_unit));
+ IGMP_INET_PRINTF(inm->inm_addr,
+ ("%s: T1 -> T0 for %s/%s\n", __func__,
+ _igmp_inet_buf, if_name(inm->inm_ifp)));
}
return (error);
* Issue an intermediate state change during the IGMP life-cycle.
*/
static int
-igmp_handle_state_change(struct in_multi *inm, struct igmp_ifinfo *igi)
+igmp_handle_state_change(struct in_multi *inm, struct igmp_ifinfo *igi,
+ struct igmp_tparams *itp)
{
struct ifnet *ifp;
- int retval;
+ int retval = 0;
INM_LOCK_ASSERT_HELD(inm);
IGI_LOCK_ASSERT_NOTHELD(igi);
+ VERIFY(itp != NULL);
- IGMP_PRINTF(("%s: state change for %s on ifp %p(%s%d)\n",
- __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
- inm->inm_ifp->if_name, inm->inm_ifp->if_unit));
+ IGMP_INET_PRINTF(inm->inm_addr,
+ ("%s: state change for %s on ifp 0x%llx(%s)\n", __func__,
+ _igmp_inet_buf, (uint64_t)VM_KERNEL_ADDRPERM(inm->inm_ifp),
+ if_name(inm->inm_ifp)));
ifp = inm->inm_ifp;
}
IGMP_PRINTF(("%s: nothing to do\n", __func__));
inm_commit(inm);
- IGMP_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
- inet_ntoa(inm->inm_addr), inm->inm_ifp->if_name));
- return (0);
+ IGMP_INET_PRINTF(inm->inm_addr,
+ ("%s: T1 -> T0 for %s/%s\n", __func__,
+ _igmp_inet_buf, inm->inm_ifp->if_name));
+ goto done;
}
IF_DRAIN(&inm->inm_scq);
retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
+ itp->cst = (inm->inm_scq.ifq_len > 0);
IGMP_PRINTF(("%s: enqueue record = %d\n", __func__, retval));
if (retval <= 0) {
IGI_UNLOCK(igi);
- return (-retval);
+ retval *= -1;
+ goto done;
}
/*
* If record(s) were enqueued, start the state-change
*/
inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv);
inm->inm_sctimer = 1;
- state_change_timers_running = 1;
+ itp->sct = 1;
IGI_UNLOCK(igi);
-
- return (0);
+done:
+ return (retval);
}
/*
* to INCLUDE {} for immediate transmission.
*/
static void
-igmp_final_leave(struct in_multi *inm, struct igmp_ifinfo *igi)
+igmp_final_leave(struct in_multi *inm, struct igmp_ifinfo *igi,
+ struct igmp_tparams *itp)
{
int syncstates = 1;
INM_LOCK_ASSERT_HELD(inm);
IGI_LOCK_ASSERT_NOTHELD(igi);
+ VERIFY(itp != NULL);
- IGMP_PRINTF(("%s: final leave %s on ifp %p(%s%d)\n",
- __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
- inm->inm_ifp->if_name, inm->inm_ifp->if_unit));
+ IGMP_INET_PRINTF(inm->inm_addr,
+ ("%s: final leave %s on ifp 0x%llx(%s)\n", __func__,
+ _igmp_inet_buf, (uint64_t)VM_KERNEL_ADDRPERM(inm->inm_ifp),
+ if_name(inm->inm_ifp)));
switch (inm->inm_state) {
case IGMP_NOT_MEMBER:
"mode\n", __func__);
/* NOTREACHED */
}
- igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE);
+ /* schedule timer if enqueue is successful */
+ itp->cst = (igmp_v1v2_queue_report(inm,
+ IGMP_HOST_LEAVE_MESSAGE) == 0);
INM_LOCK_ASSERT_HELD(inm);
IGI_LOCK_ASSERT_HELD(igi);
/*
* Stop group timer and all pending reports.
* Immediately enqueue a state-change report
- * TO_IN {} to be sent on the next fast timeout,
+ * TO_IN {} to be sent on the next timeout,
* giving us an opportunity to merge reports.
*/
IF_DRAIN(&inm->inm_scq);
} else {
inm->inm_scrv = igi->igi_rv;
}
- IGMP_PRINTF(("%s: Leaving %s/%s%d with %d "
+ IGMP_INET_PRINTF(inm->inm_addr,
+ ("%s: Leaving %s/%s with %d "
"pending retransmissions.\n", __func__,
- inet_ntoa(inm->inm_addr),
- inm->inm_ifp->if_name, inm->inm_ifp->if_unit,
+ _igmp_inet_buf, if_name(inm->inm_ifp),
inm->inm_scrv));
if (inm->inm_scrv == 0) {
inm->inm_state = IGMP_NOT_MEMBER;
retval = igmp_v3_enqueue_group_record(
&inm->inm_scq, inm, 1, 0, 0);
+ itp->cst = (inm->inm_scq.ifq_len > 0);
KASSERT(retval != 0,
("%s: enqueue record = %d\n", __func__,
retval));
inm->inm_state = IGMP_LEAVING_MEMBER;
inm->inm_sctimer = 1;
- state_change_timers_running = 1;
+ itp->sct = 1;
syncstates = 0;
}
}
if (syncstates) {
inm_commit(inm);
- IGMP_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__,
- inet_ntoa(inm->inm_addr), inm->inm_ifp->if_name,
- inm->inm_ifp->if_unit));
+ IGMP_INET_PRINTF(inm->inm_addr,
+ ("%s: T1 -> T0 for %s/%s\n", __func__,
+ _igmp_inet_buf, if_name(inm->inm_ifp)));
inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
- IGMP_PRINTF(("%s: T1 now MCAST_UNDEFINED for %s/%s%d\n",
- __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp->if_name,
- inm->inm_ifp->if_unit));
+ IGMP_INET_PRINTF(inm->inm_addr,
+ ("%s: T1 now MCAST_UNDEFINED for %s/%s\n",
+ __func__, _igmp_inet_buf, if_name(inm->inm_ifp)));
}
}
int type;
in_addr_t naddr;
uint8_t mode;
+ u_int16_t ig_numsrc;
INM_LOCK_ASSERT_HELD(inm);
IGI_LOCK_ASSERT_HELD(inm->inm_igi);
return (igmp_v3_enqueue_filter_change(ifq, inm));
if (type == IGMP_DO_NOTHING) {
- IGMP_PRINTF(("%s: nothing to do for %s/%s%d\n",
- __func__, inet_ntoa(inm->inm_addr),
- inm->inm_ifp->if_name, inm->inm_ifp->if_unit));
+ IGMP_INET_PRINTF(inm->inm_addr,
+ ("%s: nothing to do for %s/%s\n",
+ __func__, _igmp_inet_buf,
+ if_name(inm->inm_ifp)));
return (0);
}
if (record_has_sources)
minrec0len += sizeof(in_addr_t);
- IGMP_PRINTF(("%s: queueing %s for %s/%s%d\n", __func__,
- igmp_rec_type_to_str(type), inet_ntoa(inm->inm_addr),
- inm->inm_ifp->if_name, inm->inm_ifp->if_unit));
+ IGMP_INET_PRINTF(inm->inm_addr,
+ ("%s: queueing %s for %s/%s\n", __func__,
+ igmp_rec_type_to_str(type), _igmp_inet_buf,
+ if_name(inm->inm_ifp)));
/*
* Check if we have a packet in the tail of the queue for this
if (m == NULL)
return (-ENOMEM);
+ igmp_save_context(m, ifp);
+
IGMP_PRINTF(("%s: allocated first packet\n", __func__));
}
if (record_has_sources) {
if (m == m0) {
md = m_last(m);
- pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
- md->m_len - nbytes);
+ pig = (struct igmp_grouprec *)(void *)
+ (mtod(md, uint8_t *) + md->m_len - nbytes);
} else {
md = m_getptr(m, 0, &off);
- pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
- off);
+ pig = (struct igmp_grouprec *)(void *)
+ (mtod(md, uint8_t *) + off);
}
msrcs = 0;
RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) {
- IGMP_PRINTF(("%s: visit node %s\n", __func__,
- inet_ntoa_haddr(ims->ims_haddr)));
+#ifdef IGMP_DEBUG
+ char buf[MAX_IPv4_STR_LEN];
+
+ inet_ntop_haddr(ims->ims_haddr, buf, sizeof(buf));
+ IGMP_PRINTF(("%s: visit node %s\n", __func__, buf));
+#endif
now = ims_get_mode(inm, ims, 1);
IGMP_PRINTF(("%s: node is %d\n", __func__, now));
if ((now != mode) ||
}
IGMP_PRINTF(("%s: msrcs is %d this packet\n", __func__,
msrcs));
- pig->ig_numsrc = htons(msrcs);
+ ig_numsrc = htons(msrcs);
+ bcopy(&ig_numsrc, &pig->ig_numsrc, sizeof (ig_numsrc));
nbytes += (msrcs * sizeof(in_addr_t));
}
if (m != m0) {
IGMP_PRINTF(("%s: enqueueing first packet\n", __func__));
m->m_pkthdr.vt_nrecs = 1;
- m->m_pkthdr.rcvif = ifp;
IF_ENQUEUE(ifq, m);
} else {
m->m_pkthdr.vt_nrecs++;
}
if (m == NULL)
return (-ENOMEM);
+ igmp_save_context(m, ifp);
md = m_getptr(m, 0, &off);
- pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off);
+ pig = (struct igmp_grouprec *)(void *)
+ (mtod(md, uint8_t *) + off);
IGMP_PRINTF(("%s: allocated next packet\n", __func__));
if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
msrcs = 0;
RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
- IGMP_PRINTF(("%s: visit node %s\n", __func__,
- inet_ntoa_haddr(ims->ims_haddr)));
+#ifdef IGMP_DEBUG
+ char buf[MAX_IPv4_STR_LEN];
+
+ inet_ntop_haddr(ims->ims_haddr, buf, sizeof(buf));
+ IGMP_PRINTF(("%s: visit node %s\n", __func__, buf));
+#endif
now = ims_get_mode(inm, ims, 1);
if ((now != mode) ||
(now == mode && mode == MCAST_UNDEFINED)) {
if (msrcs == m0srcs)
break;
}
- pig->ig_numsrc = htons(msrcs);
+ ig_numsrc = htons(msrcs);
+ bcopy(&ig_numsrc, &pig->ig_numsrc, sizeof (ig_numsrc));
nbytes += (msrcs * sizeof(in_addr_t));
IGMP_PRINTF(("%s: enqueueing next packet\n", __func__));
- m->m_pkthdr.rcvif = ifp;
IF_ENQUEUE(ifq, m);
}
int nallow, nblock;
uint8_t mode, now, then;
rectype_t crt, drt, nrt;
+ u_int16_t ig_numsrc;
INM_LOCK_ASSERT_HELD(inm);
return (-ENOMEM);
}
m->m_pkthdr.vt_nrecs = 0;
+ igmp_save_context(m, ifp);
m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
sizeof(struct igmp_grouprec)) /
sizeof(in_addr_t);
/* new packet; offset in c hain */
md = m_getptr(m, npbytes -
sizeof(struct igmp_grouprec), &off);
- pig = (struct igmp_grouprec *)(mtod(md,
+ pig = (struct igmp_grouprec *)(void *)(mtod(md,
uint8_t *) + off);
} else {
/* current packet; offset from last append */
md = m_last(m);
- pig = (struct igmp_grouprec *)(mtod(md,
+ pig = (struct igmp_grouprec *)(void *)(mtod(md,
uint8_t *) + md->m_len -
sizeof(struct igmp_grouprec));
}
if (nims == NULL)
nims = RB_MIN(ip_msource_tree, &inm->inm_srcs);
RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
- IGMP_PRINTF(("%s: visit node %s\n",
- __func__, inet_ntoa_haddr(ims->ims_haddr)));
+#ifdef IGMP_DEBUG
+ char buf[MAX_IPv4_STR_LEN];
+
+ inet_ntop_haddr(ims->ims_haddr, buf, sizeof(buf));
+ IGMP_PRINTF(("%s: visit node %s\n", __func__, buf));
+#endif
now = ims_get_mode(inm, ims, 1);
then = ims_get_mode(inm, ims, 0);
IGMP_PRINTF(("%s: mode: t0 %d, t1 %d\n",
pig->ig_type = IGMP_ALLOW_NEW_SOURCES;
else if (crt == REC_BLOCK)
pig->ig_type = IGMP_BLOCK_OLD_SOURCES;
- pig->ig_numsrc = htons(rsrcs);
+ ig_numsrc = htons(rsrcs);
+ bcopy(&ig_numsrc, &pig->ig_numsrc, sizeof (ig_numsrc));
/*
* Count the new group record, and enqueue this
* packet if it wasn't already queued.
*/
m->m_pkthdr.vt_nrecs++;
- m->m_pkthdr.rcvif = ifp;
if (m != m0)
IF_ENQUEUE(ifq, m);
nbytes += npbytes;
gq = &inm->inm_scq;
#ifdef IGMP_DEBUG
if (gq->ifq_head == NULL) {
- IGMP_PRINTF(("%s: WARNING: queue for inm %p is empty\n",
- __func__, inm));
+ IGMP_PRINTF(("%s: WARNING: queue for inm 0x%llx is empty\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm)));
}
#endif
if (!domerge && IF_QFULL(gq)) {
IGMP_PRINTF(("%s: outbound queue full, skipping whole "
- "packet %p\n", __func__, m));
+ "packet 0x%llx\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(m)));
n = m->m_nextpkt;
if (!docopy) {
IF_REMQUEUE(gq, m);
}
if (!docopy) {
- IGMP_PRINTF(("%s: dequeueing %p\n", __func__, m));
+ IGMP_PRINTF(("%s: dequeueing 0x%llx\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(m)));
n = m->m_nextpkt;
IF_REMQUEUE(gq, m);
m0 = m;
m = n;
} else {
- IGMP_PRINTF(("%s: copying %p\n", __func__, m));
+ IGMP_PRINTF(("%s: copying 0x%llx\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(m)));
m0 = m_dup(m, M_NOWAIT);
if (m0 == NULL)
return (ENOMEM);
}
if (!domerge) {
- IGMP_PRINTF(("%s: queueing %p to ifscq %p)\n",
- __func__, m0, ifscq));
- m0->m_pkthdr.rcvif = inm->inm_ifp;
+ IGMP_PRINTF(("%s: queueing 0x%llx to ifscq 0x%llx)\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(m0),
+ (uint64_t)VM_KERNEL_ADDRPERM(ifscq)));
IF_ENQUEUE(ifscq, m0);
} else {
struct mbuf *mtl; /* last mbuf of packet mt */
- IGMP_PRINTF(("%s: merging %p with ifscq tail %p)\n",
- __func__, m0, mt));
+ IGMP_PRINTF(("%s: merging 0x%llx with ifscq tail "
+ "0x%llx)\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(m0),
+ (uint64_t)VM_KERNEL_ADDRPERM(mt)));
mtl = m_last(mt);
m0->m_flags &= ~M_PKTHDR;
/*
* Respond to a pending IGMPv3 General Query.
*/
-static void
+static uint32_t
igmp_v3_dispatch_general_query(struct igmp_ifinfo *igi)
{
struct ifnet *ifp;
IGI_LOCK(igi);
loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
igmp_dispatch_queue(igi, &igi->igi_gq, IGMP_MAX_RESPONSE_BURST,
- loop, ifp);
+ loop);
IGI_LOCK_ASSERT_HELD(igi);
/*
- * Slew transmission of bursts over 500ms intervals.
+ * Slew transmission of bursts over 1 second intervals.
*/
if (igi->igi_gq.ifq_head != NULL) {
igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY(
IGMP_RESPONSE_BURST_INTERVAL);
- interface_timers_running = 1;
}
+
+ return (igi->igi_v3_timer);
}
/*
*
* Must not be called with inm_lock or igi_lock held.
*/
-void
-igmp_sendpkt(struct mbuf *m, struct ifnet *ifp)
+static void
+igmp_sendpkt(struct mbuf *m)
{
struct ip_moptions *imo;
struct mbuf *ipopts, *m0;
- int error;
+ int error;
struct route ro;
+ struct ifnet *ifp;
- IGMP_PRINTF(("%s: transmit %p\n", __func__, m));
+ IGMP_PRINTF(("%s: transmit 0x%llx\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(m)));
+ ifp = igmp_restore_context(m);
/*
* Check if the ifnet is still attached.
*/
if (ifp == NULL || !ifnet_is_attached(ifp, 0)) {
- IGMP_PRINTF(("%s: dropped %p as ifp u went away.\n",
- __func__, m));
+ IGMP_PRINTF(("%s: dropped 0x%llx as ifp went away.\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(m)));
m_freem(m);
OSAddAtomic(1, &ipstat.ips_noroute);
return;
imo->imo_multicast_ttl = 1;
imo->imo_multicast_vif = -1;
-#if MROUTING
- imo->imo_multicast_loop = (ip_mrouter != NULL);
-#else
imo->imo_multicast_loop = 0;
-#endif
/*
* If the user requested that IGMP traffic be explicitly
* already freed the original mbuf chain.
* This means that we don't have to m_freem(m) here.
*/
- IGMP_PRINTF(("%s: dropped %p\n", __func__, m));
+ IGMP_PRINTF(("%s: dropped 0x%llx\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(m)));
IMO_REMREF(imo);
atomic_add_32(&ipstat.ips_odropped, 1);
return;
}
}
+ igmp_scrub_context(m0);
m->m_flags &= ~(M_PROTOFLAGS | M_IGMP_LOOP);
m0->m_pkthdr.rcvif = lo_ifp;
#ifdef MAC
mac_netinet_igmp_send(ifp, m0);
#endif
+
+ if (ifp->if_eflags & IFEF_TXSTART) {
+ /*
+ * Use control service class if the interface supports
+ * transmit-start model.
+ */
+ (void) m_set_service_class(m0, MBUF_SC_CTL);
+ }
bzero(&ro, sizeof (ro));
error = ip_output(m0, ipopts, &ro, 0, imo, NULL);
- if (ro.ro_rt != NULL) {
- rtfree(ro.ro_rt);
- ro.ro_rt = NULL;
- }
+ ROUTE_RELEASE(&ro);
IMO_REMREF(imo);
if (error) {
- IGMP_PRINTF(("%s: ip_output(%p) = %d\n", __func__, m0, error));
+ IGMP_PRINTF(("%s: ip_output(0x%llx) = %d\n", __func__,
+ (uint64_t)VM_KERNEL_ADDRPERM(m0), error));
return;
}
if (m->m_flags & M_IGMPV3_HDR) {
igmpreclen -= hdrlen;
} else {
- M_PREPEND(m, hdrlen, M_DONTWAIT);
+ M_PREPEND(m, hdrlen, M_DONTWAIT, 1);
if (m == NULL)
return (NULL);
m->m_flags |= M_IGMPV3_HDR;
#endif
void
-igmp_init(void)
+igmp_init(struct protosw *pp, struct domain *dp)
{
+#pragma unused(dp)
+ static int igmp_initialized = 0;
+
+ VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED);
+
+ if (igmp_initialized)
+ return;
+ igmp_initialized = 1;
IGMP_PRINTF(("%s: initializing\n", __func__));