2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 2007-2009 Bruce Simpson.
30 * Copyright (c) 1988 Stephen Deering.
31 * Copyright (c) 1992, 1993
32 * The Regents of the University of California. All rights reserved.
34 * This code is derived from software contributed to Berkeley by
35 * Stephen Deering of Stanford University.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 * @(#)igmp.c 8.1 (Berkeley) 7/19/93
68 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
69 * support for mandatory and extensible security protections. This notice
70 * is included in support of clause 2.2 (b) of the Apple Public License,
75 * Internet Group Management Protocol (IGMP) routines.
76 * [RFC1112, RFC2236, RFC3376]
78 * Written by Steve Deering, Stanford, May 1988.
79 * Modified by Rosen Sharma, Stanford, Aug 1994.
80 * Modified by Bill Fenner, Xerox PARC, Feb 1995.
81 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995.
82 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson.
84 * MULTICAST Revision: 3.5.1.4
87 #include <sys/cdefs.h>
89 #include <sys/param.h>
90 #include <sys/systm.h>
91 #include <sys/malloc.h>
93 #include <sys/socket.h>
94 #include <sys/protosw.h>
95 #include <sys/kernel.h>
96 #include <sys/sysctl.h>
97 #include <sys/mcache.h>
99 #include <libkern/libkern.h>
100 #include <kern/zalloc.h>
103 #include <net/route.h>
105 #include <netinet/in.h>
106 #include <netinet/in_var.h>
107 #include <netinet/in_systm.h>
108 #include <netinet/ip.h>
109 #include <netinet/ip_var.h>
110 #include <netinet/igmp.h>
111 #include <netinet/igmp_var.h>
112 #include <netinet/kpi_ipfilter_var.h>
116 inet_ntoa(struct in_addr ina
)
118 static char buf
[4*sizeof "123"];
119 unsigned char *ucp
= (unsigned char *)&ina
;
121 snprintf(buf
, sizeof(buf
), "%d.%d.%d.%d",
130 SLIST_HEAD(igmp_inm_relhead
, in_multi
);
132 static void igi_initvar(struct igmp_ifinfo
*, struct ifnet
*, int);
133 static struct igmp_ifinfo
*igi_alloc(int);
134 static void igi_free(struct igmp_ifinfo
*);
135 static void igi_delete(const struct ifnet
*, struct igmp_inm_relhead
*);
136 static void igmp_dispatch_queue(struct igmp_ifinfo
*, struct ifqueue
*,
137 int, const int, struct ifnet
*);
138 static void igmp_final_leave(struct in_multi
*, struct igmp_ifinfo
*);
139 static int igmp_handle_state_change(struct in_multi
*,
140 struct igmp_ifinfo
*);
141 static int igmp_initial_join(struct in_multi
*, struct igmp_ifinfo
*);
142 static int igmp_input_v1_query(struct ifnet
*, const struct ip
*,
143 const struct igmp
*);
144 static int igmp_input_v2_query(struct ifnet
*, const struct ip
*,
145 const struct igmp
*);
146 static int igmp_input_v3_query(struct ifnet
*, const struct ip
*,
147 /*const*/ struct igmpv3
*);
148 static int igmp_input_v3_group_query(struct in_multi
*,
149 int, /*const*/ struct igmpv3
*);
150 static int igmp_input_v1_report(struct ifnet
*, /*const*/ struct ip
*,
151 /*const*/ struct igmp
*);
152 static int igmp_input_v2_report(struct ifnet
*, /*const*/ struct ip
*,
153 /*const*/ struct igmp
*);
154 void igmp_sendpkt(struct mbuf
*, struct ifnet
*);
155 static __inline__
int igmp_isgroupreported(const struct in_addr
);
159 static const char * igmp_rec_type_to_str(const int);
161 static void igmp_set_version(struct igmp_ifinfo
*, const int);
162 static void igmp_flush_relq(struct igmp_ifinfo
*,
163 struct igmp_inm_relhead
*);
164 static int igmp_v1v2_queue_report(struct in_multi
*, const int);
165 static void igmp_v1v2_process_group_timer(struct in_multi
*, const int);
166 static void igmp_v1v2_process_querier_timers(struct igmp_ifinfo
*);
167 static void igmp_v2_update_group(struct in_multi
*, const int);
168 static void igmp_v3_cancel_link_timers(struct igmp_ifinfo
*);
169 static void igmp_v3_dispatch_general_query(struct igmp_ifinfo
*);
171 igmp_v3_encap_report(struct ifnet
*, struct mbuf
*);
172 static int igmp_v3_enqueue_group_record(struct ifqueue
*,
173 struct in_multi
*, const int, const int, const int);
174 static int igmp_v3_enqueue_filter_change(struct ifqueue
*,
176 static void igmp_v3_process_group_timers(struct igmp_ifinfo
*,
177 struct ifqueue
*, struct ifqueue
*, struct in_multi
*,
179 static int igmp_v3_merge_state_changes(struct in_multi
*,
181 static void igmp_v3_suppress_group_record(struct in_multi
*);
182 static int sysctl_igmp_ifinfo SYSCTL_HANDLER_ARGS
;
183 static int sysctl_igmp_gsr SYSCTL_HANDLER_ARGS
;
184 static int sysctl_igmp_default_version SYSCTL_HANDLER_ARGS
;
186 struct mbuf
*m_raopt
; /* Router Alert option */
188 static int interface_timers_running
; /* IGMPv3 general
190 static int state_change_timers_running
; /* IGMPv3 state-change
192 static int current_state_timers_running
; /* IGMPv1/v2 host
193 * report; IGMPv3 g/sg
196 static LIST_HEAD(, igmp_ifinfo
) igi_head
;
197 static struct igmpstat_v3 igmpstat_v3
= {
198 .igps_version
= IGPS_VERSION_3
,
199 .igps_len
= sizeof(struct igmpstat_v3
),
201 static struct igmpstat igmpstat
; /* old IGMPv2 stats structure */
202 static struct timeval igmp_gsrdelay
= {10, 0};
204 static int igmp_recvifkludge
= 1;
205 static int igmp_sendra
= 1;
206 static int igmp_sendlocal
= 1;
207 static int igmp_v1enable
= 1;
208 static int igmp_v2enable
= 1;
209 static int igmp_legacysupp
= 0;
210 static int igmp_default_version
= IGMP_VERSION_3
;
212 SYSCTL_STRUCT(_net_inet_igmp
, IGMPCTL_STATS
, stats
, CTLFLAG_RD
| CTLFLAG_LOCKED
,
213 &igmpstat
, igmpstat
, "");
214 SYSCTL_STRUCT(_net_inet_igmp
, OID_AUTO
, v3stats
,
215 CTLFLAG_RD
| CTLFLAG_LOCKED
, &igmpstat_v3
, igmpstat_v3
, "");
216 SYSCTL_INT(_net_inet_igmp
, OID_AUTO
, recvifkludge
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
217 &igmp_recvifkludge
, 0,
218 "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address");
219 SYSCTL_INT(_net_inet_igmp
, OID_AUTO
, sendra
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
221 "Send IP Router Alert option in IGMPv2/v3 messages");
222 SYSCTL_INT(_net_inet_igmp
, OID_AUTO
, sendlocal
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
224 "Send IGMP membership reports for 224.0.0.0/24 groups");
225 SYSCTL_INT(_net_inet_igmp
, OID_AUTO
, v1enable
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
227 "Enable backwards compatibility with IGMPv1");
228 SYSCTL_INT(_net_inet_igmp
, OID_AUTO
, v2enable
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
230 "Enable backwards compatibility with IGMPv2");
231 SYSCTL_INT(_net_inet_igmp
, OID_AUTO
, legacysupp
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
233 "Allow v1/v2 reports to suppress v3 group responses");
234 SYSCTL_PROC(_net_inet_igmp
, OID_AUTO
, default_version
,
235 CTLTYPE_INT
| CTLFLAG_RW
,
236 &igmp_default_version
, 0, sysctl_igmp_default_version
, "I",
237 "Default version of IGMP to run on each interface");
238 SYSCTL_PROC(_net_inet_igmp
, OID_AUTO
, gsrdelay
,
239 CTLTYPE_INT
| CTLFLAG_RW
,
240 &igmp_gsrdelay
.tv_sec
, 0, sysctl_igmp_gsr
, "I",
241 "Rate limit for IGMPv3 Group-and-Source queries in seconds");
244 SYSCTL_INT(_net_inet_igmp
, OID_AUTO
,
245 debug
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &igmp_debug
, 0, "");
248 SYSCTL_NODE(_net_inet_igmp
, OID_AUTO
, ifinfo
, CTLFLAG_RD
| CTLFLAG_LOCKED
,
249 sysctl_igmp_ifinfo
, "Per-interface IGMPv3 state");
251 /* Lock group and attribute for igmp_mtx */
252 static lck_attr_t
*igmp_mtx_attr
;
253 static lck_grp_t
*igmp_mtx_grp
;
254 static lck_grp_attr_t
*igmp_mtx_grp_attr
;
257 * Locking and reference counting:
259 * igmp_mtx mainly protects igi_head. In cases where both igmp_mtx and
260 * in_multihead_lock must be held, the former must be acquired first in order
261 * to maintain lock ordering. It is not a requirement that igmp_mtx be
262 * acquired first before in_multihead_lock, but in case both must be acquired
263 * in succession, the correct lock ordering must be followed.
265 * Instead of walking the if_multiaddrs list at the interface and returning
266 * the ifma_protospec value of a matching entry, we search the global list
267 * of in_multi records and find it that way; this is done with in_multihead
268 * lock held. Doing so avoids the race condition issues that many other BSDs
269 * suffer from (therefore in our implementation, ifma_protospec will never be
270 * NULL for as long as the in_multi is valid.)
272 * The above creates a requirement for the in_multi to stay in in_multihead
273 * list even after the final IGMP leave (in IGMPv3 mode) until no longer needs
274 * be retransmitted (this is not required for IGMPv1/v2.) In order to handle
275 * this, the request and reference counts of the in_multi are bumped up when
276 * the state changes to IGMP_LEAVING_MEMBER, and later dropped in the timeout
277 * handler. Each in_multi holds a reference to the underlying igmp_ifinfo.
279  * Thus, the permitted lock order is:
281 * igmp_mtx, in_multihead_lock, inm_lock, igi_lock
283 * Any may be taken independently, but if any are held at the same time,
284 * the above lock order must be followed.
286 static decl_lck_mtx_data(, igmp_mtx
);
287 static int igmp_timers_are_running
;
289 #define IGMP_ADD_DETACHED_INM(_head, _inm) { \
290 SLIST_INSERT_HEAD(_head, _inm, inm_dtle); \
293 #define IGMP_REMOVE_DETACHED_INM(_head) { \
294 struct in_multi *_inm, *_inm_tmp; \
295 SLIST_FOREACH_SAFE(_inm, _head, inm_dtle, _inm_tmp) { \
296 SLIST_REMOVE(_head, _inm, in_multi, inm_dtle); \
299 VERIFY(SLIST_EMPTY(_head)); \
302 #define IGI_ZONE_MAX 64 /* maximum elements in zone */
303 #define IGI_ZONE_NAME "igmp_ifinfo" /* zone name */
305 static unsigned int igi_size
; /* size of zone element */
306 static struct zone
*igi_zone
; /* zone for igmp_ifinfo */
309 static __inline
char *
310 inet_ntoa_haddr(in_addr_t haddr
)
314 ia
.s_addr
= htonl(haddr
);
315 return (inet_ntoa(ia
));
319 * Retrieve or set default IGMP version.
322 sysctl_igmp_default_version SYSCTL_HANDLER_ARGS
324 #pragma unused(oidp, arg2)
328 lck_mtx_lock(&igmp_mtx
);
330 error
= SYSCTL_OUT(req
, arg1
, sizeof(int));
331 if (error
|| !req
->newptr
)
334 new = igmp_default_version
;
336 error
= SYSCTL_IN(req
, &new, sizeof(int));
340 if (new < IGMP_VERSION_1
|| new > IGMP_VERSION_3
) {
345 IGMP_PRINTF(("change igmp_default_version from %d to %d\n",
346 igmp_default_version
, new));
348 igmp_default_version
= new;
351 lck_mtx_unlock(&igmp_mtx
);
356 * Retrieve or set threshold between group-source queries in seconds.
360 sysctl_igmp_gsr SYSCTL_HANDLER_ARGS
362 #pragma unused(arg1, arg2)
366 lck_mtx_lock(&igmp_mtx
);
368 i
= igmp_gsrdelay
.tv_sec
;
370 error
= sysctl_handle_int(oidp
, &i
, 0, req
);
371 if (error
|| !req
->newptr
)
374 if (i
< -1 || i
>= 60) {
379 igmp_gsrdelay
.tv_sec
= i
;
382 lck_mtx_unlock(&igmp_mtx
);
387 * Expose struct igmp_ifinfo to userland, keyed by ifindex.
388 * For use by ifmcstat(8).
392 sysctl_igmp_ifinfo SYSCTL_HANDLER_ARGS
399 struct igmp_ifinfo
*igi
;
400 struct igmp_ifinfo_u igi_u
;
405 if (req
->newptr
!= USER_ADDR_NULL
)
411 lck_mtx_lock(&igmp_mtx
);
413 if (name
[0] <= 0 || name
[0] > (u_int
)if_index
) {
420 ifnet_head_lock_shared();
421 ifp
= ifindex2ifnet
[name
[0]];
426 bzero(&igi_u
, sizeof (igi_u
));
428 LIST_FOREACH(igi
, &igi_head
, igi_link
) {
430 if (ifp
!= igi
->igi_ifp
) {
434 igi_u
.igi_ifindex
= igi
->igi_ifp
->if_index
;
435 igi_u
.igi_version
= igi
->igi_version
;
436 igi_u
.igi_v1_timer
= igi
->igi_v1_timer
;
437 igi_u
.igi_v2_timer
= igi
->igi_v2_timer
;
438 igi_u
.igi_v3_timer
= igi
->igi_v3_timer
;
439 igi_u
.igi_flags
= igi
->igi_flags
;
440 igi_u
.igi_rv
= igi
->igi_rv
;
441 igi_u
.igi_qi
= igi
->igi_qi
;
442 igi_u
.igi_qri
= igi
->igi_qri
;
443 igi_u
.igi_uri
= igi
->igi_uri
;
446 error
= SYSCTL_OUT(req
, &igi_u
, sizeof (igi_u
));
451 lck_mtx_unlock(&igmp_mtx
);
456 * Dispatch an entire queue of pending packet chains
458 * Must not be called with inm_lock held.
461 igmp_dispatch_queue(struct igmp_ifinfo
*igi
, struct ifqueue
*ifq
, int limit
,
462 const int loop
, struct ifnet
*ifp
)
468 IGI_LOCK_ASSERT_HELD(igi
);
474 IGMP_PRINTF(("%s: dispatch %p from %p\n", __func__
, ifq
, m
));
475 ip
= mtod(m
, struct ip
*);
477 m
->m_flags
|= M_IGMP_LOOP
;
480 igmp_sendpkt(m
, ifp
);
488 IGI_LOCK_ASSERT_HELD(igi
);
492 * Filter outgoing IGMP report state by group.
494 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1).
495 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are
496 * disabled for all groups in the 224.0.0.0/24 link-local scope. However,
497 * this may break certain IGMP snooping switches which rely on the old
500 * Return zero if the given group is one for which IGMP reports
501 * should be suppressed, or non-zero if reports should be issued.
505 int igmp_isgroupreported(const struct in_addr addr
)
508 if (in_allhosts(addr
) ||
509 ((!igmp_sendlocal
&& IN_LOCAL_GROUP(ntohl(addr
.s_addr
)))))
516 * Construct a Router Alert option to use in outgoing packets.
524 MGET(m
, M_WAITOK
, MT_DATA
);
525 p
= mtod(m
, struct ipoption
*);
526 p
->ipopt_dst
.s_addr
= INADDR_ANY
;
527 p
->ipopt_list
[0] = IPOPT_RA
; /* Router Alert Option */
528 p
->ipopt_list
[1] = 0x04; /* 4 bytes long */
529 p
->ipopt_list
[2] = IPOPT_EOL
; /* End of IP option list */
530 p
->ipopt_list
[3] = 0x00; /* pad byte */
531 m
->m_len
= sizeof(p
->ipopt_dst
) + p
->ipopt_list
[1];
537 * Attach IGMP when PF_INET is attached to an interface.
540 igmp_domifattach(struct ifnet
*ifp
, int how
)
542 struct igmp_ifinfo
*igi
;
544 IGMP_PRINTF(("%s: called for ifp %p(%s)\n",
545 __func__
, ifp
, ifp
->if_name
));
547 igi
= igi_alloc(how
);
551 lck_mtx_lock(&igmp_mtx
);
554 igi_initvar(igi
, ifp
, 0);
555 igi
->igi_debug
|= IFD_ATTACHED
;
556 IGI_ADDREF_LOCKED(igi
); /* hold a reference for igi_head */
557 IGI_ADDREF_LOCKED(igi
); /* hold a reference for caller */
559 ifnet_lock_shared(ifp
);
560 igmp_initsilent(ifp
, igi
);
561 ifnet_lock_done(ifp
);
563 LIST_INSERT_HEAD(&igi_head
, igi
, igi_link
);
565 lck_mtx_unlock(&igmp_mtx
);
567 IGMP_PRINTF(("allocate igmp_ifinfo for ifp %p(%s)\n",
574 * Attach IGMP when PF_INET is reattached to an interface. Caller is
575 * expected to have an outstanding reference to the igi.
578 igmp_domifreattach(struct igmp_ifinfo
*igi
)
582 lck_mtx_lock(&igmp_mtx
);
585 VERIFY(!(igi
->igi_debug
& IFD_ATTACHED
));
588 igi_initvar(igi
, ifp
, 1);
589 igi
->igi_debug
|= IFD_ATTACHED
;
590 IGI_ADDREF_LOCKED(igi
); /* hold a reference for igi_head */
592 ifnet_lock_shared(ifp
);
593 igmp_initsilent(ifp
, igi
);
594 ifnet_lock_done(ifp
);
596 LIST_INSERT_HEAD(&igi_head
, igi
, igi_link
);
598 lck_mtx_unlock(&igmp_mtx
);
600 IGMP_PRINTF(("reattached igmp_ifinfo for ifp %p(%s)\n",
605 * Hook for domifdetach.
608 igmp_domifdetach(struct ifnet
*ifp
)
610 SLIST_HEAD(, in_multi
) inm_dthead
;
612 SLIST_INIT(&inm_dthead
);
614 IGMP_PRINTF(("%s: called for ifp %p(%s%d)\n",
615 __func__
, ifp
, ifp
->if_name
, ifp
->if_unit
));
617 lck_mtx_lock(&igmp_mtx
);
618 igi_delete(ifp
, (struct igmp_inm_relhead
*)&inm_dthead
);
619 lck_mtx_unlock(&igmp_mtx
);
621 /* Now that we're dropped all locks, release detached records */
622 IGMP_REMOVE_DETACHED_INM(&inm_dthead
);
626 * Called at interface detach time. Note that we only flush all deferred
627 * responses and record releases; all remaining inm records and their source
628 * entries related to this interface are left intact, in order to handle
632 igi_delete(const struct ifnet
*ifp
, struct igmp_inm_relhead
*inm_dthead
)
634 struct igmp_ifinfo
*igi
, *tigi
;
636 lck_mtx_assert(&igmp_mtx
, LCK_MTX_ASSERT_OWNED
);
638 LIST_FOREACH_SAFE(igi
, &igi_head
, igi_link
, tigi
) {
640 if (igi
->igi_ifp
== ifp
) {
642 * Free deferred General Query responses.
644 IF_DRAIN(&igi
->igi_gq
);
645 IF_DRAIN(&igi
->igi_v2q
);
646 igmp_flush_relq(igi
, inm_dthead
);
647 VERIFY(SLIST_EMPTY(&igi
->igi_relinmhead
));
648 igi
->igi_debug
&= ~IFD_ATTACHED
;
651 LIST_REMOVE(igi
, igi_link
);
652 IGI_REMREF(igi
); /* release igi_head reference */
657 panic("%s: igmp_ifinfo not found for ifp %p\n", __func__
, ifp
);
660 __private_extern__
void
661 igmp_initsilent(struct ifnet
*ifp
, struct igmp_ifinfo
*igi
)
663 ifnet_lock_assert(ifp
, IFNET_LCK_ASSERT_OWNED
);
665 IGI_LOCK_ASSERT_NOTHELD(igi
);
667 if (!(ifp
->if_flags
& IFF_MULTICAST
))
668 igi
->igi_flags
|= IGIF_SILENT
;
670 igi
->igi_flags
&= ~IGIF_SILENT
;
675 igi_initvar(struct igmp_ifinfo
*igi
, struct ifnet
*ifp
, int reattach
)
677 IGI_LOCK_ASSERT_HELD(igi
);
680 igi
->igi_version
= igmp_default_version
;
682 igi
->igi_rv
= IGMP_RV_INIT
;
683 igi
->igi_qi
= IGMP_QI_INIT
;
684 igi
->igi_qri
= IGMP_QRI_INIT
;
685 igi
->igi_uri
= IGMP_URI_INIT
;
688 SLIST_INIT(&igi
->igi_relinmhead
);
691 * Responses to general queries are subject to bounds.
693 igi
->igi_gq
.ifq_maxlen
= IGMP_MAX_RESPONSE_PACKETS
;
694 igi
->igi_v2q
.ifq_maxlen
= IGMP_MAX_RESPONSE_PACKETS
;
697 static struct igmp_ifinfo
*
700 struct igmp_ifinfo
*igi
;
702 igi
= (how
== M_WAITOK
) ? zalloc(igi_zone
) : zalloc_noblock(igi_zone
);
704 bzero(igi
, igi_size
);
705 lck_mtx_init(&igi
->igi_lock
, igmp_mtx_grp
, igmp_mtx_attr
);
706 igi
->igi_debug
|= IFD_ALLOC
;
712 igi_free(struct igmp_ifinfo
*igi
)
715 if (igi
->igi_debug
& IFD_ATTACHED
) {
716 panic("%s: attached igi=%p is being freed", __func__
, igi
);
718 } else if (igi
->igi_ifp
!= NULL
) {
719 panic("%s: ifp not NULL for igi=%p", __func__
, igi
);
721 } else if (!(igi
->igi_debug
& IFD_ALLOC
)) {
722 panic("%s: igi %p cannot be freed", __func__
, igi
);
724 } else if (igi
->igi_refcnt
!= 0) {
725 panic("%s: non-zero refcnt igi=%p", __func__
, igi
);
728 igi
->igi_debug
&= ~IFD_ALLOC
;
731 lck_mtx_destroy(&igi
->igi_lock
, igmp_mtx_grp
);
732 zfree(igi_zone
, igi
);
736 igi_addref(struct igmp_ifinfo
*igi
, int locked
)
741 IGI_LOCK_ASSERT_HELD(igi
);
743 if (++igi
->igi_refcnt
== 0) {
744 panic("%s: igi=%p wraparound refcnt", __func__
, igi
);
752 igi_remref(struct igmp_ifinfo
*igi
)
754 SLIST_HEAD(, in_multi
) inm_dthead
;
759 if (igi
->igi_refcnt
== 0) {
760 panic("%s: igi=%p negative refcnt", __func__
, igi
);
765 if (igi
->igi_refcnt
> 0) {
772 IF_DRAIN(&igi
->igi_gq
);
773 IF_DRAIN(&igi
->igi_v2q
);
774 SLIST_INIT(&inm_dthead
);
775 igmp_flush_relq(igi
, (struct igmp_inm_relhead
*)&inm_dthead
);
776 VERIFY(SLIST_EMPTY(&igi
->igi_relinmhead
));
779 /* Now that we're dropped all locks, release detached records */
780 IGMP_REMOVE_DETACHED_INM(&inm_dthead
);
782 IGMP_PRINTF(("%s: freeing igmp_ifinfo for ifp %p(%s%d)\n",
783 __func__
, ifp
, ifp
->if_name
, ifp
->if_unit
));
789 * Process a received IGMPv1 query.
790 * Return non-zero if the message should be dropped.
793 igmp_input_v1_query(struct ifnet
*ifp
, const struct ip
*ip
,
794 const struct igmp
*igmp
)
796 struct igmp_ifinfo
*igi
;
797 struct in_multi
*inm
;
798 struct in_multistep step
;
801 * IGMPv1 Host Membership Queries SHOULD always be addressed to
802 * 224.0.0.1. They are always treated as General Queries.
803 * igmp_group is always ignored. Do not drop it as a userland
804 * daemon may wish to see it.
806 if (!in_allhosts(ip
->ip_dst
) || !in_nullhost(igmp
->igmp_group
)) {
807 IGMPSTAT_INC(igps_rcv_badqueries
);
808 OIGMPSTAT_INC(igps_rcv_badqueries
);
811 IGMPSTAT_INC(igps_rcv_gen_queries
);
813 igi
= IGMP_IFINFO(ifp
);
817 if (igi
->igi_flags
& IGIF_LOOPBACK
) {
818 IGMP_PRINTF(("ignore v1 query on IGIF_LOOPBACK ifp %p(%s%d)\n",
819 ifp
, ifp
->if_name
, ifp
->if_unit
));
824 * Switch to IGMPv1 host compatibility mode.
826 igmp_set_version(igi
, IGMP_VERSION_1
);
829 IGMP_PRINTF(("process v1 query on ifp %p(%s%d)\n", ifp
, ifp
->if_name
,
833 * Start the timers in all of our group records
834 * for the interface on which the query arrived,
835 * except those which are already running.
837 in_multihead_lock_shared();
838 IN_FIRST_MULTI(step
, inm
);
839 while (inm
!= NULL
) {
841 if (inm
->inm_ifp
!= ifp
)
843 if (inm
->inm_timer
!= 0)
846 switch (inm
->inm_state
) {
847 case IGMP_NOT_MEMBER
:
848 case IGMP_SILENT_MEMBER
:
850 case IGMP_G_QUERY_PENDING_MEMBER
:
851 case IGMP_SG_QUERY_PENDING_MEMBER
:
852 case IGMP_REPORTING_MEMBER
:
853 case IGMP_IDLE_MEMBER
:
854 case IGMP_LAZY_MEMBER
:
855 case IGMP_SLEEPING_MEMBER
:
856 case IGMP_AWAKENING_MEMBER
:
857 inm
->inm_state
= IGMP_REPORTING_MEMBER
;
858 inm
->inm_timer
= IGMP_RANDOM_DELAY(
859 IGMP_V1V2_MAX_RI
* PR_SLOWHZ
);
860 current_state_timers_running
= 1;
862 case IGMP_LEAVING_MEMBER
:
867 IN_NEXT_MULTI(step
, inm
);
869 in_multihead_lock_done();
875 * Process a received IGMPv2 general or group-specific query.
878 igmp_input_v2_query(struct ifnet
*ifp
, const struct ip
*ip
,
879 const struct igmp
*igmp
)
881 struct igmp_ifinfo
*igi
;
882 struct in_multi
*inm
;
883 int is_general_query
;
886 is_general_query
= 0;
889 * Validate address fields upfront.
891 if (in_nullhost(igmp
->igmp_group
)) {
893 * IGMPv2 General Query.
894 * If this was not sent to the all-hosts group, ignore it.
896 if (!in_allhosts(ip
->ip_dst
))
898 IGMPSTAT_INC(igps_rcv_gen_queries
);
899 is_general_query
= 1;
901 /* IGMPv2 Group-Specific Query. */
902 IGMPSTAT_INC(igps_rcv_group_queries
);
905 igi
= IGMP_IFINFO(ifp
);
909 if (igi
->igi_flags
& IGIF_LOOPBACK
) {
910 IGMP_PRINTF(("ignore v2 query on IGIF_LOOPBACK ifp %p(%s%d)\n",
911 ifp
, ifp
->if_name
, ifp
->if_unit
));
916 * Ignore v2 query if in v1 Compatibility Mode.
918 if (igi
->igi_version
== IGMP_VERSION_1
) {
922 igmp_set_version(igi
, IGMP_VERSION_2
);
925 timer
= igmp
->igmp_code
* PR_SLOWHZ
/ IGMP_TIMER_SCALE
;
929 if (is_general_query
) {
930 struct in_multistep step
;
932 IGMP_PRINTF(("process v2 general query on ifp %p(%s%d)\n",
933 ifp
, ifp
->if_name
, ifp
->if_unit
));
935 * For each reporting group joined on this
936 * interface, kick the report timer.
938 in_multihead_lock_shared();
939 IN_FIRST_MULTI(step
, inm
);
940 while (inm
!= NULL
) {
942 if (inm
->inm_ifp
== ifp
)
943 igmp_v2_update_group(inm
, timer
);
945 IN_NEXT_MULTI(step
, inm
);
947 in_multihead_lock_done();
950 * Group-specific IGMPv2 query, we need only
951 * look up the single group to process it.
953 in_multihead_lock_shared();
954 IN_LOOKUP_MULTI(&igmp
->igmp_group
, ifp
, inm
);
955 in_multihead_lock_done();
958 IGMP_PRINTF(("process v2 query %s on ifp %p(%s%d)\n",
959 inet_ntoa(igmp
->igmp_group
), ifp
, ifp
->if_name
,
961 igmp_v2_update_group(inm
, timer
);
963 INM_REMREF(inm
); /* from IN_LOOKUP_MULTI */
971 * Update the report timer on a group in response to an IGMPv2 query.
973 * If we are becoming the reporting member for this group, start the timer.
974 * If we already are the reporting member for this group, and timer is
975 * below the threshold, reset it.
977 * We may be updating the group for the first time since we switched
978 * to IGMPv3. If we are, then we must clear any recorded source lists,
979 * and transition to REPORTING state; the group timer is overloaded
980 * for group and group-source query responses.
982 * Unlike IGMPv3, the delay per group should be jittered
983 * to avoid bursts of IGMPv2 reports.
986 igmp_v2_update_group(struct in_multi
*inm
, const int timer
)
989 IGMP_PRINTF(("%s: %s/%s%d timer=%d\n", __func__
,
990 inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
->if_name
,
991 inm
->inm_ifp
->if_unit
, timer
));
993 INM_LOCK_ASSERT_HELD(inm
);
995 switch (inm
->inm_state
) {
996 case IGMP_NOT_MEMBER
:
997 case IGMP_SILENT_MEMBER
:
999 case IGMP_REPORTING_MEMBER
:
1000 if (inm
->inm_timer
!= 0 &&
1001 inm
->inm_timer
<= timer
) {
1002 IGMP_PRINTF(("%s: REPORTING and timer running, "
1003 "skipping.\n", __func__
));
1007 case IGMP_SG_QUERY_PENDING_MEMBER
:
1008 case IGMP_G_QUERY_PENDING_MEMBER
:
1009 case IGMP_IDLE_MEMBER
:
1010 case IGMP_LAZY_MEMBER
:
1011 case IGMP_AWAKENING_MEMBER
:
1012 IGMP_PRINTF(("%s: ->REPORTING\n", __func__
));
1013 inm
->inm_state
= IGMP_REPORTING_MEMBER
;
1014 inm
->inm_timer
= IGMP_RANDOM_DELAY(timer
);
1015 current_state_timers_running
= 1;
1017 case IGMP_SLEEPING_MEMBER
:
1018 IGMP_PRINTF(("%s: ->AWAKENING\n", __func__
));
1019 inm
->inm_state
= IGMP_AWAKENING_MEMBER
;
1021 case IGMP_LEAVING_MEMBER
:
1027 * Process a received IGMPv3 general, group-specific or
1028 * group-and-source-specific query.
1029 * Assumes m has already been pulled up to the full IGMP message length.
1030 * Return 0 if successful, otherwise an appropriate error code is returned.
1033 igmp_input_v3_query(struct ifnet
*ifp
, const struct ip
*ip
,
1034 /*const*/ struct igmpv3
*igmpv3
)
1036 struct igmp_ifinfo
*igi
;
1037 struct in_multi
*inm
;
1038 int is_general_query
;
1039 uint32_t maxresp
, nsrc
, qqi
;
1043 is_general_query
= 0;
1045 IGMP_PRINTF(("process v3 query on ifp %p(%s%d)\n", ifp
, ifp
->if_name
,
1048 maxresp
= igmpv3
->igmp_code
; /* in 1/10ths of a second */
1049 if (maxresp
>= 128) {
1050 maxresp
= IGMP_MANT(igmpv3
->igmp_code
) <<
1051 (IGMP_EXP(igmpv3
->igmp_code
) + 3);
1055 * Robustness must never be less than 2 for on-wire IGMPv3.
1056 * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make
1057 * an exception for interfaces whose IGMPv3 state changes
1058 * are redirected to loopback (e.g. MANET).
1060 qrv
= IGMP_QRV(igmpv3
->igmp_misc
);
1062 IGMP_PRINTF(("%s: clamping qrv %d to %d\n", __func__
,
1063 qrv
, IGMP_RV_INIT
));
1067 qqi
= igmpv3
->igmp_qqi
;
1069 qqi
= IGMP_MANT(igmpv3
->igmp_qqi
) <<
1070 (IGMP_EXP(igmpv3
->igmp_qqi
) + 3);
1073 timer
= maxresp
* PR_SLOWHZ
/ IGMP_TIMER_SCALE
;
1077 nsrc
= ntohs(igmpv3
->igmp_numsrc
);
1080 * Validate address fields and versions upfront before
1081 * accepting v3 query.
1083 if (in_nullhost(igmpv3
->igmp_group
)) {
1085 * IGMPv3 General Query.
1087 * General Queries SHOULD be directed to 224.0.0.1.
1088 * A general query with a source list has undefined
1089 * behaviour; discard it.
1091 IGMPSTAT_INC(igps_rcv_gen_queries
);
1092 if (!in_allhosts(ip
->ip_dst
) || nsrc
> 0) {
1093 IGMPSTAT_INC(igps_rcv_badqueries
);
1094 OIGMPSTAT_INC(igps_rcv_badqueries
);
1097 is_general_query
= 1;
1099 /* Group or group-source specific query. */
1101 IGMPSTAT_INC(igps_rcv_group_queries
);
1103 IGMPSTAT_INC(igps_rcv_gsr_queries
);
1106 igi
= IGMP_IFINFO(ifp
);
1107 VERIFY(igi
!= NULL
);
1110 if (igi
->igi_flags
& IGIF_LOOPBACK
) {
1111 IGMP_PRINTF(("ignore v3 query on IGIF_LOOPBACK ifp %p(%s%d)\n",
1112 ifp
, ifp
->if_name
, ifp
->if_unit
));
1118 * Discard the v3 query if we're in Compatibility Mode.
1119 * The RFC is not obviously worded that hosts need to stay in
1120 * compatibility mode until the Old Version Querier Present
1123 if (igi
->igi_version
!= IGMP_VERSION_3
) {
1124 IGMP_PRINTF(("ignore v3 query in v%d mode on ifp %p(%s%d)\n",
1125 igi
->igi_version
, ifp
, ifp
->if_name
, ifp
->if_unit
));
1130 igmp_set_version(igi
, IGMP_VERSION_3
);
1133 igi
->igi_qri
= maxresp
;
1136 IGMP_PRINTF(("%s: qrv %d qi %d qri %d\n", __func__
, qrv
, qqi
,
1139 if (is_general_query
) {
1141 * Schedule a current-state report on this ifp for
1142 * all groups, possibly containing source lists.
1143 * If there is a pending General Query response
1144 * scheduled earlier than the selected delay, do
1145 * not schedule any other reports.
1146 * Otherwise, reset the interface timer.
1148 IGMP_PRINTF(("process v3 general query on ifp %p(%s%d)\n",
1149 ifp
, ifp
->if_name
, ifp
->if_unit
));
1150 if (igi
->igi_v3_timer
== 0 || igi
->igi_v3_timer
>= timer
) {
1151 igi
->igi_v3_timer
= IGMP_RANDOM_DELAY(timer
);
1152 interface_timers_running
= 1;
1158 * Group-source-specific queries are throttled on
1159 * a per-group basis to defeat denial-of-service attempts.
1160 * Queries for groups we are not a member of on this
1161 * link are simply ignored.
1163 in_multihead_lock_shared();
1164 IN_LOOKUP_MULTI(&igmpv3
->igmp_group
, ifp
, inm
);
1165 in_multihead_lock_done();
1171 /* TODO: need ratecheck equivalent */
1173 if (!ratecheck(&inm
->inm_lastgsrtv
,
1175 IGMP_PRINTF(("%s: GS query throttled.\n",
1177 IGMPSTAT_INC(igps_drop_gsr_queries
);
1179 INM_REMREF(inm
); /* from IN_LOOKUP_MULTI */
1184 IGMP_PRINTF(("process v3 %s query on ifp %p(%s%d)\n",
1185 inet_ntoa(igmpv3
->igmp_group
), ifp
, ifp
->if_name
,
1188 * If there is a pending General Query response
1189 * scheduled sooner than the selected delay, no
1190 * further report need be scheduled.
1191 * Otherwise, prepare to respond to the
1192 * group-specific or group-and-source query.
1195 if (igi
->igi_v3_timer
== 0 || igi
->igi_v3_timer
>= timer
) {
1197 igmp_input_v3_group_query(inm
, timer
, igmpv3
);
1202 INM_REMREF(inm
); /* from IN_LOOKUP_MULTI */
1209  * Process a received IGMPv3 group-specific or group-and-source-specific
1211  * Return <0 if any error occurred.  Currently this is ignored.
1214 igmp_input_v3_group_query(struct in_multi
*inm
,
1215 int timer
, /*const*/ struct igmpv3
*igmpv3
)
1220 INM_LOCK_ASSERT_HELD(inm
);
1224 switch (inm
->inm_state
) {
1225 case IGMP_NOT_MEMBER
:
1226 case IGMP_SILENT_MEMBER
:
1227 case IGMP_SLEEPING_MEMBER
:
1228 case IGMP_LAZY_MEMBER
:
1229 case IGMP_AWAKENING_MEMBER
:
1230 case IGMP_IDLE_MEMBER
:
1231 case IGMP_LEAVING_MEMBER
:
1233 case IGMP_REPORTING_MEMBER
:
1234 case IGMP_G_QUERY_PENDING_MEMBER
:
1235 case IGMP_SG_QUERY_PENDING_MEMBER
:
1239 nsrc
= ntohs(igmpv3
->igmp_numsrc
);
1242 * Deal with group-specific queries upfront.
1243 * If any group query is already pending, purge any recorded
1244 * source-list state if it exists, and schedule a query response
1245 * for this group-specific query.
1248 if (inm
->inm_state
== IGMP_G_QUERY_PENDING_MEMBER
||
1249 inm
->inm_state
== IGMP_SG_QUERY_PENDING_MEMBER
) {
1250 inm_clear_recorded(inm
);
1251 timer
= min(inm
->inm_timer
, timer
);
1253 inm
->inm_state
= IGMP_G_QUERY_PENDING_MEMBER
;
1254 inm
->inm_timer
= IGMP_RANDOM_DELAY(timer
);
1255 current_state_timers_running
= 1;
1260 * Deal with the case where a group-and-source-specific query has
1261 * been received but a group-specific query is already pending.
1263 if (inm
->inm_state
== IGMP_G_QUERY_PENDING_MEMBER
) {
1264 timer
= min(inm
->inm_timer
, timer
);
1265 inm
->inm_timer
= IGMP_RANDOM_DELAY(timer
);
1266 current_state_timers_running
= 1;
1271 * Finally, deal with the case where a group-and-source-specific
1272 * query has been received, where a response to a previous g-s-r
1273 * query exists, or none exists.
1274 * In this case, we need to parse the source-list which the Querier
1275 * has provided us with and check if we have any source list filter
1276 * entries at T1 for these sources. If we do not, there is no need
1277 * schedule a report and the query may be dropped.
1278 * If we do, we must record them and schedule a current-state
1279 * report for those sources.
1280 * FIXME: Handling source lists larger than 1 mbuf requires that
1281 * we pass the mbuf chain pointer down to this function, and use
1282 * m_getptr() to walk the chain.
1284 if (inm
->inm_nsrc
> 0) {
1285 const struct in_addr
*ap
;
1288 ap
= (const struct in_addr
*)(igmpv3
+ 1);
1290 for (i
= 0; i
< nsrc
; i
++, ap
++) {
1291 retval
= inm_record_source(inm
, ap
->s_addr
);
1294 nrecorded
+= retval
;
1296 if (nrecorded
> 0) {
1297 IGMP_PRINTF(("%s: schedule response to SG query\n",
1299 inm
->inm_state
= IGMP_SG_QUERY_PENDING_MEMBER
;
1300 inm
->inm_timer
= IGMP_RANDOM_DELAY(timer
);
1301 current_state_timers_running
= 1;
1309 * Process a received IGMPv1 host membership report.
1311 * NOTE: 0.0.0.0 workaround breaks const correctness.
1314 igmp_input_v1_report(struct ifnet
*ifp
, /*const*/ struct ip
*ip
,
1315 /*const*/ struct igmp
*igmp
)
1317 struct in_ifaddr
*ia
;
1318 struct in_multi
*inm
;
1320 IGMPSTAT_INC(igps_rcv_reports
);
1321 OIGMPSTAT_INC(igps_rcv_reports
);
1323 if (ifp
->if_flags
& IFF_LOOPBACK
)
1326 if (!IN_MULTICAST(ntohl(igmp
->igmp_group
.s_addr
) ||
1327 !in_hosteq(igmp
->igmp_group
, ip
->ip_dst
))) {
1328 IGMPSTAT_INC(igps_rcv_badreports
);
1329 OIGMPSTAT_INC(igps_rcv_badreports
);
1334 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1335 * Booting clients may use the source address 0.0.0.0. Some
1336 * IGMP daemons may not know how to use IP_RECVIF to determine
1337 * the interface upon which this message was received.
1338 * Replace 0.0.0.0 with the subnet address if told to do so.
1340 if (igmp_recvifkludge
&& in_nullhost(ip
->ip_src
)) {
1343 IFA_LOCK(&ia
->ia_ifa
);
1344 ip
->ip_src
.s_addr
= htonl(ia
->ia_subnet
);
1345 IFA_UNLOCK(&ia
->ia_ifa
);
1346 IFA_REMREF(&ia
->ia_ifa
);
1350 IGMP_PRINTF(("process v1 report %s on ifp %p(%s%d)\n",
1351 inet_ntoa(igmp
->igmp_group
), ifp
, ifp
->if_name
, ifp
->if_unit
));
1354 * IGMPv1 report suppression.
1355 * If we are a member of this group, and our membership should be
1356 * reported, stop our group timer and transition to the 'lazy' state.
1358 in_multihead_lock_shared();
1359 IN_LOOKUP_MULTI(&igmp
->igmp_group
, ifp
, inm
);
1360 in_multihead_lock_done();
1362 struct igmp_ifinfo
*igi
;
1367 VERIFY(igi
!= NULL
);
1369 IGMPSTAT_INC(igps_rcv_ourreports
);
1370 OIGMPSTAT_INC(igps_rcv_ourreports
);
1373 * If we are in IGMPv3 host mode, do not allow the
1374 * other host's IGMPv1 report to suppress our reports
1375 * unless explicitly configured to do so.
1378 if (igi
->igi_version
== IGMP_VERSION_3
) {
1379 if (igmp_legacysupp
)
1380 igmp_v3_suppress_group_record(inm
);
1383 INM_REMREF(inm
); /* from IN_LOOKUP_MULTI */
1387 INM_LOCK_ASSERT_HELD(inm
);
1390 switch (inm
->inm_state
) {
1391 case IGMP_NOT_MEMBER
:
1392 case IGMP_SILENT_MEMBER
:
1394 case IGMP_IDLE_MEMBER
:
1395 case IGMP_LAZY_MEMBER
:
1396 case IGMP_AWAKENING_MEMBER
:
1397 IGMP_PRINTF(("report suppressed for %s on ifp %p(%s%d)\n",
1398 inet_ntoa(igmp
->igmp_group
), ifp
, ifp
->if_name
,
1400 case IGMP_SLEEPING_MEMBER
:
1401 inm
->inm_state
= IGMP_SLEEPING_MEMBER
;
1403 case IGMP_REPORTING_MEMBER
:
1404 IGMP_PRINTF(("report suppressed for %s on ifp %p(%s%d)\n",
1405 inet_ntoa(igmp
->igmp_group
), ifp
, ifp
->if_name
,
1407 if (igi
->igi_version
== IGMP_VERSION_1
)
1408 inm
->inm_state
= IGMP_LAZY_MEMBER
;
1409 else if (igi
->igi_version
== IGMP_VERSION_2
)
1410 inm
->inm_state
= IGMP_SLEEPING_MEMBER
;
1412 case IGMP_G_QUERY_PENDING_MEMBER
:
1413 case IGMP_SG_QUERY_PENDING_MEMBER
:
1414 case IGMP_LEAVING_MEMBER
:
1419 INM_REMREF(inm
); /* from IN_LOOKUP_MULTI */
1426 * Process a received IGMPv2 host membership report.
1428 * NOTE: 0.0.0.0 workaround breaks const correctness.
1431 igmp_input_v2_report(struct ifnet
*ifp
, /*const*/ struct ip
*ip
,
1432 /*const*/ struct igmp
*igmp
)
1434 struct in_ifaddr
*ia
;
1435 struct in_multi
*inm
;
1438 * Make sure we don't hear our own membership report. Fast
1439 * leave requires knowing that we are the only member of a
1444 IFA_LOCK(&ia
->ia_ifa
);
1445 if (in_hosteq(ip
->ip_src
, IA_SIN(ia
)->sin_addr
)) {
1446 IFA_UNLOCK(&ia
->ia_ifa
);
1447 IFA_REMREF(&ia
->ia_ifa
);
1450 IFA_UNLOCK(&ia
->ia_ifa
);
1453 IGMPSTAT_INC(igps_rcv_reports
);
1454 OIGMPSTAT_INC(igps_rcv_reports
);
1456 if (ifp
->if_flags
& IFF_LOOPBACK
) {
1458 IFA_REMREF(&ia
->ia_ifa
);
1462 if (!IN_MULTICAST(ntohl(igmp
->igmp_group
.s_addr
)) ||
1463 !in_hosteq(igmp
->igmp_group
, ip
->ip_dst
)) {
1465 IFA_REMREF(&ia
->ia_ifa
);
1466 IGMPSTAT_INC(igps_rcv_badreports
);
1467 OIGMPSTAT_INC(igps_rcv_badreports
);
1472 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1473 * Booting clients may use the source address 0.0.0.0. Some
1474 * IGMP daemons may not know how to use IP_RECVIF to determine
1475 * the interface upon which this message was received.
1476 * Replace 0.0.0.0 with the subnet address if told to do so.
1478 if (igmp_recvifkludge
&& in_nullhost(ip
->ip_src
)) {
1480 IFA_LOCK(&ia
->ia_ifa
);
1481 ip
->ip_src
.s_addr
= htonl(ia
->ia_subnet
);
1482 IFA_UNLOCK(&ia
->ia_ifa
);
1486 IFA_REMREF(&ia
->ia_ifa
);
1488 IGMP_PRINTF(("process v2 report %s on ifp %p(%s%d)\n",
1489 inet_ntoa(igmp
->igmp_group
), ifp
, ifp
->if_name
, ifp
->if_unit
));
1492 * IGMPv2 report suppression.
1493 * If we are a member of this group, and our membership should be
1494 * reported, and our group timer is pending or about to be reset,
1495 * stop our group timer by transitioning to the 'lazy' state.
1497 in_multihead_lock_shared();
1498 IN_LOOKUP_MULTI(&igmp
->igmp_group
, ifp
, inm
);
1499 in_multihead_lock_done();
1501 struct igmp_ifinfo
*igi
;
1505 VERIFY(igi
!= NULL
);
1507 IGMPSTAT_INC(igps_rcv_ourreports
);
1508 OIGMPSTAT_INC(igps_rcv_ourreports
);
1511 * If we are in IGMPv3 host mode, do not allow the
1512 * other host's IGMPv1 report to suppress our reports
1513 * unless explicitly configured to do so.
1516 if (igi
->igi_version
== IGMP_VERSION_3
) {
1517 if (igmp_legacysupp
)
1518 igmp_v3_suppress_group_record(inm
);
1527 switch (inm
->inm_state
) {
1528 case IGMP_NOT_MEMBER
:
1529 case IGMP_SILENT_MEMBER
:
1530 case IGMP_SLEEPING_MEMBER
:
1532 case IGMP_REPORTING_MEMBER
:
1533 case IGMP_IDLE_MEMBER
:
1534 case IGMP_AWAKENING_MEMBER
:
1535 IGMP_PRINTF(("report suppressed for %s on ifp %p(%s%d)\n",
1536 inet_ntoa(igmp
->igmp_group
), ifp
, ifp
->if_name
,
1538 case IGMP_LAZY_MEMBER
:
1539 inm
->inm_state
= IGMP_LAZY_MEMBER
;
1541 case IGMP_G_QUERY_PENDING_MEMBER
:
1542 case IGMP_SG_QUERY_PENDING_MEMBER
:
1543 case IGMP_LEAVING_MEMBER
:
1555 igmp_input(struct mbuf
*m
, int off
)
1565 IGMP_PRINTF(("%s: called w/mbuf (%p,%d)\n", __func__
, m
, off
));
1567 ifp
= m
->m_pkthdr
.rcvif
;
1569 IGMPSTAT_INC(igps_rcv_total
);
1570 OIGMPSTAT_INC(igps_rcv_total
);
1572 /* Expect 32-bit aligned data pointer on strict-align platforms */
1573 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m
);
1575 ip
= mtod(m
, struct ip
*);
1578 /* By now, ip_len no longer contains the length of IP header */
1579 igmplen
= ip
->ip_len
;
1584 if (igmplen
< IGMP_MINLEN
) {
1585 IGMPSTAT_INC(igps_rcv_tooshort
);
1586 OIGMPSTAT_INC(igps_rcv_tooshort
);
1592 * Always pullup to the minimum size for v1/v2 or v3
1593 * to amortize calls to m_pulldown().
1595 if (igmplen
>= IGMP_V3_QUERY_MINLEN
)
1596 minlen
= IGMP_V3_QUERY_MINLEN
;
1598 minlen
= IGMP_MINLEN
;
1600 /* A bit more expensive than M_STRUCT_GET, but ensures alignment */
1601 M_STRUCT_GET0(igmp
, struct igmp
*, m
, off
, minlen
);
1603 IGMPSTAT_INC(igps_rcv_tooshort
);
1604 OIGMPSTAT_INC(igps_rcv_tooshort
);
1607 /* N.B.: we assume the packet was correctly aligned in ip_input. */
1610 * Validate checksum.
1612 m
->m_data
+= iphlen
;
1614 if (in_cksum(m
, igmplen
)) {
1615 IGMPSTAT_INC(igps_rcv_badsum
);
1616 OIGMPSTAT_INC(igps_rcv_badsum
);
1620 m
->m_data
-= iphlen
;
1624 * IGMP control traffic is link-scope, and must have a TTL of 1.
1625 * DVMRP traffic (e.g. mrinfo, mtrace) is an exception;
1626 * probe packets may come from beyond the LAN.
1628 if (igmp
->igmp_type
!= IGMP_DVMRP
&& ip
->ip_ttl
!= 1) {
1629 IGMPSTAT_INC(igps_rcv_badttl
);
1634 switch (igmp
->igmp_type
) {
1635 case IGMP_HOST_MEMBERSHIP_QUERY
:
1636 if (igmplen
== IGMP_MINLEN
) {
1637 if (igmp
->igmp_code
== 0)
1638 queryver
= IGMP_VERSION_1
;
1640 queryver
= IGMP_VERSION_2
;
1641 } else if (igmplen
>= IGMP_V3_QUERY_MINLEN
) {
1642 queryver
= IGMP_VERSION_3
;
1644 IGMPSTAT_INC(igps_rcv_tooshort
);
1645 OIGMPSTAT_INC(igps_rcv_tooshort
);
1650 OIGMPSTAT_INC(igps_rcv_queries
);
1653 case IGMP_VERSION_1
:
1654 IGMPSTAT_INC(igps_rcv_v1v2_queries
);
1657 if (igmp_input_v1_query(ifp
, ip
, igmp
) != 0) {
1663 case IGMP_VERSION_2
:
1664 IGMPSTAT_INC(igps_rcv_v1v2_queries
);
1667 if (igmp_input_v2_query(ifp
, ip
, igmp
) != 0) {
1673 case IGMP_VERSION_3
: {
1674 struct igmpv3
*igmpv3
;
1679 IGMPSTAT_INC(igps_rcv_v3_queries
);
1680 igmpv3
= (struct igmpv3
*)igmp
;
1682 * Validate length based on source count.
1684 nsrc
= ntohs(igmpv3
->igmp_numsrc
);
1685 srclen
= sizeof(struct in_addr
) * nsrc
;
1686 if (igmplen
< (IGMP_V3_QUERY_MINLEN
+ srclen
)) {
1687 IGMPSTAT_INC(igps_rcv_tooshort
);
1688 OIGMPSTAT_INC(igps_rcv_tooshort
);
1692 igmpv3len
= IGMP_V3_QUERY_MINLEN
+ srclen
;
1694 * A bit more expensive than M_STRUCT_GET,
1695 * but ensures alignment.
1697 M_STRUCT_GET0(igmpv3
, struct igmpv3
*, m
,
1699 if (igmpv3
== NULL
) {
1700 IGMPSTAT_INC(igps_rcv_tooshort
);
1701 OIGMPSTAT_INC(igps_rcv_tooshort
);
1705 * N.B.: we assume the packet was correctly
1706 * aligned in ip_input.
1708 if (igmp_input_v3_query(ifp
, ip
, igmpv3
) != 0) {
1717 case IGMP_v1_HOST_MEMBERSHIP_REPORT
:
1720 if (igmp_input_v1_report(ifp
, ip
, igmp
) != 0) {
1726 case IGMP_v2_HOST_MEMBERSHIP_REPORT
:
1730 if (!ip_checkrouteralert(m
))
1731 IGMPSTAT_INC(igps_rcv_nora
);
1733 if (igmp_input_v2_report(ifp
, ip
, igmp
) != 0) {
1739 case IGMP_v3_HOST_MEMBERSHIP_REPORT
:
1741 * Hosts do not need to process IGMPv3 membership reports,
1742 * as report suppression is no longer required.
1745 if (!ip_checkrouteralert(m
))
1746 IGMPSTAT_INC(igps_rcv_nora
);
1754 lck_mtx_assert(&igmp_mtx
, LCK_MTX_ASSERT_NOTOWNED
);
1756 * Pass all valid IGMP packets up to any process(es) listening on a
1764 * IGMP slowtimo handler.
1765 * Combines both the slow and fast timer into one. We lose some responsiveness but
1766 * allows the system to avoid having a pr_fasttimo, thus allowing for power savings.
1772 struct ifqueue scq
; /* State-change packets */
1773 struct ifqueue qrq
; /* Query response packets */
1775 struct igmp_ifinfo
*igi
;
1776 struct in_multi
*inm
;
1777 int loop
= 0, uri_fasthz
= 0;
1778 SLIST_HEAD(, in_multi
) inm_dthead
;
1780 SLIST_INIT(&inm_dthead
);
1782 lck_mtx_lock(&igmp_mtx
);
1784 LIST_FOREACH(igi
, &igi_head
, igi_link
) {
1786 igmp_v1v2_process_querier_timers(igi
);
1791 * NOTE: previously handled by fasttimo
1793 * Quick check to see if any work needs to be done, in order to
1794 * minimize the overhead of fasttimo processing.
1796 if (!current_state_timers_running
&&
1797 !interface_timers_running
&&
1798 !state_change_timers_running
) {
1799 lck_mtx_unlock(&igmp_mtx
);
1804 * IGMPv3 General Query response timer processing.
1806 if (interface_timers_running
) {
1807 interface_timers_running
= 0;
1808 LIST_FOREACH(igi
, &igi_head
, igi_link
) {
1810 if (igi
->igi_v3_timer
== 0) {
1812 } else if (--igi
->igi_v3_timer
== 0) {
1813 igmp_v3_dispatch_general_query(igi
);
1815 interface_timers_running
= 1;
1821 if (!current_state_timers_running
&&
1822 !state_change_timers_running
)
1825 current_state_timers_running
= 0;
1826 state_change_timers_running
= 0;
1828 memset(&qrq
, 0, sizeof(struct ifqueue
));
1829 qrq
.ifq_maxlen
= IGMP_MAX_G_GS_PACKETS
;
1831 memset(&scq
, 0, sizeof(struct ifqueue
));
1832 scq
.ifq_maxlen
= IGMP_MAX_STATE_CHANGE_PACKETS
;
1835 * IGMPv1/v2/v3 host report and state-change timer processing.
1836 * Note: Processing a v3 group timer may remove a node.
1838 LIST_FOREACH(igi
, &igi_head
, igi_link
) {
1839 struct in_multistep step
;
1843 loop
= (igi
->igi_flags
& IGIF_LOOPBACK
) ? 1 : 0;
1844 uri_fasthz
= IGMP_RANDOM_DELAY(igi
->igi_uri
* PR_SLOWHZ
);
1847 in_multihead_lock_shared();
1848 IN_FIRST_MULTI(step
, inm
);
1849 while (inm
!= NULL
) {
1851 if (inm
->inm_ifp
!= ifp
)
1855 switch (igi
->igi_version
) {
1856 case IGMP_VERSION_1
:
1857 case IGMP_VERSION_2
:
1858 igmp_v1v2_process_group_timer(inm
,
1861 case IGMP_VERSION_3
:
1862 igmp_v3_process_group_timers(igi
, &qrq
,
1863 &scq
, inm
, uri_fasthz
);
1869 IN_NEXT_MULTI(step
, inm
);
1871 in_multihead_lock_done();
1874 if (igi
->igi_version
== IGMP_VERSION_1
||
1875 igi
->igi_version
== IGMP_VERSION_2
) {
1876 igmp_dispatch_queue(igi
, &igi
->igi_v2q
, 0, loop
, ifp
);
1877 } else if (igi
->igi_version
== IGMP_VERSION_3
) {
1879 igmp_dispatch_queue(NULL
, &qrq
, 0, loop
, ifp
);
1880 igmp_dispatch_queue(NULL
, &scq
, 0, loop
, ifp
);
1881 VERIFY(qrq
.ifq_len
== 0);
1882 VERIFY(scq
.ifq_len
== 0);
1886 * In case there are still any pending membership reports
1887 * which didn't get drained at version change time.
1889 IF_DRAIN(&igi
->igi_v2q
);
1891 * Release all deferred inm records, and drain any locally
1892 * enqueued packets; do it even if the current IGMP version
1893 * for the link is no longer IGMPv3, in order to handle the
1894 * version change case.
1896 igmp_flush_relq(igi
, (struct igmp_inm_relhead
*)&inm_dthead
);
1897 VERIFY(SLIST_EMPTY(&igi
->igi_relinmhead
));
1905 lck_mtx_unlock(&igmp_mtx
);
1907 /* Now that we're dropped all locks, release detached records */
1908 IGMP_REMOVE_DETACHED_INM(&inm_dthead
);
1912 * Free the in_multi reference(s) for this IGMP lifecycle.
1914 * Caller must be holding igi_lock.
1917 igmp_flush_relq(struct igmp_ifinfo
*igi
, struct igmp_inm_relhead
*inm_dthead
)
1919 struct in_multi
*inm
;
1922 IGI_LOCK_ASSERT_HELD(igi
);
1923 inm
= SLIST_FIRST(&igi
->igi_relinmhead
);
1927 SLIST_REMOVE_HEAD(&igi
->igi_relinmhead
, inm_nrele
);
1930 in_multihead_lock_exclusive();
1932 VERIFY(inm
->inm_nrelecnt
!= 0);
1933 inm
->inm_nrelecnt
--;
1934 lastref
= in_multi_detach(inm
);
1935 VERIFY(!lastref
|| (!(inm
->inm_debug
& IFD_ATTACHED
) &&
1936 inm
->inm_reqcnt
== 0));
1938 in_multihead_lock_done();
1939 /* from igi_relinmhead */
1941 /* from in_multihead list */
1944 * Defer releasing our final reference, as we
1945 * are holding the IGMP lock at this point, and
1946 * we could end up with locking issues later on
1947 * (while issuing SIOCDELMULTI) when this is the
1948 * final reference count. Let the caller do it
1951 IGMP_ADD_DETACHED_INM(inm_dthead
, inm
);
1959 * Update host report group timer for IGMPv1/v2.
1960 * Will update the global pending timer flags.
1963 igmp_v1v2_process_group_timer(struct in_multi
*inm
, const int igmp_version
)
1965 int report_timer_expired
;
1967 INM_LOCK_ASSERT_HELD(inm
);
1968 IGI_LOCK_ASSERT_HELD(inm
->inm_igi
);
1970 if (inm
->inm_timer
== 0) {
1971 report_timer_expired
= 0;
1972 } else if (--inm
->inm_timer
== 0) {
1973 report_timer_expired
= 1;
1975 current_state_timers_running
= 1;
1979 switch (inm
->inm_state
) {
1980 case IGMP_NOT_MEMBER
:
1981 case IGMP_SILENT_MEMBER
:
1982 case IGMP_IDLE_MEMBER
:
1983 case IGMP_LAZY_MEMBER
:
1984 case IGMP_SLEEPING_MEMBER
:
1985 case IGMP_AWAKENING_MEMBER
:
1987 case IGMP_REPORTING_MEMBER
:
1988 if (report_timer_expired
) {
1989 inm
->inm_state
= IGMP_IDLE_MEMBER
;
1990 (void) igmp_v1v2_queue_report(inm
,
1991 (igmp_version
== IGMP_VERSION_2
) ?
1992 IGMP_v2_HOST_MEMBERSHIP_REPORT
:
1993 IGMP_v1_HOST_MEMBERSHIP_REPORT
);
1994 INM_LOCK_ASSERT_HELD(inm
);
1995 IGI_LOCK_ASSERT_HELD(inm
->inm_igi
);
1998 case IGMP_G_QUERY_PENDING_MEMBER
:
1999 case IGMP_SG_QUERY_PENDING_MEMBER
:
2000 case IGMP_LEAVING_MEMBER
:
2006 * Update a group's timers for IGMPv3.
2007 * Will update the global pending timer flags.
2008 * Note: Unlocked read from igi.
2011 igmp_v3_process_group_timers(struct igmp_ifinfo
*igi
,
2012 struct ifqueue
*qrq
, struct ifqueue
*scq
,
2013 struct in_multi
*inm
, const int uri_fasthz
)
2015 int query_response_timer_expired
;
2016 int state_change_retransmit_timer_expired
;
2018 INM_LOCK_ASSERT_HELD(inm
);
2019 IGI_LOCK_ASSERT_HELD(igi
);
2020 VERIFY(igi
== inm
->inm_igi
);
2022 query_response_timer_expired
= 0;
2023 state_change_retransmit_timer_expired
= 0;
2026 * During a transition from v1/v2 compatibility mode back to v3,
2027 * a group record in REPORTING state may still have its group
2028 * timer active. This is a no-op in this function; it is easier
2029 * to deal with it here than to complicate the slow-timeout path.
2031 if (inm
->inm_timer
== 0) {
2032 query_response_timer_expired
= 0;
2033 } else if (--inm
->inm_timer
== 0) {
2034 query_response_timer_expired
= 1;
2036 current_state_timers_running
= 1;
2039 if (inm
->inm_sctimer
== 0) {
2040 state_change_retransmit_timer_expired
= 0;
2041 } else if (--inm
->inm_sctimer
== 0) {
2042 state_change_retransmit_timer_expired
= 1;
2044 state_change_timers_running
= 1;
2047 /* We are in fasttimo, so be quick about it. */
2048 if (!state_change_retransmit_timer_expired
&&
2049 !query_response_timer_expired
)
2052 switch (inm
->inm_state
) {
2053 case IGMP_NOT_MEMBER
:
2054 case IGMP_SILENT_MEMBER
:
2055 case IGMP_SLEEPING_MEMBER
:
2056 case IGMP_LAZY_MEMBER
:
2057 case IGMP_AWAKENING_MEMBER
:
2058 case IGMP_IDLE_MEMBER
:
2060 case IGMP_G_QUERY_PENDING_MEMBER
:
2061 case IGMP_SG_QUERY_PENDING_MEMBER
:
2063 * Respond to a previously pending Group-Specific
2064 * or Group-and-Source-Specific query by enqueueing
2065 * the appropriate Current-State report for
2066 * immediate transmission.
2068 if (query_response_timer_expired
) {
2071 retval
= igmp_v3_enqueue_group_record(qrq
, inm
, 0, 1,
2072 (inm
->inm_state
== IGMP_SG_QUERY_PENDING_MEMBER
));
2073 IGMP_PRINTF(("%s: enqueue record = %d\n",
2075 inm
->inm_state
= IGMP_REPORTING_MEMBER
;
2076 /* XXX Clear recorded sources for next time. */
2077 inm_clear_recorded(inm
);
2080 case IGMP_REPORTING_MEMBER
:
2081 case IGMP_LEAVING_MEMBER
:
2082 if (state_change_retransmit_timer_expired
) {
2084 * State-change retransmission timer fired.
2085 * If there are any further pending retransmissions,
2086 * set the global pending state-change flag, and
2089 if (--inm
->inm_scrv
> 0) {
2090 inm
->inm_sctimer
= uri_fasthz
;
2091 state_change_timers_running
= 1;
2094 * Retransmit the previously computed state-change
2095 * report. If there are no further pending
2096 * retransmissions, the mbuf queue will be consumed.
2097 * Update T0 state to T1 as we have now sent
2100 (void) igmp_v3_merge_state_changes(inm
, scq
);
2103 IGMP_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__
,
2104 inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
->if_name
,
2105 inm
->inm_ifp
->if_unit
));
2108 * If we are leaving the group for good, make sure
2109 * we release IGMP's reference to it.
2110 * This release must be deferred using a SLIST,
2111 * as we are called from a loop which traverses
2112 * the in_multihead list.
2114 if (inm
->inm_state
== IGMP_LEAVING_MEMBER
&&
2115 inm
->inm_scrv
== 0) {
2116 inm
->inm_state
= IGMP_NOT_MEMBER
;
2118 * A reference has already been held in
2119 * igmp_final_leave() for this inm, so
2120 * no need to hold another one. We also
2121 * bumped up its request count then, so
2122 * that it stays in in_multihead. Both
2123 * of them will be released when it is
2124 * dequeued later on.
2126 VERIFY(inm
->inm_nrelecnt
!= 0);
2127 SLIST_INSERT_HEAD(&igi
->igi_relinmhead
,
2136 * Suppress a group's pending response to a group or source/group query.
2138 * Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
2139 * Do NOT update ST1/ST0 as this operation merely suppresses
2140 * the currently pending group record.
2141 * Do NOT suppress the response to a general query. It is possible but
2142 * it would require adding another state or flag.
2145 igmp_v3_suppress_group_record(struct in_multi
*inm
)
2148 INM_LOCK_ASSERT_HELD(inm
);
2149 IGI_LOCK_ASSERT_HELD(inm
->inm_igi
);
2151 VERIFY(inm
->inm_igi
->igi_version
== IGMP_VERSION_3
);
2153 if (inm
->inm_state
!= IGMP_G_QUERY_PENDING_MEMBER
||
2154 inm
->inm_state
!= IGMP_SG_QUERY_PENDING_MEMBER
)
2157 if (inm
->inm_state
== IGMP_SG_QUERY_PENDING_MEMBER
)
2158 inm_clear_recorded(inm
);
2161 inm
->inm_state
= IGMP_REPORTING_MEMBER
;
2165 * Switch to a different IGMP version on the given interface,
2166 * as per Section 7.2.1.
2169 igmp_set_version(struct igmp_ifinfo
*igi
, const int igmp_version
)
2171 int old_version_timer
;
2173 IGI_LOCK_ASSERT_HELD(igi
);
2175 IGMP_PRINTF(("%s: switching to v%d on ifp %p(%s%d)\n", __func__
,
2176 igmp_version
, igi
->igi_ifp
, igi
->igi_ifp
->if_name
,
2177 igi
->igi_ifp
->if_unit
));
2179 if (igmp_version
== IGMP_VERSION_1
|| igmp_version
== IGMP_VERSION_2
) {
2181 * Compute the "Older Version Querier Present" timer as per
2184 old_version_timer
= igi
->igi_rv
* igi
->igi_qi
+ igi
->igi_qri
;
2185 old_version_timer
*= PR_SLOWHZ
;
2187 if (igmp_version
== IGMP_VERSION_1
) {
2188 igi
->igi_v1_timer
= old_version_timer
;
2189 igi
->igi_v2_timer
= 0;
2190 } else if (igmp_version
== IGMP_VERSION_2
) {
2191 igi
->igi_v1_timer
= 0;
2192 igi
->igi_v2_timer
= old_version_timer
;
2196 if (igi
->igi_v1_timer
== 0 && igi
->igi_v2_timer
> 0) {
2197 if (igi
->igi_version
!= IGMP_VERSION_2
) {
2198 igi
->igi_version
= IGMP_VERSION_2
;
2199 igmp_v3_cancel_link_timers(igi
);
2201 } else if (igi
->igi_v1_timer
> 0) {
2202 if (igi
->igi_version
!= IGMP_VERSION_1
) {
2203 igi
->igi_version
= IGMP_VERSION_1
;
2204 igmp_v3_cancel_link_timers(igi
);
2208 IGI_LOCK_ASSERT_HELD(igi
);
2212 * Cancel pending IGMPv3 timers for the given link and all groups
2213 * joined on it; state-change, general-query, and group-query timers.
2215 * Only ever called on a transition from v3 to Compatibility mode. Kill
2216 * the timers stone dead (this may be expensive for large N groups), they
2217 * will be restarted if Compatibility Mode deems that they must be due to
2221 igmp_v3_cancel_link_timers(struct igmp_ifinfo
*igi
)
2224 struct in_multi
*inm
;
2225 struct in_multistep step
;
2227 IGI_LOCK_ASSERT_HELD(igi
);
2229 IGMP_PRINTF(("%s: cancel v3 timers on ifp %p(%s%d)\n", __func__
,
2230 igi
->igi_ifp
, igi
->igi_ifp
->if_name
, igi
->igi_ifp
->if_unit
));
2233 * Stop the v3 General Query Response on this link stone dead.
2234 * If fasttimo is woken up due to interface_timers_running,
2235 * the flag will be cleared if there are no pending link timers.
2237 igi
->igi_v3_timer
= 0;
2240 * Now clear the current-state and state-change report timers
2241 * for all memberships scoped to this link.
2246 in_multihead_lock_shared();
2247 IN_FIRST_MULTI(step
, inm
);
2248 while (inm
!= NULL
) {
2250 if (inm
->inm_ifp
!= ifp
)
2253 switch (inm
->inm_state
) {
2254 case IGMP_NOT_MEMBER
:
2255 case IGMP_SILENT_MEMBER
:
2256 case IGMP_IDLE_MEMBER
:
2257 case IGMP_LAZY_MEMBER
:
2258 case IGMP_SLEEPING_MEMBER
:
2259 case IGMP_AWAKENING_MEMBER
:
2261 * These states are either not relevant in v3 mode,
2262 * or are unreported. Do nothing.
2265 case IGMP_LEAVING_MEMBER
:
2267 * If we are leaving the group and switching to
2268 * compatibility mode, we need to release the final
2269 * reference held for issuing the INCLUDE {}, and
2270 * transition to REPORTING to ensure the host leave
2271 * message is sent upstream to the old querier --
2272 * transition to NOT would lose the leave and race.
2273 * During igmp_final_leave(), we bumped up both the
2274 * request and reference counts. Since we cannot
2275 * call in_multi_detach() here, defer this task to
2276 * the timer routine.
2278 VERIFY(inm
->inm_nrelecnt
!= 0);
2280 SLIST_INSERT_HEAD(&igi
->igi_relinmhead
, inm
, inm_nrele
);
2283 case IGMP_G_QUERY_PENDING_MEMBER
:
2284 case IGMP_SG_QUERY_PENDING_MEMBER
:
2285 inm_clear_recorded(inm
);
2287 case IGMP_REPORTING_MEMBER
:
2288 inm
->inm_state
= IGMP_REPORTING_MEMBER
;
2292 * Always clear state-change and group report timers.
2293 * Free any pending IGMPv3 state-change records.
2295 inm
->inm_sctimer
= 0;
2297 IF_DRAIN(&inm
->inm_scq
);
2300 IN_NEXT_MULTI(step
, inm
);
2302 in_multihead_lock_done();
2308 * Update the Older Version Querier Present timers for a link.
2309 * See Section 7.2.1 of RFC 3376.
2312 igmp_v1v2_process_querier_timers(struct igmp_ifinfo
*igi
)
2314 IGI_LOCK_ASSERT_HELD(igi
);
2316 if (igi
->igi_v1_timer
== 0 && igi
->igi_v2_timer
== 0) {
2318 * IGMPv1 and IGMPv2 Querier Present timers expired.
2322 if (igi
->igi_version
!= IGMP_VERSION_3
) {
2323 IGMP_PRINTF(("%s: transition from v%d -> v%d on %p(%s%d)\n",
2324 __func__
, igi
->igi_version
, IGMP_VERSION_3
,
2325 igi
->igi_ifp
, igi
->igi_ifp
->if_name
,
2326 igi
->igi_ifp
->if_unit
));
2327 igi
->igi_version
= IGMP_VERSION_3
;
2328 IF_DRAIN(&igi
->igi_v2q
);
2330 } else if (igi
->igi_v1_timer
== 0 && igi
->igi_v2_timer
> 0) {
2332 * IGMPv1 Querier Present timer expired,
2333 * IGMPv2 Querier Present timer running.
2334 * If IGMPv2 was disabled since last timeout,
2336 * If IGMPv2 is enabled, revert to IGMPv2.
2338 if (!igmp_v2enable
) {
2339 IGMP_PRINTF(("%s: transition from v%d -> v%d on %p(%s%d)\n",
2340 __func__
, igi
->igi_version
, IGMP_VERSION_3
,
2341 igi
->igi_ifp
, igi
->igi_ifp
->if_name
,
2342 igi
->igi_ifp
->if_unit
));
2343 igi
->igi_v2_timer
= 0;
2344 igi
->igi_version
= IGMP_VERSION_3
;
2345 IF_DRAIN(&igi
->igi_v2q
);
2347 --igi
->igi_v2_timer
;
2348 if (igi
->igi_version
!= IGMP_VERSION_2
) {
2349 IGMP_PRINTF(("%s: transition from v%d -> v%d on %p(%s%d)\n",
2350 __func__
, igi
->igi_version
, IGMP_VERSION_2
,
2351 igi
->igi_ifp
, igi
->igi_ifp
->if_name
,
2352 igi
->igi_ifp
->if_unit
));
2353 igi
->igi_version
= IGMP_VERSION_2
;
2354 IF_DRAIN(&igi
->igi_gq
);
2357 } else if (igi
->igi_v1_timer
> 0) {
2359 * IGMPv1 Querier Present timer running.
2360 * Stop IGMPv2 timer if running.
2362 * If IGMPv1 was disabled since last timeout,
2364 * If IGMPv1 is enabled, reset IGMPv2 timer if running.
2366 if (!igmp_v1enable
) {
2367 IGMP_PRINTF(("%s: transition from v%d -> v%d on %p(%s%d)\n",
2368 __func__
, igi
->igi_version
, IGMP_VERSION_3
,
2369 igi
->igi_ifp
, igi
->igi_ifp
->if_name
,
2370 igi
->igi_ifp
->if_unit
));
2371 igi
->igi_v1_timer
= 0;
2372 igi
->igi_version
= IGMP_VERSION_3
;
2373 IF_DRAIN(&igi
->igi_v2q
);
2375 --igi
->igi_v1_timer
;
2377 if (igi
->igi_v2_timer
> 0) {
2378 IGMP_PRINTF(("%s: cancel v2 timer on %p(%s%d)\n",
2379 __func__
, igi
->igi_ifp
, igi
->igi_ifp
->if_name
,
2380 igi
->igi_ifp
->if_unit
));
2381 igi
->igi_v2_timer
= 0;
2387 * Dispatch an IGMPv1/v2 host report or leave message.
2388 * These are always small enough to fit inside a single mbuf.
2391 igmp_v1v2_queue_report(struct in_multi
*inm
, const int type
)
2399 INM_LOCK_ASSERT_HELD(inm
);
2400 IGI_LOCK_ASSERT_HELD(inm
->inm_igi
);
2404 MGETHDR(m
, M_DONTWAIT
, MT_DATA
);
2407 MH_ALIGN(m
, sizeof(struct ip
) + sizeof(struct igmp
));
2409 m
->m_pkthdr
.len
= sizeof(struct ip
) + sizeof(struct igmp
);
2411 m
->m_data
+= sizeof(struct ip
);
2412 m
->m_len
= sizeof(struct igmp
);
2414 igmp
= mtod(m
, struct igmp
*);
2415 igmp
->igmp_type
= type
;
2416 igmp
->igmp_code
= 0;
2417 igmp
->igmp_group
= inm
->inm_addr
;
2418 igmp
->igmp_cksum
= 0;
2419 igmp
->igmp_cksum
= in_cksum(m
, sizeof(struct igmp
));
2421 m
->m_data
-= sizeof(struct ip
);
2422 m
->m_len
+= sizeof(struct ip
);
2424 ip
= mtod(m
, struct ip
*);
2426 ip
->ip_len
= sizeof(struct ip
) + sizeof(struct igmp
);
2428 ip
->ip_p
= IPPROTO_IGMP
;
2429 ip
->ip_src
.s_addr
= INADDR_ANY
;
2431 if (type
== IGMP_HOST_LEAVE_MESSAGE
)
2432 ip
->ip_dst
.s_addr
= htonl(INADDR_ALLRTRS_GROUP
);
2434 ip
->ip_dst
= inm
->inm_addr
;
2436 m
->m_flags
|= M_IGMPV2
;
2437 if (inm
->inm_igi
->igi_flags
& IGIF_LOOPBACK
)
2438 m
->m_flags
|= M_IGMP_LOOP
;
2441 * Due to the fact that at this point we are possibly holding
2442 * in_multihead_lock in shared or exclusive mode, we can't call
2443 * igmp_sendpkt() here since that will eventually call ip_output(),
2444 * which will try to lock in_multihead_lock and cause a deadlock.
2445 * Instead we defer the work to the igmp_slowtimo() thread, thus
2446 * avoiding unlocking in_multihead_lock here.
2448 if (IF_QFULL(&inm
->inm_igi
->igi_v2q
)) {
2449 IGMP_PRINTF(("%s: v1/v2 outbound queue full\n", __func__
));
2453 IF_ENQUEUE(&inm
->inm_igi
->igi_v2q
, m
);
2459 * Process a state change from the upper layer for the given IPv4 group.
2461 * Each socket holds a reference on the in_multi in its own ip_moptions.
2462 * The socket layer will have made the necessary updates to the group
2463 * state, it is now up to IGMP to issue a state change report if there
2464 * has been any change between T0 (when the last state-change was issued)
2467 * We use the IGMPv3 state machine at group level. The IGMP module
2468 * however makes the decision as to which IGMP protocol version to speak.
2469 * A state change *from* INCLUDE {} always means an initial join.
2470 * A state change *to* INCLUDE {} always means a final leave.
2472 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can
2473 * save ourselves a bunch of work; any exclusive mode groups need not
2474 * compute source filter lists.
2477 igmp_change_state(struct in_multi
*inm
)
2479 struct igmp_ifinfo
*igi
;
2483 INM_LOCK_ASSERT_HELD(inm
);
2484 VERIFY(inm
->inm_igi
!= NULL
);
2485 IGI_LOCK_ASSERT_NOTHELD(inm
->inm_igi
);
2488 * Try to detect if the upper layer just asked us to change state
2489 * for an interface which has now gone away.
2491 VERIFY(inm
->inm_ifma
!= NULL
);
2492 ifp
= inm
->inm_ifma
->ifma_ifp
;
2494 * Sanity check that netinet's notion of ifp is the same as net's.
2496 VERIFY(inm
->inm_ifp
== ifp
);
2498 igi
= IGMP_IFINFO(ifp
);
2499 VERIFY(igi
!= NULL
);
2502 * If we detect a state transition to or from MCAST_UNDEFINED
2503 * for this group, then we are starting or finishing an IGMP
2504 * life cycle for this group.
2506 if (inm
->inm_st
[1].iss_fmode
!= inm
->inm_st
[0].iss_fmode
) {
2507 IGMP_PRINTF(("%s: inm transition %d -> %d\n", __func__
,
2508 inm
->inm_st
[0].iss_fmode
, inm
->inm_st
[1].iss_fmode
));
2509 if (inm
->inm_st
[0].iss_fmode
== MCAST_UNDEFINED
) {
2510 IGMP_PRINTF(("%s: initial join\n", __func__
));
2511 error
= igmp_initial_join(inm
, igi
);
2513 } else if (inm
->inm_st
[1].iss_fmode
== MCAST_UNDEFINED
) {
2514 IGMP_PRINTF(("%s: final leave\n", __func__
));
2515 igmp_final_leave(inm
, igi
);
2519 IGMP_PRINTF(("%s: filter set change\n", __func__
));
2522 error
= igmp_handle_state_change(inm
, igi
);
2528 * Perform the initial join for an IGMP group.
2530 * When joining a group:
2531 * If the group should have its IGMP traffic suppressed, do nothing.
2532 * IGMPv1 starts sending IGMPv1 host membership reports.
2533 * IGMPv2 starts sending IGMPv2 host membership reports.
2534 * IGMPv3 will schedule an IGMPv3 state-change report containing the
2535 * initial state of the membership.
2538 igmp_initial_join(struct in_multi
*inm
, struct igmp_ifinfo
*igi
)
2541 struct ifqueue
*ifq
;
2542 int error
, retval
, syncstates
;
2544 INM_LOCK_ASSERT_HELD(inm
);
2545 IGI_LOCK_ASSERT_NOTHELD(igi
);
2547 IGMP_PRINTF(("%s: initial join %s on ifp %p(%s%d)\n",
2548 __func__
, inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
,
2549 inm
->inm_ifp
->if_name
, inm
->inm_ifp
->if_unit
));
2557 VERIFY(igi
->igi_ifp
== ifp
);
2560 * Groups joined on loopback or marked as 'not reported',
2561 * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and
2562 * are never reported in any IGMP protocol exchanges.
2563 * All other groups enter the appropriate IGMP state machine
2564 * for the version in use on this link.
2565 * A link marked as IGIF_SILENT causes IGMP to be completely
2566 * disabled for the link.
2568 if ((ifp
->if_flags
& IFF_LOOPBACK
) ||
2569 (igi
->igi_flags
& IGIF_SILENT
) ||
2570 !igmp_isgroupreported(inm
->inm_addr
)) {
2571 IGMP_PRINTF(("%s: not kicking state machine for silent group\n",
2573 inm
->inm_state
= IGMP_SILENT_MEMBER
;
2577 * Deal with overlapping in_multi lifecycle.
2578 * If this group was LEAVING, then make sure
2579 * we drop the reference we picked up to keep the
2580 * group around for the final INCLUDE {} enqueue.
2581 * Since we cannot call in_multi_detach() here,
2582 * defer this task to the timer routine.
2584 if (igi
->igi_version
== IGMP_VERSION_3
&&
2585 inm
->inm_state
== IGMP_LEAVING_MEMBER
) {
2586 VERIFY(inm
->inm_nrelecnt
!= 0);
2587 SLIST_INSERT_HEAD(&igi
->igi_relinmhead
, inm
, inm_nrele
);
2590 inm
->inm_state
= IGMP_REPORTING_MEMBER
;
2592 switch (igi
->igi_version
) {
2593 case IGMP_VERSION_1
:
2594 case IGMP_VERSION_2
:
2595 inm
->inm_state
= IGMP_IDLE_MEMBER
;
2596 error
= igmp_v1v2_queue_report(inm
,
2597 (igi
->igi_version
== IGMP_VERSION_2
) ?
2598 IGMP_v2_HOST_MEMBERSHIP_REPORT
:
2599 IGMP_v1_HOST_MEMBERSHIP_REPORT
);
2601 INM_LOCK_ASSERT_HELD(inm
);
2602 IGI_LOCK_ASSERT_HELD(igi
);
2605 inm
->inm_timer
= IGMP_RANDOM_DELAY(
2606 IGMP_V1V2_MAX_RI
* PR_SLOWHZ
);
2607 current_state_timers_running
= 1;
2611 case IGMP_VERSION_3
:
2613 * Defer update of T0 to T1, until the first copy
2614 * of the state change has been transmitted.
2619 * Immediately enqueue a State-Change Report for
2620 * this interface, freeing any previous reports.
2621 * Don't kick the timers if there is nothing to do,
2622 * or if an error occurred.
2624 ifq
= &inm
->inm_scq
;
2626 retval
= igmp_v3_enqueue_group_record(ifq
, inm
, 1,
2628 IGMP_PRINTF(("%s: enqueue record = %d\n",
2631 error
= retval
* -1;
2636 * Schedule transmission of pending state-change
2637 * report up to RV times for this link. The timer
2638 * will fire at the next igmp_fasttimo (~200ms),
2639 * giving us an opportunity to merge the reports.
2641 if (igi
->igi_flags
& IGIF_LOOPBACK
) {
2644 VERIFY(igi
->igi_rv
> 1);
2645 inm
->inm_scrv
= igi
->igi_rv
;
2647 inm
->inm_sctimer
= 1;
2648 state_change_timers_running
= 1;
2657 * Only update the T0 state if state change is atomic,
2658 * i.e. we don't need to wait for a timer to fire before we
2659 * can consider the state change to have been communicated.
2663 IGMP_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__
,
2664 inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
->if_name
,
2665 inm
->inm_ifp
->if_unit
));
2672 * Issue an intermediate state change during the IGMP life-cycle.
2675 igmp_handle_state_change(struct in_multi
*inm
, struct igmp_ifinfo
*igi
)
2680 INM_LOCK_ASSERT_HELD(inm
);
2681 IGI_LOCK_ASSERT_NOTHELD(igi
);
2683 IGMP_PRINTF(("%s: state change for %s on ifp %p(%s%d)\n",
2684 __func__
, inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
,
2685 inm
->inm_ifp
->if_name
, inm
->inm_ifp
->if_unit
));
2690 VERIFY(igi
->igi_ifp
== ifp
);
2692 if ((ifp
->if_flags
& IFF_LOOPBACK
) ||
2693 (igi
->igi_flags
& IGIF_SILENT
) ||
2694 !igmp_isgroupreported(inm
->inm_addr
) ||
2695 (igi
->igi_version
!= IGMP_VERSION_3
)) {
2697 if (!igmp_isgroupreported(inm
->inm_addr
)) {
2698 IGMP_PRINTF(("%s: not kicking state "
2699 "machine for silent group\n", __func__
));
2701 IGMP_PRINTF(("%s: nothing to do\n", __func__
));
2703 IGMP_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__
,
2704 inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
->if_name
));
2708 IF_DRAIN(&inm
->inm_scq
);
2710 retval
= igmp_v3_enqueue_group_record(&inm
->inm_scq
, inm
, 1, 0, 0);
2711 IGMP_PRINTF(("%s: enqueue record = %d\n", __func__
, retval
));
2717 * If record(s) were enqueued, start the state-change
2718 * report timer for this group.
2720 inm
->inm_scrv
= ((igi
->igi_flags
& IGIF_LOOPBACK
) ? 1 : igi
->igi_rv
);
2721 inm
->inm_sctimer
= 1;
2722 state_change_timers_running
= 1;
2729 * Perform the final leave for an IGMP group.
2731 * When leaving a group:
2732 * IGMPv1 does nothing.
2733 * IGMPv2 sends a host leave message, if and only if we are the reporter.
2734 * IGMPv3 enqueues a state-change report containing a transition
2735 * to INCLUDE {} for immediate transmission.
2738 igmp_final_leave(struct in_multi
*inm
, struct igmp_ifinfo
*igi
)
2742 INM_LOCK_ASSERT_HELD(inm
);
2743 IGI_LOCK_ASSERT_NOTHELD(igi
);
2745 IGMP_PRINTF(("%s: final leave %s on ifp %p(%s%d)\n",
2746 __func__
, inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
,
2747 inm
->inm_ifp
->if_name
, inm
->inm_ifp
->if_unit
));
2749 switch (inm
->inm_state
) {
2750 case IGMP_NOT_MEMBER
:
2751 case IGMP_SILENT_MEMBER
:
2752 case IGMP_LEAVING_MEMBER
:
2753 /* Already leaving or left; do nothing. */
2754 IGMP_PRINTF(("%s: not kicking state machine for silent group\n",
2757 case IGMP_REPORTING_MEMBER
:
2758 case IGMP_IDLE_MEMBER
:
2759 case IGMP_G_QUERY_PENDING_MEMBER
:
2760 case IGMP_SG_QUERY_PENDING_MEMBER
:
2762 if (igi
->igi_version
== IGMP_VERSION_2
) {
2763 if (inm
->inm_state
== IGMP_G_QUERY_PENDING_MEMBER
||
2764 inm
->inm_state
== IGMP_SG_QUERY_PENDING_MEMBER
) {
2765 panic("%s: IGMPv3 state reached, not IGMPv3 "
2766 "mode\n", __func__
);
2769 igmp_v1v2_queue_report(inm
, IGMP_HOST_LEAVE_MESSAGE
);
2771 INM_LOCK_ASSERT_HELD(inm
);
2772 IGI_LOCK_ASSERT_HELD(igi
);
2774 inm
->inm_state
= IGMP_NOT_MEMBER
;
2775 } else if (igi
->igi_version
== IGMP_VERSION_3
) {
2777 * Stop group timer and all pending reports.
2778 * Immediately enqueue a state-change report
2779 * TO_IN {} to be sent on the next fast timeout,
2780 * giving us an opportunity to merge reports.
2782 IF_DRAIN(&inm
->inm_scq
);
2784 if (igi
->igi_flags
& IGIF_LOOPBACK
) {
2787 inm
->inm_scrv
= igi
->igi_rv
;
2789 IGMP_PRINTF(("%s: Leaving %s/%s%d with %d "
2790 "pending retransmissions.\n", __func__
,
2791 inet_ntoa(inm
->inm_addr
),
2792 inm
->inm_ifp
->if_name
, inm
->inm_ifp
->if_unit
,
2794 if (inm
->inm_scrv
== 0) {
2795 inm
->inm_state
= IGMP_NOT_MEMBER
;
2796 inm
->inm_sctimer
= 0;
2800 * Stick around in the in_multihead list;
2801 * the final detach will be issued by
2802 * igmp_v3_process_group_timers() when
2803 * the retransmit timer expires.
2805 INM_ADDREF_LOCKED(inm
);
2806 VERIFY(inm
->inm_debug
& IFD_ATTACHED
);
2808 VERIFY(inm
->inm_reqcnt
>= 1);
2809 inm
->inm_nrelecnt
++;
2810 VERIFY(inm
->inm_nrelecnt
!= 0);
2812 retval
= igmp_v3_enqueue_group_record(
2813 &inm
->inm_scq
, inm
, 1, 0, 0);
2814 KASSERT(retval
!= 0,
2815 ("%s: enqueue record = %d\n", __func__
,
2818 inm
->inm_state
= IGMP_LEAVING_MEMBER
;
2819 inm
->inm_sctimer
= 1;
2820 state_change_timers_running
= 1;
2826 case IGMP_LAZY_MEMBER
:
2827 case IGMP_SLEEPING_MEMBER
:
2828 case IGMP_AWAKENING_MEMBER
:
2829 /* Our reports are suppressed; do nothing. */
2835 IGMP_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__
,
2836 inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
->if_name
,
2837 inm
->inm_ifp
->if_unit
));
2838 inm
->inm_st
[1].iss_fmode
= MCAST_UNDEFINED
;
2839 IGMP_PRINTF(("%s: T1 now MCAST_UNDEFINED for %s/%s%d\n",
2840 __func__
, inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
->if_name
,
2841 inm
->inm_ifp
->if_unit
));
2846 * Enqueue an IGMPv3 group record to the given output queue.
2848 * XXX This function could do with having the allocation code
2849 * split out, and the multiple-tree-walks coalesced into a single
2850 * routine as has been done in igmp_v3_enqueue_filter_change().
2852 * If is_state_change is zero, a current-state record is appended.
2853 * If is_state_change is non-zero, a state-change report is appended.
2855 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2856 * If is_group_query is zero, and if there is a packet with free space
2857 * at the tail of the queue, it will be appended to providing there
2858 * is enough free space.
2859 * Otherwise a new mbuf packet chain is allocated.
2861 * If is_source_query is non-zero, each source is checked to see if
2862 * it was recorded for a Group-Source query, and will be omitted if
2863 * it is not both in-mode and recorded.
2865 * The function will attempt to allocate leading space in the packet
2866 * for the IP/IGMP header to be prepended without fragmenting the chain.
2868 * If successful the size of all data appended to the queue is returned,
2869 * otherwise an error code less than zero is returned, or zero if
2870 * no record(s) were appended.
2873 igmp_v3_enqueue_group_record(struct ifqueue
*ifq
, struct in_multi
*inm
,
2874 const int is_state_change
, const int is_group_query
,
2875 const int is_source_query
)
2877 struct igmp_grouprec ig
;
2878 struct igmp_grouprec
*pig
;
2880 struct ip_msource
*ims
, *nims
;
2881 struct mbuf
*m0
, *m
, *md
;
2882 int error
, is_filter_list_change
;
2883 int minrec0len
, m0srcs
, msrcs
, nbytes
, off
;
2884 int record_has_sources
;
2889 u_int16_t ig_numsrc
;
2891 INM_LOCK_ASSERT_HELD(inm
);
2892 IGI_LOCK_ASSERT_HELD(inm
->inm_igi
);
2896 is_filter_list_change
= 0;
2903 record_has_sources
= 1;
2905 type
= IGMP_DO_NOTHING
;
2906 mode
= inm
->inm_st
[1].iss_fmode
;
2909 * If we did not transition out of ASM mode during t0->t1,
2910 * and there are no source nodes to process, we can skip
2911 * the generation of source records.
2913 if (inm
->inm_st
[0].iss_asm
> 0 && inm
->inm_st
[1].iss_asm
> 0 &&
2915 record_has_sources
= 0;
2917 if (is_state_change
) {
2919 * Queue a state change record.
2920 * If the mode did not change, and there are non-ASM
2921 * listeners or source filters present,
2922 * we potentially need to issue two records for the group.
2923 * If we are transitioning to MCAST_UNDEFINED, we need
2924 * not send any sources.
2925 * If there are ASM listeners, and there was no filter
2926 * mode transition of any kind, do nothing.
2928 if (mode
!= inm
->inm_st
[0].iss_fmode
) {
2929 if (mode
== MCAST_EXCLUDE
) {
2930 IGMP_PRINTF(("%s: change to EXCLUDE\n",
2932 type
= IGMP_CHANGE_TO_EXCLUDE_MODE
;
2934 IGMP_PRINTF(("%s: change to INCLUDE\n",
2936 type
= IGMP_CHANGE_TO_INCLUDE_MODE
;
2937 if (mode
== MCAST_UNDEFINED
)
2938 record_has_sources
= 0;
2941 if (record_has_sources
) {
2942 is_filter_list_change
= 1;
2944 type
= IGMP_DO_NOTHING
;
2949 * Queue a current state record.
2951 if (mode
== MCAST_EXCLUDE
) {
2952 type
= IGMP_MODE_IS_EXCLUDE
;
2953 } else if (mode
== MCAST_INCLUDE
) {
2954 type
= IGMP_MODE_IS_INCLUDE
;
2955 VERIFY(inm
->inm_st
[1].iss_asm
== 0);
2960 * Generate the filter list changes using a separate function.
2962 if (is_filter_list_change
)
2963 return (igmp_v3_enqueue_filter_change(ifq
, inm
));
2965 if (type
== IGMP_DO_NOTHING
) {
2966 IGMP_PRINTF(("%s: nothing to do for %s/%s%d\n",
2967 __func__
, inet_ntoa(inm
->inm_addr
),
2968 inm
->inm_ifp
->if_name
, inm
->inm_ifp
->if_unit
));
2973 * If any sources are present, we must be able to fit at least
2974 * one in the trailing space of the tail packet's mbuf,
2977 minrec0len
= sizeof(struct igmp_grouprec
);
2978 if (record_has_sources
)
2979 minrec0len
+= sizeof(in_addr_t
);
2981 IGMP_PRINTF(("%s: queueing %s for %s/%s%d\n", __func__
,
2982 igmp_rec_type_to_str(type
), inet_ntoa(inm
->inm_addr
),
2983 inm
->inm_ifp
->if_name
, inm
->inm_ifp
->if_unit
));
2986 * Check if we have a packet in the tail of the queue for this
2987 * group into which the first group record for this group will fit.
2988 * Otherwise allocate a new packet.
2989 * Always allocate leading space for IP+RA_OPT+IGMP+REPORT.
2990 * Note: Group records for G/GSR query responses MUST be sent
2991 * in their own packet.
2994 if (!is_group_query
&&
2996 (m0
->m_pkthdr
.vt_nrecs
+ 1 <= IGMP_V3_REPORT_MAXRECS
) &&
2997 (m0
->m_pkthdr
.len
+ minrec0len
) <
2998 (ifp
->if_mtu
- IGMP_LEADINGSPACE
)) {
2999 m0srcs
= (ifp
->if_mtu
- m0
->m_pkthdr
.len
-
3000 sizeof(struct igmp_grouprec
)) / sizeof(in_addr_t
);
3002 IGMP_PRINTF(("%s: use existing packet\n", __func__
));
3004 if (IF_QFULL(ifq
)) {
3005 IGMP_PRINTF(("%s: outbound queue full\n", __func__
));
3009 m0srcs
= (ifp
->if_mtu
- IGMP_LEADINGSPACE
-
3010 sizeof(struct igmp_grouprec
)) / sizeof(in_addr_t
);
3011 if (!is_state_change
&& !is_group_query
) {
3012 m
= m_getcl(M_DONTWAIT
, MT_DATA
, M_PKTHDR
);
3014 m
->m_data
+= IGMP_LEADINGSPACE
;
3017 m
= m_gethdr(M_DONTWAIT
, MT_DATA
);
3019 MH_ALIGN(m
, IGMP_LEADINGSPACE
);
3024 IGMP_PRINTF(("%s: allocated first packet\n", __func__
));
3028 * Append group record.
3029 * If we have sources, we don't know how many yet.
3034 ig
.ig_group
= inm
->inm_addr
;
3035 if (!m_append(m
, sizeof(struct igmp_grouprec
), (void *)&ig
)) {
3038 IGMP_PRINTF(("%s: m_append() failed.\n", __func__
));
3041 nbytes
+= sizeof(struct igmp_grouprec
);
3044 * Append as many sources as will fit in the first packet.
3045 * If we are appending to a new packet, the chain allocation
3046 * may potentially use clusters; use m_getptr() in this case.
3047 * If we are appending to an existing packet, we need to obtain
3048 * a pointer to the group record after m_append(), in case a new
3049 * mbuf was allocated.
3050 * Only append sources which are in-mode at t1. If we are
3051 * transitioning to MCAST_UNDEFINED state on the group, do not
3052 * include source entries.
3053 * Only report recorded sources in our filter set when responding
3054 * to a group-source query.
3056 if (record_has_sources
) {
3059 pig
= (struct igmp_grouprec
*)(void *)
3060 (mtod(md
, uint8_t *) + md
->m_len
- nbytes
);
3062 md
= m_getptr(m
, 0, &off
);
3063 pig
= (struct igmp_grouprec
*)(void *)
3064 (mtod(md
, uint8_t *) + off
);
3067 RB_FOREACH_SAFE(ims
, ip_msource_tree
, &inm
->inm_srcs
, nims
) {
3068 IGMP_PRINTF(("%s: visit node %s\n", __func__
,
3069 inet_ntoa_haddr(ims
->ims_haddr
)));
3070 now
= ims_get_mode(inm
, ims
, 1);
3071 IGMP_PRINTF(("%s: node is %d\n", __func__
, now
));
3072 if ((now
!= mode
) ||
3073 (now
== mode
&& mode
== MCAST_UNDEFINED
)) {
3074 IGMP_PRINTF(("%s: skip node\n", __func__
));
3077 if (is_source_query
&& ims
->ims_stp
== 0) {
3078 IGMP_PRINTF(("%s: skip unrecorded node\n",
3082 IGMP_PRINTF(("%s: append node\n", __func__
));
3083 naddr
= htonl(ims
->ims_haddr
);
3084 if (!m_append(m
, sizeof(in_addr_t
), (void *)&naddr
)) {
3087 IGMP_PRINTF(("%s: m_append() failed.\n",
3091 nbytes
+= sizeof(in_addr_t
);
3093 if (msrcs
== m0srcs
)
3096 IGMP_PRINTF(("%s: msrcs is %d this packet\n", __func__
,
3098 ig_numsrc
= htons(msrcs
);
3099 bcopy(&ig_numsrc
, &pig
->ig_numsrc
, sizeof (ig_numsrc
));
3100 nbytes
+= (msrcs
* sizeof(in_addr_t
));
3103 if (is_source_query
&& msrcs
== 0) {
3104 IGMP_PRINTF(("%s: no recorded sources to report\n", __func__
));
3111 * We are good to go with first packet.
3114 IGMP_PRINTF(("%s: enqueueing first packet\n", __func__
));
3115 m
->m_pkthdr
.vt_nrecs
= 1;
3116 m
->m_pkthdr
.rcvif
= ifp
;
3119 m
->m_pkthdr
.vt_nrecs
++;
3122 * No further work needed if no source list in packet(s).
3124 if (!record_has_sources
)
3128 * Whilst sources remain to be announced, we need to allocate
3129 * a new packet and fill out as many sources as will fit.
3130 * Always try for a cluster first.
3132 while (nims
!= NULL
) {
3133 if (IF_QFULL(ifq
)) {
3134 IGMP_PRINTF(("%s: outbound queue full\n", __func__
));
3137 m
= m_getcl(M_DONTWAIT
, MT_DATA
, M_PKTHDR
);
3139 m
->m_data
+= IGMP_LEADINGSPACE
;
3141 m
= m_gethdr(M_DONTWAIT
, MT_DATA
);
3143 MH_ALIGN(m
, IGMP_LEADINGSPACE
);
3147 md
= m_getptr(m
, 0, &off
);
3148 pig
= (struct igmp_grouprec
*)(void *)
3149 (mtod(md
, uint8_t *) + off
);
3150 IGMP_PRINTF(("%s: allocated next packet\n", __func__
));
3152 if (!m_append(m
, sizeof(struct igmp_grouprec
), (void *)&ig
)) {
3155 IGMP_PRINTF(("%s: m_append() failed.\n", __func__
));
3158 m
->m_pkthdr
.vt_nrecs
= 1;
3159 nbytes
+= sizeof(struct igmp_grouprec
);
3161 m0srcs
= (ifp
->if_mtu
- IGMP_LEADINGSPACE
-
3162 sizeof(struct igmp_grouprec
)) / sizeof(in_addr_t
);
3165 RB_FOREACH_FROM(ims
, ip_msource_tree
, nims
) {
3166 IGMP_PRINTF(("%s: visit node %s\n", __func__
,
3167 inet_ntoa_haddr(ims
->ims_haddr
)));
3168 now
= ims_get_mode(inm
, ims
, 1);
3169 if ((now
!= mode
) ||
3170 (now
== mode
&& mode
== MCAST_UNDEFINED
)) {
3171 IGMP_PRINTF(("%s: skip node\n", __func__
));
3174 if (is_source_query
&& ims
->ims_stp
== 0) {
3175 IGMP_PRINTF(("%s: skip unrecorded node\n",
3179 IGMP_PRINTF(("%s: append node\n", __func__
));
3180 naddr
= htonl(ims
->ims_haddr
);
3181 if (!m_append(m
, sizeof(in_addr_t
), (void *)&naddr
)) {
3184 IGMP_PRINTF(("%s: m_append() failed.\n",
3189 if (msrcs
== m0srcs
)
3192 ig_numsrc
= htons(msrcs
);
3193 bcopy(&ig_numsrc
, &pig
->ig_numsrc
, sizeof (ig_numsrc
));
3194 nbytes
+= (msrcs
* sizeof(in_addr_t
));
3196 IGMP_PRINTF(("%s: enqueueing next packet\n", __func__
));
3197 m
->m_pkthdr
.rcvif
= ifp
;
/*
 * Type used to mark record pass completion.
 * We exploit the fact we can cast to this easily from the
 * current filter modes on each ip_msource node.
 */
typedef enum {
	REC_NONE = 0x00,	/* MCAST_UNDEFINED */
	REC_ALLOW = 0x01,	/* MCAST_INCLUDE */
	REC_BLOCK = 0x02,	/* MCAST_EXCLUDE */
	REC_FULL = REC_ALLOW | REC_BLOCK
} rectype_t;
3217 * Enqueue an IGMPv3 filter list change to the given output queue.
3219 * Source list filter state is held in an RB-tree. When the filter list
3220 * for a group is changed without changing its mode, we need to compute
3221 * the deltas between T0 and T1 for each source in the filter set,
3222 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
3224 * As we may potentially queue two record types, and the entire R-B tree
3225 * needs to be walked at once, we break this out into its own function
3226 * so we can generate a tightly packed queue of packets.
3228 * XXX This could be written to only use one tree walk, although that makes
3229 * serializing into the mbuf chains a bit harder. For now we do two walks
3230 * which makes things easier on us, and it may or may not be harder on
3233 * If successful the size of all data appended to the queue is returned,
3234 * otherwise an error code less than zero is returned, or zero if
3235 * no record(s) were appended.
3238 igmp_v3_enqueue_filter_change(struct ifqueue
*ifq
, struct in_multi
*inm
)
3240 static const int MINRECLEN
=
3241 sizeof(struct igmp_grouprec
) + sizeof(in_addr_t
);
3243 struct igmp_grouprec ig
;
3244 struct igmp_grouprec
*pig
;
3245 struct ip_msource
*ims
, *nims
;
3246 struct mbuf
*m
, *m0
, *md
;
3248 int m0srcs
, nbytes
, npbytes
, off
, rsrcs
, schanged
;
3250 uint8_t mode
, now
, then
;
3251 rectype_t crt
, drt
, nrt
;
3252 u_int16_t ig_numsrc
;
3254 INM_LOCK_ASSERT_HELD(inm
);
3256 if (inm
->inm_nsrc
== 0 ||
3257 (inm
->inm_st
[0].iss_asm
> 0 && inm
->inm_st
[1].iss_asm
> 0))
3260 ifp
= inm
->inm_ifp
; /* interface */
3261 mode
= inm
->inm_st
[1].iss_fmode
; /* filter mode at t1 */
3262 crt
= REC_NONE
; /* current group record type */
3263 drt
= REC_NONE
; /* mask of completed group record types */
3264 nrt
= REC_NONE
; /* record type for current node */
3265 m0srcs
= 0; /* # source which will fit in current mbuf chain */
3266 nbytes
= 0; /* # of bytes appended to group's state-change queue */
3267 npbytes
= 0; /* # of bytes appended this packet */
3268 rsrcs
= 0; /* # sources encoded in current record */
3269 schanged
= 0; /* # nodes encoded in overall filter change */
3270 nallow
= 0; /* # of source entries in ALLOW_NEW */
3271 nblock
= 0; /* # of source entries in BLOCK_OLD */
3272 nims
= NULL
; /* next tree node pointer */
3275 * For each possible filter record mode.
3276 * The first kind of source we encounter tells us which
3277 * is the first kind of record we start appending.
3278 * If a node transitioned to UNDEFINED at t1, its mode is treated
3279 * as the inverse of the group's filter mode.
3281 while (drt
!= REC_FULL
) {
3285 (m0
->m_pkthdr
.vt_nrecs
+ 1 <=
3286 IGMP_V3_REPORT_MAXRECS
) &&
3287 (m0
->m_pkthdr
.len
+ MINRECLEN
) <
3288 (ifp
->if_mtu
- IGMP_LEADINGSPACE
)) {
3290 m0srcs
= (ifp
->if_mtu
- m0
->m_pkthdr
.len
-
3291 sizeof(struct igmp_grouprec
)) /
3293 IGMP_PRINTF(("%s: use previous packet\n",
3296 m
= m_getcl(M_DONTWAIT
, MT_DATA
, M_PKTHDR
);
3298 m
->m_data
+= IGMP_LEADINGSPACE
;
3300 m
= m_gethdr(M_DONTWAIT
, MT_DATA
);
3302 MH_ALIGN(m
, IGMP_LEADINGSPACE
);
3305 IGMP_PRINTF(("%s: m_get*() failed\n",
3309 m
->m_pkthdr
.vt_nrecs
= 0;
3310 m0srcs
= (ifp
->if_mtu
- IGMP_LEADINGSPACE
-
3311 sizeof(struct igmp_grouprec
)) /
3314 IGMP_PRINTF(("%s: allocated new packet\n",
3318 * Append the IGMP group record header to the
3319 * current packet's data area.
3320 * Recalculate pointer to free space for next
3321 * group record, in case m_append() allocated
3322 * a new mbuf or cluster.
3324 memset(&ig
, 0, sizeof(ig
));
3325 ig
.ig_group
= inm
->inm_addr
;
3326 if (!m_append(m
, sizeof(ig
), (void *)&ig
)) {
3329 IGMP_PRINTF(("%s: m_append() failed\n",
3333 npbytes
+= sizeof(struct igmp_grouprec
);
3335 /* new packet; offset in c hain */
3336 md
= m_getptr(m
, npbytes
-
3337 sizeof(struct igmp_grouprec
), &off
);
3338 pig
= (struct igmp_grouprec
*)(void *)(mtod(md
,
3341 /* current packet; offset from last append */
3343 pig
= (struct igmp_grouprec
*)(void *)(mtod(md
,
3344 uint8_t *) + md
->m_len
-
3345 sizeof(struct igmp_grouprec
));
3348 * Begin walking the tree for this record type
3349 * pass, or continue from where we left off
3350 * previously if we had to allocate a new packet.
3351 * Only report deltas in-mode at t1.
3352 * We need not report included sources as allowed
3353 * if we are in inclusive mode on the group,
3354 * however the converse is not true.
3358 nims
= RB_MIN(ip_msource_tree
, &inm
->inm_srcs
);
3359 RB_FOREACH_FROM(ims
, ip_msource_tree
, nims
) {
3360 IGMP_PRINTF(("%s: visit node %s\n",
3361 __func__
, inet_ntoa_haddr(ims
->ims_haddr
)));
3362 now
= ims_get_mode(inm
, ims
, 1);
3363 then
= ims_get_mode(inm
, ims
, 0);
3364 IGMP_PRINTF(("%s: mode: t0 %d, t1 %d\n",
3365 __func__
, then
, now
));
3367 IGMP_PRINTF(("%s: skip unchanged\n",
3371 if (mode
== MCAST_EXCLUDE
&&
3372 now
== MCAST_INCLUDE
) {
3373 IGMP_PRINTF(("%s: skip IN src on EX "
3374 "group\n", __func__
));
3377 nrt
= (rectype_t
)now
;
3378 if (nrt
== REC_NONE
)
3379 nrt
= (rectype_t
)(~mode
& REC_FULL
);
3380 if (schanged
++ == 0) {
3382 } else if (crt
!= nrt
)
3384 naddr
= htonl(ims
->ims_haddr
);
3385 if (!m_append(m
, sizeof(in_addr_t
),
3389 IGMP_PRINTF(("%s: m_append() failed\n",
3393 nallow
+= !!(crt
== REC_ALLOW
);
3394 nblock
+= !!(crt
== REC_BLOCK
);
3395 if (++rsrcs
== m0srcs
)
3399 * If we did not append any tree nodes on this
3400 * pass, back out of allocations.
3403 npbytes
-= sizeof(struct igmp_grouprec
);
3405 IGMP_PRINTF(("%s: m_free(m)\n",
3409 IGMP_PRINTF(("%s: m_adj(m, -ig)\n",
3411 m_adj(m
, -((int)sizeof(
3412 struct igmp_grouprec
)));
3416 npbytes
+= (rsrcs
* sizeof(in_addr_t
));
3417 if (crt
== REC_ALLOW
)
3418 pig
->ig_type
= IGMP_ALLOW_NEW_SOURCES
;
3419 else if (crt
== REC_BLOCK
)
3420 pig
->ig_type
= IGMP_BLOCK_OLD_SOURCES
;
3421 ig_numsrc
= htons(rsrcs
);
3422 bcopy(&ig_numsrc
, &pig
->ig_numsrc
, sizeof (ig_numsrc
));
3424 * Count the new group record, and enqueue this
3425 * packet if it wasn't already queued.
3427 m
->m_pkthdr
.vt_nrecs
++;
3428 m
->m_pkthdr
.rcvif
= ifp
;
3432 } while (nims
!= NULL
);
3434 crt
= (~crt
& REC_FULL
);
3437 IGMP_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__
,
3444 igmp_v3_merge_state_changes(struct in_multi
*inm
, struct ifqueue
*ifscq
)
3447 struct mbuf
*m
; /* pending state-change */
3448 struct mbuf
*m0
; /* copy of pending state-change */
3449 struct mbuf
*mt
; /* last state-change in packet */
3451 int docopy
, domerge
;
3454 INM_LOCK_ASSERT_HELD(inm
);
3461 * If there are further pending retransmissions, make a writable
3462 * copy of each queued state-change message before merging.
3464 if (inm
->inm_scrv
> 0)
3469 if (gq
->ifq_head
== NULL
) {
3470 IGMP_PRINTF(("%s: WARNING: queue for inm %p is empty\n",
3476 * Use IF_REMQUEUE() instead of IF_DEQUEUE() below, since the
3477 * packet might not always be at the head of the ifqueue.
3482 * Only merge the report into the current packet if
3483 * there is sufficient space to do so; an IGMPv3 report
3484 * packet may only contain 65,535 group records.
3485 * Always use a simple mbuf chain concatentation to do this,
3486 * as large state changes for single groups may have
3487 * allocated clusters.
3490 mt
= ifscq
->ifq_tail
;
3492 recslen
= m_length(m
);
3494 if ((mt
->m_pkthdr
.vt_nrecs
+
3495 m
->m_pkthdr
.vt_nrecs
<=
3496 IGMP_V3_REPORT_MAXRECS
) &&
3497 (mt
->m_pkthdr
.len
+ recslen
<=
3498 (inm
->inm_ifp
->if_mtu
- IGMP_LEADINGSPACE
)))
3502 if (!domerge
&& IF_QFULL(gq
)) {
3503 IGMP_PRINTF(("%s: outbound queue full, skipping whole "
3504 "packet %p\n", __func__
, m
));
3515 IGMP_PRINTF(("%s: dequeueing %p\n", __func__
, m
));
3521 IGMP_PRINTF(("%s: copying %p\n", __func__
, m
));
3522 m0
= m_dup(m
, M_NOWAIT
);
3525 m0
->m_nextpkt
= NULL
;
3530 IGMP_PRINTF(("%s: queueing %p to ifscq %p)\n",
3531 __func__
, m0
, ifscq
));
3532 m0
->m_pkthdr
.rcvif
= inm
->inm_ifp
;
3533 IF_ENQUEUE(ifscq
, m0
);
3535 struct mbuf
*mtl
; /* last mbuf of packet mt */
3537 IGMP_PRINTF(("%s: merging %p with ifscq tail %p)\n",
3541 m0
->m_flags
&= ~M_PKTHDR
;
3542 mt
->m_pkthdr
.len
+= recslen
;
3543 mt
->m_pkthdr
.vt_nrecs
+=
3544 m0
->m_pkthdr
.vt_nrecs
;
3554 * Respond to a pending IGMPv3 General Query.
3557 igmp_v3_dispatch_general_query(struct igmp_ifinfo
*igi
)
3560 struct in_multi
*inm
;
3561 struct in_multistep step
;
3564 IGI_LOCK_ASSERT_HELD(igi
);
3566 VERIFY(igi
->igi_version
== IGMP_VERSION_3
);
3571 in_multihead_lock_shared();
3572 IN_FIRST_MULTI(step
, inm
);
3573 while (inm
!= NULL
) {
3575 if (inm
->inm_ifp
!= ifp
)
3578 switch (inm
->inm_state
) {
3579 case IGMP_NOT_MEMBER
:
3580 case IGMP_SILENT_MEMBER
:
3582 case IGMP_REPORTING_MEMBER
:
3583 case IGMP_IDLE_MEMBER
:
3584 case IGMP_LAZY_MEMBER
:
3585 case IGMP_SLEEPING_MEMBER
:
3586 case IGMP_AWAKENING_MEMBER
:
3587 inm
->inm_state
= IGMP_REPORTING_MEMBER
;
3589 retval
= igmp_v3_enqueue_group_record(&igi
->igi_gq
,
3592 IGMP_PRINTF(("%s: enqueue record = %d\n",
3595 case IGMP_G_QUERY_PENDING_MEMBER
:
3596 case IGMP_SG_QUERY_PENDING_MEMBER
:
3597 case IGMP_LEAVING_MEMBER
:
3602 IN_NEXT_MULTI(step
, inm
);
3604 in_multihead_lock_done();
3607 loop
= (igi
->igi_flags
& IGIF_LOOPBACK
) ? 1 : 0;
3608 igmp_dispatch_queue(igi
, &igi
->igi_gq
, IGMP_MAX_RESPONSE_BURST
,
3610 IGI_LOCK_ASSERT_HELD(igi
);
3612 * Slew transmission of bursts over 500ms intervals.
3614 if (igi
->igi_gq
.ifq_head
!= NULL
) {
3615 igi
->igi_v3_timer
= 1 + IGMP_RANDOM_DELAY(
3616 IGMP_RESPONSE_BURST_INTERVAL
);
3617 interface_timers_running
= 1;
3622 * Transmit the next pending IGMP message in the output queue.
3624 * Must not be called with inm_lock or igi_lock held.
3627 igmp_sendpkt(struct mbuf
*m
, struct ifnet
*ifp
)
3629 struct ip_moptions
*imo
;
3630 struct mbuf
*ipopts
, *m0
;
3634 IGMP_PRINTF(("%s: transmit %p\n", __func__
, m
));
3637 * Check if the ifnet is still attached.
3639 if (ifp
== NULL
|| !ifnet_is_attached(ifp
, 0)) {
3640 IGMP_PRINTF(("%s: dropped %p as ifp u went away.\n",
3643 OSAddAtomic(1, &ipstat
.ips_noroute
);
3647 ipopts
= igmp_sendra
? m_raopt
: NULL
;
3649 imo
= ip_allocmoptions(M_WAITOK
);
3655 imo
->imo_multicast_ttl
= 1;
3656 imo
->imo_multicast_vif
= -1;
3658 imo
->imo_multicast_loop
= (ip_mrouter
!= NULL
);
3660 imo
->imo_multicast_loop
= 0;
3664 * If the user requested that IGMP traffic be explicitly
3665 * redirected to the loopback interface (e.g. they are running a
3666 * MANET interface and the routing protocol needs to see the
3667 * updates), handle this now.
3669 if (m
->m_flags
& M_IGMP_LOOP
)
3670 imo
->imo_multicast_ifp
= lo_ifp
;
3672 imo
->imo_multicast_ifp
= ifp
;
3674 if (m
->m_flags
& M_IGMPV2
) {
3677 m0
= igmp_v3_encap_report(ifp
, m
);
3680 * If igmp_v3_encap_report() failed, then M_PREPEND()
3681 * already freed the original mbuf chain.
3682 * This means that we don't have to m_freem(m) here.
3684 IGMP_PRINTF(("%s: dropped %p\n", __func__
, m
));
3686 atomic_add_32(&ipstat
.ips_odropped
, 1);
3691 m
->m_flags
&= ~(M_PROTOFLAGS
| M_IGMP_LOOP
);
3692 m0
->m_pkthdr
.rcvif
= lo_ifp
;
3694 mac_netinet_igmp_send(ifp
, m0
);
3697 if (ifp
->if_eflags
& IFEF_TXSTART
) {
3698 /* Use control service class if the interface supports
3699 * transmit-start model.
3701 (void) m_set_service_class(m0
, MBUF_SC_CTL
);
3703 bzero(&ro
, sizeof (ro
));
3704 error
= ip_output(m0
, ipopts
, &ro
, 0, imo
, NULL
);
3705 if (ro
.ro_rt
!= NULL
) {
3713 IGMP_PRINTF(("%s: ip_output(%p) = %d\n", __func__
, m0
, error
));
3717 IGMPSTAT_INC(igps_snd_reports
);
3718 OIGMPSTAT_INC(igps_snd_reports
);
3721 * Encapsulate an IGMPv3 report.
3723 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf
3724 * chain has already had its IP/IGMPv3 header prepended. In this case
3725 * the function will not attempt to prepend; the lengths and checksums
3726 * will however be re-computed.
3728 * Returns a pointer to the new mbuf chain head, or NULL if the
3729 * allocation failed.
3731 static struct mbuf
*
3732 igmp_v3_encap_report(struct ifnet
*ifp
, struct mbuf
*m
)
3734 struct igmp_report
*igmp
;
3736 int hdrlen
, igmpreclen
;
3738 VERIFY((m
->m_flags
& M_PKTHDR
));
3740 igmpreclen
= m_length(m
);
3741 hdrlen
= sizeof(struct ip
) + sizeof(struct igmp_report
);
3743 if (m
->m_flags
& M_IGMPV3_HDR
) {
3744 igmpreclen
-= hdrlen
;
3746 M_PREPEND(m
, hdrlen
, M_DONTWAIT
);
3749 m
->m_flags
|= M_IGMPV3_HDR
;
3752 IGMP_PRINTF(("%s: igmpreclen is %d\n", __func__
, igmpreclen
));
3754 m
->m_data
+= sizeof(struct ip
);
3755 m
->m_len
-= sizeof(struct ip
);
3757 igmp
= mtod(m
, struct igmp_report
*);
3758 igmp
->ir_type
= IGMP_v3_HOST_MEMBERSHIP_REPORT
;
3761 igmp
->ir_numgrps
= htons(m
->m_pkthdr
.vt_nrecs
);
3763 igmp
->ir_cksum
= in_cksum(m
, sizeof(struct igmp_report
) + igmpreclen
);
3764 m
->m_pkthdr
.vt_nrecs
= 0;
3766 m
->m_data
-= sizeof(struct ip
);
3767 m
->m_len
+= sizeof(struct ip
);
3769 ip
= mtod(m
, struct ip
*);
3770 ip
->ip_tos
= IPTOS_PREC_INTERNETCONTROL
;
3771 ip
->ip_len
= hdrlen
+ igmpreclen
;
3773 ip
->ip_p
= IPPROTO_IGMP
;
3776 ip
->ip_src
.s_addr
= INADDR_ANY
;
3778 if (m
->m_flags
& M_IGMP_LOOP
) {
3779 struct in_ifaddr
*ia
;
3783 IFA_LOCK(&ia
->ia_ifa
);
3784 ip
->ip_src
= ia
->ia_addr
.sin_addr
;
3785 IFA_UNLOCK(&ia
->ia_ifa
);
3786 IFA_REMREF(&ia
->ia_ifa
);
3790 ip
->ip_dst
.s_addr
= htonl(INADDR_ALLRPTS_GROUP
);
3797 igmp_rec_type_to_str(const int type
)
3800 case IGMP_CHANGE_TO_EXCLUDE_MODE
:
3803 case IGMP_CHANGE_TO_INCLUDE_MODE
:
3806 case IGMP_MODE_IS_EXCLUDE
:
3809 case IGMP_MODE_IS_INCLUDE
:
3812 case IGMP_ALLOW_NEW_SOURCES
:
3815 case IGMP_BLOCK_OLD_SOURCES
:
3829 IGMP_PRINTF(("%s: initializing\n", __func__
));
3831 igmp_timers_are_running
= 0;
3833 /* Setup lock group and attribute for igmp_mtx */
3834 igmp_mtx_grp_attr
= lck_grp_attr_alloc_init();
3835 igmp_mtx_grp
= lck_grp_alloc_init("igmp_mtx", igmp_mtx_grp_attr
);
3836 igmp_mtx_attr
= lck_attr_alloc_init();
3837 lck_mtx_init(&igmp_mtx
, igmp_mtx_grp
, igmp_mtx_attr
);
3839 LIST_INIT(&igi_head
);
3840 m_raopt
= igmp_ra_alloc();
3842 igi_size
= sizeof (struct igmp_ifinfo
);
3843 igi_zone
= zinit(igi_size
, IGI_ZONE_MAX
* igi_size
,
3845 if (igi_zone
== NULL
) {
3846 panic("%s: failed allocating %s", __func__
, IGI_ZONE_NAME
);
3849 zone_change(igi_zone
, Z_EXPAND
, TRUE
);
3850 zone_change(igi_zone
, Z_CALLERACCT
, FALSE
);