2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 2007-2009 Bruce Simpson.
30 * Copyright (c) 1988 Stephen Deering.
31 * Copyright (c) 1992, 1993
32 * The Regents of the University of California. All rights reserved.
34 * This code is derived from software contributed to Berkeley by
35 * Stephen Deering of Stanford University.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 * @(#)igmp.c 8.1 (Berkeley) 7/19/93
68 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
69 * support for mandatory and extensible security protections. This notice
70 * is included in support of clause 2.2 (b) of the Apple Public License,
75 * Internet Group Management Protocol (IGMP) routines.
76 * [RFC1112, RFC2236, RFC3376]
78 * Written by Steve Deering, Stanford, May 1988.
79 * Modified by Rosen Sharma, Stanford, Aug 1994.
80 * Modified by Bill Fenner, Xerox PARC, Feb 1995.
81 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995.
82 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson.
84 * MULTICAST Revision: 3.5.1.4
87 #include <sys/cdefs.h>
89 #include <sys/param.h>
90 #include <sys/systm.h>
91 #include <sys/malloc.h>
93 #include <sys/socket.h>
94 #include <sys/protosw.h>
95 #include <sys/kernel.h>
96 #include <sys/sysctl.h>
97 #include <sys/mcache.h>
99 #include <libkern/libkern.h>
100 #include <kern/zalloc.h>
103 #include <net/route.h>
105 #include <netinet/in.h>
106 #include <netinet/in_var.h>
107 #include <netinet/in_systm.h>
108 #include <netinet/ip.h>
109 #include <netinet/ip_var.h>
110 #include <netinet/igmp.h>
111 #include <netinet/igmp_var.h>
112 #include <netinet/kpi_ipfilter_var.h>
/*
 * Render an IPv4 address (network byte order) as a dotted-quad string.
 *
 * Returns a pointer to a static buffer, so the result is not re-entrant;
 * the caller must consume or copy it before the next call.
 */
static char *
inet_ntoa(struct in_addr ina)
{
	static char buf[4*sizeof "123"];
	unsigned char *ucp = (unsigned char *)&ina;

	/* Mask each octet to 0-255 in case char is signed on this target. */
	snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
	    ucp[0] & 0xff, ucp[1] & 0xff, ucp[2] & 0xff, ucp[3] & 0xff);
	return (buf);
}
130 SLIST_HEAD(igmp_inm_relhead
, in_multi
);
132 static void igi_initvar(struct igmp_ifinfo
*, struct ifnet
*, int);
133 static struct igmp_ifinfo
*igi_alloc(int);
134 static void igi_free(struct igmp_ifinfo
*);
135 static void igi_delete(const struct ifnet
*, struct igmp_inm_relhead
*);
136 static void igmp_dispatch_queue(struct igmp_ifinfo
*, struct ifqueue
*,
137 int, const int, struct ifnet
*);
138 static void igmp_final_leave(struct in_multi
*, struct igmp_ifinfo
*);
139 static int igmp_handle_state_change(struct in_multi
*,
140 struct igmp_ifinfo
*);
141 static int igmp_initial_join(struct in_multi
*, struct igmp_ifinfo
*);
142 static int igmp_input_v1_query(struct ifnet
*, const struct ip
*,
143 const struct igmp
*);
144 static int igmp_input_v2_query(struct ifnet
*, const struct ip
*,
145 const struct igmp
*);
146 static int igmp_input_v3_query(struct ifnet
*, const struct ip
*,
147 /*const*/ struct igmpv3
*);
148 static int igmp_input_v3_group_query(struct in_multi
*,
149 int, /*const*/ struct igmpv3
*);
150 static int igmp_input_v1_report(struct ifnet
*, /*const*/ struct ip
*,
151 /*const*/ struct igmp
*);
152 static int igmp_input_v2_report(struct ifnet
*, /*const*/ struct ip
*,
153 /*const*/ struct igmp
*);
154 void igmp_sendpkt(struct mbuf
*, struct ifnet
*);
155 static __inline__
int igmp_isgroupreported(const struct in_addr
);
159 static const char * igmp_rec_type_to_str(const int);
161 static void igmp_set_version(struct igmp_ifinfo
*, const int);
162 static void igmp_flush_relq(struct igmp_ifinfo
*,
163 struct igmp_inm_relhead
*);
164 static int igmp_v1v2_queue_report(struct in_multi
*, const int);
165 static void igmp_v1v2_process_group_timer(struct in_multi
*, const int);
166 static void igmp_v1v2_process_querier_timers(struct igmp_ifinfo
*);
167 static void igmp_v2_update_group(struct in_multi
*, const int);
168 static void igmp_v3_cancel_link_timers(struct igmp_ifinfo
*);
169 static void igmp_v3_dispatch_general_query(struct igmp_ifinfo
*);
171 igmp_v3_encap_report(struct ifnet
*, struct mbuf
*);
172 static int igmp_v3_enqueue_group_record(struct ifqueue
*,
173 struct in_multi
*, const int, const int, const int);
174 static int igmp_v3_enqueue_filter_change(struct ifqueue
*,
176 static void igmp_v3_process_group_timers(struct igmp_ifinfo
*,
177 struct ifqueue
*, struct ifqueue
*, struct in_multi
*,
179 static int igmp_v3_merge_state_changes(struct in_multi
*,
181 static void igmp_v3_suppress_group_record(struct in_multi
*);
182 static int sysctl_igmp_ifinfo SYSCTL_HANDLER_ARGS
;
183 static int sysctl_igmp_gsr SYSCTL_HANDLER_ARGS
;
184 static int sysctl_igmp_default_version SYSCTL_HANDLER_ARGS
;
186 struct mbuf
*m_raopt
; /* Router Alert option */
188 static int interface_timers_running
; /* IGMPv3 general
190 static int state_change_timers_running
; /* IGMPv3 state-change
192 static int current_state_timers_running
; /* IGMPv1/v2 host
193 * report; IGMPv3 g/sg
196 static LIST_HEAD(, igmp_ifinfo
) igi_head
;
197 static struct igmpstat_v3 igmpstat_v3
= {
198 .igps_version
= IGPS_VERSION_3
,
199 .igps_len
= sizeof(struct igmpstat_v3
),
201 static struct igmpstat igmpstat
; /* old IGMPv2 stats structure */
202 static struct timeval igmp_gsrdelay
= {10, 0};
204 static int igmp_recvifkludge
= 1;
205 static int igmp_sendra
= 1;
206 static int igmp_sendlocal
= 1;
207 static int igmp_v1enable
= 1;
208 static int igmp_v2enable
= 1;
209 static int igmp_legacysupp
= 0;
210 static int igmp_default_version
= IGMP_VERSION_3
;
212 SYSCTL_STRUCT(_net_inet_igmp
, IGMPCTL_STATS
, stats
, CTLFLAG_RD
| CTLFLAG_LOCKED
,
213 &igmpstat
, igmpstat
, "");
214 SYSCTL_STRUCT(_net_inet_igmp
, OID_AUTO
, v3stats
,
215 CTLFLAG_RD
| CTLFLAG_LOCKED
, &igmpstat_v3
, igmpstat_v3
, "");
216 SYSCTL_INT(_net_inet_igmp
, OID_AUTO
, recvifkludge
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
217 &igmp_recvifkludge
, 0,
218 "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address");
219 SYSCTL_INT(_net_inet_igmp
, OID_AUTO
, sendra
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
221 "Send IP Router Alert option in IGMPv2/v3 messages");
222 SYSCTL_INT(_net_inet_igmp
, OID_AUTO
, sendlocal
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
224 "Send IGMP membership reports for 224.0.0.0/24 groups");
225 SYSCTL_INT(_net_inet_igmp
, OID_AUTO
, v1enable
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
227 "Enable backwards compatibility with IGMPv1");
228 SYSCTL_INT(_net_inet_igmp
, OID_AUTO
, v2enable
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
230 "Enable backwards compatibility with IGMPv2");
231 SYSCTL_INT(_net_inet_igmp
, OID_AUTO
, legacysupp
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
233 "Allow v1/v2 reports to suppress v3 group responses");
234 SYSCTL_PROC(_net_inet_igmp
, OID_AUTO
, default_version
,
235 CTLTYPE_INT
| CTLFLAG_RW
,
236 &igmp_default_version
, 0, sysctl_igmp_default_version
, "I",
237 "Default version of IGMP to run on each interface");
238 SYSCTL_PROC(_net_inet_igmp
, OID_AUTO
, gsrdelay
,
239 CTLTYPE_INT
| CTLFLAG_RW
,
240 &igmp_gsrdelay
.tv_sec
, 0, sysctl_igmp_gsr
, "I",
241 "Rate limit for IGMPv3 Group-and-Source queries in seconds");
244 SYSCTL_INT(_net_inet_igmp
, OID_AUTO
,
245 debug
, CTLFLAG_RW
| CTLFLAG_LOCKED
, &igmp_debug
, 0, "");
248 SYSCTL_NODE(_net_inet_igmp
, OID_AUTO
, ifinfo
, CTLFLAG_RD
| CTLFLAG_LOCKED
,
249 sysctl_igmp_ifinfo
, "Per-interface IGMPv3 state");
251 /* Lock group and attribute for igmp_mtx */
252 static lck_attr_t
*igmp_mtx_attr
;
253 static lck_grp_t
*igmp_mtx_grp
;
254 static lck_grp_attr_t
*igmp_mtx_grp_attr
;
257 * Locking and reference counting:
259 * igmp_mtx mainly protects igi_head. In cases where both igmp_mtx and
260 * in_multihead_lock must be held, the former must be acquired first in order
261 * to maintain lock ordering. It is not a requirement that igmp_mtx be
262 * acquired first before in_multihead_lock, but in case both must be acquired
263 * in succession, the correct lock ordering must be followed.
265 * Instead of walking the if_multiaddrs list at the interface and returning
266 * the ifma_protospec value of a matching entry, we search the global list
267 * of in_multi records and find it that way; this is done with in_multihead
268 * lock held. Doing so avoids the race condition issues that many other BSDs
269 * suffer from (therefore in our implementation, ifma_protospec will never be
270 * NULL for as long as the in_multi is valid.)
272 * The above creates a requirement for the in_multi to stay in in_multihead
273 * list even after the final IGMP leave (in IGMPv3 mode) until it no longer
274 * needs to be retransmitted (this is not required for IGMPv1/v2.) In order to handle
275 * this, the request and reference counts of the in_multi are bumped up when
276 * the state changes to IGMP_LEAVING_MEMBER, and later dropped in the timeout
277 * handler. Each in_multi holds a reference to the underlying igmp_ifinfo.
279 * Thus, the permitted lock order is:
281 * igmp_mtx, in_multihead_lock, inm_lock, igi_lock
283 * Any may be taken independently, but if any are held at the same time,
284 * the above lock order must be followed.
286 static decl_lck_mtx_data(, igmp_mtx
);
287 static int igmp_timers_are_running
;
289 #define IGMP_ADD_DETACHED_INM(_head, _inm) { \
290 SLIST_INSERT_HEAD(_head, _inm, inm_dtle); \
293 #define IGMP_REMOVE_DETACHED_INM(_head) { \
294 struct in_multi *_inm, *_inm_tmp; \
295 SLIST_FOREACH_SAFE(_inm, _head, inm_dtle, _inm_tmp) { \
296 SLIST_REMOVE(_head, _inm, in_multi, inm_dtle); \
299 VERIFY(SLIST_EMPTY(_head)); \
302 #define IGI_ZONE_MAX 64 /* maximum elements in zone */
303 #define IGI_ZONE_NAME "igmp_ifinfo" /* zone name */
305 static unsigned int igi_size
; /* size of zone element */
306 static struct zone
*igi_zone
; /* zone for igmp_ifinfo */
/*
 * Render a host-byte-order IPv4 address as a dotted-quad string.
 *
 * Converts to network byte order and hands off to inet_ntoa(); shares
 * inet_ntoa()'s static buffer, so it is likewise not re-entrant.
 */
static __inline char *
inet_ntoa_haddr(in_addr_t haddr)
{
	struct in_addr ia;

	ia.s_addr = htonl(haddr);
	return (inet_ntoa(ia));
}
319 * Retrieve or set default IGMP version.
322 sysctl_igmp_default_version SYSCTL_HANDLER_ARGS
324 #pragma unused(oidp, arg2)
328 lck_mtx_lock(&igmp_mtx
);
330 error
= SYSCTL_OUT(req
, arg1
, sizeof(int));
331 if (error
|| !req
->newptr
)
334 new = igmp_default_version
;
336 error
= SYSCTL_IN(req
, &new, sizeof(int));
340 if (new < IGMP_VERSION_1
|| new > IGMP_VERSION_3
) {
345 IGMP_PRINTF(("change igmp_default_version from %d to %d\n",
346 igmp_default_version
, new));
348 igmp_default_version
= new;
351 lck_mtx_unlock(&igmp_mtx
);
356 * Retrieve or set threshold between group-source queries in seconds.
360 sysctl_igmp_gsr SYSCTL_HANDLER_ARGS
362 #pragma unused(arg1, arg2)
366 lck_mtx_lock(&igmp_mtx
);
368 i
= igmp_gsrdelay
.tv_sec
;
370 error
= sysctl_handle_int(oidp
, &i
, 0, req
);
371 if (error
|| !req
->newptr
)
374 if (i
< -1 || i
>= 60) {
379 igmp_gsrdelay
.tv_sec
= i
;
382 lck_mtx_unlock(&igmp_mtx
);
387 * Expose struct igmp_ifinfo to userland, keyed by ifindex.
388 * For use by ifmcstat(8).
392 sysctl_igmp_ifinfo SYSCTL_HANDLER_ARGS
399 struct igmp_ifinfo
*igi
;
400 struct igmp_ifinfo_u igi_u
;
405 if (req
->newptr
!= USER_ADDR_NULL
)
411 lck_mtx_lock(&igmp_mtx
);
413 if (name
[0] <= 0 || name
[0] > (u_int
)if_index
) {
420 ifnet_head_lock_shared();
421 ifp
= ifindex2ifnet
[name
[0]];
426 bzero(&igi_u
, sizeof (igi_u
));
428 LIST_FOREACH(igi
, &igi_head
, igi_link
) {
430 if (ifp
!= igi
->igi_ifp
) {
434 igi_u
.igi_ifindex
= igi
->igi_ifp
->if_index
;
435 igi_u
.igi_version
= igi
->igi_version
;
436 igi_u
.igi_v1_timer
= igi
->igi_v1_timer
;
437 igi_u
.igi_v2_timer
= igi
->igi_v2_timer
;
438 igi_u
.igi_v3_timer
= igi
->igi_v3_timer
;
439 igi_u
.igi_flags
= igi
->igi_flags
;
440 igi_u
.igi_rv
= igi
->igi_rv
;
441 igi_u
.igi_qi
= igi
->igi_qi
;
442 igi_u
.igi_qri
= igi
->igi_qri
;
443 igi_u
.igi_uri
= igi
->igi_uri
;
446 error
= SYSCTL_OUT(req
, &igi_u
, sizeof (igi_u
));
451 lck_mtx_unlock(&igmp_mtx
);
456 * Dispatch an entire queue of pending packet chains
458 * Must not be called with inm_lock held.
461 igmp_dispatch_queue(struct igmp_ifinfo
*igi
, struct ifqueue
*ifq
, int limit
,
462 const int loop
, struct ifnet
*ifp
)
468 IGI_LOCK_ASSERT_HELD(igi
);
474 IGMP_PRINTF(("%s: dispatch %p from %p\n", __func__
, ifq
, m
));
475 ip
= mtod(m
, struct ip
*);
477 m
->m_flags
|= M_IGMP_LOOP
;
480 igmp_sendpkt(m
, ifp
);
488 IGI_LOCK_ASSERT_HELD(igi
);
492 * Filter outgoing IGMP report state by group.
494 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1).
495 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are
496 * disabled for all groups in the 224.0.0.0/24 link-local scope. However,
497 * this may break certain IGMP snooping switches which rely on the old
500 * Return zero if the given group is one for which IGMP reports
501 * should be suppressed, or non-zero if reports should be issued.
505 int igmp_isgroupreported(const struct in_addr addr
)
508 if (in_allhosts(addr
) ||
509 ((!igmp_sendlocal
&& IN_LOCAL_GROUP(ntohl(addr
.s_addr
)))))
516 * Construct a Router Alert option to use in outgoing packets.
524 MGET(m
, M_WAITOK
, MT_DATA
);
525 p
= mtod(m
, struct ipoption
*);
526 p
->ipopt_dst
.s_addr
= INADDR_ANY
;
527 p
->ipopt_list
[0] = IPOPT_RA
; /* Router Alert Option */
528 p
->ipopt_list
[1] = 0x04; /* 4 bytes long */
529 p
->ipopt_list
[2] = IPOPT_EOL
; /* End of IP option list */
530 p
->ipopt_list
[3] = 0x00; /* pad byte */
531 m
->m_len
= sizeof(p
->ipopt_dst
) + p
->ipopt_list
[1];
537 * Attach IGMP when PF_INET is attached to an interface.
540 igmp_domifattach(struct ifnet
*ifp
, int how
)
542 struct igmp_ifinfo
*igi
;
544 IGMP_PRINTF(("%s: called for ifp %p(%s)\n",
545 __func__
, ifp
, ifp
->if_name
));
547 igi
= igi_alloc(how
);
551 lck_mtx_lock(&igmp_mtx
);
554 igi_initvar(igi
, ifp
, 0);
555 igi
->igi_debug
|= IFD_ATTACHED
;
556 IGI_ADDREF_LOCKED(igi
); /* hold a reference for igi_head */
557 IGI_ADDREF_LOCKED(igi
); /* hold a reference for caller */
560 LIST_INSERT_HEAD(&igi_head
, igi
, igi_link
);
562 lck_mtx_unlock(&igmp_mtx
);
564 IGMP_PRINTF(("allocate igmp_ifinfo for ifp %p(%s)\n",
571 * Attach IGMP when PF_INET is reattached to an interface. Caller is
572 * expected to have an outstanding reference to the igi.
575 igmp_domifreattach(struct igmp_ifinfo
*igi
)
579 lck_mtx_lock(&igmp_mtx
);
582 VERIFY(!(igi
->igi_debug
& IFD_ATTACHED
));
585 igi_initvar(igi
, ifp
, 1);
586 igi
->igi_debug
|= IFD_ATTACHED
;
587 IGI_ADDREF_LOCKED(igi
); /* hold a reference for igi_head */
590 LIST_INSERT_HEAD(&igi_head
, igi
, igi_link
);
592 lck_mtx_unlock(&igmp_mtx
);
594 IGMP_PRINTF(("reattached igmp_ifinfo for ifp %p(%s)\n",
599 * Hook for domifdetach.
602 igmp_domifdetach(struct ifnet
*ifp
)
604 SLIST_HEAD(, in_multi
) inm_dthead
;
606 SLIST_INIT(&inm_dthead
);
608 IGMP_PRINTF(("%s: called for ifp %p(%s%d)\n",
609 __func__
, ifp
, ifp
->if_name
, ifp
->if_unit
));
611 lck_mtx_lock(&igmp_mtx
);
612 igi_delete(ifp
, (struct igmp_inm_relhead
*)&inm_dthead
);
613 lck_mtx_unlock(&igmp_mtx
);
615 /* Now that we've dropped all locks, release detached records */
616 IGMP_REMOVE_DETACHED_INM(&inm_dthead
);
620 * Called at interface detach time. Note that we only flush all deferred
621 * responses and record releases; all remaining inm records and their source
622 * entries related to this interface are left intact, in order to handle
626 igi_delete(const struct ifnet
*ifp
, struct igmp_inm_relhead
*inm_dthead
)
628 struct igmp_ifinfo
*igi
, *tigi
;
630 lck_mtx_assert(&igmp_mtx
, LCK_MTX_ASSERT_OWNED
);
632 LIST_FOREACH_SAFE(igi
, &igi_head
, igi_link
, tigi
) {
634 if (igi
->igi_ifp
== ifp
) {
636 * Free deferred General Query responses.
638 IF_DRAIN(&igi
->igi_gq
);
639 IF_DRAIN(&igi
->igi_v2q
);
640 igmp_flush_relq(igi
, inm_dthead
);
641 VERIFY(SLIST_EMPTY(&igi
->igi_relinmhead
));
642 igi
->igi_debug
&= ~IFD_ATTACHED
;
645 LIST_REMOVE(igi
, igi_link
);
646 IGI_REMREF(igi
); /* release igi_head reference */
651 panic("%s: igmp_ifinfo not found for ifp %p\n", __func__
, ifp
);
655 igi_initvar(struct igmp_ifinfo
*igi
, struct ifnet
*ifp
, int reattach
)
657 IGI_LOCK_ASSERT_HELD(igi
);
660 igi
->igi_version
= igmp_default_version
;
662 igi
->igi_rv
= IGMP_RV_INIT
;
663 igi
->igi_qi
= IGMP_QI_INIT
;
664 igi
->igi_qri
= IGMP_QRI_INIT
;
665 igi
->igi_uri
= IGMP_URI_INIT
;
667 /* ifnet is not yet attached; no need to hold ifnet lock */
668 if (!(ifp
->if_flags
& IFF_MULTICAST
))
669 igi
->igi_flags
|= IGIF_SILENT
;
672 SLIST_INIT(&igi
->igi_relinmhead
);
675 * Responses to general queries are subject to bounds.
677 igi
->igi_gq
.ifq_maxlen
= IGMP_MAX_RESPONSE_PACKETS
;
678 igi
->igi_v2q
.ifq_maxlen
= IGMP_MAX_RESPONSE_PACKETS
;
681 static struct igmp_ifinfo
*
684 struct igmp_ifinfo
*igi
;
686 igi
= (how
== M_WAITOK
) ? zalloc(igi_zone
) : zalloc_noblock(igi_zone
);
688 bzero(igi
, igi_size
);
689 lck_mtx_init(&igi
->igi_lock
, igmp_mtx_grp
, igmp_mtx_attr
);
690 igi
->igi_debug
|= IFD_ALLOC
;
696 igi_free(struct igmp_ifinfo
*igi
)
699 if (igi
->igi_debug
& IFD_ATTACHED
) {
700 panic("%s: attached igi=%p is being freed", __func__
, igi
);
702 } else if (igi
->igi_ifp
!= NULL
) {
703 panic("%s: ifp not NULL for igi=%p", __func__
, igi
);
705 } else if (!(igi
->igi_debug
& IFD_ALLOC
)) {
706 panic("%s: igi %p cannot be freed", __func__
, igi
);
708 } else if (igi
->igi_refcnt
!= 0) {
709 panic("%s: non-zero refcnt igi=%p", __func__
, igi
);
712 igi
->igi_debug
&= ~IFD_ALLOC
;
715 lck_mtx_destroy(&igi
->igi_lock
, igmp_mtx_grp
);
716 zfree(igi_zone
, igi
);
720 igi_addref(struct igmp_ifinfo
*igi
, int locked
)
725 IGI_LOCK_ASSERT_HELD(igi
);
727 if (++igi
->igi_refcnt
== 0) {
728 panic("%s: igi=%p wraparound refcnt", __func__
, igi
);
736 igi_remref(struct igmp_ifinfo
*igi
)
738 SLIST_HEAD(, in_multi
) inm_dthead
;
743 if (igi
->igi_refcnt
== 0) {
744 panic("%s: igi=%p negative refcnt", __func__
, igi
);
749 if (igi
->igi_refcnt
> 0) {
756 IF_DRAIN(&igi
->igi_gq
);
757 IF_DRAIN(&igi
->igi_v2q
);
758 SLIST_INIT(&inm_dthead
);
759 igmp_flush_relq(igi
, (struct igmp_inm_relhead
*)&inm_dthead
);
760 VERIFY(SLIST_EMPTY(&igi
->igi_relinmhead
));
763 /* Now that we've dropped all locks, release detached records */
764 IGMP_REMOVE_DETACHED_INM(&inm_dthead
);
766 IGMP_PRINTF(("%s: freeing igmp_ifinfo for ifp %p(%s%d)\n",
767 __func__
, ifp
, ifp
->if_name
, ifp
->if_unit
));
773 * Process a received IGMPv1 query.
774 * Return non-zero if the message should be dropped.
777 igmp_input_v1_query(struct ifnet
*ifp
, const struct ip
*ip
,
778 const struct igmp
*igmp
)
780 struct igmp_ifinfo
*igi
;
781 struct in_multi
*inm
;
782 struct in_multistep step
;
785 * IGMPv1 Host Membership Queries SHOULD always be addressed to
786 * 224.0.0.1. They are always treated as General Queries.
787 * igmp_group is always ignored. Do not drop it as a userland
788 * daemon may wish to see it.
790 if (!in_allhosts(ip
->ip_dst
) || !in_nullhost(igmp
->igmp_group
)) {
791 IGMPSTAT_INC(igps_rcv_badqueries
);
792 OIGMPSTAT_INC(igps_rcv_badqueries
);
795 IGMPSTAT_INC(igps_rcv_gen_queries
);
797 igi
= IGMP_IFINFO(ifp
);
801 if (igi
->igi_flags
& IGIF_LOOPBACK
) {
802 IGMP_PRINTF(("ignore v1 query on IGIF_LOOPBACK ifp %p(%s%d)\n",
803 ifp
, ifp
->if_name
, ifp
->if_unit
));
808 * Switch to IGMPv1 host compatibility mode.
810 igmp_set_version(igi
, IGMP_VERSION_1
);
813 IGMP_PRINTF(("process v1 query on ifp %p(%s%d)\n", ifp
, ifp
->if_name
,
817 * Start the timers in all of our group records
818 * for the interface on which the query arrived,
819 * except those which are already running.
821 in_multihead_lock_shared();
822 IN_FIRST_MULTI(step
, inm
);
823 while (inm
!= NULL
) {
825 if (inm
->inm_ifp
!= ifp
)
827 if (inm
->inm_timer
!= 0)
830 switch (inm
->inm_state
) {
831 case IGMP_NOT_MEMBER
:
832 case IGMP_SILENT_MEMBER
:
834 case IGMP_G_QUERY_PENDING_MEMBER
:
835 case IGMP_SG_QUERY_PENDING_MEMBER
:
836 case IGMP_REPORTING_MEMBER
:
837 case IGMP_IDLE_MEMBER
:
838 case IGMP_LAZY_MEMBER
:
839 case IGMP_SLEEPING_MEMBER
:
840 case IGMP_AWAKENING_MEMBER
:
841 inm
->inm_state
= IGMP_REPORTING_MEMBER
;
842 inm
->inm_timer
= IGMP_RANDOM_DELAY(
843 IGMP_V1V2_MAX_RI
* PR_SLOWHZ
);
844 current_state_timers_running
= 1;
846 case IGMP_LEAVING_MEMBER
:
851 IN_NEXT_MULTI(step
, inm
);
853 in_multihead_lock_done();
859 * Process a received IGMPv2 general or group-specific query.
862 igmp_input_v2_query(struct ifnet
*ifp
, const struct ip
*ip
,
863 const struct igmp
*igmp
)
865 struct igmp_ifinfo
*igi
;
866 struct in_multi
*inm
;
867 int is_general_query
;
870 is_general_query
= 0;
873 * Validate address fields upfront.
875 if (in_nullhost(igmp
->igmp_group
)) {
877 * IGMPv2 General Query.
878 * If this was not sent to the all-hosts group, ignore it.
880 if (!in_allhosts(ip
->ip_dst
))
882 IGMPSTAT_INC(igps_rcv_gen_queries
);
883 is_general_query
= 1;
885 /* IGMPv2 Group-Specific Query. */
886 IGMPSTAT_INC(igps_rcv_group_queries
);
889 igi
= IGMP_IFINFO(ifp
);
893 if (igi
->igi_flags
& IGIF_LOOPBACK
) {
894 IGMP_PRINTF(("ignore v2 query on IGIF_LOOPBACK ifp %p(%s%d)\n",
895 ifp
, ifp
->if_name
, ifp
->if_unit
));
900 * Ignore v2 query if in v1 Compatibility Mode.
902 if (igi
->igi_version
== IGMP_VERSION_1
) {
906 igmp_set_version(igi
, IGMP_VERSION_2
);
909 timer
= igmp
->igmp_code
* PR_SLOWHZ
/ IGMP_TIMER_SCALE
;
913 if (is_general_query
) {
914 struct in_multistep step
;
916 IGMP_PRINTF(("process v2 general query on ifp %p(%s%d)\n",
917 ifp
, ifp
->if_name
, ifp
->if_unit
));
919 * For each reporting group joined on this
920 * interface, kick the report timer.
922 in_multihead_lock_shared();
923 IN_FIRST_MULTI(step
, inm
);
924 while (inm
!= NULL
) {
926 if (inm
->inm_ifp
== ifp
)
927 igmp_v2_update_group(inm
, timer
);
929 IN_NEXT_MULTI(step
, inm
);
931 in_multihead_lock_done();
934 * Group-specific IGMPv2 query, we need only
935 * look up the single group to process it.
937 in_multihead_lock_shared();
938 IN_LOOKUP_MULTI(&igmp
->igmp_group
, ifp
, inm
);
939 in_multihead_lock_done();
942 IGMP_PRINTF(("process v2 query %s on ifp %p(%s%d)\n",
943 inet_ntoa(igmp
->igmp_group
), ifp
, ifp
->if_name
,
945 igmp_v2_update_group(inm
, timer
);
947 INM_REMREF(inm
); /* from IN_LOOKUP_MULTI */
955 * Update the report timer on a group in response to an IGMPv2 query.
957 * If we are becoming the reporting member for this group, start the timer.
958 * If we already are the reporting member for this group, and timer is
959 * below the threshold, reset it.
961 * We may be updating the group for the first time since we switched
962 * to IGMPv3. If we are, then we must clear any recorded source lists,
963 * and transition to REPORTING state; the group timer is overloaded
964 * for group and group-source query responses.
966 * Unlike IGMPv3, the delay per group should be jittered
967 * to avoid bursts of IGMPv2 reports.
970 igmp_v2_update_group(struct in_multi
*inm
, const int timer
)
973 IGMP_PRINTF(("%s: %s/%s%d timer=%d\n", __func__
,
974 inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
->if_name
,
975 inm
->inm_ifp
->if_unit
, timer
));
977 INM_LOCK_ASSERT_HELD(inm
);
979 switch (inm
->inm_state
) {
980 case IGMP_NOT_MEMBER
:
981 case IGMP_SILENT_MEMBER
:
983 case IGMP_REPORTING_MEMBER
:
984 if (inm
->inm_timer
!= 0 &&
985 inm
->inm_timer
<= timer
) {
986 IGMP_PRINTF(("%s: REPORTING and timer running, "
987 "skipping.\n", __func__
));
991 case IGMP_SG_QUERY_PENDING_MEMBER
:
992 case IGMP_G_QUERY_PENDING_MEMBER
:
993 case IGMP_IDLE_MEMBER
:
994 case IGMP_LAZY_MEMBER
:
995 case IGMP_AWAKENING_MEMBER
:
996 IGMP_PRINTF(("%s: ->REPORTING\n", __func__
));
997 inm
->inm_state
= IGMP_REPORTING_MEMBER
;
998 inm
->inm_timer
= IGMP_RANDOM_DELAY(timer
);
999 current_state_timers_running
= 1;
1001 case IGMP_SLEEPING_MEMBER
:
1002 IGMP_PRINTF(("%s: ->AWAKENING\n", __func__
));
1003 inm
->inm_state
= IGMP_AWAKENING_MEMBER
;
1005 case IGMP_LEAVING_MEMBER
:
1011 * Process a received IGMPv3 general, group-specific or
1012 * group-and-source-specific query.
1013 * Assumes m has already been pulled up to the full IGMP message length.
1014 * Return 0 if successful, otherwise an appropriate error code is returned.
1017 igmp_input_v3_query(struct ifnet
*ifp
, const struct ip
*ip
,
1018 /*const*/ struct igmpv3
*igmpv3
)
1020 struct igmp_ifinfo
*igi
;
1021 struct in_multi
*inm
;
1022 int is_general_query
;
1023 uint32_t maxresp
, nsrc
, qqi
;
1027 is_general_query
= 0;
1029 IGMP_PRINTF(("process v3 query on ifp %p(%s%d)\n", ifp
, ifp
->if_name
,
1032 maxresp
= igmpv3
->igmp_code
; /* in 1/10ths of a second */
1033 if (maxresp
>= 128) {
1034 maxresp
= IGMP_MANT(igmpv3
->igmp_code
) <<
1035 (IGMP_EXP(igmpv3
->igmp_code
) + 3);
1039 * Robustness must never be less than 2 for on-wire IGMPv3.
1040 * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make
1041 * an exception for interfaces whose IGMPv3 state changes
1042 * are redirected to loopback (e.g. MANET).
1044 qrv
= IGMP_QRV(igmpv3
->igmp_misc
);
1046 IGMP_PRINTF(("%s: clamping qrv %d to %d\n", __func__
,
1047 qrv
, IGMP_RV_INIT
));
1051 qqi
= igmpv3
->igmp_qqi
;
1053 qqi
= IGMP_MANT(igmpv3
->igmp_qqi
) <<
1054 (IGMP_EXP(igmpv3
->igmp_qqi
) + 3);
1057 timer
= maxresp
* PR_SLOWHZ
/ IGMP_TIMER_SCALE
;
1061 nsrc
= ntohs(igmpv3
->igmp_numsrc
);
1064 * Validate address fields and versions upfront before
1065 * accepting v3 query.
1067 if (in_nullhost(igmpv3
->igmp_group
)) {
1069 * IGMPv3 General Query.
1071 * General Queries SHOULD be directed to 224.0.0.1.
1072 * A general query with a source list has undefined
1073 * behaviour; discard it.
1075 IGMPSTAT_INC(igps_rcv_gen_queries
);
1076 if (!in_allhosts(ip
->ip_dst
) || nsrc
> 0) {
1077 IGMPSTAT_INC(igps_rcv_badqueries
);
1078 OIGMPSTAT_INC(igps_rcv_badqueries
);
1081 is_general_query
= 1;
1083 /* Group or group-source specific query. */
1085 IGMPSTAT_INC(igps_rcv_group_queries
);
1087 IGMPSTAT_INC(igps_rcv_gsr_queries
);
1090 igi
= IGMP_IFINFO(ifp
);
1091 VERIFY(igi
!= NULL
);
1094 if (igi
->igi_flags
& IGIF_LOOPBACK
) {
1095 IGMP_PRINTF(("ignore v3 query on IGIF_LOOPBACK ifp %p(%s%d)\n",
1096 ifp
, ifp
->if_name
, ifp
->if_unit
));
1102 * Discard the v3 query if we're in Compatibility Mode.
1103 * The RFC is not obviously worded that hosts need to stay in
1104 * compatibility mode until the Old Version Querier Present
1107 if (igi
->igi_version
!= IGMP_VERSION_3
) {
1108 IGMP_PRINTF(("ignore v3 query in v%d mode on ifp %p(%s%d)\n",
1109 igi
->igi_version
, ifp
, ifp
->if_name
, ifp
->if_unit
));
1114 igmp_set_version(igi
, IGMP_VERSION_3
);
1117 igi
->igi_qri
= maxresp
;
1120 IGMP_PRINTF(("%s: qrv %d qi %d qri %d\n", __func__
, qrv
, qqi
,
1123 if (is_general_query
) {
1125 * Schedule a current-state report on this ifp for
1126 * all groups, possibly containing source lists.
1127 * If there is a pending General Query response
1128 * scheduled earlier than the selected delay, do
1129 * not schedule any other reports.
1130 * Otherwise, reset the interface timer.
1132 IGMP_PRINTF(("process v3 general query on ifp %p(%s%d)\n",
1133 ifp
, ifp
->if_name
, ifp
->if_unit
));
1134 if (igi
->igi_v3_timer
== 0 || igi
->igi_v3_timer
>= timer
) {
1135 igi
->igi_v3_timer
= IGMP_RANDOM_DELAY(timer
);
1136 interface_timers_running
= 1;
1142 * Group-source-specific queries are throttled on
1143 * a per-group basis to defeat denial-of-service attempts.
1144 * Queries for groups we are not a member of on this
1145 * link are simply ignored.
1147 in_multihead_lock_shared();
1148 IN_LOOKUP_MULTI(&igmpv3
->igmp_group
, ifp
, inm
);
1149 in_multihead_lock_done();
1155 /* TODO: need ratecheck equivalent */
1157 if (!ratecheck(&inm
->inm_lastgsrtv
,
1159 IGMP_PRINTF(("%s: GS query throttled.\n",
1161 IGMPSTAT_INC(igps_drop_gsr_queries
);
1163 INM_REMREF(inm
); /* from IN_LOOKUP_MULTI */
1168 IGMP_PRINTF(("process v3 %s query on ifp %p(%s%d)\n",
1169 inet_ntoa(igmpv3
->igmp_group
), ifp
, ifp
->if_name
,
1172 * If there is a pending General Query response
1173 * scheduled sooner than the selected delay, no
1174 * further report need be scheduled.
1175 * Otherwise, prepare to respond to the
1176 * group-specific or group-and-source query.
1179 if (igi
->igi_v3_timer
== 0 || igi
->igi_v3_timer
>= timer
) {
1181 igmp_input_v3_group_query(inm
, timer
, igmpv3
);
1186 INM_REMREF(inm
); /* from IN_LOOKUP_MULTI */
1193 * Process a received IGMPv3 group-specific or group-and-source-specific
1195 * Return <0 if any error occurred. Currently this is ignored.
1198 igmp_input_v3_group_query(struct in_multi
*inm
,
1199 int timer
, /*const*/ struct igmpv3
*igmpv3
)
1204 INM_LOCK_ASSERT_HELD(inm
);
1208 switch (inm
->inm_state
) {
1209 case IGMP_NOT_MEMBER
:
1210 case IGMP_SILENT_MEMBER
:
1211 case IGMP_SLEEPING_MEMBER
:
1212 case IGMP_LAZY_MEMBER
:
1213 case IGMP_AWAKENING_MEMBER
:
1214 case IGMP_IDLE_MEMBER
:
1215 case IGMP_LEAVING_MEMBER
:
1217 case IGMP_REPORTING_MEMBER
:
1218 case IGMP_G_QUERY_PENDING_MEMBER
:
1219 case IGMP_SG_QUERY_PENDING_MEMBER
:
1223 nsrc
= ntohs(igmpv3
->igmp_numsrc
);
1226 * Deal with group-specific queries upfront.
1227 * If any group query is already pending, purge any recorded
1228 * source-list state if it exists, and schedule a query response
1229 * for this group-specific query.
1232 if (inm
->inm_state
== IGMP_G_QUERY_PENDING_MEMBER
||
1233 inm
->inm_state
== IGMP_SG_QUERY_PENDING_MEMBER
) {
1234 inm_clear_recorded(inm
);
1235 timer
= min(inm
->inm_timer
, timer
);
1237 inm
->inm_state
= IGMP_G_QUERY_PENDING_MEMBER
;
1238 inm
->inm_timer
= IGMP_RANDOM_DELAY(timer
);
1239 current_state_timers_running
= 1;
1244 * Deal with the case where a group-and-source-specific query has
1245 * been received but a group-specific query is already pending.
1247 if (inm
->inm_state
== IGMP_G_QUERY_PENDING_MEMBER
) {
1248 timer
= min(inm
->inm_timer
, timer
);
1249 inm
->inm_timer
= IGMP_RANDOM_DELAY(timer
);
1250 current_state_timers_running
= 1;
1255 * Finally, deal with the case where a group-and-source-specific
1256 * query has been received, where a response to a previous g-s-r
1257 * query exists, or none exists.
1258 * In this case, we need to parse the source-list which the Querier
1259 * has provided us with and check if we have any source list filter
1260 * entries at T1 for these sources. If we do not, there is no need
1261 * schedule a report and the query may be dropped.
1262 * If we do, we must record them and schedule a current-state
1263 * report for those sources.
1264 * FIXME: Handling source lists larger than 1 mbuf requires that
1265 * we pass the mbuf chain pointer down to this function, and use
1266 * m_getptr() to walk the chain.
1268 if (inm
->inm_nsrc
> 0) {
1269 const struct in_addr
*ap
;
1272 ap
= (const struct in_addr
*)(igmpv3
+ 1);
1274 for (i
= 0; i
< nsrc
; i
++, ap
++) {
1275 retval
= inm_record_source(inm
, ap
->s_addr
);
1278 nrecorded
+= retval
;
1280 if (nrecorded
> 0) {
1281 IGMP_PRINTF(("%s: schedule response to SG query\n",
1283 inm
->inm_state
= IGMP_SG_QUERY_PENDING_MEMBER
;
1284 inm
->inm_timer
= IGMP_RANDOM_DELAY(timer
);
1285 current_state_timers_running
= 1;
1293 * Process a received IGMPv1 host membership report.
1295 * NOTE: 0.0.0.0 workaround breaks const correctness.
1298 igmp_input_v1_report(struct ifnet
*ifp
, /*const*/ struct ip
*ip
,
1299 /*const*/ struct igmp
*igmp
)
1301 struct in_ifaddr
*ia
;
1302 struct in_multi
*inm
;
1304 IGMPSTAT_INC(igps_rcv_reports
);
1305 OIGMPSTAT_INC(igps_rcv_reports
);
1307 if (ifp
->if_flags
& IFF_LOOPBACK
)
1310 if (!IN_MULTICAST(ntohl(igmp
->igmp_group
.s_addr
) ||
1311 !in_hosteq(igmp
->igmp_group
, ip
->ip_dst
))) {
1312 IGMPSTAT_INC(igps_rcv_badreports
);
1313 OIGMPSTAT_INC(igps_rcv_badreports
);
1318 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1319 * Booting clients may use the source address 0.0.0.0. Some
1320 * IGMP daemons may not know how to use IP_RECVIF to determine
1321 * the interface upon which this message was received.
1322 * Replace 0.0.0.0 with the subnet address if told to do so.
1324 if (igmp_recvifkludge
&& in_nullhost(ip
->ip_src
)) {
1327 IFA_LOCK(&ia
->ia_ifa
);
1328 ip
->ip_src
.s_addr
= htonl(ia
->ia_subnet
);
1329 IFA_UNLOCK(&ia
->ia_ifa
);
1330 IFA_REMREF(&ia
->ia_ifa
);
1334 IGMP_PRINTF(("process v1 report %s on ifp %p(%s%d)\n",
1335 inet_ntoa(igmp
->igmp_group
), ifp
, ifp
->if_name
, ifp
->if_unit
));
1338 * IGMPv1 report suppression.
1339 * If we are a member of this group, and our membership should be
1340 * reported, stop our group timer and transition to the 'lazy' state.
1342 in_multihead_lock_shared();
1343 IN_LOOKUP_MULTI(&igmp
->igmp_group
, ifp
, inm
);
1344 in_multihead_lock_done();
1346 struct igmp_ifinfo
*igi
;
1351 VERIFY(igi
!= NULL
);
1353 IGMPSTAT_INC(igps_rcv_ourreports
);
1354 OIGMPSTAT_INC(igps_rcv_ourreports
);
1357 * If we are in IGMPv3 host mode, do not allow the
1358 * other host's IGMPv1 report to suppress our reports
1359 * unless explicitly configured to do so.
1362 if (igi
->igi_version
== IGMP_VERSION_3
) {
1363 if (igmp_legacysupp
)
1364 igmp_v3_suppress_group_record(inm
);
1367 INM_REMREF(inm
); /* from IN_LOOKUP_MULTI */
1371 INM_LOCK_ASSERT_HELD(inm
);
1374 switch (inm
->inm_state
) {
1375 case IGMP_NOT_MEMBER
:
1376 case IGMP_SILENT_MEMBER
:
1378 case IGMP_IDLE_MEMBER
:
1379 case IGMP_LAZY_MEMBER
:
1380 case IGMP_AWAKENING_MEMBER
:
1381 IGMP_PRINTF(("report suppressed for %s on ifp %p(%s%d)\n",
1382 inet_ntoa(igmp
->igmp_group
), ifp
, ifp
->if_name
,
1384 case IGMP_SLEEPING_MEMBER
:
1385 inm
->inm_state
= IGMP_SLEEPING_MEMBER
;
1387 case IGMP_REPORTING_MEMBER
:
1388 IGMP_PRINTF(("report suppressed for %s on ifp %p(%s%d)\n",
1389 inet_ntoa(igmp
->igmp_group
), ifp
, ifp
->if_name
,
1391 if (igi
->igi_version
== IGMP_VERSION_1
)
1392 inm
->inm_state
= IGMP_LAZY_MEMBER
;
1393 else if (igi
->igi_version
== IGMP_VERSION_2
)
1394 inm
->inm_state
= IGMP_SLEEPING_MEMBER
;
1396 case IGMP_G_QUERY_PENDING_MEMBER
:
1397 case IGMP_SG_QUERY_PENDING_MEMBER
:
1398 case IGMP_LEAVING_MEMBER
:
1403 INM_REMREF(inm
); /* from IN_LOOKUP_MULTI */
1410 * Process a received IGMPv2 host membership report.
1412 * NOTE: 0.0.0.0 workaround breaks const correctness.
1415 igmp_input_v2_report(struct ifnet
*ifp
, /*const*/ struct ip
*ip
,
1416 /*const*/ struct igmp
*igmp
)
1418 struct in_ifaddr
*ia
;
1419 struct in_multi
*inm
;
1422 * Make sure we don't hear our own membership report. Fast
1423 * leave requires knowing that we are the only member of a
1428 IFA_LOCK(&ia
->ia_ifa
);
1429 if (in_hosteq(ip
->ip_src
, IA_SIN(ia
)->sin_addr
)) {
1430 IFA_UNLOCK(&ia
->ia_ifa
);
1431 IFA_REMREF(&ia
->ia_ifa
);
1434 IFA_UNLOCK(&ia
->ia_ifa
);
1437 IGMPSTAT_INC(igps_rcv_reports
);
1438 OIGMPSTAT_INC(igps_rcv_reports
);
1440 if (ifp
->if_flags
& IFF_LOOPBACK
) {
1442 IFA_REMREF(&ia
->ia_ifa
);
1446 if (!IN_MULTICAST(ntohl(igmp
->igmp_group
.s_addr
)) ||
1447 !in_hosteq(igmp
->igmp_group
, ip
->ip_dst
)) {
1449 IFA_REMREF(&ia
->ia_ifa
);
1450 IGMPSTAT_INC(igps_rcv_badreports
);
1451 OIGMPSTAT_INC(igps_rcv_badreports
);
1456 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1457 * Booting clients may use the source address 0.0.0.0. Some
1458 * IGMP daemons may not know how to use IP_RECVIF to determine
1459 * the interface upon which this message was received.
1460 * Replace 0.0.0.0 with the subnet address if told to do so.
1462 if (igmp_recvifkludge
&& in_nullhost(ip
->ip_src
)) {
1464 IFA_LOCK(&ia
->ia_ifa
);
1465 ip
->ip_src
.s_addr
= htonl(ia
->ia_subnet
);
1466 IFA_UNLOCK(&ia
->ia_ifa
);
1470 IFA_REMREF(&ia
->ia_ifa
);
1472 IGMP_PRINTF(("process v2 report %s on ifp %p(%s%d)\n",
1473 inet_ntoa(igmp
->igmp_group
), ifp
, ifp
->if_name
, ifp
->if_unit
));
1476 * IGMPv2 report suppression.
1477 * If we are a member of this group, and our membership should be
1478 * reported, and our group timer is pending or about to be reset,
1479 * stop our group timer by transitioning to the 'lazy' state.
1481 in_multihead_lock_shared();
1482 IN_LOOKUP_MULTI(&igmp
->igmp_group
, ifp
, inm
);
1483 in_multihead_lock_done();
1485 struct igmp_ifinfo
*igi
;
1489 VERIFY(igi
!= NULL
);
1491 IGMPSTAT_INC(igps_rcv_ourreports
);
1492 OIGMPSTAT_INC(igps_rcv_ourreports
);
1495 * If we are in IGMPv3 host mode, do not allow the
1496  * other host's IGMPv2 report to suppress our reports
1497 * unless explicitly configured to do so.
1500 if (igi
->igi_version
== IGMP_VERSION_3
) {
1501 if (igmp_legacysupp
)
1502 igmp_v3_suppress_group_record(inm
);
1511 switch (inm
->inm_state
) {
1512 case IGMP_NOT_MEMBER
:
1513 case IGMP_SILENT_MEMBER
:
1514 case IGMP_SLEEPING_MEMBER
:
1516 case IGMP_REPORTING_MEMBER
:
1517 case IGMP_IDLE_MEMBER
:
1518 case IGMP_AWAKENING_MEMBER
:
1519 IGMP_PRINTF(("report suppressed for %s on ifp %p(%s%d)\n",
1520 inet_ntoa(igmp
->igmp_group
), ifp
, ifp
->if_name
,
1522 case IGMP_LAZY_MEMBER
:
1523 inm
->inm_state
= IGMP_LAZY_MEMBER
;
1525 case IGMP_G_QUERY_PENDING_MEMBER
:
1526 case IGMP_SG_QUERY_PENDING_MEMBER
:
1527 case IGMP_LEAVING_MEMBER
:
1539 igmp_input(struct mbuf
*m
, int off
)
1549 IGMP_PRINTF(("%s: called w/mbuf (%p,%d)\n", __func__
, m
, off
));
1551 ifp
= m
->m_pkthdr
.rcvif
;
1553 IGMPSTAT_INC(igps_rcv_total
);
1554 OIGMPSTAT_INC(igps_rcv_total
);
1556 ip
= mtod(m
, struct ip
*);
1559 /* By now, ip_len no longer contains the length of IP header */
1560 igmplen
= ip
->ip_len
;
1565 if (igmplen
< IGMP_MINLEN
) {
1566 IGMPSTAT_INC(igps_rcv_tooshort
);
1567 OIGMPSTAT_INC(igps_rcv_tooshort
);
1573 * Always pullup to the minimum size for v1/v2 or v3
1574 * to amortize calls to m_pulldown().
1576 if (igmplen
>= IGMP_V3_QUERY_MINLEN
)
1577 minlen
= IGMP_V3_QUERY_MINLEN
;
1579 minlen
= IGMP_MINLEN
;
1581 M_STRUCT_GET(igmp
, struct igmp
*, m
, off
, minlen
);
1583 IGMPSTAT_INC(igps_rcv_tooshort
);
1584 OIGMPSTAT_INC(igps_rcv_tooshort
);
1589 * Validate checksum.
1591 m
->m_data
+= iphlen
;
1593 if (in_cksum(m
, igmplen
)) {
1594 IGMPSTAT_INC(igps_rcv_badsum
);
1595 OIGMPSTAT_INC(igps_rcv_badsum
);
1599 m
->m_data
-= iphlen
;
1603 * IGMP control traffic is link-scope, and must have a TTL of 1.
1604 * DVMRP traffic (e.g. mrinfo, mtrace) is an exception;
1605 * probe packets may come from beyond the LAN.
1607 if (igmp
->igmp_type
!= IGMP_DVMRP
&& ip
->ip_ttl
!= 1) {
1608 IGMPSTAT_INC(igps_rcv_badttl
);
1613 switch (igmp
->igmp_type
) {
1614 case IGMP_HOST_MEMBERSHIP_QUERY
:
1615 if (igmplen
== IGMP_MINLEN
) {
1616 if (igmp
->igmp_code
== 0)
1617 queryver
= IGMP_VERSION_1
;
1619 queryver
= IGMP_VERSION_2
;
1620 } else if (igmplen
>= IGMP_V3_QUERY_MINLEN
) {
1621 queryver
= IGMP_VERSION_3
;
1623 IGMPSTAT_INC(igps_rcv_tooshort
);
1624 OIGMPSTAT_INC(igps_rcv_tooshort
);
1629 OIGMPSTAT_INC(igps_rcv_queries
);
1632 case IGMP_VERSION_1
:
1633 IGMPSTAT_INC(igps_rcv_v1v2_queries
);
1636 if (igmp_input_v1_query(ifp
, ip
, igmp
) != 0) {
1642 case IGMP_VERSION_2
:
1643 IGMPSTAT_INC(igps_rcv_v1v2_queries
);
1646 if (igmp_input_v2_query(ifp
, ip
, igmp
) != 0) {
1652 case IGMP_VERSION_3
: {
1653 struct igmpv3
*igmpv3
;
1658 IGMPSTAT_INC(igps_rcv_v3_queries
);
1659 igmpv3
= (struct igmpv3
*)igmp
;
1661 * Validate length based on source count.
1663 nsrc
= ntohs(igmpv3
->igmp_numsrc
);
1664 srclen
= sizeof(struct in_addr
) * nsrc
;
1665 if (igmplen
< (IGMP_V3_QUERY_MINLEN
+ srclen
)) {
1666 IGMPSTAT_INC(igps_rcv_tooshort
);
1667 OIGMPSTAT_INC(igps_rcv_tooshort
);
1671 igmpv3len
= IGMP_V3_QUERY_MINLEN
+ srclen
;
1672 M_STRUCT_GET(igmpv3
, struct igmpv3
*, m
,
1674 if (igmpv3
== NULL
) {
1675 IGMPSTAT_INC(igps_rcv_tooshort
);
1676 OIGMPSTAT_INC(igps_rcv_tooshort
);
1679 if (igmp_input_v3_query(ifp
, ip
, igmpv3
) != 0) {
1688 case IGMP_v1_HOST_MEMBERSHIP_REPORT
:
1691 if (igmp_input_v1_report(ifp
, ip
, igmp
) != 0) {
1697 case IGMP_v2_HOST_MEMBERSHIP_REPORT
:
1701 if (!ip_checkrouteralert(m
))
1702 IGMPSTAT_INC(igps_rcv_nora
);
1704 if (igmp_input_v2_report(ifp
, ip
, igmp
) != 0) {
1710 case IGMP_v3_HOST_MEMBERSHIP_REPORT
:
1712 * Hosts do not need to process IGMPv3 membership reports,
1713 * as report suppression is no longer required.
1716 if (!ip_checkrouteralert(m
))
1717 IGMPSTAT_INC(igps_rcv_nora
);
1725 lck_mtx_assert(&igmp_mtx
, LCK_MTX_ASSERT_NOTOWNED
);
1727 * Pass all valid IGMP packets up to any process(es) listening on a
1735 * IGMP slowtimo handler.
1736  * Combines both the slow and fast timers into one. We lose some responsiveness, but
1737  * this allows the system to avoid having a pr_fasttimo, thus allowing for power savings.
1743 struct ifqueue scq
; /* State-change packets */
1744 struct ifqueue qrq
; /* Query response packets */
1746 struct igmp_ifinfo
*igi
;
1747 struct in_multi
*inm
;
1748 int loop
= 0, uri_fasthz
= 0;
1749 SLIST_HEAD(, in_multi
) inm_dthead
;
1751 SLIST_INIT(&inm_dthead
);
1753 lck_mtx_lock(&igmp_mtx
);
1755 LIST_FOREACH(igi
, &igi_head
, igi_link
) {
1757 igmp_v1v2_process_querier_timers(igi
);
1762 * NOTE: previously handled by fasttimo
1764 * Quick check to see if any work needs to be done, in order to
1765 * minimize the overhead of fasttimo processing.
1767 if (!current_state_timers_running
&&
1768 !interface_timers_running
&&
1769 !state_change_timers_running
) {
1770 lck_mtx_unlock(&igmp_mtx
);
1775 * IGMPv3 General Query response timer processing.
1777 if (interface_timers_running
) {
1778 interface_timers_running
= 0;
1779 LIST_FOREACH(igi
, &igi_head
, igi_link
) {
1781 if (igi
->igi_v3_timer
== 0) {
1783 } else if (--igi
->igi_v3_timer
== 0) {
1784 igmp_v3_dispatch_general_query(igi
);
1786 interface_timers_running
= 1;
1792 if (!current_state_timers_running
&&
1793 !state_change_timers_running
)
1796 current_state_timers_running
= 0;
1797 state_change_timers_running
= 0;
1799 memset(&qrq
, 0, sizeof(struct ifqueue
));
1800 qrq
.ifq_maxlen
= IGMP_MAX_G_GS_PACKETS
;
1802 memset(&scq
, 0, sizeof(struct ifqueue
));
1803 scq
.ifq_maxlen
= IGMP_MAX_STATE_CHANGE_PACKETS
;
1806 * IGMPv1/v2/v3 host report and state-change timer processing.
1807 * Note: Processing a v3 group timer may remove a node.
1809 LIST_FOREACH(igi
, &igi_head
, igi_link
) {
1810 struct in_multistep step
;
1814 loop
= (igi
->igi_flags
& IGIF_LOOPBACK
) ? 1 : 0;
1815 uri_fasthz
= IGMP_RANDOM_DELAY(igi
->igi_uri
* PR_SLOWHZ
);
1818 in_multihead_lock_shared();
1819 IN_FIRST_MULTI(step
, inm
);
1820 while (inm
!= NULL
) {
1822 if (inm
->inm_ifp
!= ifp
)
1826 switch (igi
->igi_version
) {
1827 case IGMP_VERSION_1
:
1828 case IGMP_VERSION_2
:
1829 igmp_v1v2_process_group_timer(inm
,
1832 case IGMP_VERSION_3
:
1833 igmp_v3_process_group_timers(igi
, &qrq
,
1834 &scq
, inm
, uri_fasthz
);
1840 IN_NEXT_MULTI(step
, inm
);
1842 in_multihead_lock_done();
1845 if (igi
->igi_version
== IGMP_VERSION_1
||
1846 igi
->igi_version
== IGMP_VERSION_2
) {
1847 igmp_dispatch_queue(igi
, &igi
->igi_v2q
, 0, loop
, ifp
);
1848 } else if (igi
->igi_version
== IGMP_VERSION_3
) {
1850 igmp_dispatch_queue(NULL
, &qrq
, 0, loop
, ifp
);
1851 igmp_dispatch_queue(NULL
, &scq
, 0, loop
, ifp
);
1852 VERIFY(qrq
.ifq_len
== 0);
1853 VERIFY(scq
.ifq_len
== 0);
1857 * In case there are still any pending membership reports
1858 * which didn't get drained at version change time.
1860 IF_DRAIN(&igi
->igi_v2q
);
1862 * Release all deferred inm records, and drain any locally
1863 * enqueued packets; do it even if the current IGMP version
1864 * for the link is no longer IGMPv3, in order to handle the
1865 * version change case.
1867 igmp_flush_relq(igi
, (struct igmp_inm_relhead
*)&inm_dthead
);
1868 VERIFY(SLIST_EMPTY(&igi
->igi_relinmhead
));
1876 lck_mtx_unlock(&igmp_mtx
);
1878  /* Now that we've dropped all locks, release detached records */
1879 IGMP_REMOVE_DETACHED_INM(&inm_dthead
);
1883 * Free the in_multi reference(s) for this IGMP lifecycle.
1885 * Caller must be holding igi_lock.
1888 igmp_flush_relq(struct igmp_ifinfo
*igi
, struct igmp_inm_relhead
*inm_dthead
)
1890 struct in_multi
*inm
;
1893 IGI_LOCK_ASSERT_HELD(igi
);
1894 inm
= SLIST_FIRST(&igi
->igi_relinmhead
);
1898 SLIST_REMOVE_HEAD(&igi
->igi_relinmhead
, inm_nrele
);
1901 in_multihead_lock_exclusive();
1903 VERIFY(inm
->inm_nrelecnt
!= 0);
1904 inm
->inm_nrelecnt
--;
1905 lastref
= in_multi_detach(inm
);
1906 VERIFY(!lastref
|| (!(inm
->inm_debug
& IFD_ATTACHED
) &&
1907 inm
->inm_reqcnt
== 0));
1909 in_multihead_lock_done();
1910 /* from igi_relinmhead */
1912 /* from in_multihead list */
1915 * Defer releasing our final reference, as we
1916 * are holding the IGMP lock at this point, and
1917 * we could end up with locking issues later on
1918 * (while issuing SIOCDELMULTI) when this is the
1919 * final reference count. Let the caller do it
1922 IGMP_ADD_DETACHED_INM(inm_dthead
, inm
);
1930 * Update host report group timer for IGMPv1/v2.
1931 * Will update the global pending timer flags.
1934 igmp_v1v2_process_group_timer(struct in_multi
*inm
, const int igmp_version
)
1936 int report_timer_expired
;
1938 INM_LOCK_ASSERT_HELD(inm
);
1939 IGI_LOCK_ASSERT_HELD(inm
->inm_igi
);
1941 if (inm
->inm_timer
== 0) {
1942 report_timer_expired
= 0;
1943 } else if (--inm
->inm_timer
== 0) {
1944 report_timer_expired
= 1;
1946 current_state_timers_running
= 1;
1950 switch (inm
->inm_state
) {
1951 case IGMP_NOT_MEMBER
:
1952 case IGMP_SILENT_MEMBER
:
1953 case IGMP_IDLE_MEMBER
:
1954 case IGMP_LAZY_MEMBER
:
1955 case IGMP_SLEEPING_MEMBER
:
1956 case IGMP_AWAKENING_MEMBER
:
1958 case IGMP_REPORTING_MEMBER
:
1959 if (report_timer_expired
) {
1960 inm
->inm_state
= IGMP_IDLE_MEMBER
;
1961 (void) igmp_v1v2_queue_report(inm
,
1962 (igmp_version
== IGMP_VERSION_2
) ?
1963 IGMP_v2_HOST_MEMBERSHIP_REPORT
:
1964 IGMP_v1_HOST_MEMBERSHIP_REPORT
);
1965 INM_LOCK_ASSERT_HELD(inm
);
1966 IGI_LOCK_ASSERT_HELD(inm
->inm_igi
);
1969 case IGMP_G_QUERY_PENDING_MEMBER
:
1970 case IGMP_SG_QUERY_PENDING_MEMBER
:
1971 case IGMP_LEAVING_MEMBER
:
1977 * Update a group's timers for IGMPv3.
1978 * Will update the global pending timer flags.
1979 * Note: Unlocked read from igi.
1982 igmp_v3_process_group_timers(struct igmp_ifinfo
*igi
,
1983 struct ifqueue
*qrq
, struct ifqueue
*scq
,
1984 struct in_multi
*inm
, const int uri_fasthz
)
1986 int query_response_timer_expired
;
1987 int state_change_retransmit_timer_expired
;
1989 INM_LOCK_ASSERT_HELD(inm
);
1990 IGI_LOCK_ASSERT_HELD(igi
);
1991 VERIFY(igi
== inm
->inm_igi
);
1993 query_response_timer_expired
= 0;
1994 state_change_retransmit_timer_expired
= 0;
1997 * During a transition from v1/v2 compatibility mode back to v3,
1998 * a group record in REPORTING state may still have its group
1999 * timer active. This is a no-op in this function; it is easier
2000 * to deal with it here than to complicate the slow-timeout path.
2002 if (inm
->inm_timer
== 0) {
2003 query_response_timer_expired
= 0;
2004 } else if (--inm
->inm_timer
== 0) {
2005 query_response_timer_expired
= 1;
2007 current_state_timers_running
= 1;
2010 if (inm
->inm_sctimer
== 0) {
2011 state_change_retransmit_timer_expired
= 0;
2012 } else if (--inm
->inm_sctimer
== 0) {
2013 state_change_retransmit_timer_expired
= 1;
2015 state_change_timers_running
= 1;
2018 /* We are in fasttimo, so be quick about it. */
2019 if (!state_change_retransmit_timer_expired
&&
2020 !query_response_timer_expired
)
2023 switch (inm
->inm_state
) {
2024 case IGMP_NOT_MEMBER
:
2025 case IGMP_SILENT_MEMBER
:
2026 case IGMP_SLEEPING_MEMBER
:
2027 case IGMP_LAZY_MEMBER
:
2028 case IGMP_AWAKENING_MEMBER
:
2029 case IGMP_IDLE_MEMBER
:
2031 case IGMP_G_QUERY_PENDING_MEMBER
:
2032 case IGMP_SG_QUERY_PENDING_MEMBER
:
2034 * Respond to a previously pending Group-Specific
2035 * or Group-and-Source-Specific query by enqueueing
2036 * the appropriate Current-State report for
2037 * immediate transmission.
2039 if (query_response_timer_expired
) {
2042 retval
= igmp_v3_enqueue_group_record(qrq
, inm
, 0, 1,
2043 (inm
->inm_state
== IGMP_SG_QUERY_PENDING_MEMBER
));
2044 IGMP_PRINTF(("%s: enqueue record = %d\n",
2046 inm
->inm_state
= IGMP_REPORTING_MEMBER
;
2047 /* XXX Clear recorded sources for next time. */
2048 inm_clear_recorded(inm
);
2051 case IGMP_REPORTING_MEMBER
:
2052 case IGMP_LEAVING_MEMBER
:
2053 if (state_change_retransmit_timer_expired
) {
2055 * State-change retransmission timer fired.
2056 * If there are any further pending retransmissions,
2057 * set the global pending state-change flag, and
2060 if (--inm
->inm_scrv
> 0) {
2061 inm
->inm_sctimer
= uri_fasthz
;
2062 state_change_timers_running
= 1;
2065 * Retransmit the previously computed state-change
2066 * report. If there are no further pending
2067 * retransmissions, the mbuf queue will be consumed.
2068 * Update T0 state to T1 as we have now sent
2071 (void) igmp_v3_merge_state_changes(inm
, scq
);
2074 IGMP_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__
,
2075 inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
->if_name
,
2076 inm
->inm_ifp
->if_unit
));
2079 * If we are leaving the group for good, make sure
2080 * we release IGMP's reference to it.
2081 * This release must be deferred using a SLIST,
2082 * as we are called from a loop which traverses
2083 * the in_multihead list.
2085 if (inm
->inm_state
== IGMP_LEAVING_MEMBER
&&
2086 inm
->inm_scrv
== 0) {
2087 inm
->inm_state
= IGMP_NOT_MEMBER
;
2089 * A reference has already been held in
2090 * igmp_final_leave() for this inm, so
2091 * no need to hold another one. We also
2092 * bumped up its request count then, so
2093 * that it stays in in_multihead. Both
2094 * of them will be released when it is
2095 * dequeued later on.
2097 VERIFY(inm
->inm_nrelecnt
!= 0);
2098 SLIST_INSERT_HEAD(&igi
->igi_relinmhead
,
2107 * Suppress a group's pending response to a group or source/group query.
2109 * Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
2110 * Do NOT update ST1/ST0 as this operation merely suppresses
2111 * the currently pending group record.
2112 * Do NOT suppress the response to a general query. It is possible but
2113 * it would require adding another state or flag.
2116 igmp_v3_suppress_group_record(struct in_multi
*inm
)
2119 INM_LOCK_ASSERT_HELD(inm
);
2120 IGI_LOCK_ASSERT_HELD(inm
->inm_igi
);
2122 VERIFY(inm
->inm_igi
->igi_version
== IGMP_VERSION_3
);
2124 if (inm
->inm_state
!= IGMP_G_QUERY_PENDING_MEMBER
||
2125 inm
->inm_state
!= IGMP_SG_QUERY_PENDING_MEMBER
)
2128 if (inm
->inm_state
== IGMP_SG_QUERY_PENDING_MEMBER
)
2129 inm_clear_recorded(inm
);
2132 inm
->inm_state
= IGMP_REPORTING_MEMBER
;
2136 * Switch to a different IGMP version on the given interface,
2137 * as per Section 7.2.1.
2140 igmp_set_version(struct igmp_ifinfo
*igi
, const int igmp_version
)
2142 int old_version_timer
;
2144 IGI_LOCK_ASSERT_HELD(igi
);
2146 IGMP_PRINTF(("%s: switching to v%d on ifp %p(%s%d)\n", __func__
,
2147 igmp_version
, igi
->igi_ifp
, igi
->igi_ifp
->if_name
,
2148 igi
->igi_ifp
->if_unit
));
2150 if (igmp_version
== IGMP_VERSION_1
|| igmp_version
== IGMP_VERSION_2
) {
2152 * Compute the "Older Version Querier Present" timer as per
2155 old_version_timer
= igi
->igi_rv
* igi
->igi_qi
+ igi
->igi_qri
;
2156 old_version_timer
*= PR_SLOWHZ
;
2158 if (igmp_version
== IGMP_VERSION_1
) {
2159 igi
->igi_v1_timer
= old_version_timer
;
2160 igi
->igi_v2_timer
= 0;
2161 } else if (igmp_version
== IGMP_VERSION_2
) {
2162 igi
->igi_v1_timer
= 0;
2163 igi
->igi_v2_timer
= old_version_timer
;
2167 if (igi
->igi_v1_timer
== 0 && igi
->igi_v2_timer
> 0) {
2168 if (igi
->igi_version
!= IGMP_VERSION_2
) {
2169 igi
->igi_version
= IGMP_VERSION_2
;
2170 igmp_v3_cancel_link_timers(igi
);
2172 } else if (igi
->igi_v1_timer
> 0) {
2173 if (igi
->igi_version
!= IGMP_VERSION_1
) {
2174 igi
->igi_version
= IGMP_VERSION_1
;
2175 igmp_v3_cancel_link_timers(igi
);
2179 IGI_LOCK_ASSERT_HELD(igi
);
2183 * Cancel pending IGMPv3 timers for the given link and all groups
2184 * joined on it; state-change, general-query, and group-query timers.
2186 * Only ever called on a transition from v3 to Compatibility mode. Kill
2187 * the timers stone dead (this may be expensive for large N groups), they
2188 * will be restarted if Compatibility Mode deems that they must be due to
2192 igmp_v3_cancel_link_timers(struct igmp_ifinfo
*igi
)
2195 struct in_multi
*inm
;
2196 struct in_multistep step
;
2198 IGI_LOCK_ASSERT_HELD(igi
);
2200 IGMP_PRINTF(("%s: cancel v3 timers on ifp %p(%s%d)\n", __func__
,
2201 igi
->igi_ifp
, igi
->igi_ifp
->if_name
, igi
->igi_ifp
->if_unit
));
2204 * Stop the v3 General Query Response on this link stone dead.
2205 * If fasttimo is woken up due to interface_timers_running,
2206 * the flag will be cleared if there are no pending link timers.
2208 igi
->igi_v3_timer
= 0;
2211 * Now clear the current-state and state-change report timers
2212 * for all memberships scoped to this link.
2217 in_multihead_lock_shared();
2218 IN_FIRST_MULTI(step
, inm
);
2219 while (inm
!= NULL
) {
2221 if (inm
->inm_ifp
!= ifp
)
2224 switch (inm
->inm_state
) {
2225 case IGMP_NOT_MEMBER
:
2226 case IGMP_SILENT_MEMBER
:
2227 case IGMP_IDLE_MEMBER
:
2228 case IGMP_LAZY_MEMBER
:
2229 case IGMP_SLEEPING_MEMBER
:
2230 case IGMP_AWAKENING_MEMBER
:
2232 * These states are either not relevant in v3 mode,
2233 * or are unreported. Do nothing.
2236 case IGMP_LEAVING_MEMBER
:
2238 * If we are leaving the group and switching to
2239 * compatibility mode, we need to release the final
2240 * reference held for issuing the INCLUDE {}, and
2241 * transition to REPORTING to ensure the host leave
2242 * message is sent upstream to the old querier --
2243 * transition to NOT would lose the leave and race.
2244 * During igmp_final_leave(), we bumped up both the
2245 * request and reference counts. Since we cannot
2246 * call in_multi_detach() here, defer this task to
2247 * the timer routine.
2249 VERIFY(inm
->inm_nrelecnt
!= 0);
2251 SLIST_INSERT_HEAD(&igi
->igi_relinmhead
, inm
, inm_nrele
);
2254 case IGMP_G_QUERY_PENDING_MEMBER
:
2255 case IGMP_SG_QUERY_PENDING_MEMBER
:
2256 inm_clear_recorded(inm
);
2258 case IGMP_REPORTING_MEMBER
:
2259 inm
->inm_state
= IGMP_REPORTING_MEMBER
;
2263 * Always clear state-change and group report timers.
2264 * Free any pending IGMPv3 state-change records.
2266 inm
->inm_sctimer
= 0;
2268 IF_DRAIN(&inm
->inm_scq
);
2271 IN_NEXT_MULTI(step
, inm
);
2273 in_multihead_lock_done();
2279 * Update the Older Version Querier Present timers for a link.
2280 * See Section 7.2.1 of RFC 3376.
2283 igmp_v1v2_process_querier_timers(struct igmp_ifinfo
*igi
)
2285 IGI_LOCK_ASSERT_HELD(igi
);
2287 if (igi
->igi_v1_timer
== 0 && igi
->igi_v2_timer
== 0) {
2289 * IGMPv1 and IGMPv2 Querier Present timers expired.
2293 if (igi
->igi_version
!= IGMP_VERSION_3
) {
2294 IGMP_PRINTF(("%s: transition from v%d -> v%d on %p(%s%d)\n",
2295 __func__
, igi
->igi_version
, IGMP_VERSION_3
,
2296 igi
->igi_ifp
, igi
->igi_ifp
->if_name
,
2297 igi
->igi_ifp
->if_unit
));
2298 igi
->igi_version
= IGMP_VERSION_3
;
2299 IF_DRAIN(&igi
->igi_v2q
);
2301 } else if (igi
->igi_v1_timer
== 0 && igi
->igi_v2_timer
> 0) {
2303 * IGMPv1 Querier Present timer expired,
2304 * IGMPv2 Querier Present timer running.
2305 * If IGMPv2 was disabled since last timeout,
2307 * If IGMPv2 is enabled, revert to IGMPv2.
2309 if (!igmp_v2enable
) {
2310 IGMP_PRINTF(("%s: transition from v%d -> v%d on %p(%s%d)\n",
2311 __func__
, igi
->igi_version
, IGMP_VERSION_3
,
2312 igi
->igi_ifp
, igi
->igi_ifp
->if_name
,
2313 igi
->igi_ifp
->if_unit
));
2314 igi
->igi_v2_timer
= 0;
2315 igi
->igi_version
= IGMP_VERSION_3
;
2316 IF_DRAIN(&igi
->igi_v2q
);
2318 --igi
->igi_v2_timer
;
2319 if (igi
->igi_version
!= IGMP_VERSION_2
) {
2320 IGMP_PRINTF(("%s: transition from v%d -> v%d on %p(%s%d)\n",
2321 __func__
, igi
->igi_version
, IGMP_VERSION_2
,
2322 igi
->igi_ifp
, igi
->igi_ifp
->if_name
,
2323 igi
->igi_ifp
->if_unit
));
2324 igi
->igi_version
= IGMP_VERSION_2
;
2325 IF_DRAIN(&igi
->igi_gq
);
2328 } else if (igi
->igi_v1_timer
> 0) {
2330 * IGMPv1 Querier Present timer running.
2331 * Stop IGMPv2 timer if running.
2333 * If IGMPv1 was disabled since last timeout,
2335 * If IGMPv1 is enabled, reset IGMPv2 timer if running.
2337 if (!igmp_v1enable
) {
2338 IGMP_PRINTF(("%s: transition from v%d -> v%d on %p(%s%d)\n",
2339 __func__
, igi
->igi_version
, IGMP_VERSION_3
,
2340 igi
->igi_ifp
, igi
->igi_ifp
->if_name
,
2341 igi
->igi_ifp
->if_unit
));
2342 igi
->igi_v1_timer
= 0;
2343 igi
->igi_version
= IGMP_VERSION_3
;
2344 IF_DRAIN(&igi
->igi_v2q
);
2346 --igi
->igi_v1_timer
;
2348 if (igi
->igi_v2_timer
> 0) {
2349 IGMP_PRINTF(("%s: cancel v2 timer on %p(%s%d)\n",
2350 __func__
, igi
->igi_ifp
, igi
->igi_ifp
->if_name
,
2351 igi
->igi_ifp
->if_unit
));
2352 igi
->igi_v2_timer
= 0;
2358 * Dispatch an IGMPv1/v2 host report or leave message.
2359 * These are always small enough to fit inside a single mbuf.
2362 igmp_v1v2_queue_report(struct in_multi
*inm
, const int type
)
2370 INM_LOCK_ASSERT_HELD(inm
);
2371 IGI_LOCK_ASSERT_HELD(inm
->inm_igi
);
2375 MGETHDR(m
, M_DONTWAIT
, MT_DATA
);
2378 MH_ALIGN(m
, sizeof(struct ip
) + sizeof(struct igmp
));
2380 m
->m_pkthdr
.len
= sizeof(struct ip
) + sizeof(struct igmp
);
2382 m
->m_data
+= sizeof(struct ip
);
2383 m
->m_len
= sizeof(struct igmp
);
2385 igmp
= mtod(m
, struct igmp
*);
2386 igmp
->igmp_type
= type
;
2387 igmp
->igmp_code
= 0;
2388 igmp
->igmp_group
= inm
->inm_addr
;
2389 igmp
->igmp_cksum
= 0;
2390 igmp
->igmp_cksum
= in_cksum(m
, sizeof(struct igmp
));
2392 m
->m_data
-= sizeof(struct ip
);
2393 m
->m_len
+= sizeof(struct ip
);
2395 ip
= mtod(m
, struct ip
*);
2397 ip
->ip_len
= sizeof(struct ip
) + sizeof(struct igmp
);
2399 ip
->ip_p
= IPPROTO_IGMP
;
2400 ip
->ip_src
.s_addr
= INADDR_ANY
;
2402 if (type
== IGMP_HOST_LEAVE_MESSAGE
)
2403 ip
->ip_dst
.s_addr
= htonl(INADDR_ALLRTRS_GROUP
);
2405 ip
->ip_dst
= inm
->inm_addr
;
2407 m
->m_flags
|= M_IGMPV2
;
2408 if (inm
->inm_igi
->igi_flags
& IGIF_LOOPBACK
)
2409 m
->m_flags
|= M_IGMP_LOOP
;
2412 * Due to the fact that at this point we are possibly holding
2413 * in_multihead_lock in shared or exclusive mode, we can't call
2414 * igmp_sendpkt() here since that will eventually call ip_output(),
2415 * which will try to lock in_multihead_lock and cause a deadlock.
2416 * Instead we defer the work to the igmp_slowtimo() thread, thus
2417 * avoiding unlocking in_multihead_lock here.
2419 if (IF_QFULL(&inm
->inm_igi
->igi_v2q
)) {
2420 IGMP_PRINTF(("%s: v1/v2 outbound queue full\n", __func__
));
2424 IF_ENQUEUE(&inm
->inm_igi
->igi_v2q
, m
);
2430 * Process a state change from the upper layer for the given IPv4 group.
2432 * Each socket holds a reference on the in_multi in its own ip_moptions.
2433 * The socket layer will have made the necessary updates to the group
2434 * state, it is now up to IGMP to issue a state change report if there
2435 * has been any change between T0 (when the last state-change was issued)
2438 * We use the IGMPv3 state machine at group level. The IGMP module
2439 * however makes the decision as to which IGMP protocol version to speak.
2440 * A state change *from* INCLUDE {} always means an initial join.
2441 * A state change *to* INCLUDE {} always means a final leave.
2443 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can
2444 * save ourselves a bunch of work; any exclusive mode groups need not
2445 * compute source filter lists.
2448 igmp_change_state(struct in_multi
*inm
)
2450 struct igmp_ifinfo
*igi
;
2454 INM_LOCK_ASSERT_HELD(inm
);
2455 VERIFY(inm
->inm_igi
!= NULL
);
2456 IGI_LOCK_ASSERT_NOTHELD(inm
->inm_igi
);
2459 * Try to detect if the upper layer just asked us to change state
2460 * for an interface which has now gone away.
2462 VERIFY(inm
->inm_ifma
!= NULL
);
2463 ifp
= inm
->inm_ifma
->ifma_ifp
;
2465 * Sanity check that netinet's notion of ifp is the same as net's.
2467 VERIFY(inm
->inm_ifp
== ifp
);
2469 igi
= IGMP_IFINFO(ifp
);
2470 VERIFY(igi
!= NULL
);
2473 * If we detect a state transition to or from MCAST_UNDEFINED
2474 * for this group, then we are starting or finishing an IGMP
2475 * life cycle for this group.
2477 if (inm
->inm_st
[1].iss_fmode
!= inm
->inm_st
[0].iss_fmode
) {
2478 IGMP_PRINTF(("%s: inm transition %d -> %d\n", __func__
,
2479 inm
->inm_st
[0].iss_fmode
, inm
->inm_st
[1].iss_fmode
));
2480 if (inm
->inm_st
[0].iss_fmode
== MCAST_UNDEFINED
) {
2481 IGMP_PRINTF(("%s: initial join\n", __func__
));
2482 error
= igmp_initial_join(inm
, igi
);
2484 } else if (inm
->inm_st
[1].iss_fmode
== MCAST_UNDEFINED
) {
2485 IGMP_PRINTF(("%s: final leave\n", __func__
));
2486 igmp_final_leave(inm
, igi
);
2490 IGMP_PRINTF(("%s: filter set change\n", __func__
));
2493 error
= igmp_handle_state_change(inm
, igi
);
2499 * Perform the initial join for an IGMP group.
2501 * When joining a group:
2502 * If the group should have its IGMP traffic suppressed, do nothing.
2503 * IGMPv1 starts sending IGMPv1 host membership reports.
2504 * IGMPv2 starts sending IGMPv2 host membership reports.
2505 * IGMPv3 will schedule an IGMPv3 state-change report containing the
2506 * initial state of the membership.
2509 igmp_initial_join(struct in_multi
*inm
, struct igmp_ifinfo
*igi
)
2512 struct ifqueue
*ifq
;
2513 int error
, retval
, syncstates
;
2515 INM_LOCK_ASSERT_HELD(inm
);
2516 IGI_LOCK_ASSERT_NOTHELD(igi
);
2518 IGMP_PRINTF(("%s: initial join %s on ifp %p(%s%d)\n",
2519 __func__
, inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
,
2520 inm
->inm_ifp
->if_name
, inm
->inm_ifp
->if_unit
));
2528 VERIFY(igi
->igi_ifp
== ifp
);
2531 * Groups joined on loopback or marked as 'not reported',
2532 * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and
2533 * are never reported in any IGMP protocol exchanges.
2534 * All other groups enter the appropriate IGMP state machine
2535 * for the version in use on this link.
2536 * A link marked as IGIF_SILENT causes IGMP to be completely
2537 * disabled for the link.
2539 if ((ifp
->if_flags
& IFF_LOOPBACK
) ||
2540 (igi
->igi_flags
& IGIF_SILENT
) ||
2541 !igmp_isgroupreported(inm
->inm_addr
)) {
2542 IGMP_PRINTF(("%s: not kicking state machine for silent group\n",
2544 inm
->inm_state
= IGMP_SILENT_MEMBER
;
2548 * Deal with overlapping in_multi lifecycle.
2549 * If this group was LEAVING, then make sure
2550 * we drop the reference we picked up to keep the
2551 * group around for the final INCLUDE {} enqueue.
2552 * Since we cannot call in_multi_detach() here,
2553 * defer this task to the timer routine.
2555 if (igi
->igi_version
== IGMP_VERSION_3
&&
2556 inm
->inm_state
== IGMP_LEAVING_MEMBER
) {
2557 VERIFY(inm
->inm_nrelecnt
!= 0);
2558 SLIST_INSERT_HEAD(&igi
->igi_relinmhead
, inm
, inm_nrele
);
2561 inm
->inm_state
= IGMP_REPORTING_MEMBER
;
2563 switch (igi
->igi_version
) {
2564 case IGMP_VERSION_1
:
2565 case IGMP_VERSION_2
:
2566 inm
->inm_state
= IGMP_IDLE_MEMBER
;
2567 error
= igmp_v1v2_queue_report(inm
,
2568 (igi
->igi_version
== IGMP_VERSION_2
) ?
2569 IGMP_v2_HOST_MEMBERSHIP_REPORT
:
2570 IGMP_v1_HOST_MEMBERSHIP_REPORT
);
2572 INM_LOCK_ASSERT_HELD(inm
);
2573 IGI_LOCK_ASSERT_HELD(igi
);
2576 inm
->inm_timer
= IGMP_RANDOM_DELAY(
2577 IGMP_V1V2_MAX_RI
* PR_SLOWHZ
);
2578 current_state_timers_running
= 1;
2582 case IGMP_VERSION_3
:
2584 * Defer update of T0 to T1, until the first copy
2585 * of the state change has been transmitted.
2590 * Immediately enqueue a State-Change Report for
2591 * this interface, freeing any previous reports.
2592 * Don't kick the timers if there is nothing to do,
2593 * or if an error occurred.
2595 ifq
= &inm
->inm_scq
;
2597 retval
= igmp_v3_enqueue_group_record(ifq
, inm
, 1,
2599 IGMP_PRINTF(("%s: enqueue record = %d\n",
2602 error
= retval
* -1;
2607 * Schedule transmission of pending state-change
2608 * report up to RV times for this link. The timer
2609 * will fire at the next igmp_fasttimo (~200ms),
2610 * giving us an opportunity to merge the reports.
2612 if (igi
->igi_flags
& IGIF_LOOPBACK
) {
2615 VERIFY(igi
->igi_rv
> 1);
2616 inm
->inm_scrv
= igi
->igi_rv
;
2618 inm
->inm_sctimer
= 1;
2619 state_change_timers_running
= 1;
2628 * Only update the T0 state if state change is atomic,
2629 * i.e. we don't need to wait for a timer to fire before we
2630 * can consider the state change to have been communicated.
2634 IGMP_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__
,
2635 inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
->if_name
,
2636 inm
->inm_ifp
->if_unit
));
2643 * Issue an intermediate state change during the IGMP life-cycle.
2646 igmp_handle_state_change(struct in_multi
*inm
, struct igmp_ifinfo
*igi
)
2651 INM_LOCK_ASSERT_HELD(inm
);
2652 IGI_LOCK_ASSERT_NOTHELD(igi
);
2654 IGMP_PRINTF(("%s: state change for %s on ifp %p(%s%d)\n",
2655 __func__
, inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
,
2656 inm
->inm_ifp
->if_name
, inm
->inm_ifp
->if_unit
));
2661 VERIFY(igi
->igi_ifp
== ifp
);
2663 if ((ifp
->if_flags
& IFF_LOOPBACK
) ||
2664 (igi
->igi_flags
& IGIF_SILENT
) ||
2665 !igmp_isgroupreported(inm
->inm_addr
) ||
2666 (igi
->igi_version
!= IGMP_VERSION_3
)) {
2668 if (!igmp_isgroupreported(inm
->inm_addr
)) {
2669 IGMP_PRINTF(("%s: not kicking state "
2670 "machine for silent group\n", __func__
));
2672 IGMP_PRINTF(("%s: nothing to do\n", __func__
));
2674 IGMP_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__
,
2675 inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
->if_name
));
2679 IF_DRAIN(&inm
->inm_scq
);
2681 retval
= igmp_v3_enqueue_group_record(&inm
->inm_scq
, inm
, 1, 0, 0);
2682 IGMP_PRINTF(("%s: enqueue record = %d\n", __func__
, retval
));
2688 * If record(s) were enqueued, start the state-change
2689 * report timer for this group.
2691 inm
->inm_scrv
= ((igi
->igi_flags
& IGIF_LOOPBACK
) ? 1 : igi
->igi_rv
);
2692 inm
->inm_sctimer
= 1;
2693 state_change_timers_running
= 1;
2700 * Perform the final leave for an IGMP group.
2702 * When leaving a group:
2703 * IGMPv1 does nothing.
2704 * IGMPv2 sends a host leave message, if and only if we are the reporter.
2705 * IGMPv3 enqueues a state-change report containing a transition
2706 * to INCLUDE {} for immediate transmission.
2709 igmp_final_leave(struct in_multi
*inm
, struct igmp_ifinfo
*igi
)
2713 INM_LOCK_ASSERT_HELD(inm
);
2714 IGI_LOCK_ASSERT_NOTHELD(igi
);
2716 IGMP_PRINTF(("%s: final leave %s on ifp %p(%s%d)\n",
2717 __func__
, inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
,
2718 inm
->inm_ifp
->if_name
, inm
->inm_ifp
->if_unit
));
2720 switch (inm
->inm_state
) {
2721 case IGMP_NOT_MEMBER
:
2722 case IGMP_SILENT_MEMBER
:
2723 case IGMP_LEAVING_MEMBER
:
2724 /* Already leaving or left; do nothing. */
2725 IGMP_PRINTF(("%s: not kicking state machine for silent group\n",
2728 case IGMP_REPORTING_MEMBER
:
2729 case IGMP_IDLE_MEMBER
:
2730 case IGMP_G_QUERY_PENDING_MEMBER
:
2731 case IGMP_SG_QUERY_PENDING_MEMBER
:
2733 if (igi
->igi_version
== IGMP_VERSION_2
) {
2734 if (inm
->inm_state
== IGMP_G_QUERY_PENDING_MEMBER
||
2735 inm
->inm_state
== IGMP_SG_QUERY_PENDING_MEMBER
) {
2736 panic("%s: IGMPv3 state reached, not IGMPv3 "
2737 "mode\n", __func__
);
2740 igmp_v1v2_queue_report(inm
, IGMP_HOST_LEAVE_MESSAGE
);
2742 INM_LOCK_ASSERT_HELD(inm
);
2743 IGI_LOCK_ASSERT_HELD(igi
);
2745 inm
->inm_state
= IGMP_NOT_MEMBER
;
2746 } else if (igi
->igi_version
== IGMP_VERSION_3
) {
2748 * Stop group timer and all pending reports.
2749 * Immediately enqueue a state-change report
2750 * TO_IN {} to be sent on the next fast timeout,
2751 * giving us an opportunity to merge reports.
2753 IF_DRAIN(&inm
->inm_scq
);
2755 if (igi
->igi_flags
& IGIF_LOOPBACK
) {
2758 inm
->inm_scrv
= igi
->igi_rv
;
2760 IGMP_PRINTF(("%s: Leaving %s/%s%d with %d "
2761 "pending retransmissions.\n", __func__
,
2762 inet_ntoa(inm
->inm_addr
),
2763 inm
->inm_ifp
->if_name
, inm
->inm_ifp
->if_unit
,
2765 if (inm
->inm_scrv
== 0) {
2766 inm
->inm_state
= IGMP_NOT_MEMBER
;
2767 inm
->inm_sctimer
= 0;
2771 * Stick around in the in_multihead list;
2772 * the final detach will be issued by
2773 * igmp_v3_process_group_timers() when
2774 * the retransmit timer expires.
2776 INM_ADDREF_LOCKED(inm
);
2777 VERIFY(inm
->inm_debug
& IFD_ATTACHED
);
2779 VERIFY(inm
->inm_reqcnt
>= 1);
2780 inm
->inm_nrelecnt
++;
2781 VERIFY(inm
->inm_nrelecnt
!= 0);
2783 retval
= igmp_v3_enqueue_group_record(
2784 &inm
->inm_scq
, inm
, 1, 0, 0);
2785 KASSERT(retval
!= 0,
2786 ("%s: enqueue record = %d\n", __func__
,
2789 inm
->inm_state
= IGMP_LEAVING_MEMBER
;
2790 inm
->inm_sctimer
= 1;
2791 state_change_timers_running
= 1;
2797 case IGMP_LAZY_MEMBER
:
2798 case IGMP_SLEEPING_MEMBER
:
2799 case IGMP_AWAKENING_MEMBER
:
2800 /* Our reports are suppressed; do nothing. */
2806 IGMP_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__
,
2807 inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
->if_name
,
2808 inm
->inm_ifp
->if_unit
));
2809 inm
->inm_st
[1].iss_fmode
= MCAST_UNDEFINED
;
2810 IGMP_PRINTF(("%s: T1 now MCAST_UNDEFINED for %s/%s%d\n",
2811 __func__
, inet_ntoa(inm
->inm_addr
), inm
->inm_ifp
->if_name
,
2812 inm
->inm_ifp
->if_unit
));
2817 * Enqueue an IGMPv3 group record to the given output queue.
2819 * XXX This function could do with having the allocation code
2820 * split out, and the multiple-tree-walks coalesced into a single
2821 * routine as has been done in igmp_v3_enqueue_filter_change().
2823 * If is_state_change is zero, a current-state record is appended.
2824 * If is_state_change is non-zero, a state-change report is appended.
2826 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2827 * If is_group_query is zero, and if there is a packet with free space
2828 * at the tail of the queue, it will be appended to providing there
2829 * is enough free space.
2830 * Otherwise a new mbuf packet chain is allocated.
2832 * If is_source_query is non-zero, each source is checked to see if
2833 * it was recorded for a Group-Source query, and will be omitted if
2834 * it is not both in-mode and recorded.
2836 * The function will attempt to allocate leading space in the packet
2837 * for the IP/IGMP header to be prepended without fragmenting the chain.
2839 * If successful the size of all data appended to the queue is returned,
2840 * otherwise an error code less than zero is returned, or zero if
2841 * no record(s) were appended.
2844 igmp_v3_enqueue_group_record(struct ifqueue
*ifq
, struct in_multi
*inm
,
2845 const int is_state_change
, const int is_group_query
,
2846 const int is_source_query
)
2848 struct igmp_grouprec ig
;
2849 struct igmp_grouprec
*pig
;
2851 struct ip_msource
*ims
, *nims
;
2852 struct mbuf
*m0
, *m
, *md
;
2853 int error
, is_filter_list_change
;
2854 int minrec0len
, m0srcs
, msrcs
, nbytes
, off
;
2855 int record_has_sources
;
2861 INM_LOCK_ASSERT_HELD(inm
);
2862 IGI_LOCK_ASSERT_HELD(inm
->inm_igi
);
2866 is_filter_list_change
= 0;
2873 record_has_sources
= 1;
2875 type
= IGMP_DO_NOTHING
;
2876 mode
= inm
->inm_st
[1].iss_fmode
;
2879 * If we did not transition out of ASM mode during t0->t1,
2880 * and there are no source nodes to process, we can skip
2881 * the generation of source records.
2883 if (inm
->inm_st
[0].iss_asm
> 0 && inm
->inm_st
[1].iss_asm
> 0 &&
2885 record_has_sources
= 0;
2887 if (is_state_change
) {
2889 * Queue a state change record.
2890 * If the mode did not change, and there are non-ASM
2891 * listeners or source filters present,
2892 * we potentially need to issue two records for the group.
2893 * If we are transitioning to MCAST_UNDEFINED, we need
2894 * not send any sources.
2895 * If there are ASM listeners, and there was no filter
2896 * mode transition of any kind, do nothing.
2898 if (mode
!= inm
->inm_st
[0].iss_fmode
) {
2899 if (mode
== MCAST_EXCLUDE
) {
2900 IGMP_PRINTF(("%s: change to EXCLUDE\n",
2902 type
= IGMP_CHANGE_TO_EXCLUDE_MODE
;
2904 IGMP_PRINTF(("%s: change to INCLUDE\n",
2906 type
= IGMP_CHANGE_TO_INCLUDE_MODE
;
2907 if (mode
== MCAST_UNDEFINED
)
2908 record_has_sources
= 0;
2911 if (record_has_sources
) {
2912 is_filter_list_change
= 1;
2914 type
= IGMP_DO_NOTHING
;
2919 * Queue a current state record.
2921 if (mode
== MCAST_EXCLUDE
) {
2922 type
= IGMP_MODE_IS_EXCLUDE
;
2923 } else if (mode
== MCAST_INCLUDE
) {
2924 type
= IGMP_MODE_IS_INCLUDE
;
2925 VERIFY(inm
->inm_st
[1].iss_asm
== 0);
2930 * Generate the filter list changes using a separate function.
2932 if (is_filter_list_change
)
2933 return (igmp_v3_enqueue_filter_change(ifq
, inm
));
2935 if (type
== IGMP_DO_NOTHING
) {
2936 IGMP_PRINTF(("%s: nothing to do for %s/%s%d\n",
2937 __func__
, inet_ntoa(inm
->inm_addr
),
2938 inm
->inm_ifp
->if_name
, inm
->inm_ifp
->if_unit
));
2943 * If any sources are present, we must be able to fit at least
2944 * one in the trailing space of the tail packet's mbuf,
2947 minrec0len
= sizeof(struct igmp_grouprec
);
2948 if (record_has_sources
)
2949 minrec0len
+= sizeof(in_addr_t
);
2951 IGMP_PRINTF(("%s: queueing %s for %s/%s%d\n", __func__
,
2952 igmp_rec_type_to_str(type
), inet_ntoa(inm
->inm_addr
),
2953 inm
->inm_ifp
->if_name
, inm
->inm_ifp
->if_unit
));
2956 * Check if we have a packet in the tail of the queue for this
2957 * group into which the first group record for this group will fit.
2958 * Otherwise allocate a new packet.
2959 * Always allocate leading space for IP+RA_OPT+IGMP+REPORT.
2960 * Note: Group records for G/GSR query responses MUST be sent
2961 * in their own packet.
2964 if (!is_group_query
&&
2966 (m0
->m_pkthdr
.vt_nrecs
+ 1 <= IGMP_V3_REPORT_MAXRECS
) &&
2967 (m0
->m_pkthdr
.len
+ minrec0len
) <
2968 (ifp
->if_mtu
- IGMP_LEADINGSPACE
)) {
2969 m0srcs
= (ifp
->if_mtu
- m0
->m_pkthdr
.len
-
2970 sizeof(struct igmp_grouprec
)) / sizeof(in_addr_t
);
2972 IGMP_PRINTF(("%s: use existing packet\n", __func__
));
2974 if (IF_QFULL(ifq
)) {
2975 IGMP_PRINTF(("%s: outbound queue full\n", __func__
));
2979 m0srcs
= (ifp
->if_mtu
- IGMP_LEADINGSPACE
-
2980 sizeof(struct igmp_grouprec
)) / sizeof(in_addr_t
);
2981 if (!is_state_change
&& !is_group_query
) {
2982 m
= m_getcl(M_DONTWAIT
, MT_DATA
, M_PKTHDR
);
2984 m
->m_data
+= IGMP_LEADINGSPACE
;
2987 m
= m_gethdr(M_DONTWAIT
, MT_DATA
);
2989 MH_ALIGN(m
, IGMP_LEADINGSPACE
);
2994 IGMP_PRINTF(("%s: allocated first packet\n", __func__
));
2998 * Append group record.
2999 * If we have sources, we don't know how many yet.
3004 ig
.ig_group
= inm
->inm_addr
;
3005 if (!m_append(m
, sizeof(struct igmp_grouprec
), (void *)&ig
)) {
3008 IGMP_PRINTF(("%s: m_append() failed.\n", __func__
));
3011 nbytes
+= sizeof(struct igmp_grouprec
);
3014 * Append as many sources as will fit in the first packet.
3015 * If we are appending to a new packet, the chain allocation
3016 * may potentially use clusters; use m_getptr() in this case.
3017 * If we are appending to an existing packet, we need to obtain
3018 * a pointer to the group record after m_append(), in case a new
3019 * mbuf was allocated.
3020 * Only append sources which are in-mode at t1. If we are
3021 * transitioning to MCAST_UNDEFINED state on the group, do not
3022 * include source entries.
3023 * Only report recorded sources in our filter set when responding
3024 * to a group-source query.
3026 if (record_has_sources
) {
3029 pig
= (struct igmp_grouprec
*)(mtod(md
, uint8_t *) +
3030 md
->m_len
- nbytes
);
3032 md
= m_getptr(m
, 0, &off
);
3033 pig
= (struct igmp_grouprec
*)(mtod(md
, uint8_t *) +
3037 RB_FOREACH_SAFE(ims
, ip_msource_tree
, &inm
->inm_srcs
, nims
) {
3038 IGMP_PRINTF(("%s: visit node %s\n", __func__
,
3039 inet_ntoa_haddr(ims
->ims_haddr
)));
3040 now
= ims_get_mode(inm
, ims
, 1);
3041 IGMP_PRINTF(("%s: node is %d\n", __func__
, now
));
3042 if ((now
!= mode
) ||
3043 (now
== mode
&& mode
== MCAST_UNDEFINED
)) {
3044 IGMP_PRINTF(("%s: skip node\n", __func__
));
3047 if (is_source_query
&& ims
->ims_stp
== 0) {
3048 IGMP_PRINTF(("%s: skip unrecorded node\n",
3052 IGMP_PRINTF(("%s: append node\n", __func__
));
3053 naddr
= htonl(ims
->ims_haddr
);
3054 if (!m_append(m
, sizeof(in_addr_t
), (void *)&naddr
)) {
3057 IGMP_PRINTF(("%s: m_append() failed.\n",
3061 nbytes
+= sizeof(in_addr_t
);
3063 if (msrcs
== m0srcs
)
3066 IGMP_PRINTF(("%s: msrcs is %d this packet\n", __func__
,
3068 pig
->ig_numsrc
= htons(msrcs
);
3069 nbytes
+= (msrcs
* sizeof(in_addr_t
));
3072 if (is_source_query
&& msrcs
== 0) {
3073 IGMP_PRINTF(("%s: no recorded sources to report\n", __func__
));
3080 * We are good to go with first packet.
3083 IGMP_PRINTF(("%s: enqueueing first packet\n", __func__
));
3084 m
->m_pkthdr
.vt_nrecs
= 1;
3085 m
->m_pkthdr
.rcvif
= ifp
;
3088 m
->m_pkthdr
.vt_nrecs
++;
3091 * No further work needed if no source list in packet(s).
3093 if (!record_has_sources
)
3097 * Whilst sources remain to be announced, we need to allocate
3098 * a new packet and fill out as many sources as will fit.
3099 * Always try for a cluster first.
3101 while (nims
!= NULL
) {
3102 if (IF_QFULL(ifq
)) {
3103 IGMP_PRINTF(("%s: outbound queue full\n", __func__
));
3106 m
= m_getcl(M_DONTWAIT
, MT_DATA
, M_PKTHDR
);
3108 m
->m_data
+= IGMP_LEADINGSPACE
;
3110 m
= m_gethdr(M_DONTWAIT
, MT_DATA
);
3112 MH_ALIGN(m
, IGMP_LEADINGSPACE
);
3116 md
= m_getptr(m
, 0, &off
);
3117 pig
= (struct igmp_grouprec
*)(mtod(md
, uint8_t *) + off
);
3118 IGMP_PRINTF(("%s: allocated next packet\n", __func__
));
3120 if (!m_append(m
, sizeof(struct igmp_grouprec
), (void *)&ig
)) {
3123 IGMP_PRINTF(("%s: m_append() failed.\n", __func__
));
3126 m
->m_pkthdr
.vt_nrecs
= 1;
3127 nbytes
+= sizeof(struct igmp_grouprec
);
3129 m0srcs
= (ifp
->if_mtu
- IGMP_LEADINGSPACE
-
3130 sizeof(struct igmp_grouprec
)) / sizeof(in_addr_t
);
3133 RB_FOREACH_FROM(ims
, ip_msource_tree
, nims
) {
3134 IGMP_PRINTF(("%s: visit node %s\n", __func__
,
3135 inet_ntoa_haddr(ims
->ims_haddr
)));
3136 now
= ims_get_mode(inm
, ims
, 1);
3137 if ((now
!= mode
) ||
3138 (now
== mode
&& mode
== MCAST_UNDEFINED
)) {
3139 IGMP_PRINTF(("%s: skip node\n", __func__
));
3142 if (is_source_query
&& ims
->ims_stp
== 0) {
3143 IGMP_PRINTF(("%s: skip unrecorded node\n",
3147 IGMP_PRINTF(("%s: append node\n", __func__
));
3148 naddr
= htonl(ims
->ims_haddr
);
3149 if (!m_append(m
, sizeof(in_addr_t
), (void *)&naddr
)) {
3152 IGMP_PRINTF(("%s: m_append() failed.\n",
3157 if (msrcs
== m0srcs
)
3160 pig
->ig_numsrc
= htons(msrcs
);
3161 nbytes
+= (msrcs
* sizeof(in_addr_t
));
3163 IGMP_PRINTF(("%s: enqueueing next packet\n", __func__
));
3164 m
->m_pkthdr
.rcvif
= ifp
;
3172 * Type used to mark record pass completion.
3173 * We exploit the fact we can cast to this easily from the
3174 * current filter modes on each ip_msource node.
3177 REC_NONE
= 0x00, /* MCAST_UNDEFINED */
3178 REC_ALLOW
= 0x01, /* MCAST_INCLUDE */
3179 REC_BLOCK
= 0x02, /* MCAST_EXCLUDE */
3180 REC_FULL
= REC_ALLOW
| REC_BLOCK
3184 * Enqueue an IGMPv3 filter list change to the given output queue.
3186 * Source list filter state is held in an RB-tree. When the filter list
3187 * for a group is changed without changing its mode, we need to compute
3188 * the deltas between T0 and T1 for each source in the filter set,
3189 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
3191 * As we may potentially queue two record types, and the entire R-B tree
3192 * needs to be walked at once, we break this out into its own function
3193 * so we can generate a tightly packed queue of packets.
3195 * XXX This could be written to only use one tree walk, although that makes
3196 * serializing into the mbuf chains a bit harder. For now we do two walks
3197 * which makes things easier on us, and it may or may not be harder on
3200 * If successful the size of all data appended to the queue is returned,
3201 * otherwise an error code less than zero is returned, or zero if
3202 * no record(s) were appended.
3205 igmp_v3_enqueue_filter_change(struct ifqueue
*ifq
, struct in_multi
*inm
)
3207 static const int MINRECLEN
=
3208 sizeof(struct igmp_grouprec
) + sizeof(in_addr_t
);
3210 struct igmp_grouprec ig
;
3211 struct igmp_grouprec
*pig
;
3212 struct ip_msource
*ims
, *nims
;
3213 struct mbuf
*m
, *m0
, *md
;
3215 int m0srcs
, nbytes
, npbytes
, off
, rsrcs
, schanged
;
3217 uint8_t mode
, now
, then
;
3218 rectype_t crt
, drt
, nrt
;
3220 INM_LOCK_ASSERT_HELD(inm
);
3222 if (inm
->inm_nsrc
== 0 ||
3223 (inm
->inm_st
[0].iss_asm
> 0 && inm
->inm_st
[1].iss_asm
> 0))
3226 ifp
= inm
->inm_ifp
; /* interface */
3227 mode
= inm
->inm_st
[1].iss_fmode
; /* filter mode at t1 */
3228 crt
= REC_NONE
; /* current group record type */
3229 drt
= REC_NONE
; /* mask of completed group record types */
3230 nrt
= REC_NONE
; /* record type for current node */
3231 m0srcs
= 0; /* # source which will fit in current mbuf chain */
3232 nbytes
= 0; /* # of bytes appended to group's state-change queue */
3233 npbytes
= 0; /* # of bytes appended this packet */
3234 rsrcs
= 0; /* # sources encoded in current record */
3235 schanged
= 0; /* # nodes encoded in overall filter change */
3236 nallow
= 0; /* # of source entries in ALLOW_NEW */
3237 nblock
= 0; /* # of source entries in BLOCK_OLD */
3238 nims
= NULL
; /* next tree node pointer */
3241 * For each possible filter record mode.
3242 * The first kind of source we encounter tells us which
3243 * is the first kind of record we start appending.
3244 * If a node transitioned to UNDEFINED at t1, its mode is treated
3245 * as the inverse of the group's filter mode.
3247 while (drt
!= REC_FULL
) {
3251 (m0
->m_pkthdr
.vt_nrecs
+ 1 <=
3252 IGMP_V3_REPORT_MAXRECS
) &&
3253 (m0
->m_pkthdr
.len
+ MINRECLEN
) <
3254 (ifp
->if_mtu
- IGMP_LEADINGSPACE
)) {
3256 m0srcs
= (ifp
->if_mtu
- m0
->m_pkthdr
.len
-
3257 sizeof(struct igmp_grouprec
)) /
3259 IGMP_PRINTF(("%s: use previous packet\n",
3262 m
= m_getcl(M_DONTWAIT
, MT_DATA
, M_PKTHDR
);
3264 m
->m_data
+= IGMP_LEADINGSPACE
;
3266 m
= m_gethdr(M_DONTWAIT
, MT_DATA
);
3268 MH_ALIGN(m
, IGMP_LEADINGSPACE
);
3271 IGMP_PRINTF(("%s: m_get*() failed\n",
3275 m
->m_pkthdr
.vt_nrecs
= 0;
3276 m0srcs
= (ifp
->if_mtu
- IGMP_LEADINGSPACE
-
3277 sizeof(struct igmp_grouprec
)) /
3280 IGMP_PRINTF(("%s: allocated new packet\n",
3284 * Append the IGMP group record header to the
3285 * current packet's data area.
3286 * Recalculate pointer to free space for next
3287 * group record, in case m_append() allocated
3288 * a new mbuf or cluster.
3290 memset(&ig
, 0, sizeof(ig
));
3291 ig
.ig_group
= inm
->inm_addr
;
3292 if (!m_append(m
, sizeof(ig
), (void *)&ig
)) {
3295 IGMP_PRINTF(("%s: m_append() failed\n",
3299 npbytes
+= sizeof(struct igmp_grouprec
);
3301 /* new packet; offset in c hain */
3302 md
= m_getptr(m
, npbytes
-
3303 sizeof(struct igmp_grouprec
), &off
);
3304 pig
= (struct igmp_grouprec
*)(mtod(md
,
3307 /* current packet; offset from last append */
3309 pig
= (struct igmp_grouprec
*)(mtod(md
,
3310 uint8_t *) + md
->m_len
-
3311 sizeof(struct igmp_grouprec
));
3314 * Begin walking the tree for this record type
3315 * pass, or continue from where we left off
3316 * previously if we had to allocate a new packet.
3317 * Only report deltas in-mode at t1.
3318 * We need not report included sources as allowed
3319 * if we are in inclusive mode on the group,
3320 * however the converse is not true.
3324 nims
= RB_MIN(ip_msource_tree
, &inm
->inm_srcs
);
3325 RB_FOREACH_FROM(ims
, ip_msource_tree
, nims
) {
3326 IGMP_PRINTF(("%s: visit node %s\n",
3327 __func__
, inet_ntoa_haddr(ims
->ims_haddr
)));
3328 now
= ims_get_mode(inm
, ims
, 1);
3329 then
= ims_get_mode(inm
, ims
, 0);
3330 IGMP_PRINTF(("%s: mode: t0 %d, t1 %d\n",
3331 __func__
, then
, now
));
3333 IGMP_PRINTF(("%s: skip unchanged\n",
3337 if (mode
== MCAST_EXCLUDE
&&
3338 now
== MCAST_INCLUDE
) {
3339 IGMP_PRINTF(("%s: skip IN src on EX "
3340 "group\n", __func__
));
3343 nrt
= (rectype_t
)now
;
3344 if (nrt
== REC_NONE
)
3345 nrt
= (rectype_t
)(~mode
& REC_FULL
);
3346 if (schanged
++ == 0) {
3348 } else if (crt
!= nrt
)
3350 naddr
= htonl(ims
->ims_haddr
);
3351 if (!m_append(m
, sizeof(in_addr_t
),
3355 IGMP_PRINTF(("%s: m_append() failed\n",
3359 nallow
+= !!(crt
== REC_ALLOW
);
3360 nblock
+= !!(crt
== REC_BLOCK
);
3361 if (++rsrcs
== m0srcs
)
3365 * If we did not append any tree nodes on this
3366 * pass, back out of allocations.
3369 npbytes
-= sizeof(struct igmp_grouprec
);
3371 IGMP_PRINTF(("%s: m_free(m)\n",
3375 IGMP_PRINTF(("%s: m_adj(m, -ig)\n",
3377 m_adj(m
, -((int)sizeof(
3378 struct igmp_grouprec
)));
3382 npbytes
+= (rsrcs
* sizeof(in_addr_t
));
3383 if (crt
== REC_ALLOW
)
3384 pig
->ig_type
= IGMP_ALLOW_NEW_SOURCES
;
3385 else if (crt
== REC_BLOCK
)
3386 pig
->ig_type
= IGMP_BLOCK_OLD_SOURCES
;
3387 pig
->ig_numsrc
= htons(rsrcs
);
3389 * Count the new group record, and enqueue this
3390 * packet if it wasn't already queued.
3392 m
->m_pkthdr
.vt_nrecs
++;
3393 m
->m_pkthdr
.rcvif
= ifp
;
3397 } while (nims
!= NULL
);
3399 crt
= (~crt
& REC_FULL
);
3402 IGMP_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__
,
3409 igmp_v3_merge_state_changes(struct in_multi
*inm
, struct ifqueue
*ifscq
)
3412 struct mbuf
*m
; /* pending state-change */
3413 struct mbuf
*m0
; /* copy of pending state-change */
3414 struct mbuf
*mt
; /* last state-change in packet */
3416 int docopy
, domerge
;
3419 INM_LOCK_ASSERT_HELD(inm
);
3426 * If there are further pending retransmissions, make a writable
3427 * copy of each queued state-change message before merging.
3429 if (inm
->inm_scrv
> 0)
3434 if (gq
->ifq_head
== NULL
) {
3435 IGMP_PRINTF(("%s: WARNING: queue for inm %p is empty\n",
3441 * Use IF_REMQUEUE() instead of IF_DEQUEUE() below, since the
3442 * packet might not always be at the head of the ifqueue.
3447 * Only merge the report into the current packet if
3448 * there is sufficient space to do so; an IGMPv3 report
3449 * packet may only contain 65,535 group records.
3450 * Always use a simple mbuf chain concatentation to do this,
3451 * as large state changes for single groups may have
3452 * allocated clusters.
3455 mt
= ifscq
->ifq_tail
;
3457 recslen
= m_length(m
);
3459 if ((mt
->m_pkthdr
.vt_nrecs
+
3460 m
->m_pkthdr
.vt_nrecs
<=
3461 IGMP_V3_REPORT_MAXRECS
) &&
3462 (mt
->m_pkthdr
.len
+ recslen
<=
3463 (inm
->inm_ifp
->if_mtu
- IGMP_LEADINGSPACE
)))
3467 if (!domerge
&& IF_QFULL(gq
)) {
3468 IGMP_PRINTF(("%s: outbound queue full, skipping whole "
3469 "packet %p\n", __func__
, m
));
3480 IGMP_PRINTF(("%s: dequeueing %p\n", __func__
, m
));
3486 IGMP_PRINTF(("%s: copying %p\n", __func__
, m
));
3487 m0
= m_dup(m
, M_NOWAIT
);
3490 m0
->m_nextpkt
= NULL
;
3495 IGMP_PRINTF(("%s: queueing %p to ifscq %p)\n",
3496 __func__
, m0
, ifscq
));
3497 m0
->m_pkthdr
.rcvif
= inm
->inm_ifp
;
3498 IF_ENQUEUE(ifscq
, m0
);
3500 struct mbuf
*mtl
; /* last mbuf of packet mt */
3502 IGMP_PRINTF(("%s: merging %p with ifscq tail %p)\n",
3506 m0
->m_flags
&= ~M_PKTHDR
;
3507 mt
->m_pkthdr
.len
+= recslen
;
3508 mt
->m_pkthdr
.vt_nrecs
+=
3509 m0
->m_pkthdr
.vt_nrecs
;
3519 * Respond to a pending IGMPv3 General Query.
3522 igmp_v3_dispatch_general_query(struct igmp_ifinfo
*igi
)
3525 struct in_multi
*inm
;
3526 struct in_multistep step
;
3529 IGI_LOCK_ASSERT_HELD(igi
);
3531 VERIFY(igi
->igi_version
== IGMP_VERSION_3
);
3536 in_multihead_lock_shared();
3537 IN_FIRST_MULTI(step
, inm
);
3538 while (inm
!= NULL
) {
3540 if (inm
->inm_ifp
!= ifp
)
3543 switch (inm
->inm_state
) {
3544 case IGMP_NOT_MEMBER
:
3545 case IGMP_SILENT_MEMBER
:
3547 case IGMP_REPORTING_MEMBER
:
3548 case IGMP_IDLE_MEMBER
:
3549 case IGMP_LAZY_MEMBER
:
3550 case IGMP_SLEEPING_MEMBER
:
3551 case IGMP_AWAKENING_MEMBER
:
3552 inm
->inm_state
= IGMP_REPORTING_MEMBER
;
3554 retval
= igmp_v3_enqueue_group_record(&igi
->igi_gq
,
3557 IGMP_PRINTF(("%s: enqueue record = %d\n",
3560 case IGMP_G_QUERY_PENDING_MEMBER
:
3561 case IGMP_SG_QUERY_PENDING_MEMBER
:
3562 case IGMP_LEAVING_MEMBER
:
3567 IN_NEXT_MULTI(step
, inm
);
3569 in_multihead_lock_done();
3572 loop
= (igi
->igi_flags
& IGIF_LOOPBACK
) ? 1 : 0;
3573 igmp_dispatch_queue(igi
, &igi
->igi_gq
, IGMP_MAX_RESPONSE_BURST
,
3575 IGI_LOCK_ASSERT_HELD(igi
);
3577 * Slew transmission of bursts over 500ms intervals.
3579 if (igi
->igi_gq
.ifq_head
!= NULL
) {
3580 igi
->igi_v3_timer
= 1 + IGMP_RANDOM_DELAY(
3581 IGMP_RESPONSE_BURST_INTERVAL
);
3582 interface_timers_running
= 1;
3587 * Transmit the next pending IGMP message in the output queue.
3589 * Must not be called with inm_lock or igi_lock held.
3592 igmp_sendpkt(struct mbuf
*m
, struct ifnet
*ifp
)
3594 struct ip_moptions
*imo
;
3595 struct mbuf
*ipopts
, *m0
;
3599 IGMP_PRINTF(("%s: transmit %p\n", __func__
, m
));
3602 * Check if the ifnet is still attached.
3604 if (ifp
== NULL
|| !ifnet_is_attached(ifp
, 0)) {
3605 IGMP_PRINTF(("%s: dropped %p as ifp u went away.\n",
3608 OSAddAtomic(1, &ipstat
.ips_noroute
);
3612 ipopts
= igmp_sendra
? m_raopt
: NULL
;
3614 imo
= ip_allocmoptions(M_WAITOK
);
3620 imo
->imo_multicast_ttl
= 1;
3621 imo
->imo_multicast_vif
= -1;
3623 imo
->imo_multicast_loop
= (ip_mrouter
!= NULL
);
3625 imo
->imo_multicast_loop
= 0;
3629 * If the user requested that IGMP traffic be explicitly
3630 * redirected to the loopback interface (e.g. they are running a
3631 * MANET interface and the routing protocol needs to see the
3632 * updates), handle this now.
3634 if (m
->m_flags
& M_IGMP_LOOP
)
3635 imo
->imo_multicast_ifp
= lo_ifp
;
3637 imo
->imo_multicast_ifp
= ifp
;
3639 if (m
->m_flags
& M_IGMPV2
) {
3642 m0
= igmp_v3_encap_report(ifp
, m
);
3645 * If igmp_v3_encap_report() failed, then M_PREPEND()
3646 * already freed the original mbuf chain.
3647 * This means that we don't have to m_freem(m) here.
3649 IGMP_PRINTF(("%s: dropped %p\n", __func__
, m
));
3651 atomic_add_32(&ipstat
.ips_odropped
, 1);
3656 m
->m_flags
&= ~(M_PROTOFLAGS
| M_IGMP_LOOP
);
3657 m0
->m_pkthdr
.rcvif
= lo_ifp
;
3659 mac_netinet_igmp_send(ifp
, m0
);
3661 bzero(&ro
, sizeof (ro
));
3662 error
= ip_output(m0
, ipopts
, &ro
, 0, imo
, NULL
);
3663 if (ro
.ro_rt
!= NULL
) {
3671 IGMP_PRINTF(("%s: ip_output(%p) = %d\n", __func__
, m0
, error
));
3675 IGMPSTAT_INC(igps_snd_reports
);
3676 OIGMPSTAT_INC(igps_snd_reports
);
3679 * Encapsulate an IGMPv3 report.
3681 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf
3682 * chain has already had its IP/IGMPv3 header prepended. In this case
3683 * the function will not attempt to prepend; the lengths and checksums
3684 * will however be re-computed.
3686 * Returns a pointer to the new mbuf chain head, or NULL if the
3687 * allocation failed.
3689 static struct mbuf
*
3690 igmp_v3_encap_report(struct ifnet
*ifp
, struct mbuf
*m
)
3692 struct igmp_report
*igmp
;
3694 int hdrlen
, igmpreclen
;
3696 VERIFY((m
->m_flags
& M_PKTHDR
));
3698 igmpreclen
= m_length(m
);
3699 hdrlen
= sizeof(struct ip
) + sizeof(struct igmp_report
);
3701 if (m
->m_flags
& M_IGMPV3_HDR
) {
3702 igmpreclen
-= hdrlen
;
3704 M_PREPEND(m
, hdrlen
, M_DONTWAIT
);
3707 m
->m_flags
|= M_IGMPV3_HDR
;
3710 IGMP_PRINTF(("%s: igmpreclen is %d\n", __func__
, igmpreclen
));
3712 m
->m_data
+= sizeof(struct ip
);
3713 m
->m_len
-= sizeof(struct ip
);
3715 igmp
= mtod(m
, struct igmp_report
*);
3716 igmp
->ir_type
= IGMP_v3_HOST_MEMBERSHIP_REPORT
;
3719 igmp
->ir_numgrps
= htons(m
->m_pkthdr
.vt_nrecs
);
3721 igmp
->ir_cksum
= in_cksum(m
, sizeof(struct igmp_report
) + igmpreclen
);
3722 m
->m_pkthdr
.vt_nrecs
= 0;
3724 m
->m_data
-= sizeof(struct ip
);
3725 m
->m_len
+= sizeof(struct ip
);
3727 ip
= mtod(m
, struct ip
*);
3728 ip
->ip_tos
= IPTOS_PREC_INTERNETCONTROL
;
3729 ip
->ip_len
= hdrlen
+ igmpreclen
;
3731 ip
->ip_p
= IPPROTO_IGMP
;
3734 ip
->ip_src
.s_addr
= INADDR_ANY
;
3736 if (m
->m_flags
& M_IGMP_LOOP
) {
3737 struct in_ifaddr
*ia
;
3741 IFA_LOCK(&ia
->ia_ifa
);
3742 ip
->ip_src
= ia
->ia_addr
.sin_addr
;
3743 IFA_UNLOCK(&ia
->ia_ifa
);
3744 IFA_REMREF(&ia
->ia_ifa
);
3748 ip
->ip_dst
.s_addr
= htonl(INADDR_ALLRPTS_GROUP
);
3755 igmp_rec_type_to_str(const int type
)
3758 case IGMP_CHANGE_TO_EXCLUDE_MODE
:
3761 case IGMP_CHANGE_TO_INCLUDE_MODE
:
3764 case IGMP_MODE_IS_EXCLUDE
:
3767 case IGMP_MODE_IS_INCLUDE
:
3770 case IGMP_ALLOW_NEW_SOURCES
:
3773 case IGMP_BLOCK_OLD_SOURCES
:
3787 IGMP_PRINTF(("%s: initializing\n", __func__
));
3789 igmp_timers_are_running
= 0;
3791 /* Setup lock group and attribute for igmp_mtx */
3792 igmp_mtx_grp_attr
= lck_grp_attr_alloc_init();
3793 igmp_mtx_grp
= lck_grp_alloc_init("igmp_mtx", igmp_mtx_grp_attr
);
3794 igmp_mtx_attr
= lck_attr_alloc_init();
3795 lck_mtx_init(&igmp_mtx
, igmp_mtx_grp
, igmp_mtx_attr
);
3797 LIST_INIT(&igi_head
);
3798 m_raopt
= igmp_ra_alloc();
3800 igi_size
= sizeof (struct igmp_ifinfo
);
3801 igi_zone
= zinit(igi_size
, IGI_ZONE_MAX
* igi_size
,
3803 if (igi_zone
== NULL
) {
3804 panic("%s: failed allocating %s", __func__
, IGI_ZONE_NAME
);
3807 zone_change(igi_zone
, Z_EXPAND
, TRUE
);
3808 zone_change(igi_zone
, Z_CALLERACCT
, FALSE
);