/*
 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*-
 * Copyright (c) 2009 Bruce Simpson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1988 Stephen Deering.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Stephen Deering of Stanford University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)igmp.c	8.1 (Berkeley) 7/19/93
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mcache.h>

#include <dev/random/randomdev.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/mld6.h>
#include <netinet6/mld6_var.h>
/* Lock group and attribute for mld_mtx */
static lck_attr_t	*mld_mtx_attr;
static lck_grp_t	*mld_mtx_grp;
static lck_grp_attr_t	*mld_mtx_grp_attr;
/*
 * Locking and reference counting:
 *
 * mld_mtx mainly protects mli_head.  In cases where both mld_mtx and
 * in6_multihead_lock must be held, the former must be acquired first in order
 * to maintain lock ordering.  It is not a requirement that mld_mtx be
 * acquired first before in6_multihead_lock, but in case both must be acquired
 * in succession, the correct lock ordering must be followed.
 *
 * Instead of walking the if_multiaddrs list at the interface and returning
 * the ifma_protospec value of a matching entry, we search the global list
 * of in6_multi records and find it that way; this is done with in6_multihead
 * lock held.  Doing so avoids the race condition issues that many other BSDs
 * suffer from (therefore in our implementation, ifma_protospec will never be
 * NULL for as long as the in6_multi is valid.)
 *
 * The above creates a requirement for the in6_multi to stay in the
 * in6_multihead list even after the final MLD leave (in MLDv2 mode) until
 * it no longer needs to be retransmitted (this is not required for MLDv1.)
 * In order to handle this, the request and reference counts of the in6_multi
 * are bumped up when the state changes to MLD_LEAVING_MEMBER, and later
 * dropped in the timeout handler.  Each in6_multi holds a reference to the
 * underlying mld_ifinfo.
 *
 * Thus, the permitted lock order is:
 *
 *	mld_mtx, in6_multihead_lock, inm6_lock, mli_lock
 *
 * Any may be taken independently, but if any are held at the same time,
 * the above lock order must be followed.
 */
static decl_lck_mtx_data(, mld_mtx);

SLIST_HEAD(mld_in6m_relhead, in6_multi);
static void	mli_initvar(struct mld_ifinfo *, struct ifnet *, int);
static struct mld_ifinfo *mli_alloc(int);
static void	mli_free(struct mld_ifinfo *);
static void	mli_delete(const struct ifnet *, struct mld_in6m_relhead *);
static void	mld_dispatch_packet(struct mbuf *);
static void	mld_final_leave(struct in6_multi *, struct mld_ifinfo *,
		    struct mld_tparams *);
static int	mld_handle_state_change(struct in6_multi *, struct mld_ifinfo *,
		    struct mld_tparams *);
static int	mld_initial_join(struct in6_multi *, struct mld_ifinfo *,
		    struct mld_tparams *, const int);
#ifdef MLD_DEBUG
static const char *	mld_rec_type_to_str(const int);
#endif
static uint32_t	mld_set_version(struct mld_ifinfo *, const int);
static void	mld_flush_relq(struct mld_ifinfo *, struct mld_in6m_relhead *);
static void	mld_dispatch_queue_locked(struct mld_ifinfo *, struct ifqueue *, int);
static int	mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
		    /*const*/ struct mld_hdr *);
static int	mld_v1_input_report(struct ifnet *, struct mbuf *,
		    const struct ip6_hdr *, /*const*/ struct mld_hdr *);
static void	mld_v1_process_group_timer(struct in6_multi *, const int);
static void	mld_v1_process_querier_timers(struct mld_ifinfo *);
static int	mld_v1_transmit_report(struct in6_multi *, const int);
static uint32_t	mld_v1_update_group(struct in6_multi *, const int);
static void	mld_v2_cancel_link_timers(struct mld_ifinfo *);
static uint32_t	mld_v2_dispatch_general_query(struct mld_ifinfo *);
static struct mbuf *
		mld_v2_encap_report(struct ifnet *, struct mbuf *);
static int	mld_v2_enqueue_filter_change(struct ifqueue *,
		    struct in6_multi *);
static int	mld_v2_enqueue_group_record(struct ifqueue *,
		    struct in6_multi *, const int, const int, const int,
		    const int);
static int	mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
		    struct mbuf *, const int, const int);
static int	mld_v2_merge_state_changes(struct in6_multi *,
		    struct ifqueue *);
static void	mld_v2_process_group_timers(struct mld_ifinfo *,
		    struct ifqueue *, struct ifqueue *,
		    struct in6_multi *, const int);
static int	mld_v2_process_group_query(struct in6_multi *,
		    int, struct mbuf *, const int);
static int	sysctl_mld_gsr SYSCTL_HANDLER_ARGS;
static int	sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS;
static int	sysctl_mld_v2enable SYSCTL_HANDLER_ARGS;

static int mld_timeout_run;		/* MLD timer is scheduled to run */
static void mld_timeout(void *);
static void mld_sched_timeout(void);
/*
 * Normative references: RFC 2710, RFC 3590, RFC 3810.
 */
static struct timeval mld_gsrdelay = {10, 0};
static LIST_HEAD(, mld_ifinfo) mli_head;

static int querier_present_timers_running6;
static int interface_timers_running6;
static int state_change_timers_running6;
static int current_state_timers_running6;

static unsigned int mld_mli_list_genid;
/*
 * Subsystem lock macros.
 */
#define	MLD_LOCK()			\
	lck_mtx_lock(&mld_mtx)
#define	MLD_LOCK_ASSERT_HELD()		\
	LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_OWNED)
#define	MLD_LOCK_ASSERT_NOTHELD()	\
	LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_NOTOWNED)
#define	MLD_UNLOCK()			\
	lck_mtx_unlock(&mld_mtx)
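
#if 0
/*
 * Hypothetical sketch (added for illustration; not part of the original
 * source): any path that needs both the global MLD lock and the
 * in6_multihead lock must honor the lock order documented above, i.e.
 * mld_mtx first, then in6_multihead_lock.
 */
static void
mld_lock_order_sketch(void)
{
	MLD_LOCK();			/* 1: global MLD mutex */
	in6_multihead_lock_shared();	/* 2: global in6_multi list lock */
	/* ... inspect mli_head / in6_multihead here ... */
	in6_multihead_lock_done();
	MLD_UNLOCK();
}
#endif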
#define	MLD_ADD_DETACHED_IN6M(_head, _in6m) {				\
	SLIST_INSERT_HEAD(_head, _in6m, in6m_dtle);			\
}

#define	MLD_REMOVE_DETACHED_IN6M(_head) {				\
	struct in6_multi *_in6m, *_inm_tmp;				\
	SLIST_FOREACH_SAFE(_in6m, _head, in6m_dtle, _inm_tmp) {		\
		SLIST_REMOVE(_head, _in6m, in6_multi, in6m_dtle);	\
		IN6M_REMREF(_in6m);					\
	}								\
	VERIFY(SLIST_EMPTY(_head));					\
}
#define	MLI_ZONE_MAX		64		/* maximum elements in zone */
#define	MLI_ZONE_NAME		"mld_ifinfo"	/* zone name */

static unsigned int mli_size;			/* size of zone element */
static struct zone *mli_zone;			/* zone for mld_ifinfo */
SYSCTL_DECL(_net_inet6);	/* Note: Not in any common header. */

SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "IPv6 Multicast Listener Discovery");
SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_gsrdelay.tv_sec, 0, sysctl_mld_gsr, "I",
    "Rate limit for MLDv2 Group-and-Source queries in seconds");

SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_LOCKED,
    sysctl_mld_ifinfo, "Per-interface MLDv2 state");

static int mld_v1enable = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_v1enable, 0, "Enable fallback to MLDv1");

static int mld_v2enable = 1;
SYSCTL_PROC(_net_inet6_mld, OID_AUTO, v2enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_v2enable, 0, sysctl_mld_v2enable, "I",
    "Enable MLDv2 (debug purposes only)");

static int mld_use_allow = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
#ifdef MLD_DEBUG
int mld_debug = 0;
SYSCTL_INT(_net_inet6_mld, OID_AUTO,
    debug, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_debug, 0, "");
#endif
/*
 * Packed Router Alert option structure declaration.
 */
struct mld_raopt {
	struct ip6_hbh		hbh;
	struct ip6_opt		pad;
	struct ip6_opt_router	ra;
} __packed;

/*
 * Router Alert hop-by-hop option header.
 */
static struct mld_raopt mld_ra = {
	.hbh = { 0, 0 },
	.pad = { .ip6o_type = IP6OPT_PADN, 0 },
	.ra = {
		.ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT,
		.ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2),
		.ip6or_value = {((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
		    (IP6OPT_RTALERT_MLD & 0xFF) }
	}
};
static struct ip6_pktopts mld_po;
/* Store MLDv2 record count in the module private scratch space */
#define	vt_nrecs	pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0]
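
/*
 * Illustrative note (added): the alias above lets the v2 output path
 * keep the number of group records in an outbound chain directly on
 * the packet header, e.g. "m->m_pkthdr.vt_nrecs = nrecs;", instead of
 * carrying a separate side table alongside the queue.
 */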
static __inline void
mld_save_context(struct mbuf *m, struct ifnet *ifp)
{
	m->m_pkthdr.rcvif = ifp;
}

static __inline void
mld_scrub_context(struct mbuf *m)
{
	m->m_pkthdr.rcvif = NULL;
}

/*
 * Restore context from a queued output chain.
 */
static __inline struct ifnet *
mld_restore_context(struct mbuf *m)
{
	return (m->m_pkthdr.rcvif);
}
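
#if 0
/*
 * Hypothetical usage sketch (added for illustration): an outbound chain
 * is stamped with its interface before being queued, and the interface
 * is recovered (and then scrubbed) at dispatch time.
 */
static void
mld_context_usage_sketch(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp)
{
	mld_save_context(m, ifp);	/* before the chain is enqueued */
	IF_ENQUEUE(ifq, m);

	IF_DEQUEUE(ifq, m);		/* later, at dispatch time */
	ifp = mld_restore_context(m);
	mld_scrub_context(m);		/* clear before handing to ip6_output() */
}
#endif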
/*
 * Retrieve or set threshold between group-source queries in seconds.
 */
static int
sysctl_mld_gsr SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error;
	int i;

	MLD_LOCK();

	i = mld_gsrdelay.tv_sec;

	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		goto out_locked;

	if (i < -1 || i >= 60) {
		error = EINVAL;
		goto out_locked;
	}

	mld_gsrdelay.tv_sec = i;

out_locked:
	MLD_UNLOCK();
	return (error);
}
/*
 * Expose struct mld_ifinfo to userland, keyed by ifindex.
 * For use by ifmcstat(8).
 */
static int
sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	int			*name;
	int			 error;
	u_int			 namelen;
	struct ifnet		*ifp;
	struct mld_ifinfo	*mli;
	struct mld_ifinfo_u	mli_u;

	name = (int *)arg1;
	namelen = arg2;

	if (req->newptr != USER_ADDR_NULL)
		return (EPERM);

	if (namelen != 1)
		return (EINVAL);

	MLD_LOCK();

	if (name[0] <= 0 || name[0] > (u_int)if_index) {
		error = ENOENT;
		goto out_locked;
	}

	error = ENOENT;

	ifnet_head_lock_shared();
	ifp = ifindex2ifnet[name[0]];
	ifnet_head_done();
	if (ifp == NULL)
		goto out_locked;

	bzero(&mli_u, sizeof (mli_u));

	LIST_FOREACH(mli, &mli_head, mli_link) {
		MLI_LOCK(mli);
		if (ifp != mli->mli_ifp) {
			MLI_UNLOCK(mli);
			continue;
		}

		mli_u.mli_ifindex = mli->mli_ifp->if_index;
		mli_u.mli_version = mli->mli_version;
		mli_u.mli_v1_timer = mli->mli_v1_timer;
		mli_u.mli_v2_timer = mli->mli_v2_timer;
		mli_u.mli_flags = mli->mli_flags;
		mli_u.mli_rv = mli->mli_rv;
		mli_u.mli_qi = mli->mli_qi;
		mli_u.mli_qri = mli->mli_qri;
		mli_u.mli_uri = mli->mli_uri;
		MLI_UNLOCK(mli);

		error = SYSCTL_OUT(req, &mli_u, sizeof (mli_u));
		break;
	}

out_locked:
	MLD_UNLOCK();
	return (error);
}
static int
sysctl_mld_v2enable SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error;
	int i;
	struct mld_ifinfo *mli;
	struct mld_tparams mtp = { 0, 0, 0, 0 };

	MLD_LOCK();

	i = mld_v2enable;

	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		goto out_locked;

	if (i < 0 || i > 1) {
		error = EINVAL;
		goto out_locked;
	}

	mld_v2enable = i;
	/*
	 * If we enabled v2, the state transition will take care of upgrading
	 * the MLD version back to v2. Otherwise, we have to explicitly
	 * downgrade. Note that this functionality is to be used for debugging.
	 */
	if (mld_v2enable == 1)
		goto out_locked;

	LIST_FOREACH(mli, &mli_head, mli_link) {
		MLI_LOCK(mli);
		if (mld_set_version(mli, MLD_VERSION_1) > 0)
			mtp.qpt = 1;
		MLI_UNLOCK(mli);
	}

out_locked:
	MLD_UNLOCK();

	mld_set_timeout(&mtp);

	return (error);
}
/*
 * Dispatch an entire queue of pending packet chains.
 *
 * Must not be called with in6m_lock held.
 * XXX This routine unlocks the MLD global lock and also mli locks.
 * Make sure that the calling routine takes a reference on the mli
 * before calling this routine.
 * Also if we are traversing mli_head, remember to check for the
 * mli list generation count and restart the loop if the generation
 * count has changed.
 */
static void
mld_dispatch_queue_locked(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit)
{
	struct mbuf *m;

	MLD_LOCK_ASSERT_HELD();

	if (mli != NULL)
		MLI_LOCK_ASSERT_HELD(mli);

	for (;;) {
		IF_DEQUEUE(ifq, m);
		if (m == NULL)
			break;
		MLD_PRINTF(("%s: dispatch 0x%llx from 0x%llx\n", __func__,
		    (uint64_t)VM_KERNEL_ADDRPERM(ifq),
		    (uint64_t)VM_KERNEL_ADDRPERM(m)));

		if (mli != NULL)
			MLI_UNLOCK(mli);
		MLD_UNLOCK();

		mld_dispatch_packet(m);

		MLD_LOCK();
		if (mli != NULL)
			MLI_LOCK(mli);

		if (--limit == 0)
			break;
	}

	if (mli != NULL)
		MLI_LOCK_ASSERT_HELD(mli);
}
/*
 * Filter outgoing MLD report state by group.
 *
 * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1)
 * and node-local addresses. However, kernel and socket consumers
 * always embed the KAME scope ID in the address provided, so strip it
 * when performing comparison.
 * Note: This is not the same as the *multicast* scope.
 *
 * Return zero if the given group is one for which MLD reports
 * should be suppressed, or non-zero if reports should be issued.
 */
static __inline__ int
mld_is_addr_reported(const struct in6_addr *addr)
{

	VERIFY(IN6_IS_ADDR_MULTICAST(addr));

	if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL)
		return (0);

	if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) {
		struct in6_addr tmp = *addr;
		in6_clearscope(&tmp);
		if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes))
			return (0);
	}

	return (1);
}
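
/*
 * Example (added for illustration): given the rules above, ff02::1
 * (all-nodes) and any node-local ff01:: group are never reported,
 * while e.g. a link-local solicited-node group ff02::1:ffxx:xxxx is.
 */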
/*
 * Attach MLD when PF_INET6 is attached to an interface.
 */
struct mld_ifinfo *
mld_domifattach(struct ifnet *ifp, int how)
{
	struct mld_ifinfo *mli;

	MLD_PRINTF(("%s: called for ifp 0x%llx(%s)\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	mli = mli_alloc(how);
	if (mli == NULL)
		return (NULL);

	MLD_LOCK();

	MLI_LOCK(mli);
	mli_initvar(mli, ifp, 0);
	mli->mli_debug |= IFD_ATTACHED;
	MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
	MLI_ADDREF_LOCKED(mli); /* hold a reference for caller */
	MLI_UNLOCK(mli);
	ifnet_lock_shared(ifp);
	mld6_initsilent(ifp, mli);
	ifnet_lock_done(ifp);

	LIST_INSERT_HEAD(&mli_head, mli, mli_link);
	mld_mli_list_genid++;

	MLD_UNLOCK();

	MLD_PRINTF(("%s: allocate mld_ifinfo for ifp 0x%llx(%s)\n",
	    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	return (mli);
}
/*
 * Attach MLD when PF_INET6 is reattached to an interface.  Caller is
 * expected to have an outstanding reference to the mli.
 */
void
mld_domifreattach(struct mld_ifinfo *mli)
{
	struct ifnet *ifp;

	MLD_LOCK();

	MLI_LOCK(mli);
	VERIFY(!(mli->mli_debug & IFD_ATTACHED));
	ifp = mli->mli_ifp;
	VERIFY(ifp != NULL);
	mli_initvar(mli, ifp, 1);
	mli->mli_debug |= IFD_ATTACHED;
	MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
	MLI_UNLOCK(mli);
	ifnet_lock_shared(ifp);
	mld6_initsilent(ifp, mli);
	ifnet_lock_done(ifp);

	LIST_INSERT_HEAD(&mli_head, mli, mli_link);
	mld_mli_list_genid++;

	MLD_UNLOCK();

	MLD_PRINTF(("%s: reattached mld_ifinfo for ifp 0x%llx(%s)\n",
	    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
}
/*
 * Hook for domifdetach.
 */
void
mld_domifdetach(struct ifnet *ifp)
{
	SLIST_HEAD(, in6_multi) in6m_dthead;

	SLIST_INIT(&in6m_dthead);

	MLD_PRINTF(("%s: called for ifp 0x%llx(%s)\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	MLD_LOCK();
	mli_delete(ifp, (struct mld_in6m_relhead *)&in6m_dthead);
	MLD_UNLOCK();

	/* Now that we've dropped all locks, release detached records */
	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
}
/*
 * Called at interface detach time.  Note that we only flush all deferred
 * responses and record releases; all remaining inm records and their source
 * entries related to this interface are left intact, in order to handle
 * the reattach case.
 */
static void
mli_delete(const struct ifnet *ifp, struct mld_in6m_relhead *in6m_dthead)
{
	struct mld_ifinfo *mli, *tmli;

	MLD_LOCK_ASSERT_HELD();

	LIST_FOREACH_SAFE(mli, &mli_head, mli_link, tmli) {
		MLI_LOCK(mli);
		if (mli->mli_ifp == ifp) {
			/*
			 * Free deferred General Query responses.
			 */
			IF_DRAIN(&mli->mli_gq);
			IF_DRAIN(&mli->mli_v1q);
			mld_flush_relq(mli, in6m_dthead);
			VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
			mli->mli_debug &= ~IFD_ATTACHED;
			MLI_UNLOCK(mli);

			LIST_REMOVE(mli, mli_link);
			MLI_REMREF(mli); /* release mli_head reference */
			mld_mli_list_genid++;
			return;
		}
		MLI_UNLOCK(mli);
	}
	panic("%s: mld_ifinfo not found for ifp %p(%s)\n", __func__,
	    ifp, ifp->if_xname);
}
__private_extern__ void
mld6_initsilent(struct ifnet *ifp, struct mld_ifinfo *mli)
{
	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_OWNED);

	MLI_LOCK_ASSERT_NOTHELD(mli);
	MLI_LOCK(mli);
	if (!(ifp->if_flags & IFF_MULTICAST) &&
	    (ifp->if_eflags & (IFEF_IPV6_ND6ALT|IFEF_LOCALNET_PRIVATE)))
		mli->mli_flags |= MLIF_SILENT;
	else
		mli->mli_flags &= ~MLIF_SILENT;
	MLI_UNLOCK(mli);
}
static void
mli_initvar(struct mld_ifinfo *mli, struct ifnet *ifp, int reattach)
{
	MLI_LOCK_ASSERT_HELD(mli);

	mli->mli_ifp = ifp;
	if (mld_v2enable)
		mli->mli_version = MLD_VERSION_2;
	else
		mli->mli_version = MLD_VERSION_1;
	mli->mli_flags = 0;
	mli->mli_rv = MLD_RV_INIT;
	mli->mli_qi = MLD_QI_INIT;
	mli->mli_qri = MLD_QRI_INIT;
	mli->mli_uri = MLD_URI_INIT;

	if (mld_use_allow)
		mli->mli_flags |= MLIF_USEALLOW;
	if (!reattach)
		SLIST_INIT(&mli->mli_relinmhead);

	/*
	 * Responses to general queries are subject to bounds.
	 */
	mli->mli_gq.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS;
	mli->mli_v1q.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS;
}
static struct mld_ifinfo *
mli_alloc(int how)
{
	struct mld_ifinfo *mli;

	mli = (how == M_WAITOK) ? zalloc(mli_zone) : zalloc_noblock(mli_zone);
	if (mli != NULL) {
		bzero(mli, mli_size);
		lck_mtx_init(&mli->mli_lock, mld_mtx_grp, mld_mtx_attr);
		mli->mli_debug |= IFD_ALLOC;
	}
	return (mli);
}
static void
mli_free(struct mld_ifinfo *mli)
{
	MLI_LOCK(mli);
	if (mli->mli_debug & IFD_ATTACHED) {
		panic("%s: attached mli=%p is being freed", __func__, mli);
		/* NOTREACHED */
	} else if (mli->mli_ifp != NULL) {
		panic("%s: ifp not NULL for mli=%p", __func__, mli);
		/* NOTREACHED */
	} else if (!(mli->mli_debug & IFD_ALLOC)) {
		panic("%s: mli %p cannot be freed", __func__, mli);
		/* NOTREACHED */
	} else if (mli->mli_refcnt != 0) {
		panic("%s: non-zero refcnt mli=%p", __func__, mli);
		/* NOTREACHED */
	}
	mli->mli_debug &= ~IFD_ALLOC;
	MLI_UNLOCK(mli);

	lck_mtx_destroy(&mli->mli_lock, mld_mtx_grp);
	zfree(mli_zone, mli);
}
void
mli_addref(struct mld_ifinfo *mli, int locked)
{
	if (!locked)
		MLI_LOCK_SPIN(mli);
	else
		MLI_LOCK_ASSERT_HELD(mli);

	if (++mli->mli_refcnt == 0) {
		panic("%s: mli=%p wraparound refcnt", __func__, mli);
		/* NOTREACHED */
	}
	if (!locked)
		MLI_UNLOCK(mli);
}
void
mli_remref(struct mld_ifinfo *mli)
{
	SLIST_HEAD(, in6_multi) in6m_dthead;
	struct ifnet *ifp;

	MLI_LOCK_SPIN(mli);

	if (mli->mli_refcnt == 0) {
		panic("%s: mli=%p negative refcnt", __func__, mli);
		/* NOTREACHED */
	}

	--mli->mli_refcnt;
	if (mli->mli_refcnt > 0) {
		MLI_UNLOCK(mli);
		return;
	}

	ifp = mli->mli_ifp;
	mli->mli_ifp = NULL;
	IF_DRAIN(&mli->mli_gq);
	IF_DRAIN(&mli->mli_v1q);
	SLIST_INIT(&in6m_dthead);
	mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead);
	VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
	MLI_UNLOCK(mli);

	/* Now that we've dropped all locks, release detached records */
	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);

	MLD_PRINTF(("%s: freeing mld_ifinfo for ifp 0x%llx(%s)\n",
	    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	mli_free(mli);
}
/*
 * Process a received MLDv1 general or address-specific query.
 * Assumes that the query header has been pulled up to sizeof(mld_hdr).
 *
 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
 * mld_addr. This is OK as we own the mbuf chain.
 */
static int
mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
    /*const*/ struct mld_hdr *mld)
{
	struct mld_ifinfo *mli;
	struct in6_multi *inm;
	int err = 0, is_general_query;
	uint16_t timer;
	struct mld_tparams mtp = { 0, 0, 0, 0 };

	MLD_LOCK_ASSERT_NOTHELD();

	is_general_query = 0;

	if (!mld_v1enable) {
		MLD_PRINTF(("%s: ignore v1 query %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&mld->mld_addr),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		goto done;
	}

	/*
	 * RFC3810 Section 6.2: MLD queries must originate from
	 * a router's link-local address.
	 */
	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
		MLD_PRINTF(("%s: ignore v1 query src %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&ip6->ip6_src),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		goto done;
	}

	/*
	 * Do address field validation upfront before we accept
	 * the query.
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
		/*
		 * MLDv1 General Query.
		 * If this was not sent to the all-nodes group, ignore it.
		 */
		struct in6_addr dst;

		dst = ip6->ip6_dst;
		in6_clearscope(&dst);
		if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes)) {
			err = EINVAL;
			goto done;
		}
		is_general_query = 1;
	} else {
		/*
		 * Embed scope ID of receiving interface in MLD query for
		 * lookup whilst we don't hold other locks.
		 */
		in6_setscope(&mld->mld_addr, ifp, NULL);
	}

	/*
	 * Switch to MLDv1 host compatibility mode.
	 */
	mli = MLD_IFINFO(ifp);
	VERIFY(mli != NULL);

	MLI_LOCK(mli);
	mtp.qpt = mld_set_version(mli, MLD_VERSION_1);
	MLI_UNLOCK(mli);

	timer = ntohs(mld->mld_maxdelay) / MLD_TIMER_SCALE;
	if (timer == 0)
		timer = 1;

	if (is_general_query) {
		struct in6_multistep step;

		MLD_PRINTF(("%s: process v1 general query on ifp 0x%llx(%s)\n",
		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		/*
		 * For each reporting group joined on this
		 * interface, kick the report timer.
		 */
		in6_multihead_lock_shared();
		IN6_FIRST_MULTI(step, inm);
		while (inm != NULL) {
			IN6M_LOCK(inm);
			if (inm->in6m_ifp == ifp)
				mtp.cst += mld_v1_update_group(inm, timer);
			IN6M_UNLOCK(inm);
			IN6_NEXT_MULTI(step, inm);
		}
		in6_multihead_lock_done();
	} else {
		/*
		 * MLDv1 Group-Specific Query.
		 * If this is a group-specific MLDv1 query, we need only
		 * look up the single group to process it.
		 */
		in6_multihead_lock_shared();
		IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
		in6_multihead_lock_done();

		if (inm != NULL) {
			IN6M_LOCK(inm);
			MLD_PRINTF(("%s: process v1 query %s on "
			    "ifp 0x%llx(%s)\n", __func__,
			    ip6_sprintf(&mld->mld_addr),
			    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
			mtp.cst = mld_v1_update_group(inm, timer);
			IN6M_UNLOCK(inm);
			IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
		}
		/* XXX Clear embedded scope ID as userland won't expect it. */
		in6_clearscope(&mld->mld_addr);
	}
done:
	mld_set_timeout(&mtp);

	return (err);
}
/*
 * Update the report timer on a group in response to an MLDv1 query.
 *
 * If we are becoming the reporting member for this group, start the timer.
 * If we already are the reporting member for this group, and timer is
 * below the threshold, reset it.
 *
 * We may be updating the group for the first time since we switched
 * to MLDv2. If we are, then we must clear any recorded source lists,
 * and transition to REPORTING state; the group timer is overloaded
 * for group and group-source query responses.
 *
 * Unlike MLDv2, the delay per group should be jittered
 * to avoid bursts of MLDv1 reports.
 */
static uint32_t
mld_v1_update_group(struct in6_multi *inm, const int timer)
{
	IN6M_LOCK_ASSERT_HELD(inm);

	MLD_PRINTF(("%s: %s/%s timer=%d\n", __func__,
	    ip6_sprintf(&inm->in6m_addr),
	    if_name(inm->in6m_ifp), timer));

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
		break;
	case MLD_REPORTING_MEMBER:
		if (inm->in6m_timer != 0 &&
		    inm->in6m_timer <= timer) {
			MLD_PRINTF(("%s: REPORTING and timer running, "
			    "skipping.\n", __func__));
			break;
		}
		/* FALLTHROUGH */
	case MLD_SG_QUERY_PENDING_MEMBER:
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_AWAKENING_MEMBER:
		MLD_PRINTF(("%s: ->REPORTING\n", __func__));
		inm->in6m_state = MLD_REPORTING_MEMBER;
		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
		break;
	case MLD_SLEEPING_MEMBER:
		MLD_PRINTF(("%s: ->AWAKENING\n", __func__));
		inm->in6m_state = MLD_AWAKENING_MEMBER;
		break;
	case MLD_LEAVING_MEMBER:
		break;
	}

	return (inm->in6m_timer);
}
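
/*
 * Illustrative note (added): MLD_RANDOM_DELAY(timer) above picks a
 * jittered delay of at most 'timer' seconds, so e.g. a query with a
 * 10-second maximum response delay spreads per-group report timers
 * across that window rather than producing a synchronized burst.
 */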
/*
 * Process a received MLDv2 general, group-specific or
 * group-and-source-specific query.
 *
 * Assumes that the query header has been pulled up to sizeof(mldv2_query).
 *
 * Return 0 if successful, otherwise an appropriate error code is returned.
 */
static int
mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
    struct mbuf *m, const int off, const int icmp6len)
{
	struct mld_ifinfo *mli;
	struct mldv2_query *mld;
	struct in6_multi *inm;
	uint32_t maxdelay, nsrc, qqi;
	int err = 0, is_general_query;
	uint16_t timer;
	uint8_t qrv;
	struct mld_tparams mtp = { 0, 0, 0, 0 };

	MLD_LOCK_ASSERT_NOTHELD();

	is_general_query = 0;

	if (!mld_v2enable) {
		MLD_PRINTF(("%s: ignore v2 query %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&ip6->ip6_src),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		goto done;
	}

	/*
	 * RFC3810 Section 6.2: MLD queries must originate from
	 * a router's link-local address.
	 */
	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
		MLD_PRINTF(("%s: ignore v2 query src %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&ip6->ip6_src),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		goto done;
	}

	MLD_PRINTF(("%s: input v2 query on ifp 0x%llx(%s)\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off);

	maxdelay = ntohs(mld->mld_maxdelay);	/* in 1/10ths of a second */
	if (maxdelay >= 32768) {
		maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) <<
		    (MLD_MRC_EXP(maxdelay) + 3);
	}
= maxdelay
/ MLD_TIMER_SCALE
;
1067 qrv
= MLD_QRV(mld
->mld_misc
);
1069 MLD_PRINTF(("%s: clamping qrv %d to %d\n", __func__
,
1076 qqi
= MLD_QQIC_MANT(mld
->mld_qqi
) <<
1077 (MLD_QQIC_EXP(mld
->mld_qqi
) + 3);
1080 nsrc
= ntohs(mld
->mld_numsrc
);
1081 if (nsrc
> MLD_MAX_GS_SOURCES
) {
1085 if (icmp6len
< sizeof(struct mldv2_query
) +
1086 (nsrc
* sizeof(struct in6_addr
))) {
1092 * Do further input validation upfront to avoid resetting timers
1093 * should we need to discard this query.
1095 if (IN6_IS_ADDR_UNSPECIFIED(&mld
->mld_addr
)) {
1097 * A general query with a source list has undefined
1098 * behaviour; discard it.
1104 is_general_query
= 1;
1107 * Embed scope ID of receiving interface in MLD query for
1108 * lookup whilst we don't hold other locks (due to KAME
1109 * locking lameness). We own this mbuf chain just now.
1111 in6_setscope(&mld
->mld_addr
, ifp
, NULL
);
1114 mli
= MLD_IFINFO(ifp
);
1115 VERIFY(mli
!= NULL
);
1119 * Discard the v2 query if we're in Compatibility Mode.
1120 * The RFC is pretty clear that hosts need to stay in MLDv1 mode
1121 * until the Old Version Querier Present timer expires.
1123 if (mli
->mli_version
!= MLD_VERSION_2
) {
1128 mtp
.qpt
= mld_set_version(mli
, MLD_VERSION_2
);
1131 mli
->mli_qri
= MAX(timer
, MLD_QRI_MIN
);
1133 MLD_PRINTF(("%s: qrv %d qi %d qri %d\n", __func__
, mli
->mli_rv
,
1134 mli
->mli_qi
, mli
->mli_qri
));
1136 if (is_general_query
) {
1138 * MLDv2 General Query.
1140 * Schedule a current-state report on this ifp for
1141 * all groups, possibly containing source lists.
1143 * If there is a pending General Query response
1144 * scheduled earlier than the selected delay, do
1145 * not schedule any other reports.
1146 * Otherwise, reset the interface timer.
1148 MLD_PRINTF(("%s: process v2 general query on ifp 0x%llx(%s)\n",
1149 __func__
, (uint64_t)VM_KERNEL_ADDRPERM(ifp
), if_name(ifp
)));
1150 if (mli
->mli_v2_timer
== 0 || mli
->mli_v2_timer
>= timer
) {
1151 mtp
.it
= mli
->mli_v2_timer
= MLD_RANDOM_DELAY(timer
);
1157 * MLDv2 Group-specific or Group-and-source-specific Query.
1159 * Group-source-specific queries are throttled on
1160 * a per-group basis to defeat denial-of-service attempts.
1161 * Queries for groups we are not a member of on this
1162 * link are simply ignored.
1164 in6_multihead_lock_shared();
1165 IN6_LOOKUP_MULTI(&mld
->mld_addr
, ifp
, inm
);
1166 in6_multihead_lock_done();
1172 if (!ratecheck(&inm
->in6m_lastgsrtv
,
1174 MLD_PRINTF(("%s: GS query throttled.\n",
1177 IN6M_REMREF(inm
); /* from IN6_LOOKUP_MULTI */
1181 MLD_PRINTF(("%s: process v2 group query on ifp 0x%llx(%s)\n",
1182 __func__
, (uint64_t)VM_KERNEL_ADDRPERM(ifp
), if_name(ifp
)));
1184 * If there is a pending General Query response
1185 * scheduled sooner than the selected delay, no
1186 * further report need be scheduled.
1187 * Otherwise, prepare to respond to the
1188 * group-specific or group-and-source query.
1191 mtp
.it
= mli
->mli_v2_timer
;
1193 if (mtp
.it
== 0 || mtp
.it
>= timer
) {
1194 (void) mld_v2_process_group_query(inm
, timer
, m
, off
);
1195 mtp
.cst
= inm
->in6m_timer
;
1198 IN6M_REMREF(inm
); /* from IN6_LOOKUP_MULTI */
1199 /* XXX Clear embedded scope ID as userland won't expect it. */
1200 in6_clearscope(&mld
->mld_addr
);
1204 MLD_PRINTF(("%s: v2 general query response scheduled in "
1205 "T+%d seconds on ifp 0x%llx(%s)\n", __func__
, mtp
.it
,
1206 (uint64_t)VM_KERNEL_ADDRPERM(ifp
), if_name(ifp
)));
1208 mld_set_timeout(&mtp
);
/*
 * Process a received MLDv2 group-specific or group-and-source-specific
 * query.
 * Return <0 if any error occurred. Currently this is ignored.
 */
static int
mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0,
    const int off)
{
	struct mldv2_query *mld;
	int retval;
	uint16_t nsrc;

	IN6M_LOCK_ASSERT_HELD(inm);

	retval = 0;
	mld = (struct mldv2_query *)(mtod(m0, uint8_t *) + off);

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_AWAKENING_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_LEAVING_MEMBER:
		return (retval);
	case MLD_REPORTING_MEMBER:
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:
		break;
	}

	nsrc = ntohs(mld->mld_numsrc);

	/*
	 * Deal with group-specific queries upfront.
	 * If any group query is already pending, purge any recorded
	 * source-list state if it exists, and schedule a query response
	 * for this group-specific query.
	 */
	if (nsrc == 0) {
		if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
		    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
			in6m_clear_recorded(inm);
			timer = min(inm->in6m_timer, timer);
		}
		inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
		return (retval);
	}

	/*
	 * Deal with the case where a group-and-source-specific query has
	 * been received but a group-specific query is already pending.
	 */
	if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
		timer = min(inm->in6m_timer, timer);
		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
		return (retval);
	}

	/*
	 * Finally, deal with the case where a group-and-source-specific
	 * query has been received, where a response to a previous g-s-r
	 * query exists, or none exists.
	 * In this case, we need to parse the source-list which the Querier
	 * has provided us with and check if we have any source list filter
	 * entries at T1 for these sources. If we do not, there is no need
	 * to schedule a report and the query may be dropped.
	 * If we do, we must record them and schedule a current-state
	 * report for those sources.
	 */
	if (inm->in6m_nsrc > 0) {
		struct mbuf *m;
		uint8_t *sp;
		int i, nrecorded;
		int soff;

		m = m0;
		soff = off + sizeof(struct mldv2_query);
		nrecorded = 0;
		for (i = 0; i < nsrc; i++) {
			sp = mtod(m, uint8_t *) + soff;
			retval = in6m_record_source(inm,
			    (const struct in6_addr *)(void *)sp);
			if (retval < 0)
				break;
			nrecorded += retval;
			soff += sizeof(struct in6_addr);
			if (soff >= m->m_len) {
				soff = soff - m->m_len;
				m = m->m_next;
				if (m == NULL)
					break;
			}
		}
		if (nrecorded > 0) {
			MLD_PRINTF(("%s: schedule response to SG query\n",
			    __func__));
			inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
			inm->in6m_timer = MLD_RANDOM_DELAY(timer);
		}
	}

	return (retval);
}
/*
 * Process a received MLDv1 host membership report.
 * Assumes mld points to mld_hdr in pulled up mbuf chain.
 *
 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
 * mld_addr. This is OK as we own the mbuf chain.
 */
static int
mld_v1_input_report(struct ifnet *ifp, struct mbuf *m,
    const struct ip6_hdr *ip6, /*const*/ struct mld_hdr *mld)
{
	struct in6_addr src, dst;
	struct in6_ifaddr *ia;
	struct in6_multi *inm;

	if (!mld_v1enable) {
		MLD_PRINTF(("%s: ignore v1 report %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&mld->mld_addr),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		return (0);
	}

	if ((ifp->if_flags & IFF_LOOPBACK) ||
	    (m->m_pkthdr.pkt_flags & PKTF_LOOP))
		return (0);

	/*
	 * MLDv1 reports must originate from a host's link-local address,
	 * or the unspecified address (when booting).
	 */
	src = ip6->ip6_src;
	in6_clearscope(&src);
	if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
		MLD_PRINTF(("%s: ignore v1 report src %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&ip6->ip6_src),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		return (EINVAL);
	}

	/*
	 * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
	 * group, and must be directed to the group itself.
	 */
	dst = ip6->ip6_dst;
	in6_clearscope(&dst);
	if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
	    !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
		MLD_PRINTF(("%s: ignore v1 report dst %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&ip6->ip6_dst),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		return (EINVAL);
	}

	/*
	 * Make sure we don't hear our own membership report, as fast
	 * leave requires knowing that we are the only member of a
	 * group. Assume we used the link-local address if available,
	 * otherwise look for ::.
	 *
	 * XXX Note that scope ID comparison is needed for the address
	 * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
	 * performed for the on-wire address.
	 */
	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
	if (ia != NULL) {
		IFA_LOCK(&ia->ia_ifa);
		if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia)))){
			IFA_UNLOCK(&ia->ia_ifa);
			IFA_REMREF(&ia->ia_ifa);
			return (0);
		}
		IFA_UNLOCK(&ia->ia_ifa);
		IFA_REMREF(&ia->ia_ifa);
	} else if (IN6_IS_ADDR_UNSPECIFIED(&src)) {
		return (0);
	}

	MLD_PRINTF(("%s: process v1 report %s on ifp 0x%llx(%s)\n",
	    __func__, ip6_sprintf(&mld->mld_addr),
	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	/*
	 * Embed scope ID of receiving interface in MLD query for lookup
	 * whilst we don't hold other locks (due to KAME locking lameness).
	 */
	if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
		in6_setscope(&mld->mld_addr, ifp, NULL);

	/*
	 * MLDv1 report suppression.
	 * If we are a member of this group, and our membership should be
	 * reported, and our group timer is pending or about to be reset,
	 * stop our group timer by transitioning to the 'lazy' state.
	 */
	in6_multihead_lock_shared();
	IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
	in6_multihead_lock_done();

	if (inm != NULL) {
		struct mld_ifinfo *mli;

		IN6M_LOCK(inm);
		mli = inm->in6m_mli;
		VERIFY(mli != NULL);

		MLI_LOCK(mli);
		/*
		 * If we are in MLDv2 host mode, do not allow the
		 * other host's MLDv1 report to suppress our reports.
		 */
		if (mli->mli_version == MLD_VERSION_2) {
			MLI_UNLOCK(mli);
			IN6M_UNLOCK(inm);
			IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
			goto out;
		}
		MLI_UNLOCK(mli);

		inm->in6m_timer = 0;

		switch (inm->in6m_state) {
		case MLD_NOT_MEMBER:
		case MLD_SILENT_MEMBER:
		case MLD_SLEEPING_MEMBER:
			break;
		case MLD_REPORTING_MEMBER:
		case MLD_IDLE_MEMBER:
		case MLD_AWAKENING_MEMBER:
			MLD_PRINTF(("%s: report suppressed for %s on "
			    "ifp 0x%llx(%s)\n", __func__,
			    ip6_sprintf(&mld->mld_addr),
			    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
			/* FALLTHROUGH */
		case MLD_LAZY_MEMBER:
			inm->in6m_state = MLD_LAZY_MEMBER;
			break;
		case MLD_G_QUERY_PENDING_MEMBER:
		case MLD_SG_QUERY_PENDING_MEMBER:
		case MLD_LEAVING_MEMBER:
			break;
		}
		IN6M_UNLOCK(inm);
		IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
	}

out:
	/* XXX Clear embedded scope ID as userland won't expect it. */
	in6_clearscope(&mld->mld_addr);

	return (0);
}
/*
 * MLD input path.
 *
 * Assume query messages which fit in a single ICMPv6 message header
 * have been pulled up.
 * Assume that userland will want to see the message, even if it
 * otherwise fails kernel input validation; do not free it.
 * Pullup may however free the mbuf chain m if it fails.
 *
 * Return IPPROTO_DONE if we freed m. Otherwise, return 0.
 */
int
mld_input(struct mbuf *m, int off, int icmp6len)
{
	struct ifnet *ifp;
	struct ip6_hdr *ip6;
	struct mld_hdr *mld;
	int mldlen;

	MLD_PRINTF(("%s: called w/mbuf (0x%llx,%d)\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(m), off));

	ifp = m->m_pkthdr.rcvif;

	ip6 = mtod(m, struct ip6_hdr *);

	/* Pullup to appropriate size. */
	mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
	if (mld->mld_type == MLD_LISTENER_QUERY &&
	    icmp6len >= sizeof(struct mldv2_query)) {
		mldlen = sizeof(struct mldv2_query);
	} else {
		mldlen = sizeof(struct mld_hdr);
	}
	IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen);
	if (mld == NULL) {
		icmp6stat.icp6s_badlen++;
		return (IPPROTO_DONE);
	}

	/*
	 * Userland needs to see all of this traffic for implementing
	 * the endpoint discovery portion of multicast routing.
	 */
	switch (mld->mld_type) {
	case MLD_LISTENER_QUERY:
		icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
		if (icmp6len == sizeof(struct mld_hdr)) {
			if (mld_v1_input_query(ifp, ip6, mld) != 0)
				return (0);
		} else if (icmp6len >= sizeof(struct mldv2_query)) {
			if (mld_v2_input_query(ifp, ip6, m, off,
			    icmp6len) != 0)
				return (0);
		}
		break;
	case MLD_LISTENER_REPORT:
		icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
		if (mld_v1_input_report(ifp, m, ip6, mld) != 0)
			return (0);
		break;
	case MLDV2_LISTENER_REPORT:
		icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
		break;
	case MLD_LISTENER_DONE:
		icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
		break;
	default:
		break;
	}

	return (0);
}
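
/*
 * Illustrative note (added): mld_input() is reached from the ICMPv6
 * input path; the switch above keys off the standard ICMPv6 types,
 * i.e. MLD_LISTENER_QUERY (130), MLD_LISTENER_REPORT (131),
 * MLD_LISTENER_DONE (132) and MLDV2_LISTENER_REPORT (143).
 */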
/*
 * Schedule MLD timer based on various parameters; caller must ensure that
 * lock ordering is maintained as this routine acquires MLD global lock.
 */
void
mld_set_timeout(struct mld_tparams *mtp)
{
	MLD_LOCK_ASSERT_NOTHELD();
	VERIFY(mtp != NULL);

	if (mtp->qpt != 0 || mtp->it != 0 || mtp->cst != 0 || mtp->sct != 0) {
		MLD_LOCK();
		if (mtp->qpt != 0)
			querier_present_timers_running6 = 1;
		if (mtp->it != 0)
			interface_timers_running6 = 1;
		if (mtp->cst != 0)
			current_state_timers_running6 = 1;
		if (mtp->sct != 0)
			state_change_timers_running6 = 1;
		mld_sched_timeout();
		MLD_UNLOCK();
	}
}
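
#if 0
/*
 * Hypothetical caller pattern (added for illustration only): timer
 * requests are collected into a struct mld_tparams while fine-grained
 * locks are held, and mld_set_timeout() is called once they have all
 * been dropped, respecting the lock order documented at the top.
 */
static void
mld_tparams_usage_sketch(struct mld_ifinfo *mli)
{
	struct mld_tparams mtp = { 0, 0, 0, 0 };

	MLI_LOCK(mli);
	mtp.qpt = mld_set_version(mli, MLD_VERSION_1);
	MLI_UNLOCK(mli);

	mld_set_timeout(&mtp);	/* safe: no MLD locks held here */
}
#endif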
/*
 * MLD6 timer handler (per 1 second).
 */
static void
mld_timeout(void *arg)
{
#pragma unused(arg)
	struct ifqueue scq;	/* State-change packets */
	struct ifqueue qrq;	/* Query response packets */
	struct ifnet *ifp;
	struct mld_ifinfo *mli;
	struct in6_multi *inm;
	int uri_sec = 0;
	unsigned int genid = mld_mli_list_genid;

	SLIST_HEAD(, in6_multi) in6m_dthead;

	SLIST_INIT(&in6m_dthead);

	/*
	 * Update coarse-grained networking timestamp (in sec.); the idea
	 * is to piggy-back on the timeout callout to update the counter
	 * returnable via net_uptime().
	 */
	net_update_uptime();

	MLD_LOCK();

	MLD_PRINTF(("%s: qpt %d, it %d, cst %d, sct %d\n", __func__,
	    querier_present_timers_running6, interface_timers_running6,
	    current_state_timers_running6, state_change_timers_running6));

	/*
	 * MLDv1 querier present timer processing.
	 */
	if (querier_present_timers_running6) {
		querier_present_timers_running6 = 0;
		LIST_FOREACH(mli, &mli_head, mli_link) {
			MLI_LOCK(mli);
			mld_v1_process_querier_timers(mli);
			if (mli->mli_v1_timer > 0)
				querier_present_timers_running6 = 1;
			MLI_UNLOCK(mli);
		}
	}

	/*
	 * MLDv2 General Query response timer processing.
	 */
	if (interface_timers_running6) {
		MLD_PRINTF(("%s: interface timers running\n", __func__));
		interface_timers_running6 = 0;
		mli = LIST_FIRST(&mli_head);

		while (mli != NULL) {
			if (mli->mli_flags & MLIF_PROCESSED) {
				mli = LIST_NEXT(mli, mli_link);
				continue;
			}

			MLI_LOCK(mli);
			if (mli->mli_version != MLD_VERSION_2) {
				MLI_UNLOCK(mli);
				mli = LIST_NEXT(mli, mli_link);
				continue;
			}
			/*
			 * XXX The logic below ends up calling
			 * mld_dispatch_packet which can unlock mli
			 * and the global MLD lock.
			 * Therefore grab a reference on MLI and also
			 * check for generation count to see if we should
			 * iterate the list again.
			 */
			MLI_ADDREF_LOCKED(mli);

			if (mli->mli_v2_timer == 0) {
				/* Do nothing. */
			} else if (--mli->mli_v2_timer == 0) {
				if (mld_v2_dispatch_general_query(mli) > 0)
					interface_timers_running6 = 1;
			} else {
				interface_timers_running6 = 1;
			}
			mli->mli_flags |= MLIF_PROCESSED;
			MLI_UNLOCK(mli);
			MLI_REMREF(mli);

			if (genid != mld_mli_list_genid) {
				MLD_PRINTF(("%s: MLD information list changed "
				    "in the middle of iteration! Restart iteration.\n",
				    __func__));
				mli = LIST_FIRST(&mli_head);
				genid = mld_mli_list_genid;
			} else {
				mli = LIST_NEXT(mli, mli_link);
			}
		}

		LIST_FOREACH(mli, &mli_head, mli_link)
			mli->mli_flags &= ~MLIF_PROCESSED;
	}

	if (!current_state_timers_running6 &&
	    !state_change_timers_running6)
		goto out_locked;

	current_state_timers_running6 = 0;
	state_change_timers_running6 = 0;

	MLD_PRINTF(("%s: state change timers running\n", __func__));

	memset(&qrq, 0, sizeof(struct ifqueue));
	qrq.ifq_maxlen = MLD_MAX_G_GS_PACKETS;

	memset(&scq, 0, sizeof(struct ifqueue));
	scq.ifq_maxlen = MLD_MAX_STATE_CHANGE_PACKETS;

	/*
	 * MLD host report and state-change timer processing.
	 * Note: Processing a v2 group timer may remove a node.
	 */
	mli = LIST_FIRST(&mli_head);

	while (mli != NULL) {
		struct in6_multistep step;

		if (mli->mli_flags & MLIF_PROCESSED) {
			mli = LIST_NEXT(mli, mli_link);
			continue;
		}

		MLI_LOCK(mli);
		ifp = mli->mli_ifp;
		uri_sec = MLD_RANDOM_DELAY(mli->mli_uri);
		MLI_UNLOCK(mli);

		in6_multihead_lock_shared();
		IN6_FIRST_MULTI(step, inm);
		while (inm != NULL) {
			IN6M_LOCK(inm);
			if (inm->in6m_ifp != ifp)
				goto next;

			MLI_LOCK(mli);
			switch (mli->mli_version) {
			case MLD_VERSION_1:
				mld_v1_process_group_timer(inm,
				    mli->mli_version);
				break;
			case MLD_VERSION_2:
				mld_v2_process_group_timers(mli, &qrq,
				    &scq, inm, uri_sec);
				break;
			}
			MLI_UNLOCK(mli);
next:
			IN6M_UNLOCK(inm);
			IN6_NEXT_MULTI(step, inm);
		}
		in6_multihead_lock_done();

		/*
		 * XXX The logic below ends up calling
		 * mld_dispatch_packet which can unlock mli
		 * and the global MLD lock.
		 * Therefore grab a reference on MLI and also
		 * check for generation count to see if we should
		 * iterate the list again.
		 */
		MLI_LOCK(mli);
		MLI_ADDREF_LOCKED(mli);
		if (mli->mli_version == MLD_VERSION_1) {
			mld_dispatch_queue_locked(mli, &mli->mli_v1q, 0);
		} else if (mli->mli_version == MLD_VERSION_2) {
			MLI_UNLOCK(mli);
			mld_dispatch_queue_locked(NULL, &qrq, 0);
			mld_dispatch_queue_locked(NULL, &scq, 0);
			VERIFY(qrq.ifq_len == 0);
			VERIFY(scq.ifq_len == 0);
			MLI_LOCK(mli);
		}
		/*
		 * In case there are still any pending membership reports
		 * which didn't get drained at version change time.
		 */
		IF_DRAIN(&mli->mli_v1q);
		/*
		 * Release all deferred inm records, and drain any locally
		 * enqueued packets; do it even if the current MLD version
		 * for the link is no longer MLDv2, in order to handle the
		 * version change case.
		 */
		mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead);
		VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
		mli->mli_flags |= MLIF_PROCESSED;
		MLI_UNLOCK(mli);
		MLI_REMREF(mli);

		if (genid != mld_mli_list_genid) {
			MLD_PRINTF(("%s: MLD information list changed "
			    "in the middle of iteration! Restart iteration.\n",
			    __func__));
			mli = LIST_FIRST(&mli_head);
			genid = mld_mli_list_genid;
		} else {
			mli = LIST_NEXT(mli, mli_link);
		}
	}

	LIST_FOREACH(mli, &mli_head, mli_link)
		mli->mli_flags &= ~MLIF_PROCESSED;

out_locked:
	/* re-arm the timer if there's work to do */
	mld_timeout_run = 0;
	mld_sched_timeout();
	MLD_UNLOCK();

	/* Now that we've dropped all locks, release detached records */
	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
}
static void
mld_sched_timeout(void)
{
	MLD_LOCK_ASSERT_HELD();

	if (!mld_timeout_run &&
	    (querier_present_timers_running6 || current_state_timers_running6 ||
	    interface_timers_running6 || state_change_timers_running6)) {
		mld_timeout_run = 1;
		timeout(mld_timeout, NULL, hz);
	}
}
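
/*
 * Illustrative note (added): timeout(mld_timeout, NULL, hz) arms a
 * one-shot callout one second out; mld_timeout() clears mld_timeout_run
 * and calls mld_sched_timeout() again, so the per-second tick keeps
 * running only while at least one of the four "running" flags is set.
 */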
/*
 * Free the in6_multi reference(s) for this MLD lifecycle.
 *
 * Caller must be holding mli_lock.
 */
static void
mld_flush_relq(struct mld_ifinfo *mli, struct mld_in6m_relhead *in6m_dthead)
{
	struct in6_multi *inm;

again:
	MLI_LOCK_ASSERT_HELD(mli);
	inm = SLIST_FIRST(&mli->mli_relinmhead);
	if (inm != NULL) {
		int lastref;

		SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
		MLI_UNLOCK(mli);

		in6_multihead_lock_exclusive();
		IN6M_LOCK(inm);
		VERIFY(inm->in6m_nrelecnt != 0);
		inm->in6m_nrelecnt--;
		lastref = in6_multi_detach(inm);
		VERIFY(!lastref || (!(inm->in6m_debug & IFD_ATTACHED) &&
		    inm->in6m_reqcnt == 0));
		IN6M_UNLOCK(inm);
		in6_multihead_lock_done();
		/* from mli_relinmhead */
		IN6M_REMREF(inm);
		/* from in6_multihead_list */
		if (lastref) {
			/*
			 * Defer releasing our final reference, as we
			 * are holding the MLD lock at this point, and
			 * we could end up with locking issues later on
			 * (while issuing SIOCDELMULTI) when this is the
			 * final reference count. Let the caller do it
			 * when it is safe.
			 */
			MLD_ADD_DETACHED_IN6M(in6m_dthead, inm);
		}
		MLI_LOCK(mli);
		goto again;
	}
}
/*
 * Update host report group timer.
 * Will update the global pending timer flags.
 */
static void
mld_v1_process_group_timer(struct in6_multi *inm, const int mld_version)
{
#pragma unused(mld_version)
	int report_timer_expired;

	MLD_LOCK_ASSERT_HELD();
	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_HELD(inm->in6m_mli);

	if (inm->in6m_timer == 0) {
		report_timer_expired = 0;
	} else if (--inm->in6m_timer == 0) {
		report_timer_expired = 1;
	} else {
		current_state_timers_running6 = 1;
		/* caller will schedule timer */
		return;
	}

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_AWAKENING_MEMBER:
		break;
	case MLD_REPORTING_MEMBER:
		if (report_timer_expired) {
			inm->in6m_state = MLD_IDLE_MEMBER;
			(void) mld_v1_transmit_report(inm,
			    MLD_LISTENER_REPORT);
			IN6M_LOCK_ASSERT_HELD(inm);
			MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
		}
		break;
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:
	case MLD_LEAVING_MEMBER:
		break;
	}
}
/*
 * Update a group's timers for MLDv2.
 * Will update the global pending timer flags.
 * Note: Unlocked read from mli.
 */
static void
mld_v2_process_group_timers(struct mld_ifinfo *mli,
    struct ifqueue *qrq, struct ifqueue *scq,
    struct in6_multi *inm, const int uri_sec)
{
	int query_response_timer_expired;
	int state_change_retransmit_timer_expired;

	MLD_LOCK_ASSERT_HELD();
	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_HELD(mli);
	VERIFY(mli == inm->in6m_mli);

	query_response_timer_expired = 0;
	state_change_retransmit_timer_expired = 0;

	/*
	 * During a transition from compatibility mode back to MLDv2,
	 * a group record in REPORTING state may still have its group
	 * timer active. This is a no-op in this function; it is easier
	 * to deal with it here than to complicate the timeout path.
	 */
	if (inm->in6m_timer == 0) {
		query_response_timer_expired = 0;
	} else if (--inm->in6m_timer == 0) {
		query_response_timer_expired = 1;
	} else {
		current_state_timers_running6 = 1;
		/* caller will schedule timer */
	}

	if (inm->in6m_sctimer == 0) {
		state_change_retransmit_timer_expired = 0;
	} else if (--inm->in6m_sctimer == 0) {
		state_change_retransmit_timer_expired = 1;
	} else {
		state_change_timers_running6 = 1;
		/* caller will schedule timer */
	}

	/* We are in timer callback, so be quick about it. */
	if (!state_change_retransmit_timer_expired &&
	    !query_response_timer_expired)
		return;

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_AWAKENING_MEMBER:
	case MLD_IDLE_MEMBER:
		break;
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:
		/*
		 * Respond to a previously pending Group-Specific
		 * or Group-and-Source-Specific query by enqueueing
		 * the appropriate Current-State report for
		 * immediate transmission.
		 */
		if (query_response_timer_expired) {
			int retval;

			retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
			    (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER),
			    0);
			MLD_PRINTF(("%s: enqueue record = %d\n",
			    __func__, retval));
			inm->in6m_state = MLD_REPORTING_MEMBER;
			in6m_clear_recorded(inm);
		}
		/* FALLTHROUGH */
	case MLD_REPORTING_MEMBER:
	case MLD_LEAVING_MEMBER:
		if (state_change_retransmit_timer_expired) {
			/*
			 * State-change retransmission timer fired.
			 * If there are any further pending retransmissions,
			 * set the global pending state-change flag, and
			 * reset the timer.
			 */
			if (--inm->in6m_scrv > 0) {
				inm->in6m_sctimer = uri_sec;
				state_change_timers_running6 = 1;
				/* caller will schedule timer */
			}
			/*
			 * Retransmit the previously computed state-change
			 * report. If there are no further pending
			 * retransmissions, the mbuf queue will be consumed.
			 * Update T0 state to T1 as we have now sent
			 * a state-change.
			 */
			(void) mld_v2_merge_state_changes(inm, scq);

			in6m_commit(inm);
			MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
			    ip6_sprintf(&inm->in6m_addr),
			    if_name(inm->in6m_ifp)));

			/*
			 * If we are leaving the group for good, make sure
			 * we release MLD's reference to it.
			 * This release must be deferred using a SLIST,
			 * as we are called from a loop which traverses
			 * the in_ifmultiaddr TAILQ.
			 */
			if (inm->in6m_state == MLD_LEAVING_MEMBER &&
			    inm->in6m_scrv == 0) {
				inm->in6m_state = MLD_NOT_MEMBER;
				/*
				 * A reference has already been held in
				 * mld_final_leave() for this inm, so
				 * no need to hold another one. We also
				 * bumped up its request count then, so
				 * that it stays in in6_multihead. Both
				 * of them will be released when it is
				 * dequeued later on.
				 */
				VERIFY(inm->in6m_nrelecnt != 0);
				SLIST_INSERT_HEAD(&mli->mli_relinmhead,
				    inm, in6m_nrele);
			}
		}
		break;
	}
}
/*
 * Switch to a different version on the given interface,
 * as per Section 9.12.
 */
static uint32_t
mld_set_version(struct mld_ifinfo *mli, const int mld_version)
{
	int old_version_timer;

	MLI_LOCK_ASSERT_HELD(mli);

	MLD_PRINTF(("%s: switching to v%d on ifp 0x%llx(%s)\n", __func__,
	    mld_version, (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp),
	    if_name(mli->mli_ifp)));

	if (mld_version == MLD_VERSION_1) {
		/*
		 * Compute the "Older Version Querier Present" timer as per
		 * Section 9.12, in seconds.
		 */
		old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
		mli->mli_v1_timer = old_version_timer;
	}

	if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) {
		mli->mli_version = MLD_VERSION_1;
		mld_v2_cancel_link_timers(mli);
	}

	MLI_LOCK_ASSERT_HELD(mli);

	return (mli->mli_v1_timer);
}
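
/*
 * Worked example (added, assuming the RFC 3810 defaults installed by
 * mli_initvar(): robustness MLD_RV_INIT = 2, query interval
 * MLD_QI_INIT = 125 s, query response interval MLD_QRI_INIT = 10 s):
 * the Older Version Querier Present timer computed above is
 * (2 * 125) + 10 = 260 seconds before the link reverts to MLDv2.
 */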
/*
 * Cancel pending MLDv2 timers for the given link and all groups
 * joined on it; state-change, general-query, and group-query timers.
 *
 * Only ever called on a transition from v2 to Compatibility mode. Kill
 * the timers stone dead (this may be expensive for large N groups), they
 * will be restarted if Compatibility Mode deems that they must be due to
 * query processing.
 */
static void
mld_v2_cancel_link_timers(struct mld_ifinfo *mli)
{
	struct ifnet *ifp;
	struct in6_multi *inm;
	struct in6_multistep step;

	MLI_LOCK_ASSERT_HELD(mli);

	MLD_PRINTF(("%s: cancel v2 timers on ifp 0x%llx(%s)\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp), if_name(mli->mli_ifp)));

	/*
	 * Stop the v2 General Query Response on this link stone dead.
	 * If timer is woken up due to interface_timers_running6,
	 * the flag will be cleared if there are no pending link timers.
	 */
	mli->mli_v2_timer = 0;

	/*
	 * Now clear the current-state and state-change report timers
	 * for all memberships scoped to this link.
	 */
	ifp = mli->mli_ifp;
	MLI_UNLOCK(mli);

	in6_multihead_lock_shared();
	IN6_FIRST_MULTI(step, inm);
	while (inm != NULL) {
		IN6M_LOCK(inm);
		if (inm->in6m_ifp != ifp)
			goto next;

		switch (inm->in6m_state) {
		case MLD_NOT_MEMBER:
		case MLD_SILENT_MEMBER:
		case MLD_IDLE_MEMBER:
		case MLD_LAZY_MEMBER:
		case MLD_SLEEPING_MEMBER:
		case MLD_AWAKENING_MEMBER:
			/*
			 * These states are either not relevant in v2 mode,
			 * or are unreported. Do nothing.
			 */
			break;
		case MLD_LEAVING_MEMBER:
			/*
			 * If we are leaving the group and switching
			 * version, we need to release the final
			 * reference held for issuing the INCLUDE {}.
			 * During mld_final_leave(), we bumped up both the
			 * request and reference counts. Since we cannot
			 * call in6_multi_detach() here, defer this task to
			 * the timer routine.
			 */
			VERIFY(inm->in6m_nrelecnt != 0);
			MLI_LOCK(mli);
			SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
			    in6m_nrele);
			MLI_UNLOCK(mli);
			/* FALLTHROUGH */
		case MLD_G_QUERY_PENDING_MEMBER:
		case MLD_SG_QUERY_PENDING_MEMBER:
			in6m_clear_recorded(inm);
			/* FALLTHROUGH */
		case MLD_REPORTING_MEMBER:
			inm->in6m_state = MLD_REPORTING_MEMBER;
			break;
		}
		/*
		 * Always clear state-change and group report timers.
		 * Free any pending MLDv2 state-change records.
		 */
		inm->in6m_sctimer = 0;
		inm->in6m_timer = 0;
		IF_DRAIN(&inm->in6m_scq);
next:
		IN6M_UNLOCK(inm);
		IN6_NEXT_MULTI(step, inm);
	}
	in6_multihead_lock_done();

	MLI_LOCK(mli);
}
/*
 * Update the Older Version Querier Present timers for a link.
 * See Section 9.12 of RFC 3810.
 */
static void
mld_v1_process_querier_timers(struct mld_ifinfo *mli)
{
	MLI_LOCK_ASSERT_HELD(mli);

	if (mld_v2enable && mli->mli_version != MLD_VERSION_2 &&
	    --mli->mli_v1_timer == 0) {
		/*
		 * MLDv1 Querier Present timer expired; revert to MLDv2.
		 */
		MLD_PRINTF(("%s: transition from v%d -> v%d on 0x%llx(%s)\n",
		    __func__, mli->mli_version, MLD_VERSION_2,
		    (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp),
		    if_name(mli->mli_ifp)));
		mli->mli_version = MLD_VERSION_2;
	}
}
/*
 * Transmit an MLDv1 report immediately.
 */
static int
mld_v1_transmit_report(struct in6_multi *in6m, const int type)
{
	struct ifnet		*ifp;
	struct in6_ifaddr	*ia;
	struct ip6_hdr		*ip6;
	struct mbuf		*mh, *md;
	struct mld_hdr		*mld;
	int			 error = 0;

	IN6M_LOCK_ASSERT_HELD(in6m);
	MLI_LOCK_ASSERT_HELD(in6m->in6m_mli);

	ifp = in6m->in6m_ifp;
	/* ia may be NULL if link-local address is tentative. */
	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);

	MGETHDR(mh, M_DONTWAIT, MT_HEADER);
	if (mh == NULL) {
		if (ia != NULL)
			IFA_REMREF(&ia->ia_ifa);
		return (ENOMEM);
	}
	MGET(md, M_DONTWAIT, MT_DATA);
	if (md == NULL) {
		if (ia != NULL)
			IFA_REMREF(&ia->ia_ifa);
		m_free(mh);
		return (ENOMEM);
	}
	mh->m_next = md;

	/*
	 * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so
	 * that ether_output() does not need to allocate another mbuf
	 * for the header in the most common case.
	 */
	MH_ALIGN(mh, sizeof(struct ip6_hdr));
	mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr);
	mh->m_len = sizeof(struct ip6_hdr);

	ip6 = mtod(mh, struct ip6_hdr *);
	ip6->ip6_flow = 0;
	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
	ip6->ip6_vfc |= IPV6_VERSION;
	ip6->ip6_nxt = IPPROTO_ICMPV6;
	if (ia != NULL)
		IFA_LOCK(&ia->ia_ifa);
	ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
	if (ia != NULL) {
		IFA_UNLOCK(&ia->ia_ifa);
		IFA_REMREF(&ia->ia_ifa);
		ia = NULL;
	}
	ip6->ip6_dst = in6m->in6m_addr;

	md->m_len = sizeof(struct mld_hdr);
	mld = mtod(md, struct mld_hdr *);
	mld->mld_type = type;
	mld->mld_code = 0;
	mld->mld_cksum = 0;
	mld->mld_maxdelay = 0;
	mld->mld_reserved = 0;
	mld->mld_addr = in6m->in6m_addr;
	in6_clearscope(&mld->mld_addr);
	mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
	    sizeof(struct ip6_hdr), sizeof(struct mld_hdr));

	mld_save_context(mh, ifp);
	mh->m_flags |= M_MLDV1;

	/*
	 * Due to the fact that at this point we are possibly holding
	 * in6_multihead_lock in shared or exclusive mode, we can't call
	 * mld_dispatch_packet() here since that will eventually call
	 * ip6_output(), which will try to lock in6_multihead_lock and cause
	 * a deadlock.
	 * Instead we defer the work to the mld_timeout() thread, thus
	 * avoiding unlocking in6_multihead_lock here.
	 */
	if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) {
		MLD_PRINTF(("%s: v1 outbound queue full\n", __func__));
		error = ENOMEM;
		m_freem(mh);
	} else {
		IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh);
	}

	return (error);
}
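/*
 * For reference (a sketch; sizes per RFC 2710): the packet queued above
 * is a two-mbuf chain,
 *
 *	mh: [ struct ip6_hdr, 40 bytes ]
 *	md: [ struct mld_hdr, 24 bytes ]
 *
 * and the ICMPv6 checksum covers only the MLD header plus the IPv6
 * pseudo-header, hence the offset/length pair passed to in6_cksum().
 * The hop limit of 1 and the Router Alert hop-by-hop option are added
 * at send time via mld_po (set up in mld_init() below).
 */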
/*
 * Process a state change from the upper layer for the given IPv6 group.
 *
 * Each socket holds a reference on the in6_multi in its own ip_moptions.
 * The socket layer will have made the necessary updates to the group
 * state; it is now up to MLD to issue a state change report if there
 * has been any change between T0 (when the last state-change was issued)
 * and T1 (now).
 *
 * We use the MLDv2 state machine at group level. The MLD module
 * however makes the decision as to which MLD protocol version to speak.
 * A state change *from* INCLUDE {} always means an initial join.
 * A state change *to* INCLUDE {} always means a final leave.
 *
 * If delay is non-zero, and the state change is an initial multicast
 * join, the state change report will be delayed by 'delay' ticks
 * in units of seconds if MLDv1 is active on the link; otherwise
 * the initial MLDv2 state change report will be delayed by whichever
 * is sooner, a pending state-change timer or delay itself.
 */
int
mld_change_state(struct in6_multi *inm, struct mld_tparams *mtp,
    const int delay)
{
	struct mld_ifinfo	*mli;
	struct ifnet		*ifp;
	int			 error = 0;

	VERIFY(mtp != NULL);
	bzero(mtp, sizeof (*mtp));

	IN6M_LOCK_ASSERT_HELD(inm);
	VERIFY(inm->in6m_mli != NULL);
	MLI_LOCK_ASSERT_NOTHELD(inm->in6m_mli);

	/*
	 * Try to detect if the upper layer just asked us to change state
	 * for an interface which has now gone away.
	 */
	VERIFY(inm->in6m_ifma != NULL);
	ifp = inm->in6m_ifma->ifma_ifp;

	/*
	 * Sanity check that netinet6's notion of ifp is the same as net's.
	 */
	VERIFY(inm->in6m_ifp == ifp);

	mli = MLD_IFINFO(ifp);
	VERIFY(mli != NULL);

	/*
	 * If we detect a state transition to or from MCAST_UNDEFINED
	 * for this group, then we are starting or finishing an MLD
	 * life cycle for this group.
	 */
	if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) {
		MLD_PRINTF(("%s: inm transition %d -> %d\n", __func__,
		    inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode));
		if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
			MLD_PRINTF(("%s: initial join\n", __func__));
			error = mld_initial_join(inm, mli, mtp, delay);
			goto out;
		} else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
			MLD_PRINTF(("%s: final leave\n", __func__));
			mld_final_leave(inm, mli, mtp);
			goto out;
		}
	} else {
		MLD_PRINTF(("%s: filter set change\n", __func__));
	}

	error = mld_handle_state_change(inm, mli, mtp);
out:
	return (error);
}
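/*
 * A minimal caller sketch (hypothetical; based on how the in6 multicast
 * socket layer is expected to drive this module, and assuming the
 * mld_set_timeout() helper exported alongside mld_change_state()):
 * commit the new filter state to T1, let MLD compute and schedule the
 * report, then arm the timers outside the in6_multi lock:
 *
 *	struct mld_tparams mtp;
 *
 *	IN6M_LOCK(inm);
 *	(void) mld_change_state(inm, &mtp, 0);
 *	IN6M_UNLOCK(inm);
 *	mld_set_timeout(&mtp);
 *
 * mtp is bzero'ed on entry above, so the caller need not initialize it.
 */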
/*
 * Perform the initial join for an MLD group.
 *
 * When joining a group:
 *  If the group should have its MLD traffic suppressed, do nothing.
 *  MLDv1 starts sending MLDv1 host membership reports.
 *  MLDv2 will schedule an MLDv2 state-change report containing the
 *  initial state of the membership.
 *
 * If the delay argument is non-zero, then we must delay sending the
 * initial state change for delay ticks (in units of seconds).
 */
static int
mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli,
    struct mld_tparams *mtp, const int delay)
{
	struct ifnet		*ifp;
	struct ifqueue		*ifq;
	int			 error, retval, syncstates;
	int			 odelay;

	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_NOTHELD(mli);
	VERIFY(mtp != NULL);

	MLD_PRINTF(("%s: initial join %s on ifp 0x%llx(%s)\n",
	    __func__, ip6_sprintf(&inm->in6m_addr),
	    (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
	    if_name(inm->in6m_ifp)));

	error = 0;
	syncstates = 1;

	ifp = inm->in6m_ifp;

	MLI_LOCK(mli);
	VERIFY(mli->mli_ifp == ifp);

	/*
	 * Avoid MLD if group is:
	 * 1. Joined on loopback, OR
	 * 2. On a link that is marked MLIF_SILENT, OR
	 * 3. rdar://problem/19227650 Is link local scoped and
	 *    on cellular interface, OR
	 * 4. Is a type that should not be reported (node local
	 *    or all node link local multicast).
	 * All other groups enter the appropriate state machine
	 * for the version in use on this link.
	 */
	if ((ifp->if_flags & IFF_LOOPBACK) ||
	    (mli->mli_flags & MLIF_SILENT) ||
	    (IFNET_IS_CELLULAR(ifp) &&
	    IN6_IS_ADDR_MC_LINKLOCAL(&inm->in6m_addr)) ||
	    !mld_is_addr_reported(&inm->in6m_addr)) {
		MLD_PRINTF(("%s: not kicking state machine for silent group\n",
		    __func__));
		inm->in6m_state = MLD_SILENT_MEMBER;
		inm->in6m_timer = 0;
	} else {
		/*
		 * Deal with overlapping in6_multi lifecycle.
		 * If this group was LEAVING, then make sure
		 * we drop the reference we picked up to keep the
		 * group around for the final INCLUDE {} enqueue.
		 * Since we cannot call in6_multi_detach() here,
		 * defer this task to the timer routine.
		 */
		if (mli->mli_version == MLD_VERSION_2 &&
		    inm->in6m_state == MLD_LEAVING_MEMBER) {
			VERIFY(inm->in6m_nrelecnt != 0);
			SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
			    in6m_nrele);
		}

		inm->in6m_state = MLD_REPORTING_MEMBER;

		switch (mli->mli_version) {
		case MLD_VERSION_1:
			/*
			 * If a delay was provided, only use it if
			 * it is greater than the delay normally
			 * used for an MLDv1 state change report,
			 * and delay sending the initial MLDv1 report
			 * by not transitioning to the IDLE state.
			 */
			odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI);
			if (delay) {
				inm->in6m_timer = max(delay, odelay);
				mtp->cst = 1;
			} else {
				inm->in6m_state = MLD_IDLE_MEMBER;
				error = mld_v1_transmit_report(inm,
				    MLD_LISTENER_REPORT);

				IN6M_LOCK_ASSERT_HELD(inm);
				MLI_LOCK_ASSERT_HELD(mli);

				if (error == 0) {
					inm->in6m_timer = odelay;
					mtp->cst = 1;
				}
			}
			break;

		case MLD_VERSION_2:
			/*
			 * Defer update of T0 to T1, until the first copy
			 * of the state change has been transmitted.
			 */
			syncstates = 0;

			/*
			 * Immediately enqueue a State-Change Report for
			 * this interface, freeing any previous reports.
			 * Don't kick the timers if there is nothing to do,
			 * or if an error occurred.
			 */
			ifq = &inm->in6m_scq;
			IF_DRAIN(ifq);
			retval = mld_v2_enqueue_group_record(ifq, inm, 1,
			    0, 0, (mli->mli_flags & MLIF_USEALLOW));
			mtp->cst = (ifq->ifq_len > 0);
			MLD_PRINTF(("%s: enqueue record = %d\n",
			    __func__, retval));
			if (retval <= 0) {
				error = retval * -1;
				break;
			}

			/*
			 * Schedule transmission of pending state-change
			 * report up to RV times for this link. The timer
			 * will fire at the next mld_timeout (1 second),
			 * giving us an opportunity to merge the reports.
			 *
			 * If a delay was provided to this function, only
			 * use this delay if sooner than the existing one.
			 */
			VERIFY(mli->mli_rv > 1);
			inm->in6m_scrv = mli->mli_rv;
			if (delay) {
				if (inm->in6m_sctimer > 1) {
					inm->in6m_sctimer =
					    min(inm->in6m_sctimer, delay);
				} else {
					inm->in6m_sctimer = delay;
				}
			} else {
				inm->in6m_sctimer = 1;
			}
			mtp->sct = 1;
			error = 0;
			break;
		}
	}
	MLI_UNLOCK(mli);

	/*
	 * Only update the T0 state if state change is atomic,
	 * i.e. we don't need to wait for a timer to fire before we
	 * can consider the state change to have been communicated.
	 */
	if (syncstates) {
		in6m_commit(inm);
		MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
		    ip6_sprintf(&inm->in6m_addr),
		    if_name(inm->in6m_ifp)));
	}

	return (error);
}
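/*
 * Worked example of the MLDv2 delay merge above (illustrative): with
 * delay = 5, an idle group (in6m_sctimer == 0) is armed to fire in 5
 * seconds, while a group already counting down from 3 keeps
 * min(3, 5) == 3; a later join can therefore never postpone an
 * already-pending state-change report.
 */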
/*
 * Issue an intermediate state change during the life-cycle.
 */
static int
mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli,
    struct mld_tparams *mtp)
{
	struct ifnet		*ifp;
	int			 retval = 0;

	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_NOTHELD(mli);
	VERIFY(mtp != NULL);

	MLD_PRINTF(("%s: state change for %s on ifp 0x%llx(%s)\n",
	    __func__, ip6_sprintf(&inm->in6m_addr),
	    (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
	    if_name(inm->in6m_ifp)));

	ifp = inm->in6m_ifp;

	MLI_LOCK(mli);
	VERIFY(mli->mli_ifp == ifp);

	if ((ifp->if_flags & IFF_LOOPBACK) ||
	    (mli->mli_flags & MLIF_SILENT) ||
	    !mld_is_addr_reported(&inm->in6m_addr) ||
	    (mli->mli_version != MLD_VERSION_2)) {
		MLI_UNLOCK(mli);
		if (!mld_is_addr_reported(&inm->in6m_addr)) {
			MLD_PRINTF(("%s: not kicking state machine for silent "
			    "group\n", __func__));
		}
		MLD_PRINTF(("%s: nothing to do\n", __func__));
		in6m_commit(inm);
		MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
		    ip6_sprintf(&inm->in6m_addr),
		    if_name(inm->in6m_ifp)));
		goto done;
	}

	IF_DRAIN(&inm->in6m_scq);

	retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
	    (mli->mli_flags & MLIF_USEALLOW));
	mtp->cst = (inm->in6m_scq.ifq_len > 0);
	MLD_PRINTF(("%s: enqueue record = %d\n", __func__, retval));
	if (retval <= 0) {
		MLI_UNLOCK(mli);
		retval *= -1;
		goto done;
	}
	/*
	 * If record(s) were enqueued, start the state-change
	 * report timer for this group.
	 */
	inm->in6m_scrv = mli->mli_rv;
	inm->in6m_sctimer = 1;
	mtp->sct = 1;
	MLI_UNLOCK(mli);

done:
	return (retval);
}
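/*
 * Note on the convention above: mld_v2_enqueue_group_record() returns
 * a negative errno on failure, so retval *= -1 hands a conventional
 * positive errno back to mld_change_state().
 */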
/*
 * Perform the final leave for a multicast address.
 *
 * When leaving a group:
 *  MLDv1 sends a DONE message, if and only if we are the reporter.
 *  MLDv2 enqueues a state-change report containing a transition
 *  to INCLUDE {} for immediate transmission.
 */
static void
mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli,
    struct mld_tparams *mtp)
{
	int syncstates = 1;

	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_NOTHELD(mli);
	VERIFY(mtp != NULL);

	MLD_PRINTF(("%s: final leave %s on ifp 0x%llx(%s)\n",
	    __func__, ip6_sprintf(&inm->in6m_addr),
	    (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
	    if_name(inm->in6m_ifp)));

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_LEAVING_MEMBER:
		/* Already leaving or left; do nothing. */
		MLD_PRINTF(("%s: not kicking state machine for silent group\n",
		    __func__));
		break;
	case MLD_REPORTING_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:
		MLI_LOCK(mli);
		if (mli->mli_version == MLD_VERSION_1) {
			if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
			    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
				panic("%s: MLDv2 state reached, not MLDv2 "
				    "mode\n", __func__);
				/* NOTREACHED */
			}
			/* schedule timer if enqueue is successful */
			mtp->cst = (mld_v1_transmit_report(inm,
			    MLD_LISTENER_DONE) == 0);

			IN6M_LOCK_ASSERT_HELD(inm);
			MLI_LOCK_ASSERT_HELD(mli);

			inm->in6m_state = MLD_NOT_MEMBER;
		} else if (mli->mli_version == MLD_VERSION_2) {
			/*
			 * Stop group timer and all pending reports.
			 * Immediately enqueue a state-change report
			 * TO_IN {} to be sent on the next timeout,
			 * giving us an opportunity to merge reports.
			 */
			IF_DRAIN(&inm->in6m_scq);
			inm->in6m_timer = 0;
			inm->in6m_scrv = mli->mli_rv;
			MLD_PRINTF(("%s: Leaving %s/%s with %d "
			    "pending retransmissions.\n", __func__,
			    ip6_sprintf(&inm->in6m_addr),
			    if_name(inm->in6m_ifp),
			    inm->in6m_scrv));
			if (inm->in6m_scrv == 0) {
				inm->in6m_state = MLD_NOT_MEMBER;
				inm->in6m_sctimer = 0;
			} else {
				int retval;
				/*
				 * Stick around in the in6_multihead list;
				 * the final detach will be issued by
				 * mld_v2_process_group_timers() when
				 * the retransmit timer expires.
				 */
				IN6M_ADDREF_LOCKED(inm);
				VERIFY(inm->in6m_debug & IFD_ATTACHED);
				inm->in6m_reqcnt++;
				VERIFY(inm->in6m_reqcnt >= 1);
				inm->in6m_nrelecnt++;
				VERIFY(inm->in6m_nrelecnt != 0);

				retval = mld_v2_enqueue_group_record(
				    &inm->in6m_scq, inm, 1, 0, 0,
				    (mli->mli_flags & MLIF_USEALLOW));
				mtp->cst = (inm->in6m_scq.ifq_len > 0);
				KASSERT(retval != 0,
				    ("%s: enqueue record = %d\n", __func__,
				    retval));

				inm->in6m_state = MLD_LEAVING_MEMBER;
				inm->in6m_sctimer = 1;
				mtp->sct = 1;
				syncstates = 0;
			}
		}
		MLI_UNLOCK(mli);
		break;
	case MLD_LAZY_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_AWAKENING_MEMBER:
		/* Our reports are suppressed; do nothing. */
		break;
	}

	if (syncstates) {
		in6m_commit(inm);
		MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
		    ip6_sprintf(&inm->in6m_addr),
		    if_name(inm->in6m_ifp)));
		inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
		MLD_PRINTF(("%s: T1 now MCAST_UNDEFINED for 0x%llx/%s\n",
		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(&inm->in6m_addr),
		    if_name(inm->in6m_ifp)));
	}
}
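/*
 * Illustrative MLDv2 leave timeline for the path above, assuming
 * mli_rv == 2 (a sketch):
 *
 *	t=0  enqueue TO_IN {}, in6m_scrv = 2, in6m_sctimer = 1,
 *	     state = MLD_LEAVING_MEMBER, extra in6m reference taken
 *	t=1  mld_timeout() transmits the report, in6m_scrv -> 1
 *	t=2  retransmission sent, in6m_scrv -> 0; the deferred detach
 *	     then releases the reference counted in in6m_nrelecnt.
 */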
/*
 * Enqueue an MLDv2 group record to the given output queue.
 *
 * If is_state_change is zero, a current-state record is appended.
 * If is_state_change is non-zero, a state-change report is appended.
 *
 * If is_group_query is non-zero, an mbuf packet chain is allocated.
 * If is_group_query is zero, and if there is a packet with free space
 * at the tail of the queue, it will be appended to providing there
 * is enough free space.
 * Otherwise a new mbuf packet chain is allocated.
 *
 * If is_source_query is non-zero, each source is checked to see if
 * it was recorded for a Group-Source query, and will be omitted if
 * it is not both in-mode and recorded.
 *
 * If use_block_allow is non-zero, state change reports for initial join
 * and final leave, on an inclusive mode group with a source list, will be
 * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively.
 *
 * The function will attempt to allocate leading space in the packet
 * for the IPv6+ICMP headers to be prepended without fragmenting the chain.
 *
 * If successful the size of all data appended to the queue is returned,
 * otherwise an error code less than zero is returned, or zero if
 * no record(s) were appended.
 */
static int
mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm,
    const int is_state_change, const int is_group_query,
    const int is_source_query, const int use_block_allow)
{
	struct mldv2_record	 mr;
	struct mldv2_record	*pmr;
	struct ifnet		*ifp;
	struct ip6_msource	*ims, *nims;
	struct mbuf		*m0, *m, *md;
	int			 error, is_filter_list_change;
	int			 minrec0len, m0srcs, msrcs, nbytes, off;
	int			 record_has_sources;
	int			 now;
	int			 type;
	uint8_t			 mode;

	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_HELD(inm->in6m_mli);

	error = 0;
	ifp = inm->in6m_ifp;
	is_filter_list_change = 0;
	m = NULL;
	m0 = NULL;
	m0srcs = 0;
	msrcs = 0;
	nbytes = 0;
	nims = NULL;
	record_has_sources = 1;
	pmr = NULL;
	type = MLD_DO_NOTHING;
	mode = inm->in6m_st[1].iss_fmode;

	/*
	 * If we did not transition out of ASM mode during t0->t1,
	 * and there are no source nodes to process, we can skip
	 * the generation of source records.
	 */
	if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
	    inm->in6m_nsrc == 0)
		record_has_sources = 0;

	if (is_state_change) {
		/*
		 * Queue a state change record.
		 * If the mode did not change, and there are non-ASM
		 * listeners or source filters present,
		 * we potentially need to issue two records for the group.
		 * If there are ASM listeners, and there was no filter
		 * mode transition of any kind, do nothing.
		 *
		 * If we are transitioning to MCAST_UNDEFINED, we need
		 * not send any sources. A transition to/from this state is
		 * considered inclusive with some special treatment.
		 *
		 * If we are rewriting initial joins/leaves to use
		 * ALLOW/BLOCK, and the group's membership is inclusive,
		 * we need to send sources in all cases.
		 */
		if (mode != inm->in6m_st[0].iss_fmode) {
			if (mode == MCAST_EXCLUDE) {
				MLD_PRINTF(("%s: change to EXCLUDE\n",
				    __func__));
				type = MLD_CHANGE_TO_EXCLUDE_MODE;
			} else {
				MLD_PRINTF(("%s: change to INCLUDE\n",
				    __func__));
				if (use_block_allow) {
					/*
					 * XXX
					 * Here we're interested in state
					 * edges either direction between
					 * MCAST_UNDEFINED and MCAST_INCLUDE.
					 * Perhaps we should just check
					 * the group state, rather than
					 * the filter mode.
					 */
					if (mode == MCAST_UNDEFINED) {
						type = MLD_BLOCK_OLD_SOURCES;
					} else {
						type = MLD_ALLOW_NEW_SOURCES;
					}
				} else {
					type = MLD_CHANGE_TO_INCLUDE_MODE;
					if (mode == MCAST_UNDEFINED)
						record_has_sources = 0;
				}
			}
		} else {
			if (record_has_sources) {
				is_filter_list_change = 1;
			} else {
				type = MLD_DO_NOTHING;
			}
		}
	} else {
		/*
		 * Queue a current state record.
		 */
		if (mode == MCAST_EXCLUDE) {
			type = MLD_MODE_IS_EXCLUDE;
		} else if (mode == MCAST_INCLUDE) {
			type = MLD_MODE_IS_INCLUDE;
			VERIFY(inm->in6m_st[1].iss_asm == 0);
		}
	}

	/*
	 * Generate the filter list changes using a separate function.
	 */
	if (is_filter_list_change)
		return (mld_v2_enqueue_filter_change(ifq, inm));

	if (type == MLD_DO_NOTHING) {
		MLD_PRINTF(("%s: nothing to do for %s/%s\n",
		    __func__, ip6_sprintf(&inm->in6m_addr),
		    if_name(inm->in6m_ifp)));
		return (0);
	}

	/*
	 * If any sources are present, we must be able to fit at least
	 * one in the trailing space of the tail packet's mbuf,
	 * ideally more.
	 */
	minrec0len = sizeof(struct mldv2_record);
	if (record_has_sources)
		minrec0len += sizeof(struct in6_addr);
	MLD_PRINTF(("%s: queueing %s for %s/%s\n", __func__,
	    mld_rec_type_to_str(type),
	    ip6_sprintf(&inm->in6m_addr),
	    if_name(inm->in6m_ifp)));

	/*
	 * Check if we have a packet in the tail of the queue for this
	 * group into which the first group record for this group will fit.
	 * Otherwise allocate a new packet.
	 * Always allocate leading space for IP6+RA+ICMPV6+REPORT.
	 * Note: Group records for G/GSR query responses MUST be sent
	 * in their own packet.
	 */
	m0 = ifq->ifq_tail;
	if (!is_group_query &&
	    m0 != NULL &&
	    (m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
	    (m0->m_pkthdr.len + minrec0len) <
	    (ifp->if_mtu - MLD_MTUSPACE)) {
		m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
		    sizeof(struct mldv2_record)) /
		    sizeof(struct in6_addr);
		m = m0;
		MLD_PRINTF(("%s: use existing packet\n", __func__));
	} else {
		if (IF_QFULL(ifq)) {
			MLD_PRINTF(("%s: outbound queue full\n", __func__));
			return (-ENOMEM);
		}
		m = NULL;
		m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
		    sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
		if (!is_state_change && !is_group_query)
			m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (-ENOMEM);

		mld_save_context(m, ifp);

		MLD_PRINTF(("%s: allocated first packet\n", __func__));
	}

	/*
	 * Append group record.
	 * If we have sources, we don't know how many yet.
	 */
	mr.mr_type = type;
	mr.mr_datalen = 0;
	mr.mr_numsrc = 0;
	mr.mr_addr = inm->in6m_addr;
	in6_clearscope(&mr.mr_addr);
	if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
		if (m != m0)
			m_freem(m);
		MLD_PRINTF(("%s: m_append() failed.\n", __func__));
		return (-ENOMEM);
	}
	nbytes += sizeof(struct mldv2_record);

	/*
	 * Append as many sources as will fit in the first packet.
	 * If we are appending to a new packet, the chain allocation
	 * may potentially use clusters; use m_getptr() in this case.
	 * If we are appending to an existing packet, we need to obtain
	 * a pointer to the group record after m_append(), in case a new
	 * mbuf was allocated.
	 *
	 * Only append sources which are in-mode at t1. If we are
	 * transitioning to MCAST_UNDEFINED state on the group, and
	 * use_block_allow is zero, do not include source entries.
	 * Otherwise, we need to include this source in the report.
	 *
	 * Only report recorded sources in our filter set when responding
	 * to a group-source query.
	 */
	if (record_has_sources) {
		if (m == m0) {
			md = m_last(m);
			pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
			    md->m_len - nbytes);
		} else {
			md = m_getptr(m, 0, &off);
			pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
			    off);
		}
		msrcs = 0;
		RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs,
		    nims) {
			MLD_PRINTF(("%s: visit node %s\n", __func__,
			    ip6_sprintf(&ims->im6s_addr)));
			now = im6s_get_mode(inm, ims, 1);
			MLD_PRINTF(("%s: node is %d\n", __func__, now));
			if ((now != mode) ||
			    (now == mode &&
			    (!use_block_allow && mode == MCAST_UNDEFINED))) {
				MLD_PRINTF(("%s: skip node\n", __func__));
				continue;
			}
			if (is_source_query && ims->im6s_stp == 0) {
				MLD_PRINTF(("%s: skip unrecorded node\n",
				    __func__));
				continue;
			}
			MLD_PRINTF(("%s: append node\n", __func__));
			if (!m_append(m, sizeof(struct in6_addr),
			    (void *)&ims->im6s_addr)) {
				if (m != m0)
					m_freem(m);
				MLD_PRINTF(("%s: m_append() failed.\n",
				    __func__));
				return (-ENOMEM);
			}
			nbytes += sizeof(struct in6_addr);
			++msrcs;
			if (msrcs == m0srcs)
				break;
		}
		MLD_PRINTF(("%s: msrcs is %d this packet\n", __func__,
		    msrcs));
		pmr->mr_numsrc = htons(msrcs);
		nbytes += (msrcs * sizeof(struct in6_addr));
	}

	if (is_source_query && msrcs == 0) {
		MLD_PRINTF(("%s: no recorded sources to report\n", __func__));
		if (m != m0)
			m_freem(m);
		return (0);
	}

	/*
	 * We are good to go with first packet.
	 */
	if (m != m0) {
		MLD_PRINTF(("%s: enqueueing first packet\n", __func__));
		m->m_pkthdr.vt_nrecs = 1;
		IF_ENQUEUE(ifq, m);
	} else {
		m->m_pkthdr.vt_nrecs++;
	}
	/*
	 * No further work needed if no source list in packet(s).
	 */
	if (!record_has_sources)
		return (nbytes);

	/*
	 * Whilst sources remain to be announced, we need to allocate
	 * a new packet and fill out as many sources as will fit.
	 * Always try for a cluster first.
	 */
	while (nims != NULL) {
		if (IF_QFULL(ifq)) {
			MLD_PRINTF(("%s: outbound queue full\n", __func__));
			return (-ENOMEM);
		}
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (-ENOMEM);
		mld_save_context(m, ifp);
		md = m_getptr(m, 0, &off);
		pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
		MLD_PRINTF(("%s: allocated next packet\n", __func__));

		if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
			if (m != m0)
				m_freem(m);
			MLD_PRINTF(("%s: m_append() failed.\n", __func__));
			return (-ENOMEM);
		}
		m->m_pkthdr.vt_nrecs = 1;
		nbytes += sizeof(struct mldv2_record);

		m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
		    sizeof(struct mldv2_record)) / sizeof(struct in6_addr);

		msrcs = 0;
		RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
			MLD_PRINTF(("%s: visit node %s\n",
			    __func__, ip6_sprintf(&ims->im6s_addr)));
			now = im6s_get_mode(inm, ims, 1);
			if ((now != mode) ||
			    (now == mode &&
			    (!use_block_allow && mode == MCAST_UNDEFINED))) {
				MLD_PRINTF(("%s: skip node\n", __func__));
				continue;
			}
			if (is_source_query && ims->im6s_stp == 0) {
				MLD_PRINTF(("%s: skip unrecorded node\n",
				    __func__));
				continue;
			}
			MLD_PRINTF(("%s: append node\n", __func__));
			if (!m_append(m, sizeof(struct in6_addr),
			    (void *)&ims->im6s_addr)) {
				if (m != m0)
					m_freem(m);
				MLD_PRINTF(("%s: m_append() failed.\n",
				    __func__));
				return (-ENOMEM);
			}
			++msrcs;
			if (msrcs == m0srcs)
				break;
		}
		pmr->mr_numsrc = htons(msrcs);
		nbytes += (msrcs * sizeof(struct in6_addr));

		MLD_PRINTF(("%s: enqueueing next packet\n", __func__));
		IF_ENQUEUE(ifq, m);
	}

	return (nbytes);
}
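/*
 * For reference, each record appended above uses the RFC 3810
 * section 5.2 group record layout carried by struct mldv2_record:
 *
 *	mr_type     (1)  one of the MODE_IS, CHANGE_TO, ALLOW_NEW,
 *	                 or BLOCK_OLD record types
 *	mr_datalen  (1)  auxiliary data length; always 0 here
 *	mr_numsrc   (2)  source count, patched in via pmr after each walk
 *	mr_addr    (16)  group address with the scope ID cleared
 *
 * followed by mr_numsrc struct in6_addr entries, which is why m0srcs
 * is derived from the MTU remainder divided by sizeof(struct in6_addr).
 */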
/*
 * Type used to mark record pass completion.
 * We exploit the fact we can cast to this easily from the
 * current filter modes on each ip_msource node.
 */
typedef enum {
	REC_NONE = 0x00,	/* MCAST_UNDEFINED */
	REC_ALLOW = 0x01,	/* MCAST_INCLUDE */
	REC_BLOCK = 0x02,	/* MCAST_EXCLUDE */
	REC_FULL = REC_ALLOW | REC_BLOCK
} rectype_t;
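/*
 * Example of the inversion trick this encoding enables: a source that
 * dropped to MCAST_UNDEFINED at t1 on an EXCLUDE-mode group is picked
 * up by the ALLOW_NEW pass, because (~REC_BLOCK & REC_FULL) == REC_ALLOW;
 * the symmetric case on an INCLUDE-mode group lands in BLOCK_OLD.
 */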
/*
 * Enqueue an MLDv2 filter list change to the given output queue.
 *
 * Source list filter state is held in an RB-tree. When the filter list
 * for a group is changed without changing its mode, we need to compute
 * the deltas between T0 and T1 for each source in the filter set,
 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
 *
 * As we may potentially queue two record types, and the entire R-B tree
 * needs to be walked at once, we break this out into its own function
 * so we can generate a tightly packed queue of packets.
 *
 * XXX This could be written to only use one tree walk, although that makes
 * serializing into the mbuf chains a bit harder. For now we do two walks
 * which makes things easier on us, and it may or may not be harder on
 * the L2 cache.
 *
 * If successful the size of all data appended to the queue is returned,
 * otherwise an error code less than zero is returned, or zero if
 * no record(s) were appended.
 */
static int
mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm)
{
	static const int MINRECLEN =
	    sizeof(struct mldv2_record) + sizeof(struct in6_addr);
	struct ifnet		*ifp;
	struct mldv2_record	 mr;
	struct mldv2_record	*pmr;
	struct ip6_msource	*ims, *nims;
	struct mbuf		*m, *m0, *md;
	int			 m0srcs, nbytes, npbytes, off, rsrcs, schanged;
	int			 nallow, nblock;
	uint8_t			 mode, now, then;
	rectype_t		 crt, drt, nrt;

	IN6M_LOCK_ASSERT_HELD(inm);

	if (inm->in6m_nsrc == 0 ||
	    (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
		return (0);

	ifp = inm->in6m_ifp;			/* interface */
	mode = inm->in6m_st[1].iss_fmode;	/* filter mode at t1 */
	crt = REC_NONE;	/* current group record type */
	drt = REC_NONE;	/* mask of completed group record types */
	nrt = REC_NONE;	/* record type for current node */
	m0srcs = 0;	/* # source which will fit in current mbuf chain */
	npbytes = 0;	/* # of bytes appended this packet */
	nbytes = 0;	/* # of bytes appended to group's state-change queue */
	rsrcs = 0;	/* # sources encoded in current record */
	schanged = 0;	/* # nodes encoded in overall filter change */
	nallow = 0;	/* # of source entries in ALLOW_NEW */
	nblock = 0;	/* # of source entries in BLOCK_OLD */
	nims = NULL;	/* next tree node pointer */

	/*
	 * For each possible filter record mode.
	 * The first kind of source we encounter tells us which
	 * is the first kind of record we start appending.
	 * If a node transitioned to UNDEFINED at t1, its mode is treated
	 * as the inverse of the group's filter mode.
	 */
	while (drt != REC_FULL) {
		do {
			m0 = ifq->ifq_tail;
			if (m0 != NULL &&
			    (m0->m_pkthdr.vt_nrecs + 1 <=
			    MLD_V2_REPORT_MAXRECS) &&
			    (m0->m_pkthdr.len + MINRECLEN) <
			    (ifp->if_mtu - MLD_MTUSPACE)) {
				m = m0;
				m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
				    sizeof(struct mldv2_record)) /
				    sizeof(struct in6_addr);
				MLD_PRINTF(("%s: use previous packet\n",
				    __func__));
			} else {
				m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
				if (m == NULL)
					m = m_gethdr(M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					MLD_PRINTF(("%s: m_get*() failed\n",
					    __func__));
					return (-ENOMEM);
				}
				m->m_pkthdr.vt_nrecs = 0;
				mld_save_context(m, ifp);
				m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
				    sizeof(struct mldv2_record)) /
				    sizeof(struct in6_addr);
				npbytes = 0;
				MLD_PRINTF(("%s: allocated new packet\n",
				    __func__));
			}
			/*
			 * Append the MLD group record header to the
			 * current packet's data area.
			 * Recalculate pointer to free space for next
			 * group record, in case m_append() allocated
			 * a new mbuf or cluster.
			 */
			memset(&mr, 0, sizeof(mr));
			mr.mr_addr = inm->in6m_addr;
			in6_clearscope(&mr.mr_addr);
			if (!m_append(m, sizeof(mr), (void *)&mr)) {
				if (m != m0)
					m_freem(m);
				MLD_PRINTF(("%s: m_append() failed\n",
				    __func__));
				return (-ENOMEM);
			}
			npbytes += sizeof(struct mldv2_record);
			if (m != m0) {
				/* new packet; offset in chain */
				md = m_getptr(m, npbytes -
				    sizeof(struct mldv2_record), &off);
				pmr = (struct mldv2_record *)(mtod(md,
				    uint8_t *) + off);
			} else {
				/* current packet; offset from last append */
				md = m_last(m);
				pmr = (struct mldv2_record *)(mtod(md,
				    uint8_t *) + md->m_len -
				    sizeof(struct mldv2_record));
			}
			/*
			 * Begin walking the tree for this record type
			 * pass, or continue from where we left off
			 * previously if we had to allocate a new packet.
			 * Only report deltas in-mode at t1.
			 * We need not report included sources as allowed
			 * if we are in inclusive mode on the group,
			 * however the converse is not true.
			 */
			rsrcs = 0;
			if (nims == NULL) {
				nims = RB_MIN(ip6_msource_tree,
				    &inm->in6m_srcs);
			}
			RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
				MLD_PRINTF(("%s: visit node %s\n", __func__,
				    ip6_sprintf(&ims->im6s_addr)));
				now = im6s_get_mode(inm, ims, 1);
				then = im6s_get_mode(inm, ims, 0);
				MLD_PRINTF(("%s: mode: t0 %d, t1 %d\n",
				    __func__, then, now));
				if (now == then) {
					MLD_PRINTF(("%s: skip unchanged\n",
					    __func__));
					continue;
				}
				if (mode == MCAST_EXCLUDE &&
				    now == MCAST_INCLUDE) {
					MLD_PRINTF(("%s: skip IN src on EX "
					    "group\n", __func__));
					continue;
				}
				nrt = (rectype_t)now;
				if (nrt == REC_NONE)
					nrt = (rectype_t)(~mode & REC_FULL);
				if (schanged++ == 0) {
					crt = nrt;
				} else if (crt != nrt)
					continue;
				if (!m_append(m, sizeof(struct in6_addr),
				    (void *)&ims->im6s_addr)) {
					if (m != m0)
						m_freem(m);
					MLD_PRINTF(("%s: m_append() failed\n",
					    __func__));
					return (-ENOMEM);
				}
				nallow += !!(crt == REC_ALLOW);
				nblock += !!(crt == REC_BLOCK);
				if (++rsrcs == m0srcs)
					break;
			}
			/*
			 * If we did not append any tree nodes on this
			 * pass, back out of allocations.
			 */
			if (rsrcs == 0) {
				npbytes -= sizeof(struct mldv2_record);
				if (m != m0) {
					MLD_PRINTF(("%s: m_free(m)\n",
					    __func__));
					m_freem(m);
				} else {
					MLD_PRINTF(("%s: m_adj(m, -mr)\n",
					    __func__));
					m_adj(m, -((int)sizeof(
					    struct mldv2_record)));
				}
				continue;
			}
			npbytes += (rsrcs * sizeof(struct in6_addr));
			if (crt == REC_ALLOW)
				pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
			else if (crt == REC_BLOCK)
				pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
			pmr->mr_numsrc = htons(rsrcs);
			/*
			 * Count the new group record, and enqueue this
			 * packet if it wasn't already queued.
			 */
			m->m_pkthdr.vt_nrecs++;
			if (m != m0)
				IF_ENQUEUE(ifq, m);
			nbytes += npbytes;
		} while (nims != NULL);
		drt |= crt;
		crt = (~crt & REC_FULL);
	}

	MLD_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__,
	    nallow, nblock));

	return (nbytes);
}
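/*
 * Worked example (illustrative): an INCLUDE-mode group whose source
 * list changes from {A, B} at t0 to {B, C} at t1 produces two records
 * from the walks above: ALLOW_NEW {C}, since C is now in-mode, and
 * BLOCK_OLD {A}, since A fell to MCAST_UNDEFINED and is inverted
 * against the group's INCLUDE mode; the unchanged B is skipped.
 */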
static int
mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
{
	struct mbuf		*m;	/* pending state-change */
	struct mbuf		*m0;	/* copy of pending state-change */
	struct mbuf		*mt;	/* last state-change in packet */
	struct ifqueue		*gq;
	int			 docopy, domerge;
	u_int			 recslen;

	IN6M_LOCK_ASSERT_HELD(inm);

	docopy = 0;
	domerge = 0;
	recslen = 0;

	/*
	 * If there are further pending retransmissions, make a writable
	 * copy of each queued state-change message before merging.
	 */
	if (inm->in6m_scrv > 0)
		docopy = 1;

	gq = &inm->in6m_scq;
	if (gq->ifq_head == NULL) {
		MLD_PRINTF(("%s: WARNING: queue for inm 0x%llx is empty\n",
		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm)));
	}

	/*
	 * Use IF_REMQUEUE() instead of IF_DEQUEUE() below, since the
	 * packet might not always be at the head of the ifqueue.
	 */
	m = gq->ifq_head;
	while (m != NULL) {
		/*
		 * Only merge the report into the current packet if
		 * there is sufficient space to do so; an MLDv2 report
		 * packet may only contain 65,535 group records.
		 * Always use a simple mbuf chain concatenation to do this,
		 * as large state changes for single groups may have
		 * allocated clusters.
		 */
		domerge = 0;
		mt = ifscq->ifq_tail;
		if (mt != NULL) {
			recslen = m_length(m);

			if ((mt->m_pkthdr.vt_nrecs +
			    m->m_pkthdr.vt_nrecs <=
			    MLD_V2_REPORT_MAXRECS) &&
			    (mt->m_pkthdr.len + recslen <=
			    (inm->in6m_ifp->if_mtu - MLD_MTUSPACE)))
				domerge = 1;
		}

		if (!domerge && IF_QFULL(gq)) {
			MLD_PRINTF(("%s: outbound queue full, skipping whole "
			    "packet 0x%llx\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(m)));
			mt = m->m_nextpkt;
			if (!docopy) {
				IF_REMQUEUE(gq, m);
				m_freem(m);
			}
			m = mt;
			continue;
		}

		if (!docopy) {
			MLD_PRINTF(("%s: dequeueing 0x%llx\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(m)));
			IF_REMQUEUE(gq, m);
			m0 = m;
			m = m->m_nextpkt;
		} else {
			MLD_PRINTF(("%s: copying 0x%llx\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(m)));
			m0 = m_dup(m, M_NOWAIT);
			if (m0 == NULL)
				return (ENOMEM);
			m0->m_nextpkt = NULL;
			m = m->m_nextpkt;
		}

		if (!domerge) {
			MLD_PRINTF(("%s: queueing 0x%llx to ifscq 0x%llx)\n",
			    __func__, (uint64_t)VM_KERNEL_ADDRPERM(m0),
			    (uint64_t)VM_KERNEL_ADDRPERM(ifscq)));
			IF_ENQUEUE(ifscq, m0);
		} else {
			struct mbuf *mtl;	/* last mbuf of packet mt */

			MLD_PRINTF(("%s: merging 0x%llx with ifscq tail "
			    "0x%llx)\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(m0),
			    (uint64_t)VM_KERNEL_ADDRPERM(mt)));

			mtl = m_last(mt);
			m0->m_flags &= ~M_PKTHDR;
			mt->m_pkthdr.len += recslen;
			mt->m_pkthdr.vt_nrecs +=
			    m0->m_pkthdr.vt_nrecs;

			mtl->m_next = m0;
		}
	}

	return (0);
}
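/*
 * Note: the docopy path above deliberately leaves the original packets
 * on the group's state-change queue so the remaining in6m_scrv
 * retransmissions can reuse them; packets are moved with IF_REMQUEUE()
 * only on the final pass, when no retransmissions remain.
 */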
/*
 * Respond to a pending MLDv2 General Query.
 */
static uint32_t
mld_v2_dispatch_general_query(struct mld_ifinfo *mli)
{
	struct ifnet		*ifp;
	struct in6_multi	*inm;
	struct in6_multistep	step;
	int			 retval;

	MLI_LOCK_ASSERT_HELD(mli);

	VERIFY(mli->mli_version == MLD_VERSION_2);

	ifp = mli->mli_ifp;
	MLI_UNLOCK(mli);

	in6_multihead_lock_shared();
	IN6_FIRST_MULTI(step, inm);
	while (inm != NULL) {
		IN6M_LOCK(inm);
		if (inm->in6m_ifp != ifp)
			goto next;

		switch (inm->in6m_state) {
		case MLD_NOT_MEMBER:
		case MLD_SILENT_MEMBER:
			break;
		case MLD_REPORTING_MEMBER:
		case MLD_IDLE_MEMBER:
		case MLD_LAZY_MEMBER:
		case MLD_SLEEPING_MEMBER:
		case MLD_AWAKENING_MEMBER:
			inm->in6m_state = MLD_REPORTING_MEMBER;
			MLI_LOCK(mli);
			retval = mld_v2_enqueue_group_record(&mli->mli_gq,
			    inm, 0, 0, 0, 0);
			MLI_UNLOCK(mli);
			MLD_PRINTF(("%s: enqueue record = %d\n",
			    __func__, retval));
			break;
		case MLD_G_QUERY_PENDING_MEMBER:
		case MLD_SG_QUERY_PENDING_MEMBER:
		case MLD_LEAVING_MEMBER:
			break;
		}
next:
		IN6M_UNLOCK(inm);
		IN6_NEXT_MULTI(step, inm);
	}
	in6_multihead_lock_done();

	MLI_LOCK(mli);
	mld_dispatch_queue_locked(mli, &mli->mli_gq, MLD_MAX_RESPONSE_BURST);
	MLI_LOCK_ASSERT_HELD(mli);

	/*
	 * Slew transmission of bursts over 1 second intervals.
	 */
	if (mli->mli_gq.ifq_head != NULL) {
		mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
		    MLD_RESPONSE_BURST_INTERVAL);
	}

	return (mli->mli_v2_timer);
}
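/*
 * Illustrative effect of the slew above: a response queue deeper than
 * MLD_MAX_RESPONSE_BURST drains one burst per pass, re-arming
 * mli_v2_timer with a small randomized delay each time, so a general
 * query against many groups is answered as spaced bursts rather than
 * a single storm of reports.
 */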
/*
 * Transmit the next pending message in the output queue.
 *
 * Must not be called with in6m_lock or mli_lock held.
 */
static void
mld_dispatch_packet(struct mbuf *m)
{
	struct ip6_moptions	*im6o;
	struct ifnet		*ifp;
	struct ifnet		*oifp = NULL;
	struct mbuf		*m0;
	struct mbuf		*md;
	struct ip6_hdr		*ip6;
	struct mld_hdr		*mld;
	int			 error;
	int			 off;
	int			 type;

	MLD_PRINTF(("%s: transmit 0x%llx\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(m)));

	/*
	 * Check if the ifnet is still attached.
	 */
	ifp = mld_restore_context(m);
	if (ifp == NULL || !ifnet_is_attached(ifp, 0)) {
		MLD_PRINTF(("%s: dropped 0x%llx as ifindex %u went away.\n",
		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(m),
		    (u_int)if_index));
		m_freem(m);
		ip6stat.ip6s_noroute++;
		return;
	}

	im6o = ip6_allocmoptions(M_WAITOK);
	if (im6o == NULL) {
		m_freem(m);
		return;
	}

	im6o->im6o_multicast_hlim = 1;
	im6o->im6o_multicast_loop = 0;
	im6o->im6o_multicast_ifp = ifp;

	if (m->m_flags & M_MLDV1) {
		m0 = m;
	} else {
		m0 = mld_v2_encap_report(ifp, m);
		if (m0 == NULL) {
			MLD_PRINTF(("%s: dropped 0x%llx\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(m)));
			/*
			 * mld_v2_encap_report() has already freed our mbuf.
			 */
			IM6O_REMREF(im6o);
			ip6stat.ip6s_odropped++;
			return;
		}
	}

	mld_scrub_context(m0);
	m->m_flags &= ~(M_PROTOFLAGS);
	m0->m_pkthdr.rcvif = lo_ifp;

	ip6 = mtod(m0, struct ip6_hdr *);
	(void) in6_setscope(&ip6->ip6_dst, ifp, NULL);

	/*
	 * Retrieve the ICMPv6 type before handoff to ip6_output(),
	 * so we can bump the stats.
	 */
	md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
	mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off);
	type = mld->mld_type;

	if (ifp->if_eflags & IFEF_TXSTART) {
		/*
		 * Use control service class if the outgoing
		 * interface supports transmit-start model.
		 */
		(void) m_set_service_class(m0, MBUF_SC_CTL);
	}

	error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, im6o,
	    &oifp, NULL);

	IM6O_REMREF(im6o);

	if (error) {
		MLD_PRINTF(("%s: ip6_output(0x%llx) = %d\n", __func__,
		    (uint64_t)VM_KERNEL_ADDRPERM(m0), error));
		if (oifp != NULL)
			ifnet_release(oifp);
		return;
	}

	icmp6stat.icp6s_outhist[type]++;
	if (oifp != NULL) {
		icmp6_ifstat_inc(oifp, ifs6_out_msg);
		switch (type) {
		case MLD_LISTENER_REPORT:
		case MLDV2_LISTENER_REPORT:
			icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
			break;
		case MLD_LISTENER_DONE:
			icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
			break;
		}
		ifnet_release(oifp);
	}
}
/*
 * Encapsulate an MLDv2 report.
 *
 * KAME IPv6 requires that hop-by-hop options be passed separately,
 * and that the IPv6 header be prepended in a separate mbuf.
 *
 * Returns a pointer to the new mbuf chain head, or NULL if the
 * allocation failed.
 */
static struct mbuf *
mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
{
	struct mbuf		*mh;
	struct mldv2_report	*mld;
	struct ip6_hdr		*ip6;
	struct in6_ifaddr	*ia;
	int			 mldreclen;

	VERIFY(m->m_flags & M_PKTHDR);

	/*
	 * RFC3590: OK to send as :: or tentative during DAD.
	 */
	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
	if (ia == NULL)
		MLD_PRINTF(("%s: warning: ia is NULL\n", __func__));

	MGETHDR(mh, M_DONTWAIT, MT_HEADER);
	if (mh == NULL) {
		if (ia != NULL)
			IFA_REMREF(&ia->ia_ifa);
		m_freem(m);
		return (NULL);
	}
	MH_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));

	mldreclen = m_length(m);
	MLD_PRINTF(("%s: mldreclen is %d\n", __func__, mldreclen));

	mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report);
	mh->m_pkthdr.len = sizeof(struct ip6_hdr) +
	    sizeof(struct mldv2_report) + mldreclen;

	ip6 = mtod(mh, struct ip6_hdr *);
	ip6->ip6_flow = 0;
	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
	ip6->ip6_vfc |= IPV6_VERSION;
	ip6->ip6_nxt = IPPROTO_ICMPV6;
	if (ia != NULL)
		IFA_LOCK(&ia->ia_ifa);
	ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
	if (ia != NULL) {
		IFA_UNLOCK(&ia->ia_ifa);
		IFA_REMREF(&ia->ia_ifa);
		ia = NULL;
	}
	ip6->ip6_dst = in6addr_linklocal_allv2routers;
	/* scope ID will be set in netisr */

	mld = (struct mldv2_report *)(ip6 + 1);
	mld->mld_type = MLDV2_LISTENER_REPORT;
	mld->mld_code = 0;
	mld->mld_cksum = 0;
	mld->mld_v2_reserved = 0;
	mld->mld_v2_numrecs = htons(m->m_pkthdr.vt_nrecs);
	m->m_pkthdr.vt_nrecs = 0;
	m->m_flags &= ~M_PKTHDR;

	mh->m_next = m;
	mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
	    sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
	return (mh);
}
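/*
 * The chain returned above is laid out as follows (a sketch; the
 * Router Alert hop-by-hop option and hop limit of 1 are supplied at
 * send time by ip6_output() via mld_po):
 *
 *	mh: [ ip6_hdr (40 bytes) | mldv2_report (8 bytes) ]
 *	m:  [ group records ... ]
 *
 * mld_v2_numrecs comes from the packet header's vt_nrecs, which the
 * enqueue paths maintained as records were appended.
 */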
static const char *
mld_rec_type_to_str(const int type)
{
	switch (type) {
	case MLD_CHANGE_TO_EXCLUDE_MODE:
		return ("TO_EX");
	case MLD_CHANGE_TO_INCLUDE_MODE:
		return ("TO_IN");
	case MLD_MODE_IS_EXCLUDE:
		return ("MODE_EX");
	case MLD_MODE_IS_INCLUDE:
		return ("MODE_IN");
	case MLD_ALLOW_NEW_SOURCES:
		return ("ALLOW_NEW");
	case MLD_BLOCK_OLD_SOURCES:
		return ("BLOCK_OLD");
	default:
		break;
	}
	return ("unknown");
}
void
mld_init(void)
{
	MLD_PRINTF(("%s: initializing\n", __func__));

	/* Setup lock group and attribute for mld_mtx */
	mld_mtx_grp_attr = lck_grp_attr_alloc_init();
	mld_mtx_grp = lck_grp_alloc_init("mld_mtx", mld_mtx_grp_attr);
	mld_mtx_attr = lck_attr_alloc_init();
	lck_mtx_init(&mld_mtx, mld_mtx_grp, mld_mtx_attr);

	ip6_initpktopts(&mld_po);
	mld_po.ip6po_hlim = 1;
	mld_po.ip6po_hbh = &mld_ra.hbh;
	mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
	mld_po.ip6po_flags = IP6PO_DONTFRAG;
	LIST_INIT(&mli_head);

	mli_size = sizeof (struct mld_ifinfo);
	mli_zone = zinit(mli_size, MLI_ZONE_MAX * mli_size,
	    0, MLI_ZONE_NAME);
	if (mli_zone == NULL) {
		panic("%s: failed allocating %s", __func__, MLI_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(mli_zone, Z_EXPAND, TRUE);
	zone_change(mli_zone, Z_CALLERACCT, FALSE);
}