/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*-
 * Copyright (c) 2009 Bruce Simpson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1988 Stephen Deering.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Stephen Deering of Stanford University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)igmp.c	8.1 (Berkeley) 7/19/93
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mcache.h>

#include <dev/random/randomdev.h>

#include <kern/zalloc.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/mld6.h>
#include <netinet6/mld6_var.h>

/* Lock group and attribute for mld_mtx */
static lck_attr_t	*mld_mtx_attr;
static lck_grp_t	*mld_mtx_grp;
static lck_grp_attr_t	*mld_mtx_grp_attr;
/*
 * Locking and reference counting:
 *
 * mld_mtx mainly protects mli_head.  In cases where both mld_mtx and
 * in6_multihead_lock must be held, the former must be acquired first in order
 * to maintain lock ordering.  It is not a requirement that mld_mtx be
 * acquired first before in6_multihead_lock, but in case both must be acquired
 * in succession, the correct lock ordering must be followed.
 *
 * Instead of walking the if_multiaddrs list at the interface and returning
 * the ifma_protospec value of a matching entry, we search the global list
 * of in6_multi records and find it that way; this is done with in6_multihead
 * lock held.  Doing so avoids the race condition issues that many other BSDs
 * suffer from (therefore in our implementation, ifma_protospec will never be
 * NULL for as long as the in6_multi is valid.)
 *
 * The above creates a requirement for the in6_multi to stay in the
 * in6_multihead list even after the final MLD leave (in MLDv2 mode), until
 * it no longer needs to be retransmitted (this is not required for MLDv1.)
 * In order to handle this, the request and reference counts of the in6_multi
 * are bumped up when the state changes to MLD_LEAVING_MEMBER, and later
 * dropped in the timeout handler.  Each in6_multi holds a reference to the
 * underlying mld_ifinfo.
 *
 * Thus, the permitted lock order is:
 *
 *	mld_mtx, in6_multihead_lock, inm6_lock, mli_lock
 *
 * Any may be taken independently, but if any are held at the same time,
 * the above lock order must be followed.
 */
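/*
 * Illustrative sketch only (not an actual code path in this file): a caller
 * that needs both the global MLD lock and a per-interface mli_lock would
 * take them in the order documented above and drop them in reverse, e.g.:
 *
 *	MLD_LOCK();
 *	MLI_LOCK(mli);
 *	...
 *	MLI_UNLOCK(mli);
 *	MLD_UNLOCK();
 */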
static decl_lck_mtx_data(, mld_mtx);

SLIST_HEAD(mld_in6m_relhead, in6_multi);

static void	mli_initvar(struct mld_ifinfo *, struct ifnet *, int);
static struct mld_ifinfo *mli_alloc(int);
static void	mli_free(struct mld_ifinfo *);
static void	mli_delete(const struct ifnet *, struct mld_in6m_relhead *);
static void	mld_dispatch_packet(struct mbuf *);
static void	mld_final_leave(struct in6_multi *, struct mld_ifinfo *,
    struct mld_tparams *);
static int	mld_handle_state_change(struct in6_multi *, struct mld_ifinfo *,
    struct mld_tparams *);
static int	mld_initial_join(struct in6_multi *, struct mld_ifinfo *,
    struct mld_tparams *, const int);
static const char *	mld_rec_type_to_str(const int);
static uint32_t	mld_set_version(struct mld_ifinfo *, const int);
static void	mld_flush_relq(struct mld_ifinfo *, struct mld_in6m_relhead *);
static void	mld_dispatch_queue_locked(struct mld_ifinfo *, struct ifqueue *, int);
static int	mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
    /*const*/ struct mld_hdr *);
static int	mld_v1_input_report(struct ifnet *, struct mbuf *,
    const struct ip6_hdr *, /*const*/ struct mld_hdr *);
static void	mld_v1_process_group_timer(struct in6_multi *, const int);
static void	mld_v1_process_querier_timers(struct mld_ifinfo *);
static int	mld_v1_transmit_report(struct in6_multi *, const int);
static uint32_t	mld_v1_update_group(struct in6_multi *, const int);
static void	mld_v2_cancel_link_timers(struct mld_ifinfo *);
static uint32_t	mld_v2_dispatch_general_query(struct mld_ifinfo *);
static struct mbuf *
		mld_v2_encap_report(struct ifnet *, struct mbuf *);
static int	mld_v2_enqueue_filter_change(struct ifqueue *,
    struct in6_multi *);
static int	mld_v2_enqueue_group_record(struct ifqueue *,
    struct in6_multi *, const int, const int, const int,
    const int);
static int	mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
    struct mbuf *, const int, const int);
static int	mld_v2_merge_state_changes(struct in6_multi *,
    struct ifqueue *);
static void	mld_v2_process_group_timers(struct mld_ifinfo *,
    struct ifqueue *, struct ifqueue *,
    struct in6_multi *, const int);
static int	mld_v2_process_group_query(struct in6_multi *,
    int, struct mbuf *, const int);
static int	sysctl_mld_gsr SYSCTL_HANDLER_ARGS;
static int	sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS;
static int	sysctl_mld_v2enable SYSCTL_HANDLER_ARGS;

static int mld_timeout_run;		/* MLD timer is scheduled to run */
static void mld_timeout(void *);
static void mld_sched_timeout(void);
/*
 * Normative references: RFC 2710, RFC 3590, RFC 3810.
 */
static struct timeval mld_gsrdelay = {.tv_sec = 10, .tv_usec = 0};
static LIST_HEAD(, mld_ifinfo) mli_head;

static int querier_present_timers_running6;
static int interface_timers_running6;
static int state_change_timers_running6;
static int current_state_timers_running6;

static unsigned int mld_mli_list_genid;
/*
 * Subsystem lock macros.
 */
#define MLD_LOCK()			\
	lck_mtx_lock(&mld_mtx)
#define MLD_LOCK_ASSERT_HELD()		\
	LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_OWNED)
#define MLD_LOCK_ASSERT_NOTHELD()	\
	LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_NOTOWNED)
#define MLD_UNLOCK()			\
	lck_mtx_unlock(&mld_mtx)

#define MLD_ADD_DETACHED_IN6M(_head, _in6m) {				\
	SLIST_INSERT_HEAD(_head, _in6m, in6m_dtle);			\
}

#define MLD_REMOVE_DETACHED_IN6M(_head) {				\
	struct in6_multi *_in6m, *_inm_tmp;				\
	SLIST_FOREACH_SAFE(_in6m, _head, in6m_dtle, _inm_tmp) {	\
		SLIST_REMOVE(_head, _in6m, in6_multi, in6m_dtle);	\
		IN6M_REMREF(_in6m);					\
	}								\
	VERIFY(SLIST_EMPTY(_head));					\
}
#define MLI_ZONE_MAX		64		/* maximum elements in zone */
#define MLI_ZONE_NAME		"mld_ifinfo"	/* zone name */

static unsigned int mli_size;			/* size of zone element */
static struct zone *mli_zone;			/* zone for mld_ifinfo */

SYSCTL_DECL(_net_inet6);	/* Note: Not in any common header. */

SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "IPv6 Multicast Listener Discovery");
SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_gsrdelay.tv_sec, 0, sysctl_mld_gsr, "I",
    "Rate limit for MLDv2 Group-and-Source queries in seconds");

SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_LOCKED,
    sysctl_mld_ifinfo, "Per-interface MLDv2 state");

static int mld_v1enable = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_v1enable, 0, "Enable fallback to MLDv1");

static int mld_v2enable = 1;
SYSCTL_PROC(_net_inet6_mld, OID_AUTO, v2enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_v2enable, 0, sysctl_mld_v2enable, "I",
    "Enable MLDv2 (debug purposes only)");

static int mld_use_allow = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");

SYSCTL_INT(_net_inet6_mld, OID_AUTO,
    debug, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_debug, 0, "");
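/*
 * For reference, the knobs above surface under the net.inet6.mld sysctl
 * node.  Illustrative invocations only (values shown are the defaults
 * declared above):
 *
 *	sysctl net.inet6.mld.v1enable=1
 *	sysctl net.inet6.mld.gsrdelay=10
 */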
/*
 * Packed Router Alert option structure declaration.
 */
struct mld_raopt {
	struct ip6_hbh		hbh;
	struct ip6_opt		pad;
	struct ip6_opt_router	ra;
} __packed;

/*
 * Router Alert hop-by-hop option header.
 */
static struct mld_raopt mld_ra = {
	.hbh = { .ip6h_nxt = 0, .ip6h_len = 0 },
	.pad = { .ip6o_type = IP6OPT_PADN, .ip6o_len = 0 },
	.ra = {
		.ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT,
		.ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2),
		.ip6or_value = {((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
		    (IP6OPT_RTALERT_MLD & 0xFF) }
	}
};
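/*
 * Layout note (see RFC 2711): the Router Alert option rides in a hop-by-hop
 * options header; ip6or_len excludes the two option type/length octets, and
 * an option value of IP6OPT_RTALERT_MLD tells routers that the datagram
 * carries an MLD message.  The PadN option keeps the header 8-byte aligned.
 */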
static struct ip6_pktopts mld_po;

/* Store MLDv2 record count in the module private scratch space */
#define	vt_nrecs	pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0]
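/*
 * With this #define in place, code elsewhere in this file can refer to
 * m->m_pkthdr.vt_nrecs to count how many MLDv2 group records have been
 * packed into a pending report mbuf (the IGMP code uses a similar
 * scratch-space convention for IGMPv3 records).
 */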
static __inline void
mld_save_context(struct mbuf *m, struct ifnet *ifp)
{
	m->m_pkthdr.rcvif = ifp;
}

static __inline void
mld_scrub_context(struct mbuf *m)
{
	m->m_pkthdr.rcvif = NULL;
}

/*
 * Restore context from a queued output chain.
 */
static __inline struct ifnet *
mld_restore_context(struct mbuf *m)
{
	return m->m_pkthdr.rcvif;
}
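/*
 * Usage note: while an MLD packet sits on one of this module's output
 * queues its rcvif field is not otherwise meaningful, so it is borrowed
 * above to remember which interface the message was built for;
 * mld_dispatch_packet() is expected to recover the interface with
 * mld_restore_context() and clear it with mld_scrub_context() before the
 * packet is handed to ip6_output().
 */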
/*
 * Retrieve or set threshold between group-source queries in seconds.
 */
static int
sysctl_mld_gsr SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	i = mld_gsrdelay.tv_sec;

	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr) {

	if (i < -1 || i >= 60) {

	mld_gsrdelay.tv_sec = i;
/*
 * Expose struct mld_ifinfo to userland, keyed by ifindex.
 * For use by ifmcstat(8).
 */
static int
sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS
{
	struct mld_ifinfo *mli;
	struct mld_ifinfo_u mli_u;

	if (req->newptr != USER_ADDR_NULL) {

	if (name[0] <= 0 || name[0] > (u_int)if_index) {

	ifnet_head_lock_shared();
	ifp = ifindex2ifnet[name[0]];

	bzero(&mli_u, sizeof(mli_u));

	LIST_FOREACH(mli, &mli_head, mli_link) {
		if (ifp != mli->mli_ifp) {

		mli_u.mli_ifindex = mli->mli_ifp->if_index;
		mli_u.mli_version = mli->mli_version;
		mli_u.mli_v1_timer = mli->mli_v1_timer;
		mli_u.mli_v2_timer = mli->mli_v2_timer;
		mli_u.mli_flags = mli->mli_flags;
		mli_u.mli_rv = mli->mli_rv;
		mli_u.mli_qi = mli->mli_qi;
		mli_u.mli_qri = mli->mli_qri;
		mli_u.mli_uri = mli->mli_uri;

		error = SYSCTL_OUT(req, &mli_u, sizeof(mli_u));
static int
sysctl_mld_v2enable SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	struct mld_ifinfo *mli;
	struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };

	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr) {

	if (i < 0 || i > 1) {

	/*
	 * If we enabled v2, the state transition will take care of upgrading
	 * the MLD version back to v2.  Otherwise, we have to explicitly
	 * downgrade.  Note that this functionality is to be used for debugging.
	 */
	if (mld_v2enable == 1) {

	LIST_FOREACH(mli, &mli_head, mli_link) {
		if (mld_set_version(mli, MLD_VERSION_1) > 0) {

	mld_set_timeout(&mtp);
/*
 * Dispatch an entire queue of pending packet chains.
 *
 * Must not be called with in6m_lock held.
 * XXX This routine unlocks MLD global lock and also mli locks.
 * Make sure that the calling routine takes reference on the mli
 * before calling this routine.
 * Also if we are traversing mli_head, remember to check for
 * mli list generation count and restart the loop if generation count
 * has changed.
 */
static void
mld_dispatch_queue_locked(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit)
{
	MLD_LOCK_ASSERT_HELD();

	MLI_LOCK_ASSERT_HELD(mli);

		MLD_PRINTF(("%s: dispatch 0x%llx from 0x%llx\n", __func__,
		    (uint64_t)VM_KERNEL_ADDRPERM(ifq),
		    (uint64_t)VM_KERNEL_ADDRPERM(m)));

		mld_dispatch_packet(m);

	MLI_LOCK_ASSERT_HELD(mli);
/*
 * Filter outgoing MLD report state by group.
 *
 * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1)
 * and node-local addresses.  However, kernel and socket consumers
 * always embed the KAME scope ID in the address provided, so strip it
 * when performing comparison.
 * Note: This is not the same as the *multicast* scope.
 *
 * Return zero if the given group is one for which MLD reports
 * should be suppressed, or non-zero if reports should be issued.
 */
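/*
 * For example (illustrative): ff02::1 (all-nodes) and any ff01::/16
 * node-local group are suppressed by the checks below, while a link-local
 * solicited-node group such as ff02::1:ff00:1 is reported normally.
 */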
static __inline__ int
mld_is_addr_reported(const struct in6_addr *addr)
{
	VERIFY(IN6_IS_ADDR_MULTICAST(addr));

	if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL) {

	if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) {
		struct in6_addr tmp = *addr;
		in6_clearscope(&tmp);
		if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes)) {
/*
 * Attach MLD when PF_INET6 is attached to an interface.
 */
struct mld_ifinfo *
mld_domifattach(struct ifnet *ifp, int how)
{
	struct mld_ifinfo *mli;

	MLD_PRINTF(("%s: called for ifp 0x%llx(%s)\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	mli = mli_alloc(how);

	mli_initvar(mli, ifp, 0);
	mli->mli_debug |= IFD_ATTACHED;
	MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
	MLI_ADDREF_LOCKED(mli); /* hold a reference for caller */

	ifnet_lock_shared(ifp);
	mld6_initsilent(ifp, mli);
	ifnet_lock_done(ifp);

	LIST_INSERT_HEAD(&mli_head, mli, mli_link);
	mld_mli_list_genid++;

	MLD_PRINTF(("%s: allocate mld_ifinfo for ifp 0x%llx(%s)\n",
	    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
/*
 * Attach MLD when PF_INET6 is reattached to an interface.  Caller is
 * expected to have an outstanding reference to the mli.
 */
void
mld_domifreattach(struct mld_ifinfo *mli)
{
	VERIFY(!(mli->mli_debug & IFD_ATTACHED));

	mli_initvar(mli, ifp, 1);
	mli->mli_debug |= IFD_ATTACHED;
	MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */

	ifnet_lock_shared(ifp);
	mld6_initsilent(ifp, mli);
	ifnet_lock_done(ifp);

	LIST_INSERT_HEAD(&mli_head, mli, mli_link);
	mld_mli_list_genid++;

	MLD_PRINTF(("%s: reattached mld_ifinfo for ifp 0x%llx(%s)\n",
	    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
/*
 * Hook for domifdetach.
 */
void
mld_domifdetach(struct ifnet *ifp)
{
	SLIST_HEAD(, in6_multi) in6m_dthead;

	SLIST_INIT(&in6m_dthead);

	MLD_PRINTF(("%s: called for ifp 0x%llx(%s)\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	mli_delete(ifp, (struct mld_in6m_relhead *)&in6m_dthead);

	/* Now that we've dropped all locks, release detached records */
	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
/*
 * Called at interface detach time.  Note that we only flush all deferred
 * responses and record releases; all remaining inm records and their source
 * entries related to this interface are left intact, in order to handle
 */
static void
mli_delete(const struct ifnet *ifp, struct mld_in6m_relhead *in6m_dthead)
{
	struct mld_ifinfo *mli, *tmli;

	MLD_LOCK_ASSERT_HELD();

	LIST_FOREACH_SAFE(mli, &mli_head, mli_link, tmli) {
		if (mli->mli_ifp == ifp) {
			/*
			 * Free deferred General Query responses.
			 */
			IF_DRAIN(&mli->mli_gq);
			IF_DRAIN(&mli->mli_v1q);
			mld_flush_relq(mli, in6m_dthead);
			VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
			mli->mli_debug &= ~IFD_ATTACHED;

			LIST_REMOVE(mli, mli_link);
			MLI_REMREF(mli); /* release mli_head reference */
			mld_mli_list_genid++;

	panic("%s: mld_ifinfo not found for ifp %p(%s)\n", __func__,
__private_extern__ void
mld6_initsilent(struct ifnet *ifp, struct mld_ifinfo *mli)
{
	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_OWNED);

	MLI_LOCK_ASSERT_NOTHELD(mli);

	if (!(ifp->if_flags & IFF_MULTICAST) &&
	    (ifp->if_eflags & (IFEF_IPV6_ND6ALT | IFEF_LOCALNET_PRIVATE))) {
		mli->mli_flags |= MLIF_SILENT;

		mli->mli_flags &= ~MLIF_SILENT;
static void
mli_initvar(struct mld_ifinfo *mli, struct ifnet *ifp, int reattach)
{
	MLI_LOCK_ASSERT_HELD(mli);

	mli->mli_version = MLD_VERSION_2;

	mli->mli_version = MLD_VERSION_1;

	mli->mli_rv = MLD_RV_INIT;
	mli->mli_qi = MLD_QI_INIT;
	mli->mli_qri = MLD_QRI_INIT;
	mli->mli_uri = MLD_URI_INIT;

	mli->mli_flags |= MLIF_USEALLOW;

	SLIST_INIT(&mli->mli_relinmhead);

	/*
	 * Responses to general queries are subject to bounds.
	 */
	mli->mli_gq.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS;
	mli->mli_v1q.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS;
static struct mld_ifinfo *
mli_alloc(int how)
{
	struct mld_ifinfo *mli;

	mli = (how == M_WAITOK) ? zalloc(mli_zone) : zalloc_noblock(mli_zone);

	bzero(mli, mli_size);
	lck_mtx_init(&mli->mli_lock, mld_mtx_grp, mld_mtx_attr);
	mli->mli_debug |= IFD_ALLOC;
static void
mli_free(struct mld_ifinfo *mli)
{
	if (mli->mli_debug & IFD_ATTACHED) {
		panic("%s: attached mli=%p is being freed", __func__, mli);
	} else if (mli->mli_ifp != NULL) {
		panic("%s: ifp not NULL for mli=%p", __func__, mli);
	} else if (!(mli->mli_debug & IFD_ALLOC)) {
		panic("%s: mli %p cannot be freed", __func__, mli);
	} else if (mli->mli_refcnt != 0) {
		panic("%s: non-zero refcnt mli=%p", __func__, mli);

	mli->mli_debug &= ~IFD_ALLOC;

	lck_mtx_destroy(&mli->mli_lock, mld_mtx_grp);
	zfree(mli_zone, mli);
void
mli_addref(struct mld_ifinfo *mli, int locked)
{
	MLI_LOCK_ASSERT_HELD(mli);

	if (++mli->mli_refcnt == 0) {
		panic("%s: mli=%p wraparound refcnt", __func__, mli);
void
mli_remref(struct mld_ifinfo *mli)
{
	SLIST_HEAD(, in6_multi) in6m_dthead;

	if (mli->mli_refcnt == 0) {
		panic("%s: mli=%p negative refcnt", __func__, mli);

	if (mli->mli_refcnt > 0) {

	IF_DRAIN(&mli->mli_gq);
	IF_DRAIN(&mli->mli_v1q);
	SLIST_INIT(&in6m_dthead);
	mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead);
	VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));

	/* Now that we've dropped all locks, release detached records */
	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);

	MLD_PRINTF(("%s: freeing mld_ifinfo for ifp 0x%llx(%s)\n",
	    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
/*
 * Process a received MLDv1 general or address-specific query.
 * Assumes that the query header has been pulled up to sizeof(mld_hdr).
 *
 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
 * mld_addr.  This is OK as we own the mbuf chain.
 */
static int
mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
    /*const*/ struct mld_hdr *mld)
{
	struct mld_ifinfo *mli;
	struct in6_multi *inm;
	int err = 0, is_general_query;
	struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };

	MLD_LOCK_ASSERT_NOTHELD();

	is_general_query = 0;

		MLD_PRINTF(("%s: ignore v1 query %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&mld->mld_addr),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	/*
	 * RFC3810 Section 6.2: MLD queries must originate from
	 * a router's link-local address.
	 */
	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
		MLD_PRINTF(("%s: ignore v1 query src %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&ip6->ip6_src),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	/*
	 * Do address field validation upfront before we accept
	 * the query.
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
		/*
		 * MLDv1 General Query.
		 * If this was not sent to the all-nodes group, ignore it.
		 */
		in6_clearscope(&dst);
		if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes)) {

		is_general_query = 1;

		/*
		 * Embed scope ID of receiving interface in MLD query for
		 * lookup whilst we don't hold other locks.
		 */
		(void)in6_setscope(&mld->mld_addr, ifp, NULL);

	/*
	 * Switch to MLDv1 host compatibility mode.
	 */
	mli = MLD_IFINFO(ifp);

	mtp.qpt = mld_set_version(mli, MLD_VERSION_1);

	timer = ntohs(mld->mld_maxdelay) / MLD_TIMER_SCALE;
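	/*
	 * For reference: RFC 2710 (Section 3.4) expresses the Maximum
	 * Response Delay of an MLDv1 query in milliseconds; the division by
	 * MLD_TIMER_SCALE is presumably what converts that wire value into
	 * the coarser units used by the one-second MLD timeout machinery.
	 */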
	if (is_general_query) {
		struct in6_multistep step;

		MLD_PRINTF(("%s: process v1 general query on ifp 0x%llx(%s)\n",
		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		/*
		 * For each reporting group joined on this
		 * interface, kick the report timer.
		 */
		in6_multihead_lock_shared();
		IN6_FIRST_MULTI(step, inm);
		while (inm != NULL) {
			if (inm->in6m_ifp == ifp) {
				mtp.cst += mld_v1_update_group(inm, timer);

			IN6_NEXT_MULTI(step, inm);

		in6_multihead_lock_done();
		/*
		 * MLDv1 Group-Specific Query.
		 * If this is a group-specific MLDv1 query, we need only
		 * look up the single group to process it.
		 */
		in6_multihead_lock_shared();
		IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
		in6_multihead_lock_done();

			MLD_PRINTF(("%s: process v1 query %s on "
			    "ifp 0x%llx(%s)\n", __func__,
			    ip6_sprintf(&mld->mld_addr),
			    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
			mtp.cst = mld_v1_update_group(inm, timer);

			IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */

	/* XXX Clear embedded scope ID as userland won't expect it. */
	in6_clearscope(&mld->mld_addr);

	mld_set_timeout(&mtp);
/*
 * Update the report timer on a group in response to an MLDv1 query.
 *
 * If we are becoming the reporting member for this group, start the timer.
 * If we already are the reporting member for this group, and timer is
 * below the threshold, reset it.
 *
 * We may be updating the group for the first time since we switched
 * to MLDv2.  If we are, then we must clear any recorded source lists,
 * and transition to REPORTING state; the group timer is overloaded
 * for group and group-source query responses.
 *
 * Unlike MLDv2, the delay per group should be jittered
 * to avoid bursts of MLDv1 reports.
 */
static uint32_t
mld_v1_update_group(struct in6_multi *inm, const int timer)
{
	IN6M_LOCK_ASSERT_HELD(inm);

	MLD_PRINTF(("%s: %s/%s timer=%d\n", __func__,
	    ip6_sprintf(&inm->in6m_addr),
	    if_name(inm->in6m_ifp), timer));

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_REPORTING_MEMBER:
		if (inm->in6m_timer != 0 &&
		    inm->in6m_timer <= timer) {
			MLD_PRINTF(("%s: REPORTING and timer running, "
			    "skipping.\n", __func__));
	case MLD_SG_QUERY_PENDING_MEMBER:
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_AWAKENING_MEMBER:
		MLD_PRINTF(("%s: ->REPORTING\n", __func__));
		inm->in6m_state = MLD_REPORTING_MEMBER;
		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
	case MLD_SLEEPING_MEMBER:
		MLD_PRINTF(("%s: ->AWAKENING\n", __func__));
		inm->in6m_state = MLD_AWAKENING_MEMBER;
	case MLD_LEAVING_MEMBER:

	return inm->in6m_timer;
/*
 * Process a received MLDv2 general, group-specific or
 * group-and-source-specific query.
 *
 * Assumes that the query header has been pulled up to sizeof(mldv2_query).
 *
 * Return 0 if successful, otherwise an appropriate error code is returned.
 */
static int
mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
    struct mbuf *m, const int off, const int icmp6len)
{
	struct mld_ifinfo *mli;
	struct mldv2_query *mld;
	struct in6_multi *inm;
	uint32_t maxdelay, nsrc, qqi;
	int err = 0, is_general_query;
	struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 };

	MLD_LOCK_ASSERT_NOTHELD();

	is_general_query = 0;

	if (!mld_v2enable) {
		MLD_PRINTF(("%s: ignore v2 query %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&ip6->ip6_src),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	/*
	 * RFC3810 Section 6.2: MLD queries must originate from
	 * a router's link-local address.
	 */
	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
		MLD_PRINTF(("%s: ignore v2 query src %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&ip6->ip6_src),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	MLD_PRINTF(("%s: input v2 query on ifp 0x%llx(%s)\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off);

	maxdelay = ntohs(mld->mld_maxdelay);	/* in 1/10ths of a second */
	if (maxdelay >= 32768) {
		maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) <<
		    (MLD_MRC_EXP(maxdelay) + 3);
	}
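	/*
	 * For reference, this follows the RFC 3810 (Section 5.1.3) Maximum
	 * Response Code decoding: values >= 32768 use a floating-point
	 * encoding |1|exp(3)|mant(12)|, representing (mant | 0x1000) <<
	 * (exp + 3).  For example (illustrative), an encoded value of 0x8000
	 * (exp = 0, mant = 0) decodes to 0x1000 << 3 = 32768.
	 */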
	timer = maxdelay / MLD_TIMER_SCALE;

	qrv = MLD_QRV(mld->mld_misc);
		MLD_PRINTF(("%s: clamping qrv %d to %d\n", __func__,

	qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
	    (MLD_QQIC_EXP(mld->mld_qqi) + 3);

	nsrc = ntohs(mld->mld_numsrc);
	if (nsrc > MLD_MAX_GS_SOURCES) {

	if (icmp6len < sizeof(struct mldv2_query) +
	    (nsrc * sizeof(struct in6_addr))) {

	/*
	 * Do further input validation upfront to avoid resetting timers
	 * should we need to discard this query.
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
		/*
		 * A general query with a source list has undefined
		 * behaviour; discard it.
		 */

		is_general_query = 1;

		/*
		 * Embed scope ID of receiving interface in MLD query for
		 * lookup whilst we don't hold other locks (due to KAME
		 * locking lameness).  We own this mbuf chain just now.
		 */
		(void)in6_setscope(&mld->mld_addr, ifp, NULL);

	mli = MLD_IFINFO(ifp);
	VERIFY(mli != NULL);

	/*
	 * Discard the v2 query if we're in Compatibility Mode.
	 * The RFC is pretty clear that hosts need to stay in MLDv1 mode
	 * until the Old Version Querier Present timer expires.
	 */
	if (mli->mli_version != MLD_VERSION_2) {

	mtp.qpt = mld_set_version(mli, MLD_VERSION_2);

	mli->mli_qri = MAX(timer, MLD_QRI_MIN);

	MLD_PRINTF(("%s: qrv %d qi %d qri %d\n", __func__, mli->mli_rv,
	    mli->mli_qi, mli->mli_qri));
	if (is_general_query) {
		/*
		 * MLDv2 General Query.
		 *
		 * Schedule a current-state report on this ifp for
		 * all groups, possibly containing source lists.
		 *
		 * If there is a pending General Query response
		 * scheduled earlier than the selected delay, do
		 * not schedule any other reports.
		 * Otherwise, reset the interface timer.
		 */
		MLD_PRINTF(("%s: process v2 general query on ifp 0x%llx(%s)\n",
		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
			mtp.it = mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
		/*
		 * MLDv2 Group-specific or Group-and-source-specific Query.
		 *
		 * Group-source-specific queries are throttled on
		 * a per-group basis to defeat denial-of-service attempts.
		 * Queries for groups we are not a member of on this
		 * link are simply ignored.
		 */
		in6_multihead_lock_shared();
		IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
		in6_multihead_lock_done();

			if (!ratecheck(&inm->in6m_lastgsrtv,
				MLD_PRINTF(("%s: GS query throttled.\n",
				IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */

		MLD_PRINTF(("%s: process v2 group query on ifp 0x%llx(%s)\n",
		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		/*
		 * If there is a pending General Query response
		 * scheduled sooner than the selected delay, no
		 * further report need be scheduled.
		 * Otherwise, prepare to respond to the
		 * group-specific or group-and-source query.
		 */
		mtp.it = mli->mli_v2_timer;

		if (mtp.it == 0 || mtp.it >= timer) {
			(void) mld_v2_process_group_query(inm, timer, m, off);
			mtp.cst = inm->in6m_timer;

		IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
		/* XXX Clear embedded scope ID as userland won't expect it. */
		in6_clearscope(&mld->mld_addr);

	MLD_PRINTF(("%s: v2 general query response scheduled in "
	    "T+%d seconds on ifp 0x%llx(%s)\n", __func__, mtp.it,
	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	mld_set_timeout(&mtp);
/*
 * Process a received MLDv2 group-specific or group-and-source-specific
 * query.
 * Return <0 if any error occurred.  Currently this is ignored.
 */
static int
mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0,
    const int off)
{
	struct mldv2_query *mld;

	IN6M_LOCK_ASSERT_HELD(inm);

	mld = (struct mldv2_query *)(mtod(m0, uint8_t *) + off);

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_AWAKENING_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_LEAVING_MEMBER:
	case MLD_REPORTING_MEMBER:
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:

	nsrc = ntohs(mld->mld_numsrc);

	/*
	 * Deal with group-specific queries upfront.
	 * If any group query is already pending, purge any recorded
	 * source-list state if it exists, and schedule a query response
	 * for this group-specific query.
	 */
	if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
	    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
		in6m_clear_recorded(inm);
		timer = min(inm->in6m_timer, timer);

		inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
		inm->in6m_timer = MLD_RANDOM_DELAY(timer);

	/*
	 * Deal with the case where a group-and-source-specific query has
	 * been received but a group-specific query is already pending.
	 */
	if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
		timer = min(inm->in6m_timer, timer);
		inm->in6m_timer = MLD_RANDOM_DELAY(timer);

	/*
	 * Finally, deal with the case where a group-and-source-specific
	 * query has been received, where a response to a previous g-s-r
	 * query exists, or none exists.
	 * In this case, we need to parse the source-list which the Querier
	 * has provided us with and check if we have any source list filter
	 * entries at T1 for these sources.  If we do not, there is no need
	 * to schedule a report and the query may be dropped.
	 * If we do, we must record them and schedule a current-state
	 * report for those sources.
	 */
	if (inm->in6m_nsrc > 0) {

		soff = off + sizeof(struct mldv2_query);

		for (i = 0; i < nsrc; i++) {
			sp = mtod(m, uint8_t *) + soff;
			retval = in6m_record_source(inm,
			    (const struct in6_addr *)(void *)sp);

			nrecorded += retval;
			soff += sizeof(struct in6_addr);
			if (soff >= m->m_len) {
				soff = soff - m->m_len;

		if (nrecorded > 0) {
			MLD_PRINTF(("%s: schedule response to SG query\n",
			inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
			inm->in6m_timer = MLD_RANDOM_DELAY(timer);
/*
 * Process a received MLDv1 host membership report.
 * Assumes mld points to mld_hdr in pulled up mbuf chain.
 *
 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
 * mld_addr.  This is OK as we own the mbuf chain.
 */
static int
mld_v1_input_report(struct ifnet *ifp, struct mbuf *m,
    const struct ip6_hdr *ip6, /*const*/ struct mld_hdr *mld)
{
	struct in6_addr src, dst;
	struct in6_ifaddr *ia;
	struct in6_multi *inm;

	if (!mld_v1enable) {
		MLD_PRINTF(("%s: ignore v1 report %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&mld->mld_addr),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	if ((ifp->if_flags & IFF_LOOPBACK) ||
	    (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {

	/*
	 * MLDv1 reports must originate from a host's link-local address,
	 * or the unspecified address (when booting).
	 */
	in6_clearscope(&src);
	if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
		MLD_PRINTF(("%s: ignore v1 report src %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&ip6->ip6_src),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	/*
	 * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
	 * group, and must be directed to the group itself.
	 */
	in6_clearscope(&dst);
	if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
	    !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
		MLD_PRINTF(("%s: ignore v1 report dst %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&ip6->ip6_dst),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	/*
	 * Make sure we don't hear our own membership report, as fast
	 * leave requires knowing that we are the only member of a
	 * group.  Assume we used the link-local address if available,
	 * otherwise look for ::.
	 *
	 * XXX Note that scope ID comparison is needed for the address
	 * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
	 * performed for the on-wire address.
	 */
	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
		IFA_LOCK(&ia->ia_ifa);
		if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia)))) {
			IFA_UNLOCK(&ia->ia_ifa);
			IFA_REMREF(&ia->ia_ifa);

		IFA_UNLOCK(&ia->ia_ifa);
		IFA_REMREF(&ia->ia_ifa);
	} else if (IN6_IS_ADDR_UNSPECIFIED(&src)) {

	MLD_PRINTF(("%s: process v1 report %s on ifp 0x%llx(%s)\n",
	    __func__, ip6_sprintf(&mld->mld_addr),
	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	/*
	 * Embed scope ID of receiving interface in MLD query for lookup
	 * whilst we don't hold other locks (due to KAME locking lameness).
	 */
	if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
		(void)in6_setscope(&mld->mld_addr, ifp, NULL);

	/*
	 * MLDv1 report suppression.
	 * If we are a member of this group, and our membership should be
	 * reported, and our group timer is pending or about to be reset,
	 * stop our group timer by transitioning to the 'lazy' state.
	 */
	in6_multihead_lock_shared();
	IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
	in6_multihead_lock_done();

		struct mld_ifinfo *mli;

		mli = inm->in6m_mli;
		VERIFY(mli != NULL);

		/*
		 * If we are in MLDv2 host mode, do not allow the
		 * other host's MLDv1 report to suppress our reports.
		 */
		if (mli->mli_version == MLD_VERSION_2) {
			IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */

		inm->in6m_timer = 0;

		switch (inm->in6m_state) {
		case MLD_NOT_MEMBER:
		case MLD_SILENT_MEMBER:
		case MLD_SLEEPING_MEMBER:
		case MLD_REPORTING_MEMBER:
		case MLD_IDLE_MEMBER:
		case MLD_AWAKENING_MEMBER:
			MLD_PRINTF(("%s: report suppressed for %s on "
			    "ifp 0x%llx(%s)\n", __func__,
			    ip6_sprintf(&mld->mld_addr),
			    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		case MLD_LAZY_MEMBER:
			inm->in6m_state = MLD_LAZY_MEMBER;
		case MLD_G_QUERY_PENDING_MEMBER:
		case MLD_SG_QUERY_PENDING_MEMBER:
		case MLD_LEAVING_MEMBER:

		IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */

	/* XXX Clear embedded scope ID as userland won't expect it. */
	in6_clearscope(&mld->mld_addr);
/*
 * Assume query messages which fit in a single ICMPv6 message header
 * have been pulled up.
 * Assume that userland will want to see the message, even if it
 * otherwise fails kernel input validation; do not free it.
 * Pullup may however free the mbuf chain m if it fails.
 *
 * Return IPPROTO_DONE if we freed m.  Otherwise, return 0.
 */
int
mld_input(struct mbuf *m, int off, int icmp6len)
{
	struct ip6_hdr *ip6;
	struct mld_hdr *mld;

	MLD_PRINTF(("%s: called w/mbuf (0x%llx,%d)\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(m), off));

	ifp = m->m_pkthdr.rcvif;

	ip6 = mtod(m, struct ip6_hdr *);

	/* Pullup to appropriate size. */
	mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
	if (mld->mld_type == MLD_LISTENER_QUERY &&
	    icmp6len >= sizeof(struct mldv2_query)) {
		mldlen = sizeof(struct mldv2_query);

		mldlen = sizeof(struct mld_hdr);

	// check if mldv2_query/mld_hdr fits in the first mbuf
	IP6_EXTHDR_CHECK(m, off, mldlen, return IPPROTO_DONE);
	IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen);
		icmp6stat.icp6s_badlen++;
		return IPPROTO_DONE;

	/*
	 * Userland needs to see all of this traffic for implementing
	 * the endpoint discovery portion of multicast routing.
	 */
	switch (mld->mld_type) {
	case MLD_LISTENER_QUERY:
		icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
		if (icmp6len == sizeof(struct mld_hdr)) {
			if (mld_v1_input_query(ifp, ip6, mld) != 0) {
		} else if (icmp6len >= sizeof(struct mldv2_query)) {
			if (mld_v2_input_query(ifp, ip6, m, off,
	case MLD_LISTENER_REPORT:
		icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
		if (mld_v1_input_report(ifp, m, ip6, mld) != 0) {
	case MLDV2_LISTENER_REPORT:
		icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
	case MLD_LISTENER_DONE:
		icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
/*
 * Schedule MLD timer based on various parameters; caller must ensure that
 * lock ordering is maintained as this routine acquires MLD global lock.
 */
void
mld_set_timeout(struct mld_tparams *mtp)
{
	MLD_LOCK_ASSERT_NOTHELD();
	VERIFY(mtp != NULL);

	if (mtp->qpt != 0 || mtp->it != 0 || mtp->cst != 0 || mtp->sct != 0) {
		if (mtp->qpt != 0) {
			querier_present_timers_running6 = 1;

			interface_timers_running6 = 1;

		if (mtp->cst != 0) {
			current_state_timers_running6 = 1;

		if (mtp->sct != 0) {
			state_change_timers_running6 = 1;

		mld_sched_timeout();
/*
 * MLD6 timer handler (per 1 second).
 */
static void
mld_timeout(void *arg)
{
	struct ifqueue scq;	/* State-change packets */
	struct ifqueue qrq;	/* Query response packets */
	struct mld_ifinfo *mli;
	struct in6_multi *inm;
	unsigned int genid = mld_mli_list_genid;
	SLIST_HEAD(, in6_multi) in6m_dthead;

	SLIST_INIT(&in6m_dthead);

	/*
	 * Update coarse-grained networking timestamp (in sec.); the idea
	 * is to piggy-back on the timeout callout to update the counter
	 * returnable via net_uptime().
	 */
	net_update_uptime();

	MLD_PRINTF(("%s: qpt %d, it %d, cst %d, sct %d\n", __func__,
	    querier_present_timers_running6, interface_timers_running6,
	    current_state_timers_running6, state_change_timers_running6));

	/*
	 * MLDv1 querier present timer processing.
	 */
	if (querier_present_timers_running6) {
		querier_present_timers_running6 = 0;
		LIST_FOREACH(mli, &mli_head, mli_link) {
			mld_v1_process_querier_timers(mli);
			if (mli->mli_v1_timer > 0) {
				querier_present_timers_running6 = 1;

	/*
	 * MLDv2 General Query response timer processing.
	 */
	if (interface_timers_running6) {
		MLD_PRINTF(("%s: interface timers running\n", __func__));
		interface_timers_running6 = 0;
		mli = LIST_FIRST(&mli_head);

		while (mli != NULL) {
			if (mli->mli_flags & MLIF_PROCESSED) {
				mli = LIST_NEXT(mli, mli_link);

			if (mli->mli_version != MLD_VERSION_2) {
				mli = LIST_NEXT(mli, mli_link);

			/*
			 * XXX The logic below ends up calling
			 * mld_dispatch_packet which can unlock mli
			 * and the global MLD lock.
			 * Therefore grab a reference on MLI and also
			 * check for generation count to see if we should
			 * iterate the list again.
			 */
			MLI_ADDREF_LOCKED(mli);

			if (mli->mli_v2_timer == 0) {
			} else if (--mli->mli_v2_timer == 0) {
				if (mld_v2_dispatch_general_query(mli) > 0) {
					interface_timers_running6 = 1;

				interface_timers_running6 = 1;

			mli->mli_flags |= MLIF_PROCESSED;

			if (genid != mld_mli_list_genid) {
				MLD_PRINTF(("%s: MLD information list changed "
				    "in the middle of iteration! Restart iteration.\n",
				mli = LIST_FIRST(&mli_head);
				genid = mld_mli_list_genid;

				mli = LIST_NEXT(mli, mli_link);

		LIST_FOREACH(mli, &mli_head, mli_link)
			mli->mli_flags &= ~MLIF_PROCESSED;

	if (!current_state_timers_running6 &&
	    !state_change_timers_running6) {

	current_state_timers_running6 = 0;
	state_change_timers_running6 = 0;

	MLD_PRINTF(("%s: state change timers running\n", __func__));

	memset(&qrq, 0, sizeof(struct ifqueue));
	qrq.ifq_maxlen = MLD_MAX_G_GS_PACKETS;

	memset(&scq, 0, sizeof(struct ifqueue));
	scq.ifq_maxlen = MLD_MAX_STATE_CHANGE_PACKETS;

	/*
	 * MLD host report and state-change timer processing.
	 * Note: Processing a v2 group timer may remove a node.
	 */
	mli = LIST_FIRST(&mli_head);

	while (mli != NULL) {
		struct in6_multistep step;

		if (mli->mli_flags & MLIF_PROCESSED) {
			mli = LIST_NEXT(mli, mli_link);

		uri_sec = MLD_RANDOM_DELAY(mli->mli_uri);

		in6_multihead_lock_shared();
		IN6_FIRST_MULTI(step, inm);
		while (inm != NULL) {
			if (inm->in6m_ifp != ifp) {

			switch (mli->mli_version) {
				mld_v1_process_group_timer(inm,
				mld_v2_process_group_timers(mli, &qrq,
				    &scq, inm, uri_sec);

			IN6_NEXT_MULTI(step, inm);

		in6_multihead_lock_done();

		/*
		 * XXX The logic below ends up calling
		 * mld_dispatch_packet which can unlock mli
		 * and the global MLD lock.
		 * Therefore grab a reference on MLI and also
		 * check for generation count to see if we should
		 * iterate the list again.
		 */
		MLI_ADDREF_LOCKED(mli);
		if (mli->mli_version == MLD_VERSION_1) {
			mld_dispatch_queue_locked(mli, &mli->mli_v1q, 0);
		} else if (mli->mli_version == MLD_VERSION_2) {
			mld_dispatch_queue_locked(NULL, &qrq, 0);
			mld_dispatch_queue_locked(NULL, &scq, 0);
			VERIFY(qrq.ifq_len == 0);
			VERIFY(scq.ifq_len == 0);

		/*
		 * In case there are still any pending membership reports
		 * which didn't get drained at version change time.
		 */
		IF_DRAIN(&mli->mli_v1q);
		/*
		 * Release all deferred inm records, and drain any locally
		 * enqueued packets; do it even if the current MLD version
		 * for the link is no longer MLDv2, in order to handle the
		 * version change case.
		 */
		mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead);
		VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
		mli->mli_flags |= MLIF_PROCESSED;

		if (genid != mld_mli_list_genid) {
			MLD_PRINTF(("%s: MLD information list changed "
			    "in the middle of iteration! Restart iteration.\n",
			mli = LIST_FIRST(&mli_head);
			genid = mld_mli_list_genid;

			mli = LIST_NEXT(mli, mli_link);

	LIST_FOREACH(mli, &mli_head, mli_link)
		mli->mli_flags &= ~MLIF_PROCESSED;

	/* re-arm the timer if there's work to do */
	mld_timeout_run = 0;
	mld_sched_timeout();

	/* Now that we've dropped all locks, release detached records */
	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
static void
mld_sched_timeout(void)
{
	MLD_LOCK_ASSERT_HELD();

	if (!mld_timeout_run &&
	    (querier_present_timers_running6 || current_state_timers_running6 ||
	    interface_timers_running6 || state_change_timers_running6)) {
		mld_timeout_run = 1;
		timeout(mld_timeout, NULL, hz);
	}
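	/*
	 * Note: the callout above is intentionally one-shot.  mld_timeout()
	 * clears mld_timeout_run and calls back into this function, so the
	 * timer keeps re-arming itself at roughly one-second (hz ticks)
	 * intervals only while one of the four "running" flags remains set.
	 */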
/*
 * Free the in6_multi reference(s) for this MLD lifecycle.
 *
 * Caller must be holding mli_lock.
 */
static void
mld_flush_relq(struct mld_ifinfo *mli, struct mld_in6m_relhead *in6m_dthead)
{
	struct in6_multi *inm;

	MLI_LOCK_ASSERT_HELD(mli);
	inm = SLIST_FIRST(&mli->mli_relinmhead);

		SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);

		in6_multihead_lock_exclusive();
		VERIFY(inm->in6m_nrelecnt != 0);
		inm->in6m_nrelecnt--;
		lastref = in6_multi_detach(inm);
		VERIFY(!lastref || (!(inm->in6m_debug & IFD_ATTACHED) &&
		    inm->in6m_reqcnt == 0));

		in6_multihead_lock_done();
		/* from mli_relinmhead */
		/* from in6_multihead_list */
		/*
		 * Defer releasing our final reference, as we
		 * are holding the MLD lock at this point, and
		 * we could end up with locking issues later on
		 * (while issuing SIOCDELMULTI) when this is the
		 * final reference count.  Let the caller do it
		 */
		MLD_ADD_DETACHED_IN6M(in6m_dthead, inm);
/*
 * Update host report group timer.
 * Will update the global pending timer flags.
 */
static void
mld_v1_process_group_timer(struct in6_multi *inm, const int mld_version)
{
#pragma unused(mld_version)
	int report_timer_expired;

	MLD_LOCK_ASSERT_HELD();
	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_HELD(inm->in6m_mli);

	if (inm->in6m_timer == 0) {
		report_timer_expired = 0;
	} else if (--inm->in6m_timer == 0) {
		report_timer_expired = 1;

		current_state_timers_running6 = 1;
		/* caller will schedule timer */

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_AWAKENING_MEMBER:
	case MLD_REPORTING_MEMBER:
		if (report_timer_expired) {
			inm->in6m_state = MLD_IDLE_MEMBER;
			(void) mld_v1_transmit_report(inm,
			    MLD_LISTENER_REPORT);
			IN6M_LOCK_ASSERT_HELD(inm);
			MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:
	case MLD_LEAVING_MEMBER:
/*
 * Update a group's timers for MLDv2.
 * Will update the global pending timer flags.
 * Note: Unlocked read from mli.
 */
static void
mld_v2_process_group_timers(struct mld_ifinfo *mli,
    struct ifqueue *qrq, struct ifqueue *scq,
    struct in6_multi *inm, const int uri_sec)
{
	int query_response_timer_expired;
	int state_change_retransmit_timer_expired;

	MLD_LOCK_ASSERT_HELD();
	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_HELD(mli);
	VERIFY(mli == inm->in6m_mli);

	query_response_timer_expired = 0;
	state_change_retransmit_timer_expired = 0;

	/*
	 * During a transition from compatibility mode back to MLDv2,
	 * a group record in REPORTING state may still have its group
	 * timer active.  This is a no-op in this function; it is easier
	 * to deal with it here than to complicate the timeout path.
	 */
	if (inm->in6m_timer == 0) {
		query_response_timer_expired = 0;
	} else if (--inm->in6m_timer == 0) {
		query_response_timer_expired = 1;

		current_state_timers_running6 = 1;
		/* caller will schedule timer */

	if (inm->in6m_sctimer == 0) {
		state_change_retransmit_timer_expired = 0;
	} else if (--inm->in6m_sctimer == 0) {
		state_change_retransmit_timer_expired = 1;

		state_change_timers_running6 = 1;
		/* caller will schedule timer */

	/* We are in timer callback, so be quick about it. */
	if (!state_change_retransmit_timer_expired &&
	    !query_response_timer_expired) {

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_AWAKENING_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:
		/*
		 * Respond to a previously pending Group-Specific
		 * or Group-and-Source-Specific query by enqueueing
		 * the appropriate Current-State report for
		 * immediate transmission.
		 */
		if (query_response_timer_expired) {
			retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
			    (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER),
			MLD_PRINTF(("%s: enqueue record = %d\n",
			inm->in6m_state = MLD_REPORTING_MEMBER;
			in6m_clear_recorded(inm);
	case MLD_REPORTING_MEMBER:
	case MLD_LEAVING_MEMBER:
		if (state_change_retransmit_timer_expired) {
			/*
			 * State-change retransmission timer fired.
			 * If there are any further pending retransmissions,
			 * set the global pending state-change flag, and
			 */
			if (--inm->in6m_scrv > 0) {
				inm->in6m_sctimer = uri_sec;
				state_change_timers_running6 = 1;
				/* caller will schedule timer */
			/*
			 * Retransmit the previously computed state-change
			 * report.  If there are no further pending
			 * retransmissions, the mbuf queue will be consumed.
			 * Update T0 state to T1 as we have now sent
			 */
			(void) mld_v2_merge_state_changes(inm, scq);

			MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
			    ip6_sprintf(&inm->in6m_addr),
			    if_name(inm->in6m_ifp)));

			/*
			 * If we are leaving the group for good, make sure
			 * we release MLD's reference to it.
			 * This release must be deferred using a SLIST,
			 * as we are called from a loop which traverses
			 * the in_ifmultiaddr TAILQ.
			 */
			if (inm->in6m_state == MLD_LEAVING_MEMBER &&
			    inm->in6m_scrv == 0) {
				inm->in6m_state = MLD_NOT_MEMBER;
				/*
				 * A reference has already been held in
				 * mld_final_leave() for this inm, so
				 * no need to hold another one.  We also
				 * bumped up its request count then, so
				 * that it stays in in6_multihead.  Both
				 * of them will be released when it is
				 * dequeued later on.
				 */
				VERIFY(inm->in6m_nrelecnt != 0);
				SLIST_INSERT_HEAD(&mli->mli_relinmhead,
/*
 * Switch to a different version on the given interface,
 * as per Section 9.12.
 */
static uint32_t
mld_set_version(struct mld_ifinfo *mli, const int mld_version)
{
	int old_version_timer;

	MLI_LOCK_ASSERT_HELD(mli);

	MLD_PRINTF(("%s: switching to v%d on ifp 0x%llx(%s)\n", __func__,
	    mld_version, (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp),
	    if_name(mli->mli_ifp)));

	if (mld_version == MLD_VERSION_1) {
		/*
		 * Compute the "Older Version Querier Present" timer as per
		 * Section 9.12, in seconds.
		 */
		old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
		mli->mli_v1_timer = old_version_timer;
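		/*
		 * Worked example (illustrative values only): with the
		 * RFC 3810 defaults of Robustness Variable = 2, Query
		 * Interval = 125s and Query Response Interval = 10s, the
		 * Older Version Querier Present timeout computed above is
		 * (2 * 125) + 10 = 260 seconds.
		 */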
	}

	if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) {
		mli->mli_version = MLD_VERSION_1;
		mld_v2_cancel_link_timers(mli);

	MLI_LOCK_ASSERT_HELD(mli);

	return mli->mli_v1_timer;
/*
 * Cancel pending MLDv2 timers for the given link and all groups
 * joined on it; state-change, general-query, and group-query timers.
 *
 * Only ever called on a transition from v2 to Compatibility mode.  Kill
 * the timers stone dead (this may be expensive for large N groups), they
 * will be restarted if Compatibility Mode deems that they must be due to
 * a query.
 */
static void
mld_v2_cancel_link_timers(struct mld_ifinfo *mli)
{
	struct in6_multi *inm;
	struct in6_multistep step;

	MLI_LOCK_ASSERT_HELD(mli);

	MLD_PRINTF(("%s: cancel v2 timers on ifp 0x%llx(%s)\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp), if_name(mli->mli_ifp)));

	/*
	 * Stop the v2 General Query Response on this link stone dead.
	 * If timer is woken up due to interface_timers_running6,
	 * the flag will be cleared if there are no pending link timers.
	 */
	mli->mli_v2_timer = 0;

	/*
	 * Now clear the current-state and state-change report timers
	 * for all memberships scoped to this link.
	 */
	in6_multihead_lock_shared();
	IN6_FIRST_MULTI(step, inm);
	while (inm != NULL) {
		if (inm->in6m_ifp != ifp) {

		switch (inm->in6m_state) {
		case MLD_NOT_MEMBER:
		case MLD_SILENT_MEMBER:
		case MLD_IDLE_MEMBER:
		case MLD_LAZY_MEMBER:
		case MLD_SLEEPING_MEMBER:
		case MLD_AWAKENING_MEMBER:
			/*
			 * These states are either not relevant in v2 mode,
			 * or are unreported.  Do nothing.
			 */
		case MLD_LEAVING_MEMBER:
			/*
			 * If we are leaving the group and switching
			 * version, we need to release the final
			 * reference held for issuing the INCLUDE {}.
			 * During mld_final_leave(), we bumped up both the
			 * request and reference counts.  Since we cannot
			 * call in6_multi_detach() here, defer this task to
			 * the timer routine.
			 */
			VERIFY(inm->in6m_nrelecnt != 0);
			SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
		case MLD_G_QUERY_PENDING_MEMBER:
		case MLD_SG_QUERY_PENDING_MEMBER:
			in6m_clear_recorded(inm);
		case MLD_REPORTING_MEMBER:
			inm->in6m_state = MLD_REPORTING_MEMBER;

		/*
		 * Always clear state-change and group report timers.
		 * Free any pending MLDv2 state-change records.
		 */
		inm->in6m_sctimer = 0;
		inm->in6m_timer = 0;
		IF_DRAIN(&inm->in6m_scq);

		IN6_NEXT_MULTI(step, inm);

	in6_multihead_lock_done();
/*
 * Update the Older Version Querier Present timers for a link.
 * See Section 9.12 of RFC 3810.
 */
static void
mld_v1_process_querier_timers(struct mld_ifinfo *mli)
{
	MLI_LOCK_ASSERT_HELD(mli);

	if (mld_v2enable && mli->mli_version != MLD_VERSION_2 &&
	    --mli->mli_v1_timer == 0) {
		/*
		 * MLDv1 Querier Present timer expired; revert to MLDv2.
		 */
		MLD_PRINTF(("%s: transition from v%d -> v%d on 0x%llx(%s)\n",
		    __func__, mli->mli_version, MLD_VERSION_2,
		    (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp),
		    if_name(mli->mli_ifp)));
		mli->mli_version = MLD_VERSION_2;
/*
 * Transmit an MLDv1 report immediately.
 */
2239 mld_v1_transmit_report(struct in6_multi
*in6m
, const int type
)
2242 struct in6_ifaddr
*ia
;
2243 struct ip6_hdr
*ip6
;
2244 struct mbuf
*mh
, *md
;
2245 struct mld_hdr
*mld
;
2248 IN6M_LOCK_ASSERT_HELD(in6m
);
2249 MLI_LOCK_ASSERT_HELD(in6m
->in6m_mli
);
2251 ifp
= in6m
->in6m_ifp
;
2252 /* ia may be NULL if link-local address is tentative. */
2253 ia
= in6ifa_ifpforlinklocal(ifp
, IN6_IFF_NOTREADY
| IN6_IFF_ANYCAST
);
2255 MGETHDR(mh
, M_DONTWAIT
, MT_HEADER
);
2258 IFA_REMREF(&ia
->ia_ifa
);
2262 MGET(md
, M_DONTWAIT
, MT_DATA
);
2266 IFA_REMREF(&ia
->ia_ifa
);
2273 * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so
2274 * that ether_output() does not need to allocate another mbuf
2275 * for the header in the most common case.
2277 MH_ALIGN(mh
, sizeof(struct ip6_hdr
));
2278 mh
->m_pkthdr
.len
= sizeof(struct ip6_hdr
) + sizeof(struct mld_hdr
);
2279 mh
->m_len
= sizeof(struct ip6_hdr
);
2281 ip6
= mtod(mh
, struct ip6_hdr
*);
2283 ip6
->ip6_vfc
&= ~IPV6_VERSION_MASK
;
2284 ip6
->ip6_vfc
|= IPV6_VERSION
;
2285 ip6
->ip6_nxt
= IPPROTO_ICMPV6
;
2287 IFA_LOCK(&ia
->ia_ifa
);
2289 ip6
->ip6_src
= ia
? ia
->ia_addr
.sin6_addr
: in6addr_any
;
2291 IFA_UNLOCK(&ia
->ia_ifa
);
2292 IFA_REMREF(&ia
->ia_ifa
);
2295 ip6
->ip6_dst
= in6m
->in6m_addr
;
2297 md
->m_len
= sizeof(struct mld_hdr
);
2298 mld
= mtod(md
, struct mld_hdr
*);
2299 mld
->mld_type
= type
;
2302 mld
->mld_maxdelay
= 0;
2303 mld
->mld_reserved
= 0;
2304 mld
->mld_addr
= in6m
->in6m_addr
;
2305 in6_clearscope(&mld
->mld_addr
);
2306 mld
->mld_cksum
= in6_cksum(mh
, IPPROTO_ICMPV6
,
2307 sizeof(struct ip6_hdr
), sizeof(struct mld_hdr
));
2309 mld_save_context(mh
, ifp
);
2310 mh
->m_flags
|= M_MLDV1
;

	/*
	 * Due to the fact that at this point we are possibly holding
	 * in6_multihead_lock in shared or exclusive mode, we can't call
	 * mld_dispatch_packet() here since that will eventually call
	 * ip6_output(), which will try to lock in6_multihead_lock and cause
	 * a deadlock.
	 * Instead we defer the work to the mld_timeout() thread, thus
	 * avoiding unlocking in6_multihead_lock here.
	 */
	if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) {
		MLD_PRINTF(("%s: v1 outbound queue full\n", __func__));
		error = ENOMEM;
		m_freem(mh);
	} else {
		IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh);
	}

	return error;
}

/*
 * Process a state change from the upper layer for the given IPv6 group.
 *
 * Each socket holds a reference on the in6_multi in its own ip_moptions.
 * The socket layer will have made the necessary updates to the group
 * state, it is now up to MLD to issue a state change report if there
 * has been any change between T0 (when the last state-change was issued)
 * and T1 (now).
 *
 * We use the MLDv2 state machine at group level. The MLD module
 * however makes the decision as to which MLD protocol version to speak.
 * A state change *from* INCLUDE {} always means an initial join.
 * A state change *to* INCLUDE {} always means a final leave.
 *
 * If delay is non-zero, and the state change is an initial multicast
 * join, the state change report will be delayed by 'delay' ticks
 * in units of seconds if MLDv1 is active on the link; otherwise
 * the initial MLDv2 state change report will be delayed by whichever
 * is sooner, a pending state-change timer or delay itself.
 */
int
mld_change_state(struct in6_multi *inm, struct mld_tparams *mtp,
    const int delay)
{
	struct mld_ifinfo *mli;
	struct ifnet *ifp;
	int error = 0;

	VERIFY(mtp != NULL);
	bzero(mtp, sizeof(*mtp));

	IN6M_LOCK_ASSERT_HELD(inm);
	VERIFY(inm->in6m_mli != NULL);
	MLI_LOCK_ASSERT_NOTHELD(inm->in6m_mli);

	/*
	 * Try to detect if the upper layer just asked us to change state
	 * for an interface which has now gone away.
	 */
	VERIFY(inm->in6m_ifma != NULL);
	ifp = inm->in6m_ifma->ifma_ifp;
	/*
	 * Sanity check that netinet6's notion of ifp is the same as net's.
	 */
	VERIFY(inm->in6m_ifp == ifp);

	mli = MLD_IFINFO(ifp);
	VERIFY(mli != NULL);

	/*
	 * If we detect a state transition to or from MCAST_UNDEFINED
	 * for this group, then we are starting or finishing an MLD
	 * life cycle for this group.
	 */
	if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) {
		MLD_PRINTF(("%s: inm transition %d -> %d\n", __func__,
		    inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode));
		if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
			MLD_PRINTF(("%s: initial join\n", __func__));
			error = mld_initial_join(inm, mli, mtp, delay);
			goto out;
		} else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
			MLD_PRINTF(("%s: final leave\n", __func__));
			mld_final_leave(inm, mli, mtp);
			goto out;
		}
	} else {
		MLD_PRINTF(("%s: filter set change\n", __func__));
	}

	error = mld_handle_state_change(inm, mli, mtp);
out:
	return error;
}
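
/*
 * Worked example (editorial, hedged; not from the original source): an
 * any-source join via the socket layer moves the group's filter mode from
 * MCAST_UNDEFINED at T0 to MCAST_EXCLUDE at T1, so the code above takes
 * the mld_initial_join() path; dropping the last membership reverses the
 * transition and takes mld_final_leave(); changing only the source filter
 * list leaves the mode untouched and falls through to
 * mld_handle_state_change().
 */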

/*
 * Perform the initial join for an MLD group.
 *
 * When joining a group:
 *  If the group should have its MLD traffic suppressed, do nothing.
 *  MLDv1 starts sending MLDv1 host membership reports.
 *  MLDv2 will schedule an MLDv2 state-change report containing the
 *  initial state of the membership.
 *
 * If the delay argument is non-zero, then we must delay sending the
 * initial state change for delay ticks (in units of seconds).
 */
static int
mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli,
    struct mld_tparams *mtp, const int delay)
{
	struct ifnet *ifp;
	struct ifqueue *ifq;
	int error, retval, syncstates;
	int odelay;

	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_NOTHELD(mli);
	VERIFY(mtp != NULL);

	MLD_PRINTF(("%s: initial join %s on ifp 0x%llx(%s)\n",
	    __func__, ip6_sprintf(&inm->in6m_addr),
	    (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
	    if_name(inm->in6m_ifp)));

	error = 0;
	syncstates = 1;

	ifp = inm->in6m_ifp;

	MLI_LOCK(mli);
	VERIFY(mli->mli_ifp == ifp);

	/*
	 * Avoid MLD if group is:
	 * 1. Joined on loopback, OR
	 * 2. On a link that is marked MLIF_SILENT
	 * 3. rdar://problem/19227650 Is link local scoped and
	 *    on cellular interface
	 * 4. Is a type that should not be reported (node local
	 *    or all node link local multicast).
	 * All other groups enter the appropriate state machine
	 * for the version in use on this link.
	 */
	if ((ifp->if_flags & IFF_LOOPBACK) ||
	    (mli->mli_flags & MLIF_SILENT) ||
	    (IFNET_IS_CELLULAR(ifp) &&
	    IN6_IS_ADDR_MC_LINKLOCAL(&inm->in6m_addr)) ||
	    !mld_is_addr_reported(&inm->in6m_addr)) {
		MLD_PRINTF(("%s: not kicking state machine for silent group\n",
		    __func__));
		inm->in6m_state = MLD_SILENT_MEMBER;
		inm->in6m_timer = 0;
	} else {
		/*
		 * Deal with overlapping in6_multi lifecycle.
		 * If this group was LEAVING, then make sure
		 * we drop the reference we picked up to keep the
		 * group around for the final INCLUDE {} enqueue.
		 * Since we cannot call in6_multi_detach() here,
		 * defer this task to the timer routine.
		 */
		if (mli->mli_version == MLD_VERSION_2 &&
		    inm->in6m_state == MLD_LEAVING_MEMBER) {
			VERIFY(inm->in6m_nrelecnt != 0);
			SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
			    in6m_nrele);
		}

		inm->in6m_state = MLD_REPORTING_MEMBER;

		switch (mli->mli_version) {
		case MLD_VERSION_1:
			/*
			 * If a delay was provided, only use it if
			 * it is greater than the delay normally
			 * used for an MLDv1 state change report,
			 * and delay sending the initial MLDv1 report
			 * by not transitioning to the IDLE state.
			 */
			odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI);
			if (delay) {
				inm->in6m_timer = max(delay, odelay);
				mtp->cst = 1;
			} else {
				inm->in6m_state = MLD_IDLE_MEMBER;
				error = mld_v1_transmit_report(inm,
				    MLD_LISTENER_REPORT);

				IN6M_LOCK_ASSERT_HELD(inm);
				MLI_LOCK_ASSERT_HELD(mli);

				if (error == 0) {
					inm->in6m_timer = odelay;
					mtp->cst = 1;
				}
			}
			break;
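
			/*
			 * Editorial note (hedged): MLD_RANDOM_DELAY() picks a
			 * randomized delay bounded by its argument, so the
			 * unsolicited MLDv1 report above is jittered across
			 * roughly the v1 maximum report interval rather than
			 * being sent by every host at the same instant.
			 */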

		case MLD_VERSION_2:
			/*
			 * Defer update of T0 to T1, until the first copy
			 * of the state change has been transmitted.
			 */
			syncstates = 0;

			/*
			 * Immediately enqueue a State-Change Report for
			 * this interface, freeing any previous reports.
			 * Don't kick the timers if there is nothing to do,
			 * or if an error occurred.
			 */
			ifq = &inm->in6m_scq;
			IF_DRAIN(ifq);
			retval = mld_v2_enqueue_group_record(ifq, inm, 1,
			    0, 0, (mli->mli_flags & MLIF_USEALLOW));
			mtp->cst = (ifq->ifq_len > 0);
			MLD_PRINTF(("%s: enqueue record = %d\n",
			    __func__, retval));
			if (retval <= 0) {
				error = retval * -1;
				break;
			}

			/*
			 * Schedule transmission of pending state-change
			 * report up to RV times for this link. The timer
			 * will fire at the next mld_timeout (1 second),
			 * giving us an opportunity to merge the reports.
			 *
			 * If a delay was provided to this function, only
			 * use this delay if sooner than the existing one.
			 */
			VERIFY(mli->mli_rv > 1);
			inm->in6m_scrv = mli->mli_rv;
			if (delay) {
				if (inm->in6m_sctimer > 1) {
					inm->in6m_sctimer =
					    min(inm->in6m_sctimer, delay);
				} else {
					inm->in6m_sctimer = delay;
				}
			} else {
				inm->in6m_sctimer = 1;
			}
			mtp->sct = 1;
			error = 0;
			break;
		}
	}
	MLI_UNLOCK(mli);

	/*
	 * Only update the T0 state if state change is atomic,
	 * i.e. we don't need to wait for a timer to fire before we
	 * can consider the state change to have been communicated.
	 */
	if (syncstates) {
		in6m_commit(inm);
		MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
		    ip6_sprintf(&inm->in6m_addr),
		    if_name(inm->in6m_ifp)));
	}

	return error;
}

/*
 * Issue an intermediate state change during the life-cycle.
 */
static int
mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli,
    struct mld_tparams *mtp)
{
	struct ifnet *ifp;
	int retval = 0;

	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_NOTHELD(mli);
	VERIFY(mtp != NULL);

	MLD_PRINTF(("%s: state change for %s on ifp 0x%llx(%s)\n",
	    __func__, ip6_sprintf(&inm->in6m_addr),
	    (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
	    if_name(inm->in6m_ifp)));

	ifp = inm->in6m_ifp;

	MLI_LOCK(mli);
	VERIFY(mli->mli_ifp == ifp);

	if ((ifp->if_flags & IFF_LOOPBACK) ||
	    (mli->mli_flags & MLIF_SILENT) ||
	    !mld_is_addr_reported(&inm->in6m_addr) ||
	    (mli->mli_version != MLD_VERSION_2)) {
		MLI_UNLOCK(mli);
		if (!mld_is_addr_reported(&inm->in6m_addr)) {
			MLD_PRINTF(("%s: not kicking state machine for silent "
			    "group\n", __func__));
		}
		MLD_PRINTF(("%s: nothing to do\n", __func__));
		in6m_commit(inm);
		MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
		    ip6_sprintf(&inm->in6m_addr),
		    if_name(inm->in6m_ifp)));
		goto done;
	}

	IF_DRAIN(&inm->in6m_scq);

	retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
	    (mli->mli_flags & MLIF_USEALLOW));
	mtp->cst = (inm->in6m_scq.ifq_len > 0);
	MLD_PRINTF(("%s: enqueue record = %d\n", __func__, retval));
	if (retval <= 0) {
		MLI_UNLOCK(mli);
		retval *= -1;
		goto done;
	} else {
		retval = 0;
	}

	/*
	 * If record(s) were enqueued, start the state-change
	 * report timer for this group.
	 */
	inm->in6m_scrv = mli->mli_rv;
	inm->in6m_sctimer = 1;
	mtp->sct = 1;
	MLI_UNLOCK(mli);

done:
	return retval;
}

/*
 * Perform the final leave for a multicast address.
 *
 * When leaving a group:
 *  MLDv1 sends a DONE message, if and only if we are the reporter.
 *  MLDv2 enqueues a state-change report containing a transition
 *  to INCLUDE {} for immediate transmission.
 */
static void
mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli,
    struct mld_tparams *mtp)
{
	int syncstates = 1;

	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_NOTHELD(mli);
	VERIFY(mtp != NULL);

	MLD_PRINTF(("%s: final leave %s on ifp 0x%llx(%s)\n",
	    __func__, ip6_sprintf(&inm->in6m_addr),
	    (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
	    if_name(inm->in6m_ifp)));

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_LEAVING_MEMBER:
		/* Already leaving or left; do nothing. */
		MLD_PRINTF(("%s: not kicking state machine for silent group\n",
		    __func__));
		break;
	case MLD_REPORTING_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:
		MLI_LOCK(mli);
		if (mli->mli_version == MLD_VERSION_1) {
			if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
			    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
				panic("%s: MLDv2 state reached, not MLDv2 "
				    "mode\n", __func__);
				/* NOTREACHED */
			}
			/* Schedule timer if enqueue is successful. */
			mtp->cst = (mld_v1_transmit_report(inm,
			    MLD_LISTENER_DONE) == 0);

			IN6M_LOCK_ASSERT_HELD(inm);
			MLI_LOCK_ASSERT_HELD(mli);

			inm->in6m_state = MLD_NOT_MEMBER;
		} else if (mli->mli_version == MLD_VERSION_2) {
			/*
			 * Stop group timer and all pending reports.
			 * Immediately enqueue a state-change report
			 * TO_IN {} to be sent on the next timeout,
			 * giving us an opportunity to merge reports.
			 */
			IF_DRAIN(&inm->in6m_scq);
			inm->in6m_timer = 0;
			inm->in6m_scrv = mli->mli_rv;
			MLD_PRINTF(("%s: Leaving %s/%s with %d "
			    "pending retransmissions.\n", __func__,
			    ip6_sprintf(&inm->in6m_addr),
			    if_name(inm->in6m_ifp),
			    inm->in6m_scrv));
			if (inm->in6m_scrv == 0) {
				inm->in6m_state = MLD_NOT_MEMBER;
				inm->in6m_sctimer = 0;
			} else {
				int retval;
				/*
				 * Stick around in the in6_multihead list;
				 * the final detach will be issued by
				 * mld_v2_process_group_timers() when
				 * the retransmit timer expires.
				 */
				IN6M_ADDREF_LOCKED(inm);
				VERIFY(inm->in6m_debug & IFD_ATTACHED);
				inm->in6m_reqcnt++;
				VERIFY(inm->in6m_reqcnt >= 1);
				inm->in6m_nrelecnt++;
				VERIFY(inm->in6m_nrelecnt != 0);

				retval = mld_v2_enqueue_group_record(
				    &inm->in6m_scq, inm, 1, 0, 0,
				    (mli->mli_flags & MLIF_USEALLOW));
				mtp->cst = (inm->in6m_scq.ifq_len > 0);
				KASSERT(retval != 0,
				    ("%s: enqueue record = %d\n", __func__,
				    retval));

				inm->in6m_state = MLD_LEAVING_MEMBER;
				inm->in6m_sctimer = 1;
				mtp->sct = 1;
				syncstates = 0;
			}
		}
		MLI_UNLOCK(mli);
		break;
	case MLD_LAZY_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_AWAKENING_MEMBER:
		/* Our reports are suppressed; do nothing. */
		break;
	}

	if (syncstates) {
		in6m_commit(inm);
		MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
		    ip6_sprintf(&inm->in6m_addr),
		    if_name(inm->in6m_ifp)));
		inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
		MLD_PRINTF(("%s: T1 now MCAST_UNDEFINED for 0x%llx/%s\n",
		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(&inm->in6m_addr),
		    if_name(inm->in6m_ifp)));
	}
}

/*
 * Enqueue an MLDv2 group record to the given output queue.
 *
 * If is_state_change is zero, a current-state record is appended.
 * If is_state_change is non-zero, a state-change report is appended.
 *
 * If is_group_query is non-zero, an mbuf packet chain is allocated.
 * If is_group_query is zero, and if there is a packet with free space
 * at the tail of the queue, it will be appended to, provided there
 * is enough free space.
 * Otherwise a new mbuf packet chain is allocated.
 *
 * If is_source_query is non-zero, each source is checked to see if
 * it was recorded for a Group-Source query, and will be omitted if
 * it is not both in-mode and recorded.
 *
 * If use_block_allow is non-zero, state change reports for initial join
 * and final leave, on an inclusive mode group with a source list, will be
 * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively.
 *
 * The function will attempt to allocate leading space in the packet
 * for the IPv6+ICMP headers to be prepended without fragmenting the chain.
 *
 * If successful the size of all data appended to the queue is returned,
 * otherwise an error code less than zero is returned, or zero if
 * no record(s) were appended.
 */
static int
mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm,
    const int is_state_change, const int is_group_query,
    const int is_source_query, const int use_block_allow)
{
	struct mldv2_record mr;
	struct mldv2_record *pmr;
	struct ifnet *ifp;
	struct ip6_msource *ims, *nims;
	struct mbuf *m0, *m, *md;
	int error, is_filter_list_change;
	int minrec0len, m0srcs, msrcs, nbytes, off;
	int record_has_sources;
	int now;
	int type;
	uint8_t mode;

	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_HELD(inm->in6m_mli);

	error = 0;
	ifp = inm->in6m_ifp;
	is_filter_list_change = 0;

	m = NULL;
	m0 = NULL;
	m0srcs = 0;
	msrcs = 0;
	nbytes = 0;
	nims = NULL;
	record_has_sources = 1;
	pmr = NULL;
	type = MLD_DO_NOTHING;
	mode = inm->in6m_st[1].iss_fmode;

	/*
	 * If we did not transition out of ASM mode during t0->t1,
	 * and there are no source nodes to process, we can skip
	 * the generation of source records.
	 */
	if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
	    inm->in6m_nsrc == 0) {
		record_has_sources = 0;
	}

	if (is_state_change) {
		/*
		 * Queue a state change record.
		 * If the mode did not change, and there are non-ASM
		 * listeners or source filters present,
		 * we potentially need to issue two records for the group.
		 * If there are ASM listeners, and there was no filter
		 * mode transition of any kind, do nothing.
		 *
		 * If we are transitioning to MCAST_UNDEFINED, we need
		 * not send any sources. A transition to/from this state is
		 * considered inclusive with some special treatment.
		 *
		 * If we are rewriting initial joins/leaves to use
		 * ALLOW/BLOCK, and the group's membership is inclusive,
		 * we need to send sources in all cases.
		 */
		if (mode != inm->in6m_st[0].iss_fmode) {
			if (mode == MCAST_EXCLUDE) {
				MLD_PRINTF(("%s: change to EXCLUDE\n",
				    __func__));
				type = MLD_CHANGE_TO_EXCLUDE_MODE;
			} else {
				MLD_PRINTF(("%s: change to INCLUDE\n",
				    __func__));
				if (use_block_allow) {
					/*
					 * Here we're interested in state
					 * edges either direction between
					 * MCAST_UNDEFINED and MCAST_INCLUDE.
					 * Perhaps we should just check
					 * the group state, rather than
					 * the filter mode.
					 */
					if (mode == MCAST_UNDEFINED) {
						type = MLD_BLOCK_OLD_SOURCES;
					} else {
						type = MLD_ALLOW_NEW_SOURCES;
					}
				} else {
					type = MLD_CHANGE_TO_INCLUDE_MODE;
					if (mode == MCAST_UNDEFINED) {
						record_has_sources = 0;
					}
				}
			}
		} else {
			if (record_has_sources) {
				is_filter_list_change = 1;
			} else {
				type = MLD_DO_NOTHING;
			}
		}
	} else {
		/*
		 * Queue a current state record.
		 */
		if (mode == MCAST_EXCLUDE) {
			type = MLD_MODE_IS_EXCLUDE;
		} else if (mode == MCAST_INCLUDE) {
			type = MLD_MODE_IS_INCLUDE;
			VERIFY(inm->in6m_st[1].iss_asm == 0);
		}
	}

	/*
	 * Generate the filter list changes using a separate function.
	 */
	if (is_filter_list_change) {
		return mld_v2_enqueue_filter_change(ifq, inm);
	}

	if (type == MLD_DO_NOTHING) {
		MLD_PRINTF(("%s: nothing to do for %s/%s\n",
		    __func__, ip6_sprintf(&inm->in6m_addr),
		    if_name(inm->in6m_ifp)));
		return 0;
	}

	/*
	 * If any sources are present, we must be able to fit at least
	 * one in the trailing space of the tail packet's mbuf,
	 * ideally more.
	 */
	minrec0len = sizeof(struct mldv2_record);
	if (record_has_sources) {
		minrec0len += sizeof(struct in6_addr);
	}
	MLD_PRINTF(("%s: queueing %s for %s/%s\n", __func__,
	    mld_rec_type_to_str(type),
	    ip6_sprintf(&inm->in6m_addr),
	    if_name(inm->in6m_ifp)));

	/*
	 * Check if we have a packet in the tail of the queue for this
	 * group into which the first group record for this group will fit.
	 * Otherwise allocate a new packet.
	 * Always allocate leading space for IP6+RA+ICMPV6+REPORT.
	 * Note: Group records for G/GSR query responses MUST be sent
	 * in their own packet.
	 */
	m0 = ifq->ifq_tail;
	if (!is_group_query &&
	    m0 != NULL &&
	    (m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
	    (m0->m_pkthdr.len + minrec0len) <
	    (ifp->if_mtu - MLD_MTUSPACE)) {
		m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
		    sizeof(struct mldv2_record)) /
		    sizeof(struct in6_addr);
		m = m0;
		MLD_PRINTF(("%s: use existing packet\n", __func__));
	} else {
		if (IF_QFULL(ifq)) {
			MLD_PRINTF(("%s: outbound queue full\n", __func__));
			return -ENOMEM;
		}
		m = NULL;
		m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
		    sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
		if (!is_state_change && !is_group_query) {
			m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		}
		if (m == NULL) {
			m = m_gethdr(M_DONTWAIT, MT_DATA);
		}
		if (m == NULL) {
			return -ENOMEM;
		}
		mld_save_context(m, ifp);

		MLD_PRINTF(("%s: allocated first packet\n", __func__));
	}
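
	/*
	 * Illustrative arithmetic (editorial, hedged): m0srcs above is simply
	 * (remaining payload bytes - sizeof(struct mldv2_record)) divided by
	 * sizeof(struct in6_addr), i.e. how many 16-byte source addresses
	 * still fit after the group record header, where the remaining
	 * payload is the link MTU minus either the bytes already queued in
	 * the tail packet or the MLD_MTUSPACE reserved for a fresh one.
	 */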

	/*
	 * Append group record.
	 * If we have sources, we don't know how many yet.
	 */
	mr.mr_type = type;
	mr.mr_datalen = 0;
	mr.mr_numsrc = 0;
	mr.mr_addr = inm->in6m_addr;
	in6_clearscope(&mr.mr_addr);
	if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
		if (m != m0) {
			m_freem(m);
		}
		MLD_PRINTF(("%s: m_append() failed.\n", __func__));
		return -ENOMEM;
	}
	nbytes += sizeof(struct mldv2_record);

	/*
	 * Append as many sources as will fit in the first packet.
	 * If we are appending to a new packet, the chain allocation
	 * may potentially use clusters; use m_getptr() in this case.
	 * If we are appending to an existing packet, we need to obtain
	 * a pointer to the group record after m_append(), in case a new
	 * mbuf was allocated.
	 *
	 * Only append sources which are in-mode at t1. If we are
	 * transitioning to MCAST_UNDEFINED state on the group, and
	 * use_block_allow is zero, do not include source entries.
	 * Otherwise, we need to include this source in the report.
	 *
	 * Only report recorded sources in our filter set when responding
	 * to a group-source query.
	 */
	if (record_has_sources) {
		if (m == m0) {
			md = m_last(m);
			pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
			    md->m_len - nbytes);
		} else {
			md = m_getptr(m, 0, &off);
			pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
			    off);
		}
		msrcs = 0;
		RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs,
		    nims) {
			MLD_PRINTF(("%s: visit node %s\n", __func__,
			    ip6_sprintf(&ims->im6s_addr)));
			now = im6s_get_mode(inm, ims, 1);
			MLD_PRINTF(("%s: node is %d\n", __func__, now));
			if ((now != mode) ||
			    (now == mode &&
			    (!use_block_allow && mode == MCAST_UNDEFINED))) {
				MLD_PRINTF(("%s: skip node\n", __func__));
				continue;
			}
			if (is_source_query && ims->im6s_stp == 0) {
				MLD_PRINTF(("%s: skip unrecorded node\n",
				    __func__));
				continue;
			}
			MLD_PRINTF(("%s: append node\n", __func__));
			if (!m_append(m, sizeof(struct in6_addr),
			    (void *)&ims->im6s_addr)) {
				if (m != m0) {
					m_freem(m);
				}
				MLD_PRINTF(("%s: m_append() failed.\n",
				    __func__));
				return -ENOMEM;
			}
			nbytes += sizeof(struct in6_addr);
			++msrcs;
			if (msrcs == m0srcs) {
				break;
			}
		}
		MLD_PRINTF(("%s: msrcs is %d this packet\n", __func__,
		    msrcs));
		pmr->mr_numsrc = htons(msrcs);
		nbytes += (msrcs * sizeof(struct in6_addr));
	}

	if (is_source_query && msrcs == 0) {
		MLD_PRINTF(("%s: no recorded sources to report\n", __func__));
		if (m != m0) {
			m_freem(m);
		}
		return 0;
	}

	/*
	 * We are good to go with first packet.
	 */
	if (m != m0) {
		MLD_PRINTF(("%s: enqueueing first packet\n", __func__));
		m->m_pkthdr.vt_nrecs = 1;
		IF_ENQUEUE(ifq, m);
	} else {
		m->m_pkthdr.vt_nrecs++;
	}
	/*
	 * No further work needed if no source list in packet(s).
	 */
	if (!record_has_sources) {
		return nbytes;
	}

	/*
	 * Whilst sources remain to be announced, we need to allocate
	 * a new packet and fill out as many sources as will fit.
	 * Always try for a cluster first.
	 */
	while (nims != NULL) {
		if (IF_QFULL(ifq)) {
			MLD_PRINTF(("%s: outbound queue full\n", __func__));
			return -ENOMEM;
		}
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			m = m_gethdr(M_DONTWAIT, MT_DATA);
		}
		if (m == NULL) {
			return -ENOMEM;
		}
		mld_save_context(m, ifp);
		md = m_getptr(m, 0, &off);
		pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
		MLD_PRINTF(("%s: allocated next packet\n", __func__));

		if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
			if (m != m0) {
				m_freem(m);
			}
			MLD_PRINTF(("%s: m_append() failed.\n", __func__));
			return -ENOMEM;
		}
		m->m_pkthdr.vt_nrecs = 1;
		nbytes += sizeof(struct mldv2_record);

		m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
		    sizeof(struct mldv2_record)) / sizeof(struct in6_addr);

		msrcs = 0;
		RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
			MLD_PRINTF(("%s: visit node %s\n",
			    __func__, ip6_sprintf(&ims->im6s_addr)));
			now = im6s_get_mode(inm, ims, 1);
			if ((now != mode) ||
			    (now == mode &&
			    (!use_block_allow && mode == MCAST_UNDEFINED))) {
				MLD_PRINTF(("%s: skip node\n", __func__));
				continue;
			}
			if (is_source_query && ims->im6s_stp == 0) {
				MLD_PRINTF(("%s: skip unrecorded node\n",
				    __func__));
				continue;
			}
			MLD_PRINTF(("%s: append node\n", __func__));
			if (!m_append(m, sizeof(struct in6_addr),
			    (void *)&ims->im6s_addr)) {
				if (m != m0) {
					m_freem(m);
				}
				MLD_PRINTF(("%s: m_append() failed.\n",
				    __func__));
				return -ENOMEM;
			}
			++msrcs;
			if (msrcs == m0srcs) {
				break;
			}
		}
		pmr->mr_numsrc = htons(msrcs);
		nbytes += (msrcs * sizeof(struct in6_addr));

		MLD_PRINTF(("%s: enqueueing next packet\n", __func__));
		IF_ENQUEUE(ifq, m);
	}

	return nbytes;
}

/*
 * Type used to mark record pass completion.
 * We exploit the fact we can cast to this easily from the
 * current filter modes on each ip_msource node.
 */
typedef enum {
	REC_NONE = 0x00,        /* MCAST_UNDEFINED */
	REC_ALLOW = 0x01,       /* MCAST_INCLUDE */
	REC_BLOCK = 0x02,       /* MCAST_EXCLUDE */
	REC_FULL = REC_ALLOW | REC_BLOCK
} rectype_t;
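
/*
 * Editorial note (hedged): the cast trick above relies on MCAST_UNDEFINED,
 * MCAST_INCLUDE and MCAST_EXCLUDE having the values 0, 1 and 2, so a
 * node's filter mode maps directly onto REC_NONE, REC_ALLOW or REC_BLOCK,
 * and an expression such as (~mode & REC_FULL) flips between the ALLOW
 * and BLOCK record passes below.
 */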

/*
 * Enqueue an MLDv2 filter list change to the given output queue.
 *
 * Source list filter state is held in an RB-tree. When the filter list
 * for a group is changed without changing its mode, we need to compute
 * the deltas between T0 and T1 for each source in the filter set,
 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
 *
 * As we may potentially queue two record types, and the entire R-B tree
 * needs to be walked at once, we break this out into its own function
 * so we can generate a tightly packed queue of packets.
 *
 * XXX This could be written to only use one tree walk, although that makes
 * serializing into the mbuf chains a bit harder. For now we do two walks
 * which makes things easier on us, and it may or may not be harder on
 * the L2 cache.
 *
 * If successful the size of all data appended to the queue is returned,
 * otherwise an error code less than zero is returned, or zero if
 * no record(s) were appended.
 */
static int
mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm)
{
	static const int MINRECLEN =
	    sizeof(struct mldv2_record) + sizeof(struct in6_addr);
	struct ifnet *ifp;
	struct mldv2_record mr;
	struct mldv2_record *pmr;
	struct ip6_msource *ims, *nims;
	struct mbuf *m, *m0, *md;
	int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
	int nallow, nblock;
	uint8_t mode, now, then;
	rectype_t crt, drt, nrt;

	IN6M_LOCK_ASSERT_HELD(inm);

	if (inm->in6m_nsrc == 0 ||
	    (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0)) {
		return 0;
	}

	ifp = inm->in6m_ifp;                    /* interface */
	mode = inm->in6m_st[1].iss_fmode;       /* filter mode at t1 */
	crt = REC_NONE; /* current group record type */
	drt = REC_NONE; /* mask of completed group record types */
	nrt = REC_NONE; /* record type for current node */
	m0srcs = 0;     /* # source which will fit in current mbuf chain */
	npbytes = 0;    /* # of bytes appended this packet */
	nbytes = 0;     /* # of bytes appended to group's state-change queue */
	rsrcs = 0;      /* # sources encoded in current record */
	schanged = 0;   /* # nodes encoded in overall filter change */
	nallow = 0;     /* # of source entries in ALLOW_NEW */
	nblock = 0;     /* # of source entries in BLOCK_OLD */
	nims = NULL;    /* next tree node pointer */

	/*
	 * For each possible filter record mode.
	 * The first kind of source we encounter tells us which
	 * is the first kind of record we start appending.
	 * If a node transitioned to UNDEFINED at t1, its mode is treated
	 * as the inverse of the group's filter mode.
	 */
	while (drt != REC_FULL) {
		do {
			m0 = ifq->ifq_tail;
			if (m0 != NULL &&
			    (m0->m_pkthdr.vt_nrecs + 1 <=
			    MLD_V2_REPORT_MAXRECS) &&
			    (m0->m_pkthdr.len + MINRECLEN) <
			    (ifp->if_mtu - MLD_MTUSPACE)) {
				m = m0;
				m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
				    sizeof(struct mldv2_record)) /
				    sizeof(struct in6_addr);
				MLD_PRINTF(("%s: use previous packet\n",
				    __func__));
			} else {
				m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
				if (m == NULL) {
					m = m_gethdr(M_DONTWAIT, MT_DATA);
				}
				if (m == NULL) {
					MLD_PRINTF(("%s: m_get*() failed\n",
					    __func__));
					return -ENOMEM;
				}
				m->m_pkthdr.vt_nrecs = 0;
				mld_save_context(m, ifp);
				m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
				    sizeof(struct mldv2_record)) /
				    sizeof(struct in6_addr);
				npbytes = 0;
				MLD_PRINTF(("%s: allocated new packet\n",
				    __func__));
			}
			/*
			 * Append the MLD group record header to the
			 * current packet's data area.
			 * Recalculate pointer to free space for next
			 * group record, in case m_append() allocated
			 * a new mbuf or cluster.
			 */
			memset(&mr, 0, sizeof(mr));
			mr.mr_addr = inm->in6m_addr;
			in6_clearscope(&mr.mr_addr);
			if (!m_append(m, sizeof(mr), (void *)&mr)) {
				if (m != m0) {
					m_freem(m);
				}
				MLD_PRINTF(("%s: m_append() failed\n",
				    __func__));
				return -ENOMEM;
			}
			npbytes += sizeof(struct mldv2_record);
			if (m != m0) {
				/* new packet; offset in chain */
				md = m_getptr(m, npbytes -
				    sizeof(struct mldv2_record), &off);
				pmr = (struct mldv2_record *)(mtod(md,
				    uint8_t *) + off);
			} else {
				/* current packet; offset from last append */
				md = m_last(m);
				pmr = (struct mldv2_record *)(mtod(md,
				    uint8_t *) + md->m_len -
				    sizeof(struct mldv2_record));
			}
			/*
			 * Begin walking the tree for this record type
			 * pass, or continue from where we left off
			 * previously if we had to allocate a new packet.
			 * Only report deltas in-mode at t1.
			 * We need not report included sources as allowed
			 * if we are in inclusive mode on the group,
			 * however the converse is not true.
			 */
			rsrcs = 0;
			if (nims == NULL) {
				nims = RB_MIN(ip6_msource_tree,
				    &inm->in6m_srcs);
			}
			RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
				MLD_PRINTF(("%s: visit node %s\n", __func__,
				    ip6_sprintf(&ims->im6s_addr)));
				now = im6s_get_mode(inm, ims, 1);
				then = im6s_get_mode(inm, ims, 0);
				MLD_PRINTF(("%s: mode: t0 %d, t1 %d\n",
				    __func__, then, now));
				if (now == then) {
					MLD_PRINTF(("%s: skip unchanged\n",
					    __func__));
					continue;
				}
				if (mode == MCAST_EXCLUDE &&
				    now == MCAST_INCLUDE) {
					MLD_PRINTF(("%s: skip IN src on EX "
					    "group\n", __func__));
					continue;
				}
				nrt = (rectype_t)now;
				if (nrt == REC_NONE) {
					nrt = (rectype_t)(~mode & REC_FULL);
				}
				if (schanged++ == 0) {
					crt = nrt;
				} else if (crt != nrt) {
					continue;
				}
				if (!m_append(m, sizeof(struct in6_addr),
				    (void *)&ims->im6s_addr)) {
					if (m != m0) {
						m_freem(m);
					}
					MLD_PRINTF(("%s: m_append() failed\n",
					    __func__));
					return -ENOMEM;
				}
				nallow += !!(crt == REC_ALLOW);
				nblock += !!(crt == REC_BLOCK);
				if (++rsrcs == m0srcs) {
					break;
				}
			}
			/*
			 * If we did not append any tree nodes on this
			 * pass, back out of allocations.
			 */
			if (rsrcs == 0) {
				npbytes -= sizeof(struct mldv2_record);
				if (m != m0) {
					MLD_PRINTF(("%s: m_free(m)\n",
					    __func__));
					m_freem(m);
				} else {
					MLD_PRINTF(("%s: m_adj(m, -mr)\n",
					    __func__));
					m_adj(m, -((int)sizeof(
					    struct mldv2_record)));
				}
				continue;
			}
			npbytes += (rsrcs * sizeof(struct in6_addr));
			if (crt == REC_ALLOW) {
				pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
			} else if (crt == REC_BLOCK) {
				pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
			}
			pmr->mr_numsrc = htons(rsrcs);
			/*
			 * Count the new group record, and enqueue this
			 * packet if it wasn't already queued.
			 */
			m->m_pkthdr.vt_nrecs++;
			if (m != m0) {
				IF_ENQUEUE(ifq, m);
			}
			nbytes += npbytes;
		} while (nims != NULL);
		drt |= crt;
		crt = (~crt & REC_FULL);
	}

	MLD_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__,
	    nallow, nblock));

	return nbytes;
}

static int
mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
{
	struct mbuf *m;         /* pending state-change */
	struct mbuf *m0;        /* copy of pending state-change */
	struct mbuf *mt;        /* last state-change in packet */
	struct ifqueue *gq;
	int docopy, domerge;
	u_int recslen;

	IN6M_LOCK_ASSERT_HELD(inm);

	docopy = 0;
	domerge = 0;
	recslen = 0;

	/*
	 * If there are further pending retransmissions, make a writable
	 * copy of each queued state-change message before merging.
	 */
	if (inm->in6m_scrv > 0) {
		docopy = 1;
	}

	gq = &inm->in6m_scq;
	if (gq->ifq_head == NULL) {
		MLD_PRINTF(("%s: WARNING: queue for inm 0x%llx is empty\n",
		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm)));
	}

	/*
	 * Use IF_REMQUEUE() instead of IF_DEQUEUE() below, since the
	 * packet might not always be at the head of the ifqueue.
	 */
	m = gq->ifq_head;
	while (m != NULL) {
		/*
		 * Only merge the report into the current packet if
		 * there is sufficient space to do so; an MLDv2 report
		 * packet may only contain 65,535 group records.
		 * Always use a simple mbuf chain concatenation to do this,
		 * as large state changes for single groups may have
		 * allocated clusters.
		 */
		domerge = 0;
		mt = ifscq->ifq_tail;
		if (mt != NULL) {
			recslen = m_length(m);

			if ((mt->m_pkthdr.vt_nrecs +
			    m->m_pkthdr.vt_nrecs <=
			    MLD_V2_REPORT_MAXRECS) &&
			    (mt->m_pkthdr.len + recslen <=
			    (inm->in6m_ifp->if_mtu - MLD_MTUSPACE))) {
				domerge = 1;
			}
		}

		if (!domerge && IF_QFULL(gq)) {
			MLD_PRINTF(("%s: outbound queue full, skipping whole "
			    "packet 0x%llx\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(m)));
			mt = m->m_nextpkt;
			if (!docopy) {
				IF_REMQUEUE(gq, m);
				m_freem(m);
			}
			m = mt;
			continue;
		}

		if (!docopy) {
			MLD_PRINTF(("%s: dequeueing 0x%llx\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(m)));
			mt = m->m_nextpkt;
			IF_REMQUEUE(gq, m);
			m0 = m;
			m = mt;
		} else {
			MLD_PRINTF(("%s: copying 0x%llx\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(m)));
			m0 = m_dup(m, M_NOWAIT);
			if (m0 == NULL) {
				return ENOMEM;
			}
			m0->m_nextpkt = NULL;
			m = m->m_nextpkt;
		}

		if (!domerge) {
			MLD_PRINTF(("%s: queueing 0x%llx to ifscq 0x%llx)\n",
			    __func__, (uint64_t)VM_KERNEL_ADDRPERM(m0),
			    (uint64_t)VM_KERNEL_ADDRPERM(ifscq)));
			IF_ENQUEUE(ifscq, m0);
		} else {
			struct mbuf *mtl;       /* last mbuf of packet mt */

			MLD_PRINTF(("%s: merging 0x%llx with ifscq tail "
			    "0x%llx)\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(m0),
			    (uint64_t)VM_KERNEL_ADDRPERM(mt)));

			mtl = m_last(mt);
			m0->m_flags &= ~M_PKTHDR;
			mt->m_pkthdr.len += recslen;
			mt->m_pkthdr.vt_nrecs +=
			    m0->m_pkthdr.vt_nrecs;

			mtl->m_next = m0;
		}
	}

	return 0;
}

/*
 * Respond to a pending MLDv2 General Query.
 */
static uint32_t
mld_v2_dispatch_general_query(struct mld_ifinfo *mli)
{
	struct ifnet *ifp;
	struct in6_multi *inm;
	struct in6_multistep step;
	int retval;

	MLI_LOCK_ASSERT_HELD(mli);

	VERIFY(mli->mli_version == MLD_VERSION_2);

	ifp = mli->mli_ifp;
	MLI_UNLOCK(mli);

	in6_multihead_lock_shared();
	IN6_FIRST_MULTI(step, inm);
	while (inm != NULL) {
		IN6M_LOCK(inm);
		if (inm->in6m_ifp != ifp) {
			goto next;
		}

		switch (inm->in6m_state) {
		case MLD_NOT_MEMBER:
		case MLD_SILENT_MEMBER:
			break;
		case MLD_REPORTING_MEMBER:
		case MLD_IDLE_MEMBER:
		case MLD_LAZY_MEMBER:
		case MLD_SLEEPING_MEMBER:
		case MLD_AWAKENING_MEMBER:
			inm->in6m_state = MLD_REPORTING_MEMBER;
			MLI_LOCK(mli);
			retval = mld_v2_enqueue_group_record(&mli->mli_gq,
			    inm, 0, 0, 0, 0);
			MLI_UNLOCK(mli);
			MLD_PRINTF(("%s: enqueue record = %d\n",
			    __func__, retval));
			break;
		case MLD_G_QUERY_PENDING_MEMBER:
		case MLD_SG_QUERY_PENDING_MEMBER:
		case MLD_LEAVING_MEMBER:
			break;
		}
next:
		IN6M_UNLOCK(inm);
		IN6_NEXT_MULTI(step, inm);
	}
	in6_multihead_lock_done();

	MLI_LOCK(mli);
	mld_dispatch_queue_locked(mli, &mli->mli_gq, MLD_MAX_RESPONSE_BURST);
	MLI_LOCK_ASSERT_HELD(mli);

	/*
	 * Slew transmission of bursts over 1 second intervals.
	 */
	if (mli->mli_gq.ifq_head != NULL) {
		mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
		    MLD_RESPONSE_BURST_INTERVAL);
	}

	return mli->mli_v2_timer;
}
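
/*
 * Editorial note (hedged): the general-query response above is paced in
 * bursts of at most MLD_MAX_RESPONSE_BURST packets per pass; while packets
 * remain queued, the interface timer is re-armed to a randomized delay
 * bounded by MLD_RESPONSE_BURST_INTERVAL so the remainder is spread over
 * subsequent mld_timeout() runs instead of being sent back-to-back.
 */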

/*
 * Transmit the next pending message in the output queue.
 *
 * Must not be called with in6m_lock or mli_lock held.
 */
static void
mld_dispatch_packet(struct mbuf *m)
{
	struct ip6_moptions *im6o;
	struct ifnet *ifp;
	struct ifnet *oifp = NULL;
	struct mbuf *m0;
	struct mbuf *md;
	struct ip6_hdr *ip6;
	struct mld_hdr *mld;
	int error;
	int off;
	int type;

	MLD_PRINTF(("%s: transmit 0x%llx\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(m)));

	/*
	 * Check if the ifnet is still attached.
	 */
	ifp = mld_restore_context(m);
	if (ifp == NULL || !ifnet_is_attached(ifp, 0)) {
		MLD_PRINTF(("%s: dropped 0x%llx as ifindex %u went away.\n",
		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(m),
		    (u_int)if_index));
		m_freem(m);
		ip6stat.ip6s_noroute++;
		return;
	}

	im6o = ip6_allocmoptions(M_WAITOK);
	if (im6o == NULL) {
		m_freem(m);
		return;
	}

	im6o->im6o_multicast_hlim = 1;
	im6o->im6o_multicast_loop = 0;
	im6o->im6o_multicast_ifp = ifp;

	if (m->m_flags & M_MLDV1) {
		m0 = m;
	} else {
		m0 = mld_v2_encap_report(ifp, m);
		if (m0 == NULL) {
			MLD_PRINTF(("%s: dropped 0x%llx\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(m)));
			/*
			 * mld_v2_encap_report() has already freed our mbuf.
			 */
			IM6O_REMREF(im6o);
			ip6stat.ip6s_odropped++;
			return;
		}
	}

	mld_scrub_context(m0);
	m->m_flags &= ~(M_PROTOFLAGS);
	m0->m_pkthdr.rcvif = lo_ifp;

	ip6 = mtod(m0, struct ip6_hdr *);
	(void)in6_setscope(&ip6->ip6_dst, ifp, NULL);

	/*
	 * Retrieve the ICMPv6 type before handoff to ip6_output(),
	 * so we can bump the stats.
	 */
	md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
	mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off);
	type = mld->mld_type;

	if (ifp->if_eflags & IFEF_TXSTART) {
		/*
		 * Use control service class if the outgoing
		 * interface supports transmit-start model.
		 */
		(void) m_set_service_class(m0, MBUF_SC_CTL);
	}

	error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, im6o,
	    &oifp, NULL);

	IM6O_REMREF(im6o);

	if (error) {
		MLD_PRINTF(("%s: ip6_output(0x%llx) = %d\n", __func__,
		    (uint64_t)VM_KERNEL_ADDRPERM(m0), error));
		if (oifp != NULL) {
			ifnet_release(oifp);
		}
		return;
	}

	icmp6stat.icp6s_outhist[type]++;
	if (oifp != NULL) {
		icmp6_ifstat_inc(oifp, ifs6_out_msg);
		switch (type) {
		case MLD_LISTENER_REPORT:
		case MLDV2_LISTENER_REPORT:
			icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
			break;
		case MLD_LISTENER_DONE:
			icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
			break;
		}
		ifnet_release(oifp);
	}
}

/*
 * Encapsulate an MLDv2 report.
 *
 * KAME IPv6 requires that hop-by-hop options be passed separately,
 * and that the IPv6 header be prepended in a separate mbuf.
 *
 * Returns a pointer to the new mbuf chain head, or NULL if the
 * allocation failed.
 */
static struct mbuf *
mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
{
	struct mbuf *mh;
	struct mldv2_report *mld;
	struct ip6_hdr *ip6;
	struct in6_ifaddr *ia;
	int mldreclen;

	VERIFY(m->m_flags & M_PKTHDR);

	/*
	 * RFC3590: OK to send as :: or tentative during DAD.
	 */
	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
	if (ia == NULL) {
		MLD_PRINTF(("%s: warning: ia is NULL\n", __func__));
	}

	MGETHDR(mh, M_DONTWAIT, MT_HEADER);
	if (mh == NULL) {
		if (ia != NULL) {
			IFA_REMREF(&ia->ia_ifa);
		}
		m_freem(m);
		return NULL;
	}
	MH_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));

	mldreclen = m_length(m);
	MLD_PRINTF(("%s: mldreclen is %d\n", __func__, mldreclen));

	mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report);
	mh->m_pkthdr.len = sizeof(struct ip6_hdr) +
	    sizeof(struct mldv2_report) + mldreclen;

	ip6 = mtod(mh, struct ip6_hdr *);
	ip6->ip6_flow = 0;
	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
	ip6->ip6_vfc |= IPV6_VERSION;
	ip6->ip6_nxt = IPPROTO_ICMPV6;
	if (ia != NULL) {
		IFA_LOCK(&ia->ia_ifa);
	}
	ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
	if (ia != NULL) {
		IFA_UNLOCK(&ia->ia_ifa);
		IFA_REMREF(&ia->ia_ifa);
		ia = NULL;
	}
	ip6->ip6_dst = in6addr_linklocal_allv2routers;
	/* scope ID will be set in netisr */

	mld = (struct mldv2_report *)(ip6 + 1);
	mld->mld_type = MLDV2_LISTENER_REPORT;
	mld->mld_code = 0;
	mld->mld_cksum = 0;
	mld->mld_v2_reserved = 0;
	mld->mld_v2_numrecs = htons(m->m_pkthdr.vt_nrecs);
	m->m_pkthdr.vt_nrecs = 0;
	m->m_flags &= ~M_PKTHDR;

	mh->m_next = m;
	mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
	    sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
	return mh;
}
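
/*
 * Editorial note (hedged): the resulting chain is a single packet-header
 * mbuf holding the 40-byte IPv6 header plus the fixed MLDv2 report header
 * (struct mldv2_report), followed by the original group-record mbufs. The
 * Router Alert hop-by-hop option configured in mld_po is inserted later by
 * ip6_output(), which is why the ICMPv6 checksum above is computed over
 * the report and pseudo-header alone.
 */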

static const char *
mld_rec_type_to_str(const int type)
{
	switch (type) {
	case MLD_CHANGE_TO_EXCLUDE_MODE:
		return "TO_EX";
	case MLD_CHANGE_TO_INCLUDE_MODE:
		return "TO_IN";
	case MLD_MODE_IS_EXCLUDE:
		return "MODE_EX";
	case MLD_MODE_IS_INCLUDE:
		return "MODE_IN";
	case MLD_ALLOW_NEW_SOURCES:
		return "ALLOW_NEW";
	case MLD_BLOCK_OLD_SOURCES:
		return "BLOCK_OLD";
	default:
		break;
	}
	return "unknown";
}

void
mld_init(void)
{
	MLD_PRINTF(("%s: initializing\n", __func__));

	/* Setup lock group and attribute for mld_mtx */
	mld_mtx_grp_attr = lck_grp_attr_alloc_init();
	mld_mtx_grp = lck_grp_alloc_init("mld_mtx", mld_mtx_grp_attr);
	mld_mtx_attr = lck_attr_alloc_init();
	lck_mtx_init(&mld_mtx, mld_mtx_grp, mld_mtx_attr);

	ip6_initpktopts(&mld_po);
	mld_po.ip6po_hlim = 1;
	mld_po.ip6po_hbh = &mld_ra.hbh;
	mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
	mld_po.ip6po_flags = IP6PO_DONTFRAG;
	LIST_INIT(&mli_head);

	mli_size = sizeof(struct mld_ifinfo);
	mli_zone = zinit(mli_size, MLI_ZONE_MAX * mli_size,
	    0, MLI_ZONE_NAME);
	if (mli_zone == NULL) {
		panic("%s: failed allocating %s", __func__, MLI_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(mli_zone, Z_EXPAND, TRUE);
	zone_change(mli_zone, Z_CALLERACCT, FALSE);
}