1 /*
2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*-
29 * Copyright (c) 2009 Bruce Simpson.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 * 3. The name of the author may not be used to endorse or promote
40 * products derived from this software without specific prior written
41 * permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 */
55
56 /*
57 * Copyright (c) 1988 Stephen Deering.
58 * Copyright (c) 1992, 1993
59 * The Regents of the University of California. All rights reserved.
60 *
61 * This code is derived from software contributed to Berkeley by
62 * Stephen Deering of Stanford University.
63 *
64 * Redistribution and use in source and binary forms, with or without
65 * modification, are permitted provided that the following conditions
66 * are met:
67 * 1. Redistributions of source code must retain the above copyright
68 * notice, this list of conditions and the following disclaimer.
69 * 2. Redistributions in binary form must reproduce the above copyright
70 * notice, this list of conditions and the following disclaimer in the
71 * documentation and/or other materials provided with the distribution.
72 * 3. All advertising materials mentioning features or use of this software
73 * must display the following acknowledgement:
74 * This product includes software developed by the University of
75 * California, Berkeley and its contributors.
76 * 4. Neither the name of the University nor the names of its contributors
77 * may be used to endorse or promote products derived from this software
78 * without specific prior written permission.
79 *
80 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
81 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
82 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
83 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
84 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
85 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
86 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
87 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
88 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
89 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
90 * SUCH DAMAGE.
91 *
92 * @(#)igmp.c 8.1 (Berkeley) 7/19/93
93 */
94 /*
95 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
96 * support for mandatory and extensible security protections. This notice
97 * is included in support of clause 2.2 (b) of the Apple Public License,
98 * Version 2.0.
99 */
100
101 #include <sys/cdefs.h>
102
103 #include <sys/param.h>
104 #include <sys/systm.h>
105 #include <sys/mbuf.h>
106 #include <sys/socket.h>
107 #include <sys/protosw.h>
108 #include <sys/sysctl.h>
109 #include <sys/kernel.h>
110 #include <sys/malloc.h>
111 #include <sys/mcache.h>
112
113 #include <kern/zalloc.h>
114
115 #include <net/if.h>
116 #include <net/route.h>
117
118 #include <netinet/in.h>
119 #include <netinet/in_var.h>
120 #include <netinet6/in6_var.h>
121 #include <netinet/ip6.h>
122 #include <netinet6/ip6_var.h>
123 #include <netinet6/scope6_var.h>
124 #include <netinet/icmp6.h>
125 #include <netinet6/mld6.h>
126 #include <netinet6/mld6_var.h>
127
128 /* Lock group and attribute for mld6_mtx */
129 static lck_attr_t *mld_mtx_attr;
130 static lck_grp_t *mld_mtx_grp;
131 static lck_grp_attr_t *mld_mtx_grp_attr;
132
133 /*
134 * Locking and reference counting:
135 *
136 * mld_mtx mainly protects mli_head. In cases where both mld_mtx and
137 * in6_multihead_lock must be held, the former must be acquired first in order
138 * to maintain lock ordering. It is not a requirement that mld_mtx be
139 * acquired first before in6_multihead_lock, but in case both must be acquired
140 * in succession, the correct lock ordering must be followed.
141 *
142 * Instead of walking the if_multiaddrs list at the interface and returning
143 * the ifma_protospec value of a matching entry, we search the global list
144 * of in6_multi records and find it that way; this is done with in6_multihead
145 * lock held. Doing so avoids the race condition issues that many other BSDs
146 * suffer from (therefore in our implementation, ifma_protospec will never be
147 * NULL for as long as the in6_multi is valid.)
148 *
149 * The above creates a requirement for the in6_multi to stay in in6_multihead
150 * list even after the final MLD leave (in MLDv2 mode), until it no longer
151 * needs to be retransmitted (this is not required for MLDv1.) In order to handle
152 * this, the request and reference counts of the in6_multi are bumped up when
153 * the state changes to MLD_LEAVING_MEMBER, and later dropped in the timeout
154 * handler. Each in6_multi holds a reference to the underlying mld_ifinfo.
155 *
156 * Thus, the permitted lock order is:
157 *
158 * mld_mtx, in6_multihead_lock, inm6_lock, mli_lock
159 *
160 * Any may be taken independently, but if any are held at the same time,
161 * the above lock order must be followed.
162 */
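/*
 * Illustrative sketch (not part of the original file): a path that needs
 * both the MLD global lock and the global in6_multi list takes them in
 * the documented order, e.g.:
 *
 *	MLD_LOCK();
 *	in6_multihead_lock_shared();
 *	... look up or iterate in6_multi records ...
 *	in6_multihead_lock_done();
 *	MLD_UNLOCK();
 */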
163 static decl_lck_mtx_data(, mld_mtx);
164
165 static void mli_initvar(struct mld_ifinfo *, struct ifnet *, int);
166 static struct mld_ifinfo *mli_alloc(int);
167 static void mli_free(struct mld_ifinfo *);
168 static void mli_delete(const struct ifnet *);
169 static void mld_dispatch_packet(struct mbuf *);
170 static void mld_final_leave(struct in6_multi *, struct mld_ifinfo *);
171 static int mld_handle_state_change(struct in6_multi *,
172 struct mld_ifinfo *);
173 static int mld_initial_join(struct in6_multi *, struct mld_ifinfo *,
174 const int);
175 #ifdef MLD_DEBUG
176 static const char * mld_rec_type_to_str(const int);
177 #endif
178 static void mld_set_version(struct mld_ifinfo *, const int);
179 static void mld_flush_relq(struct mld_ifinfo *);
180 static void mld_dispatch_queue(struct mld_ifinfo *, struct ifqueue *, int);
181 static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
182 /*const*/ struct mld_hdr *);
183 static int mld_v1_input_report(struct ifnet *, const struct ip6_hdr *,
184 /*const*/ struct mld_hdr *);
185 static void mld_v1_process_group_timer(struct in6_multi *, const int);
186 static void mld_v1_process_querier_timers(struct mld_ifinfo *);
187 static int mld_v1_transmit_report(struct in6_multi *, const int);
188 static void mld_v1_update_group(struct in6_multi *, const int);
189 static void mld_v2_cancel_link_timers(struct mld_ifinfo *);
190 static void mld_v2_dispatch_general_query(struct mld_ifinfo *);
191 static struct mbuf *
192 mld_v2_encap_report(struct ifnet *, struct mbuf *);
193 static int mld_v2_enqueue_filter_change(struct ifqueue *,
194 struct in6_multi *);
195 static int mld_v2_enqueue_group_record(struct ifqueue *,
196 struct in6_multi *, const int, const int, const int,
197 const int);
198 static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
199 struct mbuf *, const int, const int);
200 static int mld_v2_merge_state_changes(struct in6_multi *,
201 struct ifqueue *);
202 static void mld_v2_process_group_timers(struct mld_ifinfo *,
203 struct ifqueue *, struct ifqueue *,
204 struct in6_multi *, const int);
205 static int mld_v2_process_group_query(struct in6_multi *,
206 int, struct mbuf *, const int);
207 static int sysctl_mld_gsr SYSCTL_HANDLER_ARGS;
208 static int sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS;
209
210 /*
211 * Normative references: RFC 2710, RFC 3590, RFC 3810.
212 *
213 * XXX LOR PREVENTION
214 * A special case for IPv6 is the in6_setscope() routine. ip6_output()
215 * will not accept an ifp; it wants an embedded scope ID, unlike
216 * ip_output(), which happily takes the ifp given to it. The embedded
217 * scope ID is only used by MLD to select the outgoing interface.
218 *
219 * As such, we exploit the fact that the scope ID is just the interface
220 * index, and embed it in the IPv6 destination address accordingly.
221 * This is potentially NOT VALID for MLDv1 reports, as they
222 * are always sent to the multicast group itself; as MLDv2
223 * reports are always sent to ff02::16, this is not an issue
224 * when MLDv2 is in use.
225 */
226
227 #define MLD_EMBEDSCOPE(pin6, zoneid) \
228 (pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF)
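/*
 * Worked example (illustrative): for interface index 4, applying
 * MLD_EMBEDSCOPE to ff02::16 sets the second 16-bit word of the
 * address, giving ff02:4::16 in-kernel; in6_clearscope() undoes
 * this before the address ever appears on the wire.
 */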
229
230 static struct timeval mld_gsrdelay = {10, 0};
231 static LIST_HEAD(, mld_ifinfo) mli_head;
232
233 static int interface_timers_running6;
234 static int state_change_timers_running6;
235 static int current_state_timers_running6;
236
237 static decl_lck_mtx_data(, mld6_mtx);
238
239 #define MLD_LOCK() \
240 lck_mtx_lock(&mld6_mtx)
241 #define MLD_LOCK_ASSERT_HELD() \
242 lck_mtx_assert(&mld6_mtx, LCK_MTX_ASSERT_OWNED)
243 #define MLD_LOCK_ASSERT_NOTHELD() \
244 lck_mtx_assert(&mld6_mtx, LCK_MTX_ASSERT_NOTOWNED)
245 #define MLD_UNLOCK() \
246 lck_mtx_unlock(&mld6_mtx)
247
248 #define MLI_ZONE_MAX 64 /* maximum elements in zone */
249 #define MLI_ZONE_NAME "mld_ifinfo" /* zone name */
250
251 static unsigned int mli_size; /* size of zone element */
252 static struct zone *mli_zone; /* zone for mld_ifinfo */
253
254 SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */
255
256 SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
257 "IPv6 Multicast Listener Discovery");
258 SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
259 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
260 &mld_gsrdelay.tv_sec, 0, sysctl_mld_gsr, "I",
261 "Rate limit for MLDv2 Group-and-Source queries in seconds");
262
263 SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_LOCKED,
264 sysctl_mld_ifinfo, "Per-interface MLDv2 state");
265
266 static int mld_v1enable = 1;
267 SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RW | CTLFLAG_LOCKED,
268 &mld_v1enable, 0, "Enable fallback to MLDv1");
269
270 static int mld_use_allow = 1;
271 SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RW | CTLFLAG_LOCKED,
272 &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
273
274 #ifdef MLD_DEBUG
275 int mld_debug = 0;
276 SYSCTL_INT(_net_inet6_mld, OID_AUTO,
277 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_debug, 0, "");
278 #endif
279 /*
280 * Packed Router Alert option structure declaration.
281 */
282 struct mld_raopt {
283 struct ip6_hbh hbh;
284 struct ip6_opt pad;
285 struct ip6_opt_router ra;
286 } __packed;
287
288 /*
289 * Router Alert hop-by-hop option header.
290 */
291 static struct mld_raopt mld_ra = {
292 .hbh = { 0, 0 },
293 .pad = { .ip6o_type = IP6OPT_PADN, 0 },
294 .ra = {
295 .ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT,
296 .ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2),
297 .ip6or_value = {((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
298 (IP6OPT_RTALERT_MLD & 0xFF) }
299 }
300 };
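/*
 * Layout note (illustrative): the block above is 2 (hbh) + 2 (PadN) +
 * 4 (router alert) = 8 bytes, exactly one hop-by-hop 8-octet unit, so
 * hbh.ip6h_len correctly remains 0 (it counts units beyond the first
 * 8 octets).
 */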
301 static struct ip6_pktopts mld_po;
302
303 /*
304 * Retrieve or set threshold between group-source queries in seconds.
305 */
306 static int
307 sysctl_mld_gsr SYSCTL_HANDLER_ARGS
308 {
309 #pragma unused(arg1, arg2)
310 int error;
311 int i;
312
313 MLD_LOCK();
314
315 i = mld_gsrdelay.tv_sec;
316
317 error = sysctl_handle_int(oidp, &i, 0, req);
318 if (error || !req->newptr)
319 goto out_locked;
320
321 if (i < -1 || i >= 60) {
322 error = EINVAL;
323 goto out_locked;
324 }
325
326 mld_gsrdelay.tv_sec = i;
327
328 out_locked:
329 MLD_UNLOCK();
330 return (error);
331 }
332 /*
333 * Expose struct mld_ifinfo to userland, keyed by ifindex.
334 * For use by ifmcstat(8).
335 *
336 */
337 static int
338 sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS
339 {
340 #pragma unused(oidp)
341 int *name;
342 int error;
343 u_int namelen;
344 struct ifnet *ifp;
345 struct mld_ifinfo *mli;
346 struct mld_ifinfo_u mli_u;
347
348 name = (int *)arg1;
349 namelen = arg2;
350
351 if (req->newptr != USER_ADDR_NULL)
352 return (EPERM);
353
354 if (namelen != 1)
355 return (EINVAL);
356
357 MLD_LOCK();
358
359 if (name[0] <= 0 || name[0] > (u_int)if_index) {
360 error = ENOENT;
361 goto out_locked;
362 }
363
364 error = ENOENT;
365
366 ifnet_head_lock_shared();
367 ifp = ifindex2ifnet[name[0]];
368 ifnet_head_done();
369 if (ifp == NULL)
370 goto out_locked;
371
372 bzero(&mli_u, sizeof (mli_u));
373
374 LIST_FOREACH(mli, &mli_head, mli_link) {
375 MLI_LOCK(mli);
376 if (ifp != mli->mli_ifp) {
377 MLI_UNLOCK(mli);
378 continue;
379 }
380
381 mli_u.mli_ifindex = mli->mli_ifp->if_index;
382 mli_u.mli_version = mli->mli_version;
383 mli_u.mli_v1_timer = mli->mli_v1_timer;
384 mli_u.mli_v2_timer = mli->mli_v2_timer;
385 mli_u.mli_flags = mli->mli_flags;
386 mli_u.mli_rv = mli->mli_rv;
387 mli_u.mli_qi = mli->mli_qi;
388 mli_u.mli_qri = mli->mli_qri;
389 mli_u.mli_uri = mli->mli_uri;
390 MLI_UNLOCK(mli);
391
392 error = SYSCTL_OUT(req, &mli_u, sizeof (mli_u));
393 break;
394 }
395
396 out_locked:
397 MLD_UNLOCK();
398 return (error);
399 }
400
401 /*
402 * Dispatch an entire queue of pending packet chains.
403 *
404 * Must not be called with in6m_lock held.
405 */
406 static void
407 mld_dispatch_queue(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit)
408 {
409 struct mbuf *m;
410
411 if (mli != NULL)
412 MLI_LOCK_ASSERT_HELD(mli);
413
414 for (;;) {
415 IF_DEQUEUE(ifq, m);
416 if (m == NULL)
417 break;
418 MLD_PRINTF(("%s: dispatch %p from %p\n", __func__, m, ifq));
419 if (mli != NULL)
420 MLI_UNLOCK(mli);
421 mld_dispatch_packet(m);
422 if (mli != NULL)
423 MLI_LOCK(mli);
424 if (--limit == 0)
425 break;
426 }
427
428 if (mli != NULL)
429 MLI_LOCK_ASSERT_HELD(mli);
430 }
431
432 /*
433 * Filter outgoing MLD report state by group.
434 *
435 * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1)
436 * and node-local addresses. However, kernel and socket consumers
437 * always embed the KAME scope ID in the address provided, so strip it
438 * when performing comparison.
439 * Note: This is not the same as the *multicast* scope.
440 *
441 * Return zero if the given group is one for which MLD reports
442 * should be suppressed, or non-zero if reports should be issued.
443 */
444 static __inline__ int
445 mld_is_addr_reported(const struct in6_addr *addr)
446 {
447
448 VERIFY(IN6_IS_ADDR_MULTICAST(addr));
449
450 if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL)
451 return (0);
452
453 if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) {
454 struct in6_addr tmp = *addr;
455 in6_clearscope(&tmp);
456 if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes))
457 return (0);
458 }
459
460 return (1);
461 }
462
463 /*
464 * Attach MLD when PF_INET6 is attached to an interface.
465 */
466 struct mld_ifinfo *
467 mld_domifattach(struct ifnet *ifp, int how)
468 {
469 struct mld_ifinfo *mli;
470
471 MLD_PRINTF(("%s: called for ifp %p(%s%d)\n",
472 __func__, ifp, ifp->if_name, ifp->if_unit));
473
474 mli = mli_alloc(how);
475 if (mli == NULL)
476 return (NULL);
477
478 MLD_LOCK();
479
480 MLI_LOCK(mli);
481 mli_initvar(mli, ifp, 0);
482 mli->mli_debug |= IFD_ATTACHED;
483 MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
484 MLI_ADDREF_LOCKED(mli); /* hold a reference for caller */
485 MLI_UNLOCK(mli);
486
487 LIST_INSERT_HEAD(&mli_head, mli, mli_link);
488
489 MLD_UNLOCK();
490
491 MLD_PRINTF(("allocate mld_ifinfo for ifp %p(%s%d)\n",
492 ifp, ifp->if_name, ifp->if_unit));
493
494 return (mli);
495 }
496
497 /*
498 * Attach MLD when PF_INET6 is reattached to an interface. Caller is
499 * expected to have an outstanding reference to the mli.
500 */
501 void
502 mld_domifreattach(struct mld_ifinfo *mli)
503 {
504 struct ifnet *ifp;
505
506 MLD_LOCK();
507
508 MLI_LOCK(mli);
509 VERIFY(!(mli->mli_debug & IFD_ATTACHED));
510 ifp = mli->mli_ifp;
511 VERIFY(ifp != NULL);
512 mli_initvar(mli, ifp, 1);
513 mli->mli_debug |= IFD_ATTACHED;
514 MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
515 MLI_UNLOCK(mli);
516
517 LIST_INSERT_HEAD(&mli_head, mli, mli_link);
518
519 MLD_UNLOCK();
520
521 MLD_PRINTF(("reattached mld_ifinfo for ifp %p(%s%d)\n",
522 ifp, ifp->if_name, ifp->if_unit));
523 }
524
525 /*
526 * Hook for domifdetach.
527 */
528 void
529 mld_domifdetach(struct ifnet *ifp)
530 {
531
532 MLD_PRINTF(("%s: called for ifp %p(%s%d)\n",
533 __func__, ifp, ifp->if_name, ifp->if_unit));
534
535 MLD_LOCK();
536 mli_delete(ifp);
537 MLD_UNLOCK();
538 }
539
540 /*
541 * Called at interface detach time. Note that we only flush all deferred
542 * responses and record releases; all remaining inm records and their source
543 * entries related to this interface are left intact, in order to handle
544 * the reattach case.
545 */
546 static void
547 mli_delete(const struct ifnet *ifp)
548 {
549 struct mld_ifinfo *mli, *tmli;
550
551 MLD_LOCK_ASSERT_HELD();
552
553 LIST_FOREACH_SAFE(mli, &mli_head, mli_link, tmli) {
554 MLI_LOCK(mli);
555 if (mli->mli_ifp == ifp) {
556 /*
557 * Free deferred General Query responses.
558 */
559 IF_DRAIN(&mli->mli_gq);
560 IF_DRAIN(&mli->mli_v1q);
561 mld_flush_relq(mli);
562 VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
563 mli->mli_debug &= ~IFD_ATTACHED;
564 MLI_UNLOCK(mli);
565
566 LIST_REMOVE(mli, mli_link);
567 MLI_REMREF(mli); /* release mli_head reference */
568 return;
569 }
570 MLI_UNLOCK(mli);
571 }
572 panic("%s: mld_ifinfo not found for ifp %p\n", __func__, ifp);
573 }
574
575 static void
576 mli_initvar(struct mld_ifinfo *mli, struct ifnet *ifp, int reattach)
577 {
578 MLI_LOCK_ASSERT_HELD(mli);
579
580 mli->mli_ifp = ifp;
581 mli->mli_version = MLD_VERSION_2;
582 mli->mli_flags = 0;
583 mli->mli_rv = MLD_RV_INIT;
584 mli->mli_qi = MLD_QI_INIT;
585 mli->mli_qri = MLD_QRI_INIT;
586 mli->mli_uri = MLD_URI_INIT;
587
588 /* ifnet is not yet attached; no need to hold ifnet lock */
589 if (!(ifp->if_flags & IFF_MULTICAST))
590 mli->mli_flags |= MLIF_SILENT;
591 if (mld_use_allow)
592 mli->mli_flags |= MLIF_USEALLOW;
593 if (!reattach)
594 SLIST_INIT(&mli->mli_relinmhead);
595
596 /*
597 * Responses to general queries are subject to bounds.
598 */
599 mli->mli_gq.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS;
600 mli->mli_v1q.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS;
601 }
602
603 static struct mld_ifinfo *
604 mli_alloc(int how)
605 {
606 struct mld_ifinfo *mli;
607
608 mli = (how == M_WAITOK) ? zalloc(mli_zone) : zalloc_noblock(mli_zone);
609 if (mli != NULL) {
610 bzero(mli, mli_size);
611 lck_mtx_init(&mli->mli_lock, mld_mtx_grp, mld_mtx_attr);
612 mli->mli_debug |= IFD_ALLOC;
613 }
614 return (mli);
615 }
616
617 static void
618 mli_free(struct mld_ifinfo *mli)
619 {
620 MLI_LOCK(mli);
621 if (mli->mli_debug & IFD_ATTACHED) {
622 panic("%s: attached mli=%p is being freed", __func__, mli);
623 /* NOTREACHED */
624 } else if (mli->mli_ifp != NULL) {
625 panic("%s: ifp not NULL for mli=%p", __func__, mli);
626 /* NOTREACHED */
627 } else if (!(mli->mli_debug & IFD_ALLOC)) {
628 panic("%s: mli %p cannot be freed", __func__, mli);
629 /* NOTREACHED */
630 } else if (mli->mli_refcnt != 0) {
631 panic("%s: non-zero refcnt mli=%p", __func__, mli);
632 /* NOTREACHED */
633 }
634 mli->mli_debug &= ~IFD_ALLOC;
635 MLI_UNLOCK(mli);
636
637 lck_mtx_destroy(&mli->mli_lock, mld_mtx_grp);
638 zfree(mli_zone, mli);
639 }
640
641 void
642 mli_addref(struct mld_ifinfo *mli, int locked)
643 {
644 if (!locked)
645 MLI_LOCK_SPIN(mli);
646 else
647 MLI_LOCK_ASSERT_HELD(mli);
648
649 if (++mli->mli_refcnt == 0) {
650 panic("%s: mli=%p wraparound refcnt", __func__, mli);
651 /* NOTREACHED */
652 }
653 if (!locked)
654 MLI_UNLOCK(mli);
655 }
656
657 void
658 mli_remref(struct mld_ifinfo *mli)
659 {
660 struct ifnet *ifp;
661
662 MLI_LOCK_SPIN(mli);
663
664 if (mli->mli_refcnt == 0) {
665 panic("%s: mli=%p negative refcnt", __func__, mli);
666 /* NOTREACHED */
667 }
668
669 --mli->mli_refcnt;
670 if (mli->mli_refcnt > 0) {
671 MLI_UNLOCK(mli);
672 return;
673 }
674
675 ifp = mli->mli_ifp;
676 mli->mli_ifp = NULL;
677 IF_DRAIN(&mli->mli_gq);
678 IF_DRAIN(&mli->mli_v1q);
679 mld_flush_relq(mli);
680 VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
681 MLI_UNLOCK(mli);
682
683 MLD_PRINTF(("%s: freeing mld_ifinfo for ifp %p(%s%d)\n",
684 __func__, ifp, ifp->if_name, ifp->if_unit));
685
686 mli_free(mli);
687 }
688
689 /*
690 * Process a received MLDv1 general or address-specific query.
691 * Assumes that the query header has been pulled up to sizeof(mld_hdr).
692 *
693 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
694 * mld_addr. This is OK as we own the mbuf chain.
695 */
696 static int
697 mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
698 /*const*/ struct mld_hdr *mld)
699 {
700 struct mld_ifinfo *mli;
701 struct in6_multi *inm;
702 int is_general_query;
703 uint16_t timer;
704
705 is_general_query = 0;
706
707 if (!mld_v1enable) {
708 MLD_PRINTF(("ignore v1 query %s on ifp %p(%s%d)\n",
709 ip6_sprintf(&mld->mld_addr),
710 ifp, ifp->if_name, ifp->if_unit));
711 return (0);
712 }
713
714 /*
715 * RFC3810 Section 6.2: MLD queries must originate from
716 * a router's link-local address.
717 */
718 if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
719 MLD_PRINTF(("ignore v1 query src %s on ifp %p(%s%d)\n",
720 ip6_sprintf(&ip6->ip6_src),
721 ifp, ifp->if_name, ifp->if_unit));
722 return (0);
723 }
724
725 /*
726 * Do address field validation upfront before we accept
727 * the query.
728 */
729 if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
730 /*
731 * MLDv1 General Query.
732 * If this was not sent to the all-nodes group, ignore it.
733 */
734 struct in6_addr dst;
735
736 dst = ip6->ip6_dst;
737 in6_clearscope(&dst);
738 if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes))
739 return (EINVAL);
740 is_general_query = 1;
741 } else {
742 /*
743 * Embed scope ID of receiving interface in MLD query for
744 * lookup whilst we don't hold other locks.
745 */
746 in6_setscope(&mld->mld_addr, ifp, NULL);
747 }
748
749 /*
750 * Switch to MLDv1 host compatibility mode.
751 */
752 mli = MLD_IFINFO(ifp);
753 VERIFY(mli != NULL);
754
755 MLI_LOCK(mli);
756 mld_set_version(mli, MLD_VERSION_1);
757 MLI_UNLOCK(mli);
758
759 timer = (ntohs(mld->mld_maxdelay) * PR_SLOWHZ) / MLD_TIMER_SCALE;
760 if (timer == 0)
761 timer = 1;
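/*
 * Worked example (illustrative, taking MLD_TIMER_SCALE as 1000 and
 * PR_SLOWHZ as 2): an MLDv1 maximum response delay of 10000 ms maps
 * to (10000 * 2) / 1000 = 20 slow-timer ticks, i.e. 10 seconds.
 */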
762
763 if (is_general_query) {
764 struct in6_multistep step;
765
766 MLD_PRINTF(("process v1 general query on ifp %p(%s%d)\n",
767 ifp, ifp->if_name, ifp->if_unit));
768 /*
769 * For each reporting group joined on this
770 * interface, kick the report timer.
771 */
772 in6_multihead_lock_shared();
773 IN6_FIRST_MULTI(step, inm);
774 while (inm != NULL) {
775 IN6M_LOCK(inm);
776 if (inm->in6m_ifp == ifp)
777 mld_v1_update_group(inm, timer);
778 IN6M_UNLOCK(inm);
779 IN6_NEXT_MULTI(step, inm);
780 }
781 in6_multihead_lock_done();
782 } else {
783 /*
784 * MLDv1 Group-Specific Query.
785 * If this is a group-specific MLDv1 query, we need only
786 * look up the single group to process it.
787 */
788 in6_multihead_lock_shared();
789 IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
790 in6_multihead_lock_done();
791
792 if (inm != NULL) {
793 IN6M_LOCK(inm);
794 MLD_PRINTF(("process v1 query %s on ifp %p(%s%d)\n",
795 ip6_sprintf(&mld->mld_addr),
796 ifp, ifp->if_name, ifp->if_unit));
797 mld_v1_update_group(inm, timer);
798 IN6M_UNLOCK(inm);
799 IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
800 }
801 /* XXX Clear embedded scope ID as userland won't expect it. */
802 in6_clearscope(&mld->mld_addr);
803 }
804
805 return (0);
806 }
807
808 /*
809 * Update the report timer on a group in response to an MLDv1 query.
810 *
811 * If we are becoming the reporting member for this group, start the timer.
812 * If we already are the reporting member for this group, and timer is
813 * below the threshold, reset it.
814 *
815 * We may be updating the group for the first time since we switched
816 * to MLDv2. If we are, then we must clear any recorded source lists,
817 * and transition to REPORTING state; the group timer is overloaded
818 * for group and group-source query responses.
819 *
820 * Unlike MLDv2, the delay per group should be jittered
821 * to avoid bursts of MLDv1 reports.
822 */
823 static void
824 mld_v1_update_group(struct in6_multi *inm, const int timer)
825 {
826 IN6M_LOCK_ASSERT_HELD(inm);
827
828 MLD_PRINTF(("%s: %s/%s%d timer=%d\n", __func__,
829 ip6_sprintf(&inm->in6m_addr),
830 inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit, timer));
831
832 switch (inm->in6m_state) {
833 case MLD_NOT_MEMBER:
834 case MLD_SILENT_MEMBER:
835 break;
836 case MLD_REPORTING_MEMBER:
837 if (inm->in6m_timer != 0 &&
838 inm->in6m_timer <= timer) {
839 MLD_PRINTF(("%s: REPORTING and timer running, "
840 "skipping.\n", __func__));
841 break;
842 }
843 /* FALLTHROUGH */
844 case MLD_SG_QUERY_PENDING_MEMBER:
845 case MLD_G_QUERY_PENDING_MEMBER:
846 case MLD_IDLE_MEMBER:
847 case MLD_LAZY_MEMBER:
848 case MLD_AWAKENING_MEMBER:
849 MLD_PRINTF(("%s: ->REPORTING\n", __func__));
850 inm->in6m_state = MLD_REPORTING_MEMBER;
851 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
852 current_state_timers_running6 = 1;
853 break;
854 case MLD_SLEEPING_MEMBER:
855 MLD_PRINTF(("%s: ->AWAKENING\n", __func__));
856 inm->in6m_state = MLD_AWAKENING_MEMBER;
857 break;
858 case MLD_LEAVING_MEMBER:
859 break;
860 }
861 }
862
863 /*
864 * Process a received MLDv2 general, group-specific or
865 * group-and-source-specific query.
866 *
867 * Assumes that the query header has been pulled up to sizeof(mldv2_query).
868 *
869 * Return 0 if successful, otherwise an appropriate error code is returned.
870 */
871 static int
872 mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
873 struct mbuf *m, const int off, const int icmp6len)
874 {
875 struct mld_ifinfo *mli;
876 struct mldv2_query *mld;
877 struct in6_multi *inm;
878 uint32_t maxdelay, nsrc, qqi;
879 int is_general_query;
880 uint16_t timer;
881 uint8_t qrv;
882
883 is_general_query = 0;
884
885 /*
886 * RFC3810 Section 6.2: MLD queries must originate from
887 * a router's link-local address.
888 */
889 if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
890 MLD_PRINTF(("ignore v1 query src %s on ifp %p(%s%d)\n",
891 ip6_sprintf(&ip6->ip6_src),
892 ifp, ifp->if_name, ifp->if_unit));
893 return (0);
894 }
895
896 MLD_PRINTF(("input v2 query on ifp %p(%s%d)\n", ifp, ifp->if_name,
897 ifp->if_unit));
898
899 mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off);
900
901 maxdelay = ntohs(mld->mld_maxdelay); /* in milliseconds (RFC 3810, 5.1.3) */
902 if (maxdelay >= 32768) {
903 maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) <<
904 (MLD_MRC_EXP(maxdelay) + 3);
905 }
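/*
 * Worked example (illustrative, assuming the RFC 3810, 5.1.3 field
 * layout: exp = bits 12-14, mant = bits 0-11): an encoded value of
 * 0x8000 has exp 0 and mant 0, decoding to (0 | 0x1000) << 3 =
 * 32768 ms.
 */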
906 timer = (maxdelay * PR_SLOWHZ) / MLD_TIMER_SCALE;
907 if (timer == 0)
908 timer = 1;
909
910 qrv = MLD_QRV(mld->mld_misc);
911 if (qrv < 2) {
912 MLD_PRINTF(("%s: clamping qrv %d to %d\n", __func__,
913 qrv, MLD_RV_INIT));
914 qrv = MLD_RV_INIT;
915 }
916
917 qqi = mld->mld_qqi;
918 if (qqi >= 128) {
919 qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
920 (MLD_QQIC_EXP(mld->mld_qqi) + 3);
921 }
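/*
 * Note (illustrative): per RFC 3810, 5.1.9, QQIC values below 128 are
 * taken directly as seconds (e.g. the default of 125 means 125s); only
 * values of 128 and above use the mantissa/exponent encoding.
 */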
922
923 nsrc = ntohs(mld->mld_numsrc);
924 if (nsrc > MLD_MAX_GS_SOURCES)
925 return (EMSGSIZE);
926 if (icmp6len < sizeof(struct mldv2_query) +
927 (nsrc * sizeof(struct in6_addr)))
928 return (EMSGSIZE);
929
930 /*
931 * Do further input validation upfront to avoid resetting timers
932 * should we need to discard this query.
933 */
934 if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
935 /*
936 * General Queries SHOULD be directed to ff02::1.
937 * A general query with a source list has undefined
938 * behaviour; discard it.
939 */
940 struct in6_addr dst;
941
942 dst = ip6->ip6_dst;
943 in6_clearscope(&dst);
944 if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes) ||
945 nsrc > 0)
946 return (EINVAL);
947 is_general_query = 1;
948 } else {
949 /*
950 * Embed scope ID of receiving interface in MLD query for
951 * lookup whilst we don't hold other locks (due to KAME
952 * locking lameness). We own this mbuf chain just now.
953 */
954 in6_setscope(&mld->mld_addr, ifp, NULL);
955 }
956
957 mli = MLD_IFINFO(ifp);
958 VERIFY(mli != NULL);
959
960 MLI_LOCK(mli);
961 /*
962 * Discard the v2 query if we're in Compatibility Mode.
963 * The RFC is pretty clear that hosts need to stay in MLDv1 mode
964 * until the Old Version Querier Present timer expires.
965 */
966 if (mli->mli_version != MLD_VERSION_2) {
967 MLI_UNLOCK(mli);
968 return (0);
969 }
970
971 mld_set_version(mli, MLD_VERSION_2);
972 mli->mli_rv = qrv;
973 mli->mli_qi = qqi;
974 mli->mli_qri = maxdelay;
975
976 MLD_PRINTF(("%s: qrv %d qi %d maxdelay %d\n", __func__, qrv, qqi,
977 maxdelay));
978
979 if (is_general_query) {
980 /*
981 * MLDv2 General Query.
982 *
983 * Schedule a current-state report on this ifp for
984 * all groups, possibly containing source lists.
985 *
986 * If there is a pending General Query response
987 * scheduled earlier than the selected delay, do
988 * not schedule any other reports.
989 * Otherwise, reset the interface timer.
990 */
991 MLD_PRINTF(("process v2 general query on ifp %p(%s%d)\n",
992 ifp, ifp->if_name, ifp->if_unit));
993 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
994 mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
995 interface_timers_running6 = 1;
996 }
997 MLI_UNLOCK(mli);
998 } else {
999 MLI_UNLOCK(mli);
1000 /*
1001 * MLDv2 Group-specific or Group-and-source-specific Query.
1002 *
1003 * Group-source-specific queries are throttled on
1004 * a per-group basis to defeat denial-of-service attempts.
1005 * Queries for groups we are not a member of on this
1006 * link are simply ignored.
1007 */
1008 in6_multihead_lock_shared();
1009 IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
1010 in6_multihead_lock_done();
1011 if (inm == NULL)
1012 return (0);
1013
1014 IN6M_LOCK(inm);
1015 #ifndef __APPLE__
1016 /* TODO: need ratecheck equivalent */
1017 if (nsrc > 0) {
1018 if (!ratecheck(&inm->in6m_lastgsrtv,
1019 &mld_gsrdelay)) {
1020 MLD_PRINTF(("%s: GS query throttled.\n",
1021 __func__));
1022 IN6M_UNLOCK(inm);
1023 IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
1024 return (0);
1025 }
1026 }
1027 #endif
1028 MLD_PRINTF(("process v2 group query on ifp %p(%s%d)\n",
1029 ifp, ifp->if_name, ifp->if_unit));
1030 /*
1031 * If there is a pending General Query response
1032 * scheduled sooner than the selected delay, no
1033 * further report need be scheduled.
1034 * Otherwise, prepare to respond to the
1035 * group-specific or group-and-source query.
1036 */
1037 MLI_LOCK(mli);
1038 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
1039 MLI_UNLOCK(mli);
1040 mld_v2_process_group_query(inm, timer, m, off);
1041 } else {
1042 MLI_UNLOCK(mli);
1043 }
1044 IN6M_UNLOCK(inm);
1045 IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
1046 /* XXX Clear embedded scope ID as userland won't expect it. */
1047 in6_clearscope(&mld->mld_addr);
1048 }
1049
1050 return (0);
1051 }
1052
1053 /*
1054 * Process a received MLDv2 group-specific or group-and-source-specific
1055 * query.
1056 * Return <0 if any error occurred. Currently this is ignored.
1057 */
1058 static int
1059 mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0,
1060 const int off)
1061 {
1062 struct mldv2_query *mld;
1063 int retval;
1064 uint16_t nsrc;
1065
1066 IN6M_LOCK_ASSERT_HELD(inm);
1067
1068 retval = 0;
1069 mld = (struct mldv2_query *)(mtod(m0, uint8_t *) + off);
1070
1071 switch (inm->in6m_state) {
1072 case MLD_NOT_MEMBER:
1073 case MLD_SILENT_MEMBER:
1074 case MLD_SLEEPING_MEMBER:
1075 case MLD_LAZY_MEMBER:
1076 case MLD_AWAKENING_MEMBER:
1077 case MLD_IDLE_MEMBER:
1078 case MLD_LEAVING_MEMBER:
1079 return (retval);
1080 break;
1081 case MLD_REPORTING_MEMBER:
1082 case MLD_G_QUERY_PENDING_MEMBER:
1083 case MLD_SG_QUERY_PENDING_MEMBER:
1084 break;
1085 }
1086
1087 nsrc = ntohs(mld->mld_numsrc);
1088
1089 /*
1090 * Deal with group-specific queries upfront.
1091 * If any group query is already pending, purge any recorded
1092 * source-list state if it exists, and schedule a query response
1093 * for this group-specific query.
1094 */
1095 if (nsrc == 0) {
1096 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
1097 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
1098 in6m_clear_recorded(inm);
1099 timer = min(inm->in6m_timer, timer);
1100 }
1101 inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
1102 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1103 current_state_timers_running6 = 1;
1104 return (retval);
1105 }
1106
1107 /*
1108 * Deal with the case where a group-and-source-specific query has
1109 * been received but a group-specific query is already pending.
1110 */
1111 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
1112 timer = min(inm->in6m_timer, timer);
1113 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1114 current_state_timers_running6 = 1;
1115 return (retval);
1116 }
1117
1118 /*
1119 * Finally, deal with the case where a group-and-source-specific
1120 * query has been received, where a response to a previous g-s-r
1121 * query exists, or none exists.
1122 * In this case, we need to parse the source-list which the Querier
1123 * has provided us with and check if we have any source list filter
1124 * entries at T1 for these sources. If we do not, there is no need to
1125 * schedule a report and the query may be dropped.
1126 * If we do, we must record them and schedule a current-state
1127 * report for those sources.
1128 */
1129 if (inm->in6m_nsrc > 0) {
1130 struct mbuf *m;
1131 uint8_t *sp;
1132 int i, nrecorded;
1133 int soff;
1134
1135 m = m0;
1136 soff = off + sizeof(struct mldv2_query);
1137 nrecorded = 0;
1138 for (i = 0; i < nsrc; i++) {
1139 sp = mtod(m, uint8_t *) + soff;
1140 retval = in6m_record_source(inm,
1141 (const struct in6_addr *)sp);
1142 if (retval < 0)
1143 break;
1144 nrecorded += retval;
1145 soff += sizeof(struct in6_addr);
1146 if (soff >= m->m_len) {
1147 soff = soff - m->m_len;
1148 m = m->m_next;
1149 if (m == NULL)
1150 break;
1151 }
1152 }
1153 if (nrecorded > 0) {
1154 MLD_PRINTF(( "%s: schedule response to SG query\n",
1155 __func__));
1156 inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
1157 inm->in6m_timer = MLD_RANDOM_DELAY(timer);
1158 current_state_timers_running6 = 1;
1159 }
1160 }
1161
1162 return (retval);
1163 }
1164
1165 /*
1166 * Process a received MLDv1 host membership report.
1167 * Assumes mld points to mld_hdr in pulled up mbuf chain.
1168 *
1169 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
1170 * mld_addr. This is OK as we own the mbuf chain.
1171 */
1172 static int
1173 mld_v1_input_report(struct ifnet *ifp, const struct ip6_hdr *ip6,
1174 /*const*/ struct mld_hdr *mld)
1175 {
1176 struct in6_addr src, dst;
1177 struct in6_ifaddr *ia;
1178 struct in6_multi *inm;
1179
1180 if (!mld_v1enable) {
1181 MLD_PRINTF(("ignore v1 report %s on ifp %p(%s%d)\n",
1182 ip6_sprintf(&mld->mld_addr),
1183 ifp, ifp->if_name, ifp->if_unit));
1184 return (0);
1185 }
1186
1187 if (ifp->if_flags & IFF_LOOPBACK)
1188 return (0);
1189
1190 /*
1191 * MLDv1 reports must originate from a host's link-local address,
1192 * or the unspecified address (when booting).
1193 */
1194 src = ip6->ip6_src;
1195 in6_clearscope(&src);
1196 if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
1197 MLD_PRINTF(("ignore v1 query src %s on ifp %p(%s%d)\n",
1198 ip6_sprintf(&ip6->ip6_src),
1199 ifp, ifp->if_name, ifp->if_unit));
1200 return (EINVAL);
1201 }
1202
1203 /*
1204 * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
1205 * group, and must be directed to the group itself.
1206 */
1207 dst = ip6->ip6_dst;
1208 in6_clearscope(&dst);
1209 if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
1210 !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
1211 MLD_PRINTF(("ignore v1 query dst %s on ifp %p(%s%d)\n",
1212 ip6_sprintf(&ip6->ip6_dst),
1213 ifp, ifp->if_name, ifp->if_unit));
1214 return (EINVAL);
1215 }
1216
1217 /*
1218 * Make sure we don't hear our own membership report, as fast
1219 * leave requires knowing that we are the only member of a
1220 * group. Assume we used the link-local address if available,
1221 * otherwise look for ::.
1222 *
1223 * XXX Note that scope ID comparison is needed for the address
1224 * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
1225 * performed for the on-wire address.
1226 */
1227 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
1228 if (ia != NULL) {
1229 IFA_LOCK(&ia->ia_ifa);
1230 if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia))) {
1231 IFA_UNLOCK(&ia->ia_ifa);
1232 IFA_REMREF(&ia->ia_ifa);
1233 return (0);
1234 }
1235 IFA_UNLOCK(&ia->ia_ifa);
1236 IFA_REMREF(&ia->ia_ifa);
1237 } else if (IN6_IS_ADDR_UNSPECIFIED(&src)) {
1238 return (0);
1239 }
1240
1241 MLD_PRINTF(("process v1 report %s on ifp %p(%s%d)\n",
1242 ip6_sprintf(&mld->mld_addr), ifp, ifp->if_name, ifp->if_unit));
1243
1244 /*
1245 * Embed scope ID of receiving interface in MLD query for lookup
1246 * whilst we don't hold other locks (due to KAME locking lameness).
1247 */
1248 if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
1249 in6_setscope(&mld->mld_addr, ifp, NULL);
1250
1251 /*
1252 * MLDv1 report suppression.
1253 * If we are a member of this group, and our membership should be
1254 * reported, and our group timer is pending or about to be reset,
1255 * stop our group timer by transitioning to the 'lazy' state.
1256 */
1257 in6_multihead_lock_shared();
1258 IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
1259 in6_multihead_lock_done();
1260
1261 if (inm != NULL) {
1262 struct mld_ifinfo *mli;
1263
1264 IN6M_LOCK(inm);
1265 mli = inm->in6m_mli;
1266 VERIFY(mli != NULL);
1267
1268 MLI_LOCK(mli);
1269 /*
1270 * If we are in MLDv2 host mode, do not allow the
1271 * other host's MLDv1 report to suppress our reports.
1272 */
1273 if (mli->mli_version == MLD_VERSION_2) {
1274 MLI_UNLOCK(mli);
1275 IN6M_UNLOCK(inm);
1276 IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
1277 goto out;
1278 }
1279 MLI_UNLOCK(mli);
1280
1281 inm->in6m_timer = 0;
1282
1283 switch (inm->in6m_state) {
1284 case MLD_NOT_MEMBER:
1285 case MLD_SILENT_MEMBER:
1286 case MLD_SLEEPING_MEMBER:
1287 break;
1288 case MLD_REPORTING_MEMBER:
1289 case MLD_IDLE_MEMBER:
1290 case MLD_AWAKENING_MEMBER:
1291 MLD_PRINTF(("report suppressed for %s on ifp %p(%s%d)\n",
1292 ip6_sprintf(&mld->mld_addr),
1293 ifp, ifp->if_name, ifp->if_unit));
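/* FALLTHROUGH */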
1294 case MLD_LAZY_MEMBER:
1295 inm->in6m_state = MLD_LAZY_MEMBER;
1296 break;
1297 case MLD_G_QUERY_PENDING_MEMBER:
1298 case MLD_SG_QUERY_PENDING_MEMBER:
1299 case MLD_LEAVING_MEMBER:
1300 break;
1301 }
1302 IN6M_UNLOCK(inm);
1303 IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
1304 }
1305
1306 out:
1307 /* XXX Clear embedded scope ID as userland won't expect it. */
1308 in6_clearscope(&mld->mld_addr);
1309
1310 return (0);
1311 }
1312
1313 /*
1314 * MLD input path.
1315 *
1316 * Assume query messages which fit in a single ICMPv6 message header
1317 * have been pulled up.
1318 * Assume that userland will want to see the message, even if it
1319 * otherwise fails kernel input validation; do not free it.
1320 * Pullup may however free the mbuf chain m if it fails.
1321 *
1322 * Return IPPROTO_DONE if we freed m. Otherwise, return 0.
1323 */
1324 int
1325 mld_input(struct mbuf *m, int off, int icmp6len)
1326 {
1327 struct ifnet *ifp;
1328 struct ip6_hdr *ip6;
1329 struct mld_hdr *mld;
1330 int mldlen;
1331
1332 MLD_PRINTF(("%s: called w/mbuf (%p,%d)\n", __func__, m, off));
1333
1334 ifp = m->m_pkthdr.rcvif;
1335
1336 ip6 = mtod(m, struct ip6_hdr *);
1337
1338 /* Pullup to appropriate size. */
1339 mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
1340 if (mld->mld_type == MLD_LISTENER_QUERY &&
1341 icmp6len >= sizeof(struct mldv2_query)) {
1342 mldlen = sizeof(struct mldv2_query);
1343 } else {
1344 mldlen = sizeof(struct mld_hdr);
1345 }
1346 IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen);
1347 if (mld == NULL) {
1348 icmp6stat.icp6s_badlen++;
1349 return (IPPROTO_DONE);
1350 }
1351
1352 /*
1353 * Userland needs to see all of this traffic for implementing
1354 * the endpoint discovery portion of multicast routing.
1355 */
1356 switch (mld->mld_type) {
1357 case MLD_LISTENER_QUERY:
1358 icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
1359 if (icmp6len == sizeof(struct mld_hdr)) {
1360 if (mld_v1_input_query(ifp, ip6, mld) != 0)
1361 return (0);
1362 } else if (icmp6len >= sizeof(struct mldv2_query)) {
1363 if (mld_v2_input_query(ifp, ip6, m, off,
1364 icmp6len) != 0)
1365 return (0);
1366 }
1367 break;
1368 case MLD_LISTENER_REPORT:
1369 icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1370 if (mld_v1_input_report(ifp, ip6, mld) != 0)
1371 return (0);
1372 break;
1373 case MLDV2_LISTENER_REPORT:
1374 icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1375 break;
1376 case MLD_LISTENER_DONE:
1377 icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
1378 break;
1379 default:
1380 break;
1381 }
1382
1383 return (0);
1384 }
1385
1386 /*
1387 * MLD6 slowtimo handler.
1388 * Combines both the slow and fast timers into one. We lose some
1389 * responsiveness, but it allows the system to avoid having a pr_fasttimo, thus allowing for power savings.
1390 */
1391 void
1392 mld_slowtimo(void)
1393 {
1394 struct ifqueue scq; /* State-change packets */
1395 struct ifqueue qrq; /* Query response packets */
1396 struct ifnet *ifp;
1397 struct mld_ifinfo *mli;
1398 struct in6_multi *inm;
1399 int uri_fasthz = 0;
1400
1401 MLD_LOCK();
1402
1403 LIST_FOREACH(mli, &mli_head, mli_link) {
1404 MLI_LOCK(mli);
1405 mld_v1_process_querier_timers(mli);
1406 MLI_UNLOCK(mli);
1407 }
1408
1409 /*
1410 * Quick check to see if any work needs to be done, in order to
1411 * minimize the overhead of fasttimo processing.
1412 */
1413 if (!current_state_timers_running6 &&
1414 !interface_timers_running6 &&
1415 !state_change_timers_running6) {
1416 MLD_UNLOCK();
1417 return;
1418 }
1419
1420 /*
1421 * MLDv2 General Query response timer processing.
1422 */
1423 if (interface_timers_running6) {
1424 #if 0
1425 MLD_PRINTF(("%s: interface timers running\n", __func__));
1426 #endif
1427 interface_timers_running6 = 0;
1428 LIST_FOREACH(mli, &mli_head, mli_link) {
1429 MLI_LOCK(mli);
1430 if (mli->mli_v2_timer == 0) {
1431 /* Do nothing. */
1432 } else if (--mli->mli_v2_timer == 0) {
1433 mld_v2_dispatch_general_query(mli);
1434 } else {
1435 interface_timers_running6 = 1;
1436 }
1437 MLI_UNLOCK(mli);
1438 }
1439 }
1440
1441 if (!current_state_timers_running6 &&
1442 !state_change_timers_running6)
1443 goto out_locked;
1444
1445 current_state_timers_running6 = 0;
1446 state_change_timers_running6 = 0;
1447 #if 0
1448 MLD_PRINTF(("%s: state change timers running\n", __func__));
1449 #endif
1450
1451 memset(&qrq, 0, sizeof(struct ifqueue));
1452 qrq.ifq_maxlen = MLD_MAX_G_GS_PACKETS;
1453
1454 memset(&scq, 0, sizeof(struct ifqueue));
1455 scq.ifq_maxlen = MLD_MAX_STATE_CHANGE_PACKETS;
1456
1457 /*
1458 * MLD host report and state-change timer processing.
1459 * Note: Processing a v2 group timer may remove a node.
1460 */
1461 LIST_FOREACH(mli, &mli_head, mli_link) {
1462 struct in6_multistep step;
1463
1464 MLI_LOCK(mli);
1465 ifp = mli->mli_ifp;
1466 uri_fasthz = MLD_RANDOM_DELAY(mli->mli_uri * PR_SLOWHZ);
1467 MLI_UNLOCK(mli);
1468
1469 in6_multihead_lock_shared();
1470 IN6_FIRST_MULTI(step, inm);
1471 while (inm != NULL) {
1472 IN6M_LOCK(inm);
1473 if (inm->in6m_ifp != ifp)
1474 goto next;
1475
1476 MLI_LOCK(mli);
1477 switch (mli->mli_version) {
1478 case MLD_VERSION_1:
1479 mld_v1_process_group_timer(inm,
1480 mli->mli_version);
1481 break;
1482 case MLD_VERSION_2:
1483 mld_v2_process_group_timers(mli, &qrq,
1484 &scq, inm, uri_fasthz);
1485 break;
1486 }
1487 MLI_UNLOCK(mli);
1488 next:
1489 IN6M_UNLOCK(inm);
1490 IN6_NEXT_MULTI(step, inm);
1491 }
1492 in6_multihead_lock_done();
1493
1494 MLI_LOCK(mli);
1495 if (mli->mli_version == MLD_VERSION_1) {
1496 mld_dispatch_queue(mli, &mli->mli_v1q, 0);
1497 } else if (mli->mli_version == MLD_VERSION_2) {
1498 MLI_UNLOCK(mli);
1499 mld_dispatch_queue(NULL, &qrq, 0);
1500 mld_dispatch_queue(NULL, &scq, 0);
1501 VERIFY(qrq.ifq_len == 0);
1502 VERIFY(scq.ifq_len == 0);
1503 MLI_LOCK(mli);
1504 }
1505 /*
1506 * In case there are still any pending membership reports
1507 * which didn't get drained at version change time.
1508 */
1509 IF_DRAIN(&mli->mli_v1q);
1510 /*
1511 * Release all deferred inm records, and drain any locally
1512 * enqueued packets; do it even if the current MLD version
1513 * for the link is no longer MLDv2, in order to handle the
1514 * version change case.
1515 */
1516 mld_flush_relq(mli);
1517 VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
1518 MLI_UNLOCK(mli);
1519
1520 IF_DRAIN(&qrq);
1521 IF_DRAIN(&scq);
1522 }
1523
1524 out_locked:
1525 MLD_UNLOCK();
1526 }
1527
1528 /*
1529 * Free the in6_multi reference(s) for this MLD lifecycle.
1530 *
1531 * Caller must be holding mli_lock.
1532 */
1533 static void
1534 mld_flush_relq(struct mld_ifinfo *mli)
1535 {
1536 struct in6_multi *inm;
1537
1538 again:
1539 MLI_LOCK_ASSERT_HELD(mli);
1540 inm = SLIST_FIRST(&mli->mli_relinmhead);
1541 if (inm != NULL) {
1542 int lastref;
1543
1544 SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
1545 MLI_UNLOCK(mli);
1546
1547 in6_multihead_lock_exclusive();
1548 IN6M_LOCK(inm);
1549 VERIFY(inm->in6m_nrelecnt != 0);
1550 inm->in6m_nrelecnt--;
1551 lastref = in6_multi_detach(inm);
1552 VERIFY(!lastref || (!(inm->in6m_debug & IFD_ATTACHED) &&
1553 inm->in6m_reqcnt == 0));
1554 IN6M_UNLOCK(inm);
1555 in6_multihead_lock_done();
1556 /* from mli_relinmhead */
1557 IN6M_REMREF(inm);
1558 /* from in6_multihead_list */
1559 if (lastref)
1560 IN6M_REMREF(inm);
1561
1562 MLI_LOCK(mli);
1563 goto again;
1564 }
1565 }
1566
1567 /*
1568 * Update host report group timer.
1569 * Will update the global pending timer flags.
1570 */
1571 static void
1572 mld_v1_process_group_timer(struct in6_multi *inm, const int mld_version)
1573 {
1574 #pragma unused(mld_version)
1575 int report_timer_expired;
1576
1577 IN6M_LOCK_ASSERT_HELD(inm);
1578 MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
1579
1580 if (inm->in6m_timer == 0) {
1581 report_timer_expired = 0;
1582 } else if (--inm->in6m_timer == 0) {
1583 report_timer_expired = 1;
1584 } else {
1585 current_state_timers_running6 = 1;
1586 return;
1587 }
1588
1589 switch (inm->in6m_state) {
1590 case MLD_NOT_MEMBER:
1591 case MLD_SILENT_MEMBER:
1592 case MLD_IDLE_MEMBER:
1593 case MLD_LAZY_MEMBER:
1594 case MLD_SLEEPING_MEMBER:
1595 case MLD_AWAKENING_MEMBER:
1596 break;
1597 case MLD_REPORTING_MEMBER:
1598 if (report_timer_expired) {
1599 inm->in6m_state = MLD_IDLE_MEMBER;
1600 (void) mld_v1_transmit_report(inm,
1601 MLD_LISTENER_REPORT);
1602 IN6M_LOCK_ASSERT_HELD(inm);
1603 MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
1604 }
1605 break;
1606 case MLD_G_QUERY_PENDING_MEMBER:
1607 case MLD_SG_QUERY_PENDING_MEMBER:
1608 case MLD_LEAVING_MEMBER:
1609 break;
1610 }
1611 }
1612
1613 /*
1614 * Update a group's timers for MLDv2.
1615 * Will update the global pending timer flags.
1616 * Note: Unlocked read from mli.
1617 */
1618 static void
1619 mld_v2_process_group_timers(struct mld_ifinfo *mli,
1620 struct ifqueue *qrq, struct ifqueue *scq,
1621 struct in6_multi *inm, const int uri_fasthz)
1622 {
1623 int query_response_timer_expired;
1624 int state_change_retransmit_timer_expired;
1625
1626 IN6M_LOCK_ASSERT_HELD(inm);
1627 MLI_LOCK_ASSERT_HELD(mli);
1628 VERIFY(mli == inm->in6m_mli);
1629
1630 query_response_timer_expired = 0;
1631 state_change_retransmit_timer_expired = 0;
1632
1633 /*
1634 * During a transition from compatibility mode back to MLDv2,
1635 * a group record in REPORTING state may still have its group
1636 * timer active. This is a no-op in this function; it is easier
1637 * to deal with it here than to complicate the slow-timeout path.
1638 */
1639 if (inm->in6m_timer == 0) {
1640 query_response_timer_expired = 0;
1641 } else if (--inm->in6m_timer == 0) {
1642 query_response_timer_expired = 1;
1643 } else {
1644 current_state_timers_running6 = 1;
1645 }
1646
1647 if (inm->in6m_sctimer == 0) {
1648 state_change_retransmit_timer_expired = 0;
1649 } else if (--inm->in6m_sctimer == 0) {
1650 state_change_retransmit_timer_expired = 1;
1651 } else {
1652 state_change_timers_running6 = 1;
1653 }
1654
1655 /* We are in fasttimo, so be quick about it. */
1656 if (!state_change_retransmit_timer_expired &&
1657 !query_response_timer_expired)
1658 return;
1659
1660 switch (inm->in6m_state) {
1661 case MLD_NOT_MEMBER:
1662 case MLD_SILENT_MEMBER:
1663 case MLD_SLEEPING_MEMBER:
1664 case MLD_LAZY_MEMBER:
1665 case MLD_AWAKENING_MEMBER:
1666 case MLD_IDLE_MEMBER:
1667 break;
1668 case MLD_G_QUERY_PENDING_MEMBER:
1669 case MLD_SG_QUERY_PENDING_MEMBER:
1670 /*
1671 * Respond to a previously pending Group-Specific
1672 * or Group-and-Source-Specific query by enqueueing
1673 * the appropriate Current-State report for
1674 * immediate transmission.
1675 */
1676 if (query_response_timer_expired) {
1677 int retval;
1678
1679 retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
1680 (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER),
1681 0);
1682 MLD_PRINTF(("%s: enqueue record = %d\n",
1683 __func__, retval));
1684 inm->in6m_state = MLD_REPORTING_MEMBER;
1685 in6m_clear_recorded(inm);
1686 }
1687 /* FALLTHROUGH */
1688 case MLD_REPORTING_MEMBER:
1689 case MLD_LEAVING_MEMBER:
1690 if (state_change_retransmit_timer_expired) {
1691 /*
1692 * State-change retransmission timer fired.
1693 * If there are any further pending retransmissions,
1694 * set the global pending state-change flag, and
1695 * reset the timer.
1696 */
1697 if (--inm->in6m_scrv > 0) {
1698 inm->in6m_sctimer = uri_fasthz;
1699 state_change_timers_running6 = 1;
1700 }
1701 /*
1702 * Retransmit the previously computed state-change
1703 * report. If there are no further pending
1704 * retransmissions, the mbuf queue will be consumed.
1705 * Update T0 state to T1 as we have now sent
1706 * a state-change.
1707 */
1708 (void) mld_v2_merge_state_changes(inm, scq);
1709
1710 in6m_commit(inm);
1711 MLD_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__,
1712 ip6_sprintf(&inm->in6m_addr),
1713 inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
1714
1715 /*
1716 * If we are leaving the group for good, make sure
1717 * we release MLD's reference to it.
1718 * This release must be deferred using a SLIST,
1719 * as we are called from a loop which traverses
1720 * the in_ifmultiaddr TAILQ.
1721 */
1722 if (inm->in6m_state == MLD_LEAVING_MEMBER &&
1723 inm->in6m_scrv == 0) {
1724 inm->in6m_state = MLD_NOT_MEMBER;
1725 /*
1726 * A reference has already been held in
1727 * mld_final_leave() for this inm, so
1728 * no need to hold another one. We also
1729 * bumped up its request count then, so
1730 * that it stays in in6_multihead. Both
1731 * of them will be released when it is
1732 * dequeued later on.
1733 */
1734 VERIFY(inm->in6m_nrelecnt != 0);
1735 SLIST_INSERT_HEAD(&mli->mli_relinmhead,
1736 inm, in6m_nrele);
1737 }
1738 }
1739 break;
1740 }
1741 }
1742
1743 /*
1744 * Switch to a different version on the given interface,
1745 * as per Section 9.12.
1746 */
1747 static void
1748 mld_set_version(struct mld_ifinfo *mli, const int mld_version)
1749 {
1750 int old_version_timer;
1751
1752 MLI_LOCK_ASSERT_HELD(mli);
1753
1754 MLD_PRINTF(("%s: switching to v%d on ifp %p(%s%d)\n", __func__,
1755 mld_version, mli->mli_ifp, mli->mli_ifp->if_name,
1756 mli->mli_ifp->if_unit));
1757
1758 if (mld_version == MLD_VERSION_1) {
1759 /*
1760 * Compute the "Older Version Querier Present" timer as per
1761 * Section 9.12.
1762 */
1763 old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
1764 old_version_timer *= PR_SLOWHZ;
1765 mli->mli_v1_timer = old_version_timer;
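/*
 * Worked example (illustrative): with the RFC 3810 defaults of
 * Robustness Variable 2, Query Interval 125s and Query Response
 * Interval 10s, this is (2 * 125) + 10 = 260 seconds before
 * scaling by PR_SLOWHZ.
 */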
1766 }
1767
1768 if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) {
1769 mli->mli_version = MLD_VERSION_1;
1770 mld_v2_cancel_link_timers(mli);
1771 }
1772
1773 MLI_LOCK_ASSERT_HELD(mli);
1774 }
1775
1776 /*
1777 * Cancel pending MLDv2 timers for the given link and all groups
1778 * joined on it; state-change, general-query, and group-query timers.
1779 */
1780 static void
1781 mld_v2_cancel_link_timers(struct mld_ifinfo *mli)
1782 {
1783 struct ifnet *ifp;
1784 struct in6_multi *inm;
1785 struct in6_multistep step;
1786
1787 MLI_LOCK_ASSERT_HELD(mli);
1788
1789 MLD_PRINTF(("%s: cancel v2 timers on ifp %p(%s%d)\n", __func__,
1790 mli->mli_ifp, mli->mli_ifp->if_name, mli->mli_ifp->if_unit));
1791
1792 /*
1793 * Fast-track this potentially expensive operation
1794 * by checking all the global 'timer pending' flags.
1795 */
1796 if (!interface_timers_running6 &&
1797 !state_change_timers_running6 &&
1798 !current_state_timers_running6)
1799 return;
1800
1801 mli->mli_v2_timer = 0;
1802 ifp = mli->mli_ifp;
1803 MLI_UNLOCK(mli);
1804
1805 in6_multihead_lock_shared();
1806 IN6_FIRST_MULTI(step, inm);
1807 while (inm != NULL) {
1808 IN6M_LOCK(inm);
1809 if (inm->in6m_ifp != ifp)
1810 goto next;
1811
1812 switch (inm->in6m_state) {
1813 case MLD_NOT_MEMBER:
1814 case MLD_SILENT_MEMBER:
1815 case MLD_IDLE_MEMBER:
1816 case MLD_LAZY_MEMBER:
1817 case MLD_SLEEPING_MEMBER:
1818 case MLD_AWAKENING_MEMBER:
1819 break;
1820 case MLD_LEAVING_MEMBER:
1821 /*
1822 * If we are leaving the group and switching
1823 * version, we need to release the final
1824 * reference held for issuing the INCLUDE {}.
1825 * During mld_final_leave(), we bumped up both the
1826 * request and reference counts. Since we cannot
1827 * call in6_multi_detach() here, defer this task to
1828 * the timer routine.
1829 */
1830 VERIFY(inm->in6m_nrelecnt != 0);
1831 MLI_LOCK(mli);
1832 SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
1833 in6m_nrele);
1834 MLI_UNLOCK(mli);
1835 /* FALLTHROUGH */
1836 case MLD_G_QUERY_PENDING_MEMBER:
1837 case MLD_SG_QUERY_PENDING_MEMBER:
1838 in6m_clear_recorded(inm);
1839 /* FALLTHROUGH */
1840 case MLD_REPORTING_MEMBER:
1841 inm->in6m_sctimer = 0;
1842 inm->in6m_timer = 0;
1843 inm->in6m_state = MLD_REPORTING_MEMBER;
1844 /*
1845 * Free any pending MLDv2 state-change records.
1846 */
1847 IF_DRAIN(&inm->in6m_scq);
1848 break;
1849 }
1850 next:
1851 IN6M_UNLOCK(inm);
1852 IN6_NEXT_MULTI(step, inm);
1853 }
1854 in6_multihead_lock_done();
1855
1856 MLI_LOCK(mli);
1857 }
1858
1859 /*
1860 * Update the Older Version Querier Present timers for a link.
1861 * See Section 9.12 of RFC 3810.
1862 */
1863 static void
1864 mld_v1_process_querier_timers(struct mld_ifinfo *mli)
1865 {
1866 MLI_LOCK_ASSERT_HELD(mli);
1867
1868 if (mli->mli_version != MLD_VERSION_2 && --mli->mli_v1_timer == 0) {
1869 /*
1870 * MLDv1 Querier Present timer expired; revert to MLDv2.
1871 */
1872 MLD_PRINTF(("%s: transition from v%d -> v%d on %p(%s%d)\n",
1873 __func__, mli->mli_version, MLD_VERSION_2,
1874 mli->mli_ifp, mli->mli_ifp->if_name, mli->mli_ifp->if_unit));
1875 mli->mli_version = MLD_VERSION_2;
1876 }
1877 }
1878
1879 /*
1880 * Transmit an MLDv1 report immediately.
1881 */
1882 static int
1883 mld_v1_transmit_report(struct in6_multi *in6m, const int type)
1884 {
1885 struct ifnet *ifp;
1886 struct in6_ifaddr *ia;
1887 struct ip6_hdr *ip6;
1888 struct mbuf *mh, *md;
1889 struct mld_hdr *mld;
1890 int error = 0;
1891
1892 IN6M_LOCK_ASSERT_HELD(in6m);
1893 MLI_LOCK_ASSERT_HELD(in6m->in6m_mli);
1894
1895 ifp = in6m->in6m_ifp;
1896 /* ia may be NULL if link-local address is tentative. */
1897 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
1898
1899 MGETHDR(mh, M_DONTWAIT, MT_HEADER);
1900 if (mh == NULL) {
1901 if (ia != NULL)
1902 IFA_REMREF(&ia->ia_ifa);
1903 return (ENOMEM);
1904 }
1905 MGET(md, M_DONTWAIT, MT_DATA);
1906 if (md == NULL) {
1907 m_free(mh);
1908 if (ia != NULL)
1909 IFA_REMREF(&ia->ia_ifa);
1910 return (ENOMEM);
1911 }
1912 mh->m_next = md;
1913
1914 /*
1915 * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so
1916 * that ether_output() does not need to allocate another mbuf
1917 * for the header in the most common case.
1918 */
1919 MH_ALIGN(mh, sizeof(struct ip6_hdr));
1920 mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr);
1921 mh->m_len = sizeof(struct ip6_hdr);
1922
1923 ip6 = mtod(mh, struct ip6_hdr *);
1924 ip6->ip6_flow = 0;
1925 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
1926 ip6->ip6_vfc |= IPV6_VERSION;
1927 ip6->ip6_nxt = IPPROTO_ICMPV6;
1928 if (ia != NULL)
1929 IFA_LOCK(&ia->ia_ifa);
1930 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
1931 if (ia != NULL) {
1932 IFA_UNLOCK(&ia->ia_ifa);
1933 IFA_REMREF(&ia->ia_ifa);
1934 ia = NULL;
1935 }
1936 ip6->ip6_dst = in6m->in6m_addr;
1937
1938 md->m_len = sizeof(struct mld_hdr);
1939 mld = mtod(md, struct mld_hdr *);
1940 mld->mld_type = type;
1941 mld->mld_code = 0;
1942 mld->mld_cksum = 0;
1943 mld->mld_maxdelay = 0;
1944 mld->mld_reserved = 0;
1945 mld->mld_addr = in6m->in6m_addr;
1946 in6_clearscope(&mld->mld_addr);
1947 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
1948 sizeof(struct ip6_hdr), sizeof(struct mld_hdr));
1949
1950 mh->m_flags |= M_MLDV1;
1951
1952
1953 /*
1954 * Because at this point we may be holding in6_multihead_lock
1955 * in shared or exclusive mode, we cannot call
1956 * mld_dispatch_packet() here: that would eventually call
1957 * ip6_output(), which would try to take in6_multihead_lock
1958 * and deadlock.
1959 * Instead we defer the work to the mld_slowtimo() thread, thus
1960 * avoiding the need to unlock in6_multihead_lock here.
1961 */
1962 if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) {
1963 MLD_PRINTF(("%s: v1 outbound queue full\n", __func__));
1964 error = ENOMEM;
1965 m_freem(mh);
1966 } else
1967 IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh);
1968
1969 return (error);
1970 }
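/*
 * For reference, the message assembled above is a minimal RFC 2710
 * MLDv1 message: an IPv6 header immediately followed by struct
 * mld_hdr, whose fields are filled in as:
 *
 *	mld_type	131 (MLD_LISTENER_REPORT) or
 *			132 (MLD_LISTENER_DONE)
 *	mld_code	0
 *	mld_cksum	ICMPv6 checksum over pseudo-header and message
 *	mld_maxdelay	0 (only meaningful in queries)
 *	mld_reserved	0
 *	mld_addr	the group address, with its scope ID cleared
 *
 * The hop limit of 1 and the IPv6 Router Alert hop-by-hop option are
 * added later, when mld_dispatch_packet() hands the mbuf to
 * ip6_output() with mld_po.
 */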
1971
1972 /*
1973 * Process a state change from the upper layer for the given IPv6 group.
1974 *
1975 * Each socket holds a reference on the in6_multi in its own ip_moptions.
1976 * The socket layer will have made the necessary updates to the group
1977 * state, it is now up to MLD to issue a state change report if there
1978 * has been any change between T0 (when the last state-change was issued)
1979 * and T1 (now).
1980 *
1981 * We use the MLDv2 state machine at the group level. The MLD module,
1982 * however, makes the decision as to which MLD protocol version to speak.
1983 * A state change *from* INCLUDE {} always means an initial join.
1984 * A state change *to* INCLUDE {} always means a final leave.
1985 *
1986 * If delay is non-zero, and the state change is an initial multicast
1987 * join, the state change report will be delayed by 'delay' ticks
1988 * in units of PR_FASTHZ if MLDv1 is active on the link; otherwise
1989 * the initial MLDv2 state change report will be delayed by whichever
1990 * is sooner, a pending state-change timer or delay itself.
1991 */
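/*
 * To summarize the dispatch below in terms of the filter mode
 * recorded at T0 (in6m_st[0]) and T1 (in6m_st[1]):
 *
 *	T0 fmode         T1 fmode         action
 *	---------------  ---------------  ------------------------
 *	MCAST_UNDEFINED  INCLUDE/EXCLUDE  mld_initial_join()
 *	INCLUDE/EXCLUDE  MCAST_UNDEFINED  mld_final_leave()
 *	otherwise (same mode, or IN<->EX) mld_handle_state_change()
 */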
1992 int
1993 mld_change_state(struct in6_multi *inm, const int delay)
1994 {
1995 struct mld_ifinfo *mli;
1996 struct ifnet *ifp;
1997 int error = 0;
1998
1999 IN6M_LOCK_ASSERT_HELD(inm);
2000 VERIFY(inm->in6m_mli != NULL);
2001 MLI_LOCK_ASSERT_NOTHELD(inm->in6m_mli);
2002
2003 /*
2004 * Try to detect if the upper layer just asked us to change state
2005 * for an interface which has now gone away.
2006 */
2007 VERIFY(inm->in6m_ifma != NULL);
2008 ifp = inm->in6m_ifma->ifma_ifp;
2009 /*
2010 * Sanity check that netinet6's notion of ifp is the same as net's.
2011 */
2012 VERIFY(inm->in6m_ifp == ifp);
2013
2014 mli = MLD_IFINFO(ifp);
2015 VERIFY(mli != NULL);
2016
2017 /*
2018 * If we detect a state transition to or from MCAST_UNDEFINED
2019 * for this group, then we are starting or finishing an MLD
2020 * life cycle for this group.
2021 */
2022 if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) {
2023 MLD_PRINTF(("%s: inm transition %d -> %d\n", __func__,
2024 inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode));
2025 if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
2026 MLD_PRINTF(("%s: initial join\n", __func__));
2027 error = mld_initial_join(inm, mli, delay);
2028 goto out;
2029 } else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
2030 MLD_PRINTF(("%s: final leave\n", __func__));
2031 mld_final_leave(inm, mli);
2032 goto out;
2033 }
2034 } else {
2035 MLD_PRINTF(("%s: filter set change\n", __func__));
2036 }
2037
2038 error = mld_handle_state_change(inm, mli);
2039
2040 out:
2041 return (error);
2042 }
2043
2044 /*
2045 * Perform the initial join for an MLD group.
2046 *
2047 * When joining a group:
2048 * If the group should have its MLD traffic suppressed, do nothing.
2049 * MLDv1 starts sending MLDv1 host membership reports.
2050 * MLDv2 will schedule an MLDv2 state-change report containing the
2051 * initial state of the membership.
2052 *
2053 * If the delay argument is non-zero, then we must delay sending the
2054 * initial state change for delay ticks (in units of PR_FASTHZ).
2055 */
2056 static int
2057 mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli,
2058 const int delay)
2059 {
2060 struct ifnet *ifp;
2061 struct ifqueue *ifq;
2062 int error, retval, syncstates;
2063 int odelay;
2064
2065 IN6M_LOCK_ASSERT_HELD(inm);
2066 MLI_LOCK_ASSERT_NOTHELD(mli);
2067
2068 MLD_PRINTF(("%s: initial join %s on ifp %p(%s%d)\n",
2069 __func__, ip6_sprintf(&inm->in6m_addr),
2070 inm->in6m_ifp, inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
2071
2072 error = 0;
2073 syncstates = 1;
2074
2075 ifp = inm->in6m_ifp;
2076
2077 MLI_LOCK(mli);
2078 VERIFY(mli->mli_ifp == ifp);
2079
2080 /*
2081 * Groups joined on loopback or marked as 'not reported'
2082 * enter the MLD_SILENT_MEMBER state and
2083 * are never reported in any protocol exchanges.
2084 * All other groups enter the appropriate state machine
2085 * for the version in use on this link.
2086 * A link marked as MLIF_SILENT causes MLD to be completely
2087 * disabled for the link.
2088 */
2089 if ((ifp->if_flags & IFF_LOOPBACK) ||
2090 (mli->mli_flags & MLIF_SILENT) ||
2091 !mld_is_addr_reported(&inm->in6m_addr)) {
2092 MLD_PRINTF(("%s: not kicking state machine for silent group\n",
2093 __func__));
2094 inm->in6m_state = MLD_SILENT_MEMBER;
2095 inm->in6m_timer = 0;
2096 } else {
2097 /*
2098 * Deal with overlapping in6_multi lifecycle.
2099 * If this group was LEAVING, then make sure
2100 * we drop the reference we picked up to keep the
2101 * group around for the final INCLUDE {} enqueue.
2102 * Since we cannot call in6_multi_detach() here,
2103 * defer this task to the timer routine.
2104 */
2105 if (mli->mli_version == MLD_VERSION_2 &&
2106 inm->in6m_state == MLD_LEAVING_MEMBER) {
2107 VERIFY(inm->in6m_nrelecnt != 0);
2108 SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
2109 in6m_nrele);
2110 }
2111
2112 inm->in6m_state = MLD_REPORTING_MEMBER;
2113
2114 switch (mli->mli_version) {
2115 case MLD_VERSION_1:
2116 /*
2117 * If a delay was provided, only use it if
2118 * it is greater than the delay normally
2119 * used for an MLDv1 state change report,
2120 * and delay sending the initial MLDv1 report
2121 * by not transitioning to the IDLE state.
2122 */
2123 odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI * PR_SLOWHZ);
2124 if (delay) {
2125 inm->in6m_timer = max(delay, odelay);
2126 current_state_timers_running6 = 1;
2127 } else {
2128 inm->in6m_state = MLD_IDLE_MEMBER;
2129 error = mld_v1_transmit_report(inm,
2130 MLD_LISTENER_REPORT);
2131
2132 IN6M_LOCK_ASSERT_HELD(inm);
2133 MLI_LOCK_ASSERT_HELD(mli);
2134
2135 if (error == 0) {
2136 inm->in6m_timer = odelay;
2137 current_state_timers_running6 = 1;
2138 }
2139 }
2140 break;
2141
2142 case MLD_VERSION_2:
2143 /*
2144 * Defer update of T0 to T1, until the first copy
2145 * of the state change has been transmitted.
2146 */
2147 syncstates = 0;
2148
2149 /*
2150 * Immediately enqueue a State-Change Report for
2151 * this interface, freeing any previous reports.
2152 * Don't kick the timers if there is nothing to do,
2153 * or if an error occurred.
2154 */
2155 ifq = &inm->in6m_scq;
2156 IF_DRAIN(ifq);
2157 retval = mld_v2_enqueue_group_record(ifq, inm, 1,
2158 0, 0, (mli->mli_flags & MLIF_USEALLOW));
2159 MLD_PRINTF(("%s: enqueue record = %d\n",
2160 __func__, retval));
2161 if (retval <= 0) {
2162 error = retval * -1;
2163 break;
2164 }
2165
2166 /*
2167 * Schedule transmission of pending state-change
2168 * report up to RV times for this link. The timer
2169 * will fire at the next mld_fasttimo (~200ms),
2170 * giving us an opportunity to merge the reports.
2171 *
2172 * If a delay was provided to this function, only
2173 * use this delay if sooner than the existing one.
2174 */
2175 VERIFY(mli->mli_rv > 1);
2176 inm->in6m_scrv = mli->mli_rv;
2177 if (delay) {
2178 if (inm->in6m_sctimer > 1) {
2179 inm->in6m_sctimer =
2180 min(inm->in6m_sctimer, delay);
2181 } else
2182 inm->in6m_sctimer = delay;
2183 } else
2184 inm->in6m_sctimer = 1;
2185 state_change_timers_running6 = 1;
2186
2187 error = 0;
2188 break;
2189 }
2190 }
2191 MLI_UNLOCK(mli);
2192
2193 /*
2194 * Only update the T0 state if state change is atomic,
2195 * i.e. we don't need to wait for a timer to fire before we
2196 * can consider the state change to have been communicated.
2197 */
2198 if (syncstates) {
2199 in6m_commit(inm);
2200 MLD_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__,
2201 ip6_sprintf(&inm->in6m_addr),
2202 inm->in6m_ifp->if_name, ifp->if_unit));
2203 }
2204
2205 return (error);
2206 }
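/*
 * For illustration of the MLDv1 arm above: assuming the conventional
 * definitions MLD_V1_MAX_RI == 10 (seconds), PR_SLOWHZ == 2, and
 * MLD_RANDOM_DELAY(x) returning a value in [1, x], odelay is a
 * random 1..20 slow ticks, so the unsolicited report is jittered
 * across at most 10 seconds (the RFC 2710 Unsolicited Report
 * Interval default).
 */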
2207
2208 /*
2209 * Issue an intermediate state change during the life-cycle.
2210 */
2211 static int
2212 mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli)
2213 {
2214 struct ifnet *ifp;
2215 int retval;
2216
2217 IN6M_LOCK_ASSERT_HELD(inm);
2218 MLI_LOCK_ASSERT_NOTHELD(mli);
2219
2220 MLD_PRINTF(("%s: state change for %s on ifp %p(%s%d)\n",
2221 __func__, ip6_sprintf(&inm->in6m_addr),
2222 inm->in6m_ifp, inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
2223
2224 ifp = inm->in6m_ifp;
2225
2226 MLI_LOCK(mli);
2227 VERIFY(mli->mli_ifp == ifp);
2228
2229 if ((ifp->if_flags & IFF_LOOPBACK) ||
2230 (mli->mli_flags & MLIF_SILENT) ||
2231 !mld_is_addr_reported(&inm->in6m_addr) ||
2232 (mli->mli_version != MLD_VERSION_2)) {
2233 MLI_UNLOCK(mli);
2234 if (!mld_is_addr_reported(&inm->in6m_addr)) {
2235 MLD_PRINTF(("%s: not kicking state machine for silent "
2236 "group\n", __func__));
2237 }
2238 MLD_PRINTF(("%s: nothing to do\n", __func__));
2239 in6m_commit(inm);
2240 MLD_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__,
2241 ip6_sprintf(&inm->in6m_addr),
2242 inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
2243 return (0);
2244 }
2245
2246 IF_DRAIN(&inm->in6m_scq);
2247
2248 retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
2249 (mli->mli_flags & MLIF_USEALLOW));
2250 MLD_PRINTF(("%s: enqueue record = %d\n", __func__, retval));
2251 if (retval <= 0) {
2252 MLI_UNLOCK(mli);
2253 return (-retval);
2254 }
2255 /*
2256 * If record(s) were enqueued, start the state-change
2257 * report timer for this group.
2258 */
2259 inm->in6m_scrv = mli->mli_rv;
2260 inm->in6m_sctimer = 1;
2261 state_change_timers_running6 = 1;
2262 MLI_UNLOCK(mli);
2263
2264 return (0);
2265 }
2266
2267 /*
2268 * Perform the final leave for a multicast address.
2269 *
2270 * When leaving a group:
2271 * MLDv1 sends a DONE message, if and only if we are the reporter.
2272 * MLDv2 enqueues a state-change report containing a transition
2273 * to INCLUDE {} for immediate transmission.
2274 */
2275 static void
2276 mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli)
2277 {
2278 int syncstates = 1;
2279
2280 IN6M_LOCK_ASSERT_HELD(inm);
2281 MLI_LOCK_ASSERT_NOTHELD(mli);
2282
2283 MLD_PRINTF(("%s: final leave %s on ifp %p(%s%d)\n",
2284 __func__, ip6_sprintf(&inm->in6m_addr),
2285 inm->in6m_ifp, inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
2286
2287 switch (inm->in6m_state) {
2288 case MLD_NOT_MEMBER:
2289 case MLD_SILENT_MEMBER:
2290 case MLD_LEAVING_MEMBER:
2291 /* Already leaving or left; do nothing. */
2292 MLD_PRINTF(("%s: not kicking state machine for silent group\n",
2293 __func__));
2294 break;
2295 case MLD_REPORTING_MEMBER:
2296 case MLD_IDLE_MEMBER:
2297 case MLD_G_QUERY_PENDING_MEMBER:
2298 case MLD_SG_QUERY_PENDING_MEMBER:
2299 MLI_LOCK(mli);
2300 if (mli->mli_version == MLD_VERSION_1) {
2301 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
2302 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
2303 panic("%s: MLDv2 state reached, not MLDv2 "
2304 "mode\n", __func__);
2305 /* NOTREACHED */
2306 }
2307 mld_v1_transmit_report(inm, MLD_LISTENER_DONE);
2308
2309 IN6M_LOCK_ASSERT_HELD(inm);
2310 MLI_LOCK_ASSERT_HELD(mli);
2311
2312 inm->in6m_state = MLD_NOT_MEMBER;
2313 } else if (mli->mli_version == MLD_VERSION_2) {
2314 /*
2315 * Stop group timer and all pending reports.
2316 * Immediately enqueue a state-change report
2317 * TO_IN {} to be sent on the next fast timeout,
2318 * giving us an opportunity to merge reports.
2319 */
2320 IF_DRAIN(&inm->in6m_scq);
2321 inm->in6m_timer = 0;
2322 inm->in6m_scrv = mli->mli_rv;
2323 MLD_PRINTF(("%s: Leaving %s/%s%d with %d "
2324 "pending retransmissions.\n", __func__,
2325 ip6_sprintf(&inm->in6m_addr),
2326 inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit,
2327 inm->in6m_scrv));
2328 if (inm->in6m_scrv == 0) {
2329 inm->in6m_state = MLD_NOT_MEMBER;
2330 inm->in6m_sctimer = 0;
2331 } else {
2332 int retval;
2333 /*
2334 * Stick around in the in6_multihead list;
2335 * the final detach will be issued by
2336 * mld_v2_process_group_timers() when
2337 * the retransmit timer expires.
2338 */
2339 IN6M_ADDREF_LOCKED(inm);
2340 VERIFY(inm->in6m_debug & IFD_ATTACHED);
2341 inm->in6m_reqcnt++;
2342 VERIFY(inm->in6m_reqcnt >= 1);
2343 inm->in6m_nrelecnt++;
2344 VERIFY(inm->in6m_nrelecnt != 0);
2345
2346 retval = mld_v2_enqueue_group_record(
2347 &inm->in6m_scq, inm, 1, 0, 0,
2348 (mli->mli_flags & MLIF_USEALLOW));
2349 KASSERT(retval != 0,
2350 ("%s: enqueue record = %d\n", __func__,
2351 retval));
2352
2353 inm->in6m_state = MLD_LEAVING_MEMBER;
2354 inm->in6m_sctimer = 1;
2355 state_change_timers_running6 = 1;
2356 syncstates = 0;
2357 }
2358 }
2359 MLI_UNLOCK(mli);
2360 break;
2361 case MLD_LAZY_MEMBER:
2362 case MLD_SLEEPING_MEMBER:
2363 case MLD_AWAKENING_MEMBER:
2364 /* Our reports are suppressed; do nothing. */
2365 break;
2366 }
2367
2368 if (syncstates) {
2369 in6m_commit(inm);
2370 MLD_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__,
2371 ip6_sprintf(&inm->in6m_addr),
2372 inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
2373 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
2374 MLD_PRINTF(("%s: T1 now MCAST_UNDEFINED for %p/%s%d\n",
2375 __func__, &inm->in6m_addr, inm->in6m_ifp->if_name,
2376 inm->in6m_ifp->if_unit));
2377 }
2378 }
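/*
 * A note on the reference dance above: when a final leave is issued
 * while MLDv2 retransmissions remain (in6m_scrv > 0), the extra
 * IN6M_ADDREF_LOCKED() and in6m_nrelecnt bump keep the in6_multi on
 * the in6_multihead list while its state-change queue drains. The
 * matching releases happen later, from the timer path, via the
 * mli_relinmhead deferred-release list.
 */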
2379
2380 /*
2381 * Enqueue an MLDv2 group record to the given output queue.
2382 *
2383 * If is_state_change is zero, a current-state record is appended.
2384 * If is_state_change is non-zero, a state-change report is appended.
2385 *
2386 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2387 * If is_group_query is zero, and there is a packet at the tail
2388 * of the queue with enough free space for the record, the record
2389 * will be appended to that packet.
2390 * Otherwise a new mbuf packet chain is allocated.
2391 *
2392 * If is_source_query is non-zero, each source is checked to see if
2393 * it was recorded for a Group-Source query, and will be omitted if
2394 * it is not both in-mode and recorded.
2395 *
2396 * If use_block_allow is non-zero, state change reports for initial join
2397 * and final leave, on an inclusive mode group with a source list, will be
2398 * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively.
2399 *
2400 * The function will attempt to allocate leading space in the packet
2401 * for the IPv6+ICMP headers to be prepended without fragmenting the chain.
2402 *
2403 * If successful the size of all data appended to the queue is returned,
2404 * otherwise an error code less than zero is returned, or zero if
2405 * no record(s) were appended.
2406 */
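/*
 * To summarize the record-type selection implemented below, with
 * "t0"/"t1" denoting the group filter mode before and after the
 * change (see RFC 3810 for the record-type semantics):
 *
 *	state change, t1 != t0, t1 EXCLUDE -> MLD_CHANGE_TO_EXCLUDE_MODE
 *	state change, t1 != t0, t1 INCLUDE -> MLD_CHANGE_TO_INCLUDE_MODE
 *	    (or MLD_ALLOW_NEW_SOURCES/MLD_BLOCK_OLD_SOURCES when
 *	    use_block_allow rewrites an initial join/final leave on an
 *	    inclusive-mode group)
 *	state change, t1 == t0, sources present -> filter-change path
 *	    (mld_v2_enqueue_filter_change())
 *	current state, t1 EXCLUDE -> MLD_MODE_IS_EXCLUDE
 *	current state, t1 INCLUDE -> MLD_MODE_IS_INCLUDE
 */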
2407 static int
2408 mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm,
2409 const int is_state_change, const int is_group_query,
2410 const int is_source_query, const int use_block_allow)
2411 {
2412 struct mldv2_record mr;
2413 struct mldv2_record *pmr;
2414 struct ifnet *ifp;
2415 struct ip6_msource *ims, *nims;
2416 struct mbuf *m0, *m, *md;
2417 int error, is_filter_list_change;
2418 int minrec0len, m0srcs, msrcs, nbytes, off;
2419 int record_has_sources;
2420 int now;
2421 int type;
2422 uint8_t mode;
2423
2424 IN6M_LOCK_ASSERT_HELD(inm);
2425 MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
2426
2427 error = 0;
2428 ifp = inm->in6m_ifp;
2429 is_filter_list_change = 0;
2430 m = NULL;
2431 m0 = NULL;
2432 m0srcs = 0;
2433 msrcs = 0;
2434 nbytes = 0;
2435 nims = NULL;
2436 record_has_sources = 1;
2437 pmr = NULL;
2438 type = MLD_DO_NOTHING;
2439 mode = inm->in6m_st[1].iss_fmode;
2440
2441 /*
2442 * If we did not transition out of ASM mode during t0->t1,
2443 * and there are no source nodes to process, we can skip
2444 * the generation of source records.
2445 */
2446 if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
2447 inm->in6m_nsrc == 0)
2448 record_has_sources = 0;
2449
2450 if (is_state_change) {
2451 /*
2452 * Queue a state change record.
2453 * If the mode did not change, and there are non-ASM
2454 * listeners or source filters present,
2455 * we potentially need to issue two records for the group.
2456 * If there are ASM listeners, and there was no filter
2457 * mode transition of any kind, do nothing.
2458 *
2459 * If we are transitioning to MCAST_UNDEFINED, we need
2460 * not send any sources. A transition to/from this state is
2461 * considered inclusive with some special treatment.
2462 *
2463 * If we are rewriting initial joins/leaves to use
2464 * ALLOW/BLOCK, and the group's membership is inclusive,
2465 * we need to send sources in all cases.
2466 */
2467 if (mode != inm->in6m_st[0].iss_fmode) {
2468 if (mode == MCAST_EXCLUDE) {
2469 MLD_PRINTF(("%s: change to EXCLUDE\n",
2470 __func__));
2471 type = MLD_CHANGE_TO_EXCLUDE_MODE;
2472 } else {
2473 MLD_PRINTF(("%s: change to INCLUDE\n",
2474 __func__));
2475 if (use_block_allow) {
2476 /*
2477 * XXX
2478 * Here we're interested in state
2479 * edges either direction between
2480 * MCAST_UNDEFINED and MCAST_INCLUDE.
2481 * Perhaps we should just check
2482 * the group state, rather than
2483 * the filter mode.
2484 */
2485 if (mode == MCAST_UNDEFINED) {
2486 type = MLD_BLOCK_OLD_SOURCES;
2487 } else {
2488 type = MLD_ALLOW_NEW_SOURCES;
2489 }
2490 } else {
2491 type = MLD_CHANGE_TO_INCLUDE_MODE;
2492 if (mode == MCAST_UNDEFINED)
2493 record_has_sources = 0;
2494 }
2495 }
2496 } else {
2497 if (record_has_sources) {
2498 is_filter_list_change = 1;
2499 } else {
2500 type = MLD_DO_NOTHING;
2501 }
2502 }
2503 } else {
2504 /*
2505 * Queue a current state record.
2506 */
2507 if (mode == MCAST_EXCLUDE) {
2508 type = MLD_MODE_IS_EXCLUDE;
2509 } else if (mode == MCAST_INCLUDE) {
2510 type = MLD_MODE_IS_INCLUDE;
2511 VERIFY(inm->in6m_st[1].iss_asm == 0);
2512 }
2513 }
2514
2515 /*
2516 * Generate the filter list changes using a separate function.
2517 */
2518 if (is_filter_list_change)
2519 return (mld_v2_enqueue_filter_change(ifq, inm));
2520
2521 if (type == MLD_DO_NOTHING) {
2522 MLD_PRINTF(("%s: nothing to do for %s/%s%d\n",
2523 __func__, ip6_sprintf(&inm->in6m_addr),
2524 inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
2525 return (0);
2526 }
2527
2528 /*
2529 * If any sources are present, we must be able to fit at least
2530 * one in the trailing space of the tail packet's mbuf,
2531 * ideally more.
2532 */
2533 minrec0len = sizeof(struct mldv2_record);
2534 if (record_has_sources)
2535 minrec0len += sizeof(struct in6_addr);
2536 MLD_PRINTF(("%s: queueing %s for %s/%s%d\n", __func__,
2537 mld_rec_type_to_str(type),
2538 ip6_sprintf(&inm->in6m_addr),
2539 inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
2540
2541 /*
2542 * Check if we have a packet in the tail of the queue for this
2543 * group into which the first group record for this group will fit.
2544 * Otherwise allocate a new packet.
2545 * Always allocate leading space for IP6+RA+ICMPV6+REPORT.
2546 * Note: Group records for G/GSR query responses MUST be sent
2547 * in their own packet.
2548 */
2549 m0 = ifq->ifq_tail;
2550 if (!is_group_query &&
2551 m0 != NULL &&
2552 (m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
2553 (m0->m_pkthdr.len + minrec0len) <
2554 (ifp->if_mtu - MLD_MTUSPACE)) {
2555 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2556 sizeof(struct mldv2_record)) /
2557 sizeof(struct in6_addr);
2558 m = m0;
2559 MLD_PRINTF(("%s: use existing packet\n", __func__));
2560 } else {
2561 if (IF_QFULL(ifq)) {
2562 MLD_PRINTF(("%s: outbound queue full\n", __func__));
2563 return (-ENOMEM);
2564 }
2565 m = NULL;
2566 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2567 sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
2568 if (!is_state_change && !is_group_query)
2569 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2570 if (m == NULL)
2571 m = m_gethdr(M_DONTWAIT, MT_DATA);
2572 if (m == NULL)
2573 return (-ENOMEM);
2574
2575 MLD_PRINTF(("%s: allocated first packet\n", __func__));
2576 }
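/*
 * For illustration of the m0srcs computation: assuming a 1500-byte
 * MTU and that MLD_MTUSPACE reserves room for the IPv6 header (40),
 * the hop-by-hop Router Alert option (8) and the MLDv2 report
 * header (8), i.e. 56 bytes in total:
 *
 *	m0srcs = (1500 - 56 - 20) / 16 = 89
 *
 * where 20 is sizeof(struct mldv2_record) and 16 is
 * sizeof(struct in6_addr), so a fresh packet on Ethernet can carry
 * roughly 89 sources in its first group record.
 */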
2577
2578 /*
2579 * Append group record.
2580 * If we have sources, we don't know how many yet.
2581 */
2582 mr.mr_type = type;
2583 mr.mr_datalen = 0;
2584 mr.mr_numsrc = 0;
2585 mr.mr_addr = inm->in6m_addr;
2586 in6_clearscope(&mr.mr_addr);
2587 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
2588 if (m != m0)
2589 m_freem(m);
2590 MLD_PRINTF(("%s: m_append() failed.\n", __func__));
2591 return (-ENOMEM);
2592 }
2593 nbytes += sizeof(struct mldv2_record);
2594
2595 /*
2596 * Append as many sources as will fit in the first packet.
2597 * If we are appending to a new packet, the chain allocation
2598 * may potentially use clusters; use m_getptr() in this case.
2599 * If we are appending to an existing packet, we need to obtain
2600 * a pointer to the group record after m_append(), in case a new
2601 * mbuf was allocated.
2602 *
2603 * Only append sources which are in-mode at t1. If we are
2604 * transitioning to MCAST_UNDEFINED state on the group, and
2605 * use_block_allow is zero, do not include source entries.
2606 * Otherwise, we need to include this source in the report.
2607 *
2608 * Only report recorded sources in our filter set when responding
2609 * to a group-source query.
2610 */
2611 if (record_has_sources) {
2612 if (m == m0) {
2613 md = m_last(m);
2614 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
2615 md->m_len - nbytes);
2616 } else {
2617 md = m_getptr(m, 0, &off);
2618 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
2619 off);
2620 }
2621 msrcs = 0;
2622 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs,
2623 nims) {
2624 MLD_PRINTF(("%s: visit node %s\n", __func__,
2625 ip6_sprintf(&ims->im6s_addr)));
2626 now = im6s_get_mode(inm, ims, 1);
2627 MLD_PRINTF(("%s: node is %d\n", __func__, now));
2628 if ((now != mode) ||
2629 (now == mode &&
2630 (!use_block_allow && mode == MCAST_UNDEFINED))) {
2631 MLD_PRINTF(("%s: skip node\n", __func__));
2632 continue;
2633 }
2634 if (is_source_query && ims->im6s_stp == 0) {
2635 MLD_PRINTF(("%s: skip unrecorded node\n",
2636 __func__));
2637 continue;
2638 }
2639 MLD_PRINTF(("%s: append node\n", __func__));
2640 if (!m_append(m, sizeof(struct in6_addr),
2641 (void *)&ims->im6s_addr)) {
2642 if (m != m0)
2643 m_freem(m);
2644 MLD_PRINTF(("%s: m_append() failed.\n",
2645 __func__));
2646 return (-ENOMEM);
2647 }
2648 nbytes += sizeof(struct in6_addr);
2649 ++msrcs;
2650 if (msrcs == m0srcs)
2651 break;
2652 }
2653 MLD_PRINTF(("%s: msrcs is %d this packet\n", __func__,
2654 msrcs));
2655 pmr->mr_numsrc = htons(msrcs);
2656 nbytes += (msrcs * sizeof(struct in6_addr));
2657 }
2658
2659 if (is_source_query && msrcs == 0) {
2660 MLD_PRINTF(("%s: no recorded sources to report\n", __func__));
2661 if (m != m0)
2662 m_freem(m);
2663 return (0);
2664 }
2665
2666 /*
2667 * We are good to go with first packet.
2668 */
2669 if (m != m0) {
2670 MLD_PRINTF(("%s: enqueueing first packet\n", __func__));
2671 m->m_pkthdr.vt_nrecs = 1;
2672 m->m_pkthdr.rcvif = ifp;
2673 IF_ENQUEUE(ifq, m);
2674 } else {
2675 m->m_pkthdr.vt_nrecs++;
2676 }
2677 /*
2678 * No further work needed if no source list in packet(s).
2679 */
2680 if (!record_has_sources)
2681 return (nbytes);
2682
2683 /*
2684 * Whilst sources remain to be announced, we need to allocate
2685 * a new packet and fill out as many sources as will fit.
2686 * Always try for a cluster first.
2687 */
2688 while (nims != NULL) {
2689 if (IF_QFULL(ifq)) {
2690 MLD_PRINTF(("%s: outbound queue full\n", __func__));
2691 return (-ENOMEM);
2692 }
2693 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2694 if (m == NULL)
2695 m = m_gethdr(M_DONTWAIT, MT_DATA);
2696 if (m == NULL)
2697 return (-ENOMEM);
2698 md = m_getptr(m, 0, &off);
2699 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
2700 MLD_PRINTF(("%s: allocated next packet\n", __func__));
2701
2702 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
2703 if (m != m0)
2704 m_freem(m);
2705 MLD_PRINTF(("%s: m_append() failed.\n", __func__));
2706 return (-ENOMEM);
2707 }
2708 m->m_pkthdr.vt_nrecs = 1;
2709 nbytes += sizeof(struct mldv2_record);
2710
2711 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2712 sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
2713
2714 msrcs = 0;
2715 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
2716 MLD_PRINTF(("%s: visit node %s\n",
2717 __func__, ip6_sprintf(&ims->im6s_addr)));
2718 now = im6s_get_mode(inm, ims, 1);
2719 if ((now != mode) ||
2720 (now == mode &&
2721 (!use_block_allow && mode == MCAST_UNDEFINED))) {
2722 MLD_PRINTF(("%s: skip node\n", __func__));
2723 continue;
2724 }
2725 if (is_source_query && ims->im6s_stp == 0) {
2726 MLD_PRINTF(("%s: skip unrecorded node\n",
2727 __func__));
2728 continue;
2729 }
2730 MLD_PRINTF(("%s: append node\n", __func__));
2731 if (!m_append(m, sizeof(struct in6_addr),
2732 (void *)&ims->im6s_addr)) {
2733 if (m != m0)
2734 m_freem(m);
2735 MLD_PRINTF(("%s: m_append() failed.\n",
2736 __func__));
2737 return (-ENOMEM);
2738 }
2739 ++msrcs;
2740 if (msrcs == m0srcs)
2741 break;
2742 }
2743 pmr->mr_numsrc = htons(msrcs);
2744 nbytes += (msrcs * sizeof(struct in6_addr));
2745
2746 MLD_PRINTF(("%s: enqueueing next packet\n", __func__));
2747 m->m_pkthdr.rcvif = ifp;
2748 IF_ENQUEUE(ifq, m);
2749 }
2750
2751 return (nbytes);
2752 }
2753
2754 /*
2755 * Type used to mark record pass completion.
2756 * We exploit the fact that we can cast to this easily from the
2757 * current filter modes on each ip6_msource node.
2758 */
2759 typedef enum {
2760 REC_NONE = 0x00, /* MCAST_UNDEFINED */
2761 REC_ALLOW = 0x01, /* MCAST_INCLUDE */
2762 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
2763 REC_FULL = REC_ALLOW | REC_BLOCK
2764 } rectype_t;
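/*
 * The values above are chosen so that MCAST_INCLUDE (1) and
 * MCAST_EXCLUDE (2) cast directly to REC_ALLOW and REC_BLOCK.
 * A node whose mode is MCAST_UNDEFINED at t1 is reported as the
 * inverse of the group's filter mode; for example, on an
 * EXCLUDE-mode group a source deleted from the exclusion list
 * casts to REC_NONE and becomes
 *
 *	nrt = (rectype_t)(~MCAST_EXCLUDE & REC_FULL) == REC_ALLOW
 *
 * i.e. it is announced in an ALLOW_NEW_SOURCES record, matching the
 * semantic change: the source is now allowed.
 */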
2765
2766 /*
2767 * Enqueue an MLDv2 filter list change to the given output queue.
2768 *
2769 * Source list filter state is held in an RB-tree. When the filter list
2770 * for a group is changed without changing its mode, we need to compute
2771 * the deltas between T0 and T1 for each source in the filter set,
2772 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
2773 *
2774 * As we may potentially queue two record types, and the entire RB-tree
2775 * needs to be walked at once, we break this out into its own function
2776 * so we can generate a tightly packed queue of packets.
2777 *
2778 * XXX This could be written to only use one tree walk, although that makes
2779 * serializing into the mbuf chains a bit harder. For now we do two walks
2780 * which makes things easier on us, and it may or may not be harder on
2781 * the L2 cache.
2782 *
2783 * If successful the size of all data appended to the queue is returned,
2784 * otherwise an error code less than zero is returned, or zero if
2785 * no record(s) were appended.
2786 */
2787 static int
2788 mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm)
2789 {
2790 static const int MINRECLEN =
2791 sizeof(struct mldv2_record) + sizeof(struct in6_addr);
2792 struct ifnet *ifp;
2793 struct mldv2_record mr;
2794 struct mldv2_record *pmr;
2795 struct ip6_msource *ims, *nims;
2796 struct mbuf *m, *m0, *md;
2797 int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
2798 int nallow, nblock;
2799 uint8_t mode, now, then;
2800 rectype_t crt, drt, nrt;
2801
2802 IN6M_LOCK_ASSERT_HELD(inm);
2803
2804 if (inm->in6m_nsrc == 0 ||
2805 (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
2806 return (0);
2807
2808 ifp = inm->in6m_ifp; /* interface */
2809 mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */
2810 crt = REC_NONE; /* current group record type */
2811 drt = REC_NONE; /* mask of completed group record types */
2812 nrt = REC_NONE; /* record type for current node */
2813 m0srcs = 0; /* # source which will fit in current mbuf chain */
2814 npbytes = 0; /* # of bytes appended this packet */
2815 nbytes = 0; /* # of bytes appended to group's state-change queue */
2816 rsrcs = 0; /* # sources encoded in current record */
2817 schanged = 0; /* # nodes encoded in overall filter change */
2818 nallow = 0; /* # of source entries in ALLOW_NEW */
2819 nblock = 0; /* # of source entries in BLOCK_OLD */
2820 nims = NULL; /* next tree node pointer */
2821
2822 /*
2823 * Loop once for each possible filter record mode.
2824 * The first kind of source we encounter tells us which
2825 * is the first kind of record we start appending.
2826 * If a node transitioned to UNDEFINED at t1, its mode is treated
2827 * as the inverse of the group's filter mode.
2828 */
2829 while (drt != REC_FULL) {
2830 do {
2831 m0 = ifq->ifq_tail;
2832 if (m0 != NULL &&
2833 (m0->m_pkthdr.vt_nrecs + 1 <=
2834 MLD_V2_REPORT_MAXRECS) &&
2835 (m0->m_pkthdr.len + MINRECLEN) <
2836 (ifp->if_mtu - MLD_MTUSPACE)) {
2837 m = m0;
2838 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2839 sizeof(struct mldv2_record)) /
2840 sizeof(struct in6_addr);
2841 MLD_PRINTF(("%s: use previous packet\n",
2842 __func__));
2843 } else {
2844 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2845 if (m == NULL)
2846 m = m_gethdr(M_DONTWAIT, MT_DATA);
2847 if (m == NULL) {
2848 MLD_PRINTF(("%s: m_get*() failed\n",
2849 __func__));
2850 return (-ENOMEM);
2851 }
2852 m->m_pkthdr.vt_nrecs = 0;
2853 m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2854 sizeof(struct mldv2_record)) /
2855 sizeof(struct in6_addr);
2856 npbytes = 0;
2857 MLD_PRINTF(("%s: allocated new packet\n",
2858 __func__));
2859 }
2860 /*
2861 * Append the MLD group record header to the
2862 * current packet's data area.
2863 * Recalculate pointer to free space for next
2864 * group record, in case m_append() allocated
2865 * a new mbuf or cluster.
2866 */
2867 memset(&mr, 0, sizeof(mr));
2868 mr.mr_addr = inm->in6m_addr;
2869 in6_clearscope(&mr.mr_addr);
2870 if (!m_append(m, sizeof(mr), (void *)&mr)) {
2871 if (m != m0)
2872 m_freem(m);
2873 MLD_PRINTF(("%s: m_append() failed\n",
2874 __func__));
2875 return (-ENOMEM);
2876 }
2877 npbytes += sizeof(struct mldv2_record);
2878 if (m != m0) {
2879 /* new packet; offset in chain */
2880 md = m_getptr(m, npbytes -
2881 sizeof(struct mldv2_record), &off);
2882 pmr = (struct mldv2_record *)(mtod(md,
2883 uint8_t *) + off);
2884 } else {
2885 /* current packet; offset from last append */
2886 md = m_last(m);
2887 pmr = (struct mldv2_record *)(mtod(md,
2888 uint8_t *) + md->m_len -
2889 sizeof(struct mldv2_record));
2890 }
2891 /*
2892 * Begin walking the tree for this record type
2893 * pass, or continue from where we left off
2894 * previously if we had to allocate a new packet.
2895 * Only report deltas in-mode at t1.
2896 * We need not report included sources as allowed
2897 * if we are in inclusive mode on the group,
2898 * however the converse is not true.
2899 */
2900 rsrcs = 0;
2901 if (nims == NULL) {
2902 nims = RB_MIN(ip6_msource_tree,
2903 &inm->in6m_srcs);
2904 }
2905 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
2906 MLD_PRINTF(("%s: visit node %s\n", __func__,
2907 ip6_sprintf(&ims->im6s_addr)));
2908 now = im6s_get_mode(inm, ims, 1);
2909 then = im6s_get_mode(inm, ims, 0);
2910 MLD_PRINTF(("%s: mode: t0 %d, t1 %d\n",
2911 __func__, then, now));
2912 if (now == then) {
2913 MLD_PRINTF(("%s: skip unchanged\n",
2914 __func__));
2915 continue;
2916 }
2917 if (mode == MCAST_EXCLUDE &&
2918 now == MCAST_INCLUDE) {
2919 MLD_PRINTF(("%s: skip IN src on EX "
2920 "group\n", __func__));
2921 continue;
2922 }
2923 nrt = (rectype_t)now;
2924 if (nrt == REC_NONE)
2925 nrt = (rectype_t)(~mode & REC_FULL);
2926 if (schanged++ == 0) {
2927 crt = nrt;
2928 } else if (crt != nrt)
2929 continue;
2930 if (!m_append(m, sizeof(struct in6_addr),
2931 (void *)&ims->im6s_addr)) {
2932 if (m != m0)
2933 m_freem(m);
2934 MLD_PRINTF(("%s: m_append() failed\n",
2935 __func__));
2936 return (-ENOMEM);
2937 }
2938 nallow += !!(crt == REC_ALLOW);
2939 nblock += !!(crt == REC_BLOCK);
2940 if (++rsrcs == m0srcs)
2941 break;
2942 }
2943 /*
2944 * If we did not append any tree nodes on this
2945 * pass, back out of allocations.
2946 */
2947 if (rsrcs == 0) {
2948 npbytes -= sizeof(struct mldv2_record);
2949 if (m != m0) {
2950 MLD_PRINTF(("%s: m_free(m)\n",
2951 __func__));
2952 m_freem(m);
2953 } else {
2954 MLD_PRINTF(("%s: m_adj(m, -mr)\n",
2955 __func__));
2956 m_adj(m, -((int)sizeof(
2957 struct mldv2_record)));
2958 }
2959 continue;
2960 }
2961 npbytes += (rsrcs * sizeof(struct in6_addr));
2962 if (crt == REC_ALLOW)
2963 pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
2964 else if (crt == REC_BLOCK)
2965 pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
2966 pmr->mr_numsrc = htons(rsrcs);
2967 /*
2968 * Count the new group record, and enqueue this
2969 * packet if it wasn't already queued.
2970 */
2971 m->m_pkthdr.vt_nrecs++;
2972 m->m_pkthdr.rcvif = ifp;
2973 if (m != m0)
2974 IF_ENQUEUE(ifq, m);
2975 nbytes += npbytes;
2976 } while (nims != NULL);
2977 drt |= crt;
2978 crt = (~crt & REC_FULL);
2979 }
2980
2981 MLD_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__,
2982 nallow, nblock));
2983
2984 return (nbytes);
2985 }
2986
2987 static int
2988 mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
2989 {
2990 struct ifqueue *gq;
2991 struct mbuf *m; /* pending state-change */
2992 struct mbuf *m0; /* copy of pending state-change */
2993 struct mbuf *mt; /* last state-change in packet */
2994 struct mbuf *n;
2995 int docopy, domerge;
2996 u_int recslen;
2997
2998 IN6M_LOCK_ASSERT_HELD(inm);
2999
3000 docopy = 0;
3001 domerge = 0;
3002 recslen = 0;
3003
3004 /*
3005 * If there are further pending retransmissions, make a writable
3006 * copy of each queued state-change message before merging.
3007 */
3008 if (inm->in6m_scrv > 0)
3009 docopy = 1;
3010
3011 gq = &inm->in6m_scq;
3012 #ifdef MLD_DEBUG
3013 if (gq->ifq_head == NULL) {
3014 MLD_PRINTF(("%s: WARNING: queue for inm %p is empty\n",
3015 __func__, inm));
3016 }
3017 #endif
3018
3019 /*
3020 * Use IF_REMQUEUE() instead of IF_DEQUEUE() below, since the
3021 * packet might not always be at the head of the ifqueue.
3022 */
3023 m = gq->ifq_head;
3024 while (m != NULL) {
3025 /*
3026 * Only merge the report into the current packet if
3027 * there is sufficient space to do so; an MLDv2 report
3028 * packet may only contain 65,535 group records.
3029 * Always use a simple mbuf chain concatenation to do this,
3030 * as large state changes for single groups may have
3031 * allocated clusters.
3032 */
3033 domerge = 0;
3034 mt = ifscq->ifq_tail;
3035 if (mt != NULL) {
3036 recslen = m_length(m);
3037
3038 if ((mt->m_pkthdr.vt_nrecs +
3039 m->m_pkthdr.vt_nrecs <=
3040 MLD_V2_REPORT_MAXRECS) &&
3041 (mt->m_pkthdr.len + recslen <=
3042 (inm->in6m_ifp->if_mtu - MLD_MTUSPACE)))
3043 domerge = 1;
3044 }
3045
3046 if (!domerge && IF_QFULL(gq)) {
3047 MLD_PRINTF(("%s: outbound queue full, skipping whole "
3048 "packet %p\n", __func__, m));
3049 n = m->m_nextpkt;
3050 if (!docopy) {
3051 IF_REMQUEUE(gq, m);
3052 m_freem(m);
3053 }
3054 m = n;
3055 continue;
3056 }
3057
3058 if (!docopy) {
3059 MLD_PRINTF(("%s: dequeueing %p\n", __func__, m));
3060 n = m->m_nextpkt;
3061 IF_REMQUEUE(gq, m);
3062 m0 = m;
3063 m = n;
3064 } else {
3065 MLD_PRINTF(("%s: copying %p\n", __func__, m));
3066 m0 = m_dup(m, M_NOWAIT);
3067 if (m0 == NULL)
3068 return (ENOMEM);
3069 m0->m_nextpkt = NULL;
3070 m = m->m_nextpkt;
3071 }
3072
3073 if (!domerge) {
3074 MLD_PRINTF(("%s: queueing %p to ifscq %p\n",
3075 __func__, m0, ifscq));
3076 m0->m_pkthdr.rcvif = inm->in6m_ifp;
3077 IF_ENQUEUE(ifscq, m0);
3078 } else {
3079 struct mbuf *mtl; /* last mbuf of packet mt */
3080
3081 MLD_PRINTF(("%s: merging %p with ifscq tail %p\n",
3082 __func__, m0, mt));
3083
3084 mtl = m_last(mt);
3085 m0->m_flags &= ~M_PKTHDR;
3086 mt->m_pkthdr.len += recslen;
3087 mt->m_pkthdr.vt_nrecs +=
3088 m0->m_pkthdr.vt_nrecs;
3089
3090 mtl->m_next = m0;
3091 }
3092 }
3093
3094 return (0);
3095 }
3096
3097 /*
3098 * Respond to a pending MLDv2 General Query.
3099 */
3100 static void
3101 mld_v2_dispatch_general_query(struct mld_ifinfo *mli)
3102 {
3103 struct ifnet *ifp;
3104 struct in6_multi *inm;
3105 struct in6_multistep step;
3106 int retval;
3107
3108 MLI_LOCK_ASSERT_HELD(mli);
3109
3110 VERIFY(mli->mli_version == MLD_VERSION_2);
3111
3112 ifp = mli->mli_ifp;
3113 MLI_UNLOCK(mli);
3114
3115 in6_multihead_lock_shared();
3116 IN6_FIRST_MULTI(step, inm);
3117 while (inm != NULL) {
3118 IN6M_LOCK(inm);
3119 if (inm->in6m_ifp != ifp)
3120 goto next;
3121
3122 switch (inm->in6m_state) {
3123 case MLD_NOT_MEMBER:
3124 case MLD_SILENT_MEMBER:
3125 break;
3126 case MLD_REPORTING_MEMBER:
3127 case MLD_IDLE_MEMBER:
3128 case MLD_LAZY_MEMBER:
3129 case MLD_SLEEPING_MEMBER:
3130 case MLD_AWAKENING_MEMBER:
3131 inm->in6m_state = MLD_REPORTING_MEMBER;
3132 MLI_LOCK(mli);
3133 retval = mld_v2_enqueue_group_record(&mli->mli_gq,
3134 inm, 0, 0, 0, 0);
3135 MLI_UNLOCK(mli);
3136 MLD_PRINTF(("%s: enqueue record = %d\n",
3137 __func__, retval));
3138 break;
3139 case MLD_G_QUERY_PENDING_MEMBER:
3140 case MLD_SG_QUERY_PENDING_MEMBER:
3141 case MLD_LEAVING_MEMBER:
3142 break;
3143 }
3144 next:
3145 IN6M_UNLOCK(inm);
3146 IN6_NEXT_MULTI(step, inm);
3147 }
3148 in6_multihead_lock_done();
3149
3150 MLI_LOCK(mli);
3151 mld_dispatch_queue(mli, &mli->mli_gq, MLD_MAX_RESPONSE_BURST);
3152 MLI_LOCK_ASSERT_HELD(mli);
3153
3154 /*
3155 * Slew transmission of bursts over 500ms intervals.
3156 */
3157 if (mli->mli_gq.ifq_head != NULL) {
3158 mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
3159 MLD_RESPONSE_BURST_INTERVAL);
3160 interface_timers_running6 = 1;
3161 }
3162 }
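/*
 * On the pacing above: the interface timer runs from the fast
 * timeout (PR_FASTHZ == 5, i.e. one tick per 200ms), so re-arming
 * it to 1 + MLD_RANDOM_DELAY(MLD_RESPONSE_BURST_INTERVAL) ticks
 * spaces successive bursts of at most MLD_MAX_RESPONSE_BURST
 * packets roughly half a second apart, assuming the FreeBSD-derived
 * definition MLD_RESPONSE_BURST_INTERVAL == PR_FASTHZ / 2.
 */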
3163
3164 /*
3165 * Transmit the next pending message in the output queue.
3166 *
3167 * Must not be called with in6m_lock or mli_lock held.
3168 */
3169 static void
3170 mld_dispatch_packet(struct mbuf *m)
3171 {
3172 struct ip6_moptions *im6o;
3173 struct ifnet *ifp;
3174 struct ifnet *oifp = NULL;
3175 struct mbuf *m0;
3176 struct mbuf *md;
3177 struct ip6_hdr *ip6;
3178 struct mld_hdr *mld;
3179 int error;
3180 int off;
3181 int type;
3182
3183 MLD_PRINTF(("%s: transmit %p\n", __func__, m));
3184
3185 /*
3186 * Check if the ifnet is still attached.
3187 */
3188 ifp = m->m_pkthdr.rcvif;
3189 if (ifp == NULL || !ifnet_is_attached(ifp, 0)) {
3190 MLD_PRINTF(("%s: dropped %p as ifindex %u went away.\n",
3191 __func__, m, (u_int)if_index));
3192 m_freem(m);
3193 ip6stat.ip6s_noroute++;
3194 return;
3195 }
3196
3197 im6o = ip6_allocmoptions(M_WAITOK);
3198 if (im6o == NULL) {
3199 m_freem(m);
3200 return;
3201 }
3202
3203 im6o->im6o_multicast_hlim = 1;
3204 #if MROUTING
3205 im6o->im6o_multicast_loop = (ip6_mrouter != NULL);
3206 #else
3207 im6o->im6o_multicast_loop = 0;
3208 #endif
3209 im6o->im6o_multicast_ifp = ifp;
3210
3211 if (m->m_flags & M_MLDV1) {
3212 m0 = m;
3213 } else {
3214 m0 = mld_v2_encap_report(ifp, m);
3215 if (m0 == NULL) {
3216 MLD_PRINTF(("%s: dropped %p\n", __func__, m));
3217 /*
3218 * mld_v2_encap_report() has already freed our mbuf.
3219 */
3220 IM6O_REMREF(im6o);
3221 ip6stat.ip6s_odropped++;
3222 return;
3223 }
3224 }
3225
3226 m->m_flags &= ~(M_PROTOFLAGS);
3227 m0->m_pkthdr.rcvif = lo_ifp;
3228
3229 ip6 = mtod(m0, struct ip6_hdr *);
3230 #if 0
3231 (void) in6_setscope(&ip6->ip6_dst, ifp, NULL); /* XXX LOR */
3232 #else
3233 /*
3234 * XXX XXX Break some KPI rules to prevent an LOR which would
3235 * occur if we called in6_setscope() at transmission.
3236 * See comments at top of file.
3237 */
3238 MLD_EMBEDSCOPE(&ip6->ip6_dst, ifp->if_index);
3239 #endif
3240
3241 /*
3242 * Retrieve the ICMPv6 type before handoff to ip6_output(),
3243 * so we can bump the stats.
3244 */
3245 md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
3246 mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off);
3247 type = mld->mld_type;
3248
3249 error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, im6o,
3250 &oifp, NULL);
3251
3252 IM6O_REMREF(im6o);
3253
3254 if (error) {
3255 MLD_PRINTF(("%s: ip6_output(%p) = %d\n", __func__, m0, error));
3256 if (oifp != NULL)
3257 ifnet_release(oifp);
3258 return;
3259 }
3260
3261 icmp6stat.icp6s_outhist[type]++;
3262 if (oifp != NULL) {
3263 icmp6_ifstat_inc(oifp, ifs6_out_msg);
3264 switch (type) {
3265 case MLD_LISTENER_REPORT:
3266 case MLDV2_LISTENER_REPORT:
3267 icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
3268 break;
3269 case MLD_LISTENER_DONE:
3270 icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
3271 break;
3272 }
3273 ifnet_release(oifp);
3274 }
3275 }
3276
3277 /*
3278 * Encapsulate an MLDv2 report.
3279 *
3280 * KAME IPv6 requires that hop-by-hop options be passed separately,
3281 * and that the IPv6 header be prepended in a separate mbuf.
3282 *
3283 * Returns a pointer to the new mbuf chain head, or NULL if the
3284 * allocation failed.
3285 */
3286 static struct mbuf *
3287 mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
3288 {
3289 struct mbuf *mh;
3290 struct mldv2_report *mld;
3291 struct ip6_hdr *ip6;
3292 struct in6_ifaddr *ia;
3293 int mldreclen;
3294
3295 VERIFY(m->m_flags & M_PKTHDR);
3296
3297 /*
3298 * RFC3590: OK to send as :: or tentative during DAD.
3299 */
3300 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
3301 if (ia == NULL)
3302 MLD_PRINTF(("%s: warning: ia is NULL\n", __func__));
3303
3304 MGETHDR(mh, M_DONTWAIT, MT_HEADER);
3305 if (mh == NULL) {
3306 if (ia != NULL)
3307 IFA_REMREF(&ia->ia_ifa);
3308 m_freem(m);
3309 return (NULL);
3310 }
3311 MH_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));
3312
3313 mldreclen = m_length(m);
3314 MLD_PRINTF(("%s: mldreclen is %d\n", __func__, mldreclen));
3315
3316 mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report);
3317 mh->m_pkthdr.len = sizeof(struct ip6_hdr) +
3318 sizeof(struct mldv2_report) + mldreclen;
3319
3320 ip6 = mtod(mh, struct ip6_hdr *);
3321 ip6->ip6_flow = 0;
3322 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
3323 ip6->ip6_vfc |= IPV6_VERSION;
3324 ip6->ip6_nxt = IPPROTO_ICMPV6;
3325 if (ia != NULL)
3326 IFA_LOCK(&ia->ia_ifa);
3327 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
3328 if (ia != NULL) {
3329 IFA_UNLOCK(&ia->ia_ifa);
3330 IFA_REMREF(&ia->ia_ifa);
3331 ia = NULL;
3332 }
3333 ip6->ip6_dst = in6addr_linklocal_allv2routers;
3334 /* scope ID will be set in netisr */
3335
3336 mld = (struct mldv2_report *)(ip6 + 1);
3337 mld->mld_type = MLDV2_LISTENER_REPORT;
3338 mld->mld_code = 0;
3339 mld->mld_cksum = 0;
3340 mld->mld_v2_reserved = 0;
3341 mld->mld_v2_numrecs = htons(m->m_pkthdr.vt_nrecs);
3342 m->m_pkthdr.vt_nrecs = 0;
3343 m->m_flags &= ~M_PKTHDR;
3344
3345 mh->m_next = m;
3346 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
3347 sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
3348 return (mh);
3349 }
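/*
 * For reference, after encapsulation and transmission through
 * ip6_output() with mld_po, an MLDv2 report appears on the wire as:
 *
 *	[ IPv6: src = link-local or ::, dst = ff02::16, hop limit 1 ]
 *	[ hop-by-hop options: Router Alert (MLD)                    ]
 *	[ ICMPv6 type 143 (MLDV2_LISTENER_REPORT), N group records  ]
 *
 * where ff02::16 (in6addr_linklocal_allv2routers) is the
 * all-MLDv2-capable-routers group defined by RFC 3810.
 */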
3350
3351 #ifdef MLD_DEBUG
3352 static const char *
3353 mld_rec_type_to_str(const int type)
3354 {
3355 switch (type) {
3356 case MLD_CHANGE_TO_EXCLUDE_MODE:
3357 return "TO_EX";
3358 break;
3359 case MLD_CHANGE_TO_INCLUDE_MODE:
3360 return "TO_IN";
3361 break;
3362 case MLD_MODE_IS_EXCLUDE:
3363 return "MODE_EX";
3364 break;
3365 case MLD_MODE_IS_INCLUDE:
3366 return "MODE_IN";
3367 break;
3368 case MLD_ALLOW_NEW_SOURCES:
3369 return "ALLOW_NEW";
3370 break;
3371 case MLD_BLOCK_OLD_SOURCES:
3372 return "BLOCK_OLD";
3373 break;
3374 default:
3375 break;
3376 }
3377 return "unknown";
3378 }
3379 #endif
3380
3381 void
3382 mld_init(void)
3383 {
3384
3385 MLD_PRINTF(("%s: initializing\n", __func__));
3386
3387 /* Setup lock group and attribute for mld6_mtx */
3388 mld_mtx_grp_attr = lck_grp_attr_alloc_init();
3389 mld_mtx_grp = lck_grp_alloc_init("mld_mtx", mld_mtx_grp_attr);
3390 mld_mtx_attr = lck_attr_alloc_init();
3391 lck_mtx_init(&mld_mtx, mld_mtx_grp, mld_mtx_attr);
3392
3393 ip6_initpktopts(&mld_po);
3394 mld_po.ip6po_hlim = 1;
3395 mld_po.ip6po_hbh = &mld_ra.hbh;
3396 mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
3397 mld_po.ip6po_flags = IP6PO_DONTFRAG;
3398 LIST_INIT(&mli_head);
3399
3400 mli_size = sizeof (struct mld_ifinfo);
3401 mli_zone = zinit(mli_size, MLI_ZONE_MAX * mli_size,
3402 0, MLI_ZONE_NAME);
3403 if (mli_zone == NULL) {
3404 panic("%s: failed allocating %s", __func__, MLI_ZONE_NAME);
3405 /* NOTREACHED */
3406 }
3407 zone_change(mli_zone, Z_EXPAND, TRUE);
3408 zone_change(mli_zone, Z_CALLERACCT, FALSE);
3409 }