/*
 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*-
 * Copyright (c) 2009 Bruce Simpson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1988 Stephen Deering.
 * Copyright (c) 1992, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Stephen Deering of Stanford University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)igmp.c      8.1 (Berkeley) 7/19/93
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mcache.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/mld6.h>
#include <netinet6/mld6_var.h>
/* Lock group and attribute for mld6_mtx */
static lck_attr_t       *mld_mtx_attr;
static lck_grp_t        *mld_mtx_grp;
static lck_grp_attr_t   *mld_mtx_grp_attr;
/*
 * Locking and reference counting:
 *
 * mld_mtx mainly protects mli_head.  In cases where both mld_mtx and
 * in6_multihead_lock must be held, the former must be acquired first in order
 * to maintain lock ordering.  It is not a requirement that mld_mtx be
 * acquired first before in6_multihead_lock, but in case both must be acquired
 * in succession, the correct lock ordering must be followed.
 *
 * Instead of walking the if_multiaddrs list at the interface and returning
 * the ifma_protospec value of a matching entry, we search the global list
 * of in6_multi records and find it that way; this is done with in6_multihead
 * lock held.  Doing so avoids the race condition issues that many other BSDs
 * suffer from (therefore in our implementation, ifma_protospec will never be
 * NULL for as long as the in6_multi is valid.)
 *
 * The above creates a requirement for the in6_multi to stay in the
 * in6_multihead list even after the final MLD leave (in MLDv2 mode), until
 * it no longer needs to be retransmitted (this is not required for MLDv1.)
 * In order to handle this, the request and reference counts of the in6_multi
 * are bumped up when the state changes to MLD_LEAVING_MEMBER, and later
 * dropped in the timeout handler.  Each in6_multi holds a reference to the
 * underlying mld_ifinfo.
 *
 * Thus, the permitted lock order is:
 *
 *      mld_mtx, in6_multihead_lock, inm6_lock, mli_lock
 *
 * Any may be taken independently, but if any are held at the same time,
 * the above lock order must be followed.
 */
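/*
 * For instance, a path that needs the subsystem lock, the global
 * membership list lock and a per-group lock together would take them
 * in exactly that order (a sketch; most paths take only a subset):
 *
 *      MLD_LOCK();
 *      in6_multihead_lock_shared();
 *      IN6M_LOCK(inm);
 *      ...
 *      IN6M_UNLOCK(inm);
 *      in6_multihead_lock_done();
 *      MLD_UNLOCK();
 */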
static decl_lck_mtx_data(, mld_mtx);
static void     mli_initvar(struct mld_ifinfo *, struct ifnet *, int);
static struct mld_ifinfo *mli_alloc(int);
static void     mli_free(struct mld_ifinfo *);
static void     mli_delete(const struct ifnet *);
static void     mld_dispatch_packet(struct mbuf *);
static void     mld_final_leave(struct in6_multi *, struct mld_ifinfo *);
static int      mld_handle_state_change(struct in6_multi *,
                    struct mld_ifinfo *);
static int      mld_initial_join(struct in6_multi *, struct mld_ifinfo *,
                    const int);
static const char *     mld_rec_type_to_str(const int);
static void     mld_set_version(struct mld_ifinfo *, const int);
static void     mld_flush_relq(struct mld_ifinfo *);
static void     mld_dispatch_queue(struct mld_ifinfo *, struct ifqueue *, int);
static int      mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
                    /*const*/ struct mld_hdr *);
static int      mld_v1_input_report(struct ifnet *, const struct ip6_hdr *,
                    /*const*/ struct mld_hdr *);
static void     mld_v1_process_group_timer(struct in6_multi *, const int);
static void     mld_v1_process_querier_timers(struct mld_ifinfo *);
static int      mld_v1_transmit_report(struct in6_multi *, const int);
static void     mld_v1_update_group(struct in6_multi *, const int);
static void     mld_v2_cancel_link_timers(struct mld_ifinfo *);
static void     mld_v2_dispatch_general_query(struct mld_ifinfo *);
static struct mbuf *
                mld_v2_encap_report(struct ifnet *, struct mbuf *);
static int      mld_v2_enqueue_filter_change(struct ifqueue *,
                    struct in6_multi *);
static int      mld_v2_enqueue_group_record(struct ifqueue *,
                    struct in6_multi *, const int, const int, const int,
                    const int);
static int      mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
                    struct mbuf *, const int, const int);
static int      mld_v2_merge_state_changes(struct in6_multi *,
                    struct ifqueue *);
static void     mld_v2_process_group_timers(struct mld_ifinfo *,
                    struct ifqueue *, struct ifqueue *,
                    struct in6_multi *, const int);
static int      mld_v2_process_group_query(struct in6_multi *,
                    int, struct mbuf *, const int);
static int      sysctl_mld_gsr SYSCTL_HANDLER_ARGS;
static int      sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS;
/*
 * Normative references: RFC 2710, RFC 3590, RFC 3810.
 *
 *  A special case for IPv6 is the in6_setscope() routine. ip6_output()
 *  will not accept an ifp; it wants an embedded scope ID, unlike
 *  ip_output(), which happily takes the ifp given to it. The embedded
 *  scope ID is only used by MLD to select the outgoing interface.
 *
 *  As such, we exploit the fact that the scope ID is just the interface
 *  index, and embed it in the IPv6 destination address accordingly.
 *  This is potentially NOT VALID for MLDv1 reports, as they
 *  are always sent to the multicast group itself; as MLDv2
 *  reports are always sent to ff02::16, this is not an issue
 *  when MLDv2 is in use.
 */
#define MLD_EMBEDSCOPE(pin6, zoneid) \
        (pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF)
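/*
 * Example: embedding zone ID 4 (interface index 4) turns ff02::16 into
 * ff02:4::16 in the kernel's internal representation; in6_clearscope()
 * strips the embedded index again before the address is placed on the
 * wire or handed to userland.
 */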
static struct timeval mld_gsrdelay = {10, 0};
static LIST_HEAD(, mld_ifinfo) mli_head;

static int interface_timers_running6;
static int state_change_timers_running6;
static int current_state_timers_running6;

static decl_lck_mtx_data(, mld6_mtx);
#define MLD_LOCK()                      \
        lck_mtx_lock(&mld6_mtx)
#define MLD_LOCK_ASSERT_HELD()          \
        lck_mtx_assert(&mld6_mtx, LCK_MTX_ASSERT_OWNED)
#define MLD_LOCK_ASSERT_NOTHELD()       \
        lck_mtx_assert(&mld6_mtx, LCK_MTX_ASSERT_NOTOWNED)
#define MLD_UNLOCK()                    \
        lck_mtx_unlock(&mld6_mtx)
#define MLI_ZONE_MAX            64              /* maximum elements in zone */
#define MLI_ZONE_NAME           "mld_ifinfo"    /* zone name */

static unsigned int mli_size;                   /* size of zone element */
static struct zone *mli_zone;                   /* zone for mld_ifinfo */
SYSCTL_DECL(_net_inet6);        /* Note: Not in any common header. */

SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "IPv6 Multicast Listener Discovery");
SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_gsrdelay.tv_sec, 0, sysctl_mld_gsr, "I",
    "Rate limit for MLDv2 Group-and-Source queries in seconds");
SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_LOCKED,
    sysctl_mld_ifinfo, "Per-interface MLDv2 state");
static int      mld_v1enable = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_v1enable, 0, "Enable fallback to MLDv1");
static int      mld_use_allow = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
static int      mld_debug;      /* enables MLD_PRINTF() debug output */
SYSCTL_INT(_net_inet6_mld, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_debug, 0, "");
/*
 * Packed Router Alert option structure declaration.
 */
struct mld_raopt {
        struct ip6_hbh          hbh;
        struct ip6_opt          pad;
        struct ip6_opt_router   ra;
} __packed;

/*
 * Router Alert hop-by-hop option header.
 */
static struct mld_raopt mld_ra = {
        .hbh = { 0, 0 },
        .pad = { .ip6o_type = IP6OPT_PADN, 0 },
        .ra = {
            .ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT,
            .ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2),
            .ip6or_value =  {((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
                (IP6OPT_RTALERT_MLD & 0xFF) }
        }
};
static struct ip6_pktopts mld_po;
/*
 * Retrieve or set threshold between group-source queries in seconds.
 */
static int
sysctl_mld_gsr SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
        int error;
        int i;

        MLD_LOCK();

        i = mld_gsrdelay.tv_sec;

        error = sysctl_handle_int(oidp, &i, 0, req);
        if (error || !req->newptr)
                goto out_locked;

        if (i < -1 || i >= 60) {
                error = EINVAL;
                goto out_locked;
        }

        mld_gsrdelay.tv_sec = i;

out_locked:
        MLD_UNLOCK();
        return (error);
}
/*
 * Expose struct mld_ifinfo to userland, keyed by ifindex.
 * For use by ifmcstat(8).
 */
static int
sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
        int                     *name;
        int                      error;
        u_int                    namelen;
        struct ifnet            *ifp;
        struct mld_ifinfo       *mli;
        struct mld_ifinfo_u      mli_u;

        name = (int *)arg1;
        namelen = arg2;

        if (req->newptr != USER_ADDR_NULL)
                return (EPERM);

        if (namelen != 1)
                return (EINVAL);

        MLD_LOCK();

        if (name[0] <= 0 || name[0] > (u_int)if_index) {
                error = ENOENT;
                goto out_locked;
        }

        error = ENOENT;

        ifnet_head_lock_shared();
        ifp = ifindex2ifnet[name[0]];
        ifnet_head_done();
        if (ifp == NULL)
                goto out_locked;

        bzero(&mli_u, sizeof (mli_u));

        LIST_FOREACH(mli, &mli_head, mli_link) {
                MLI_LOCK(mli);
                if (ifp != mli->mli_ifp) {
                        MLI_UNLOCK(mli);
                        continue;
                }

                mli_u.mli_ifindex = mli->mli_ifp->if_index;
                mli_u.mli_version = mli->mli_version;
                mli_u.mli_v1_timer = mli->mli_v1_timer;
                mli_u.mli_v2_timer = mli->mli_v2_timer;
                mli_u.mli_flags = mli->mli_flags;
                mli_u.mli_rv = mli->mli_rv;
                mli_u.mli_qi = mli->mli_qi;
                mli_u.mli_qri = mli->mli_qri;
                mli_u.mli_uri = mli->mli_uri;
                MLI_UNLOCK(mli);

                error = SYSCTL_OUT(req, &mli_u, sizeof (mli_u));
                break;
        }

out_locked:
        MLD_UNLOCK();
        return (error);
}
/*
 * Dispatch an entire queue of pending packet chains.
 *
 * Must not be called with in6m_lock held.
 */
static void
mld_dispatch_queue(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit)
{
        struct mbuf *m;

        for (;;) {
                if (mli != NULL)
                        MLI_LOCK_ASSERT_HELD(mli);
                IF_DEQUEUE(ifq, m);
                if (m == NULL)
                        break;
                MLD_PRINTF(("%s: dispatch %p from %p\n", __func__, ifq, m));
                if (mli != NULL)
                        MLI_UNLOCK(mli);
                mld_dispatch_packet(m);
                if (mli != NULL)
                        MLI_LOCK(mli);
                if (--limit == 0)
                        break;
        }

        if (mli != NULL)
                MLI_LOCK_ASSERT_HELD(mli);
}
/*
 * Filter outgoing MLD report state by group.
 *
 * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1)
 * and node-local addresses.  However, kernel and socket consumers
 * always embed the KAME scope ID in the address provided, so strip it
 * when performing comparison.
 * Note: This is not the same as the *multicast* scope.
 *
 * Return zero if the given group is one for which MLD reports
 * should be suppressed, or non-zero if reports should be issued.
 */
static __inline__ int
mld_is_addr_reported(const struct in6_addr *addr)
{
        VERIFY(IN6_IS_ADDR_MULTICAST(addr));

        if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL)
                return (0);

        if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) {
                struct in6_addr tmp = *addr;
                in6_clearscope(&tmp);
                if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes))
                        return (0);
        }

        return (1);
}
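/*
 * Examples: reports are never issued for ff02::1 (all-nodes) or for any
 * node-local (ff01::/16) group, while a solicited-node group such as
 * ff02::1:ff00:1 is reported normally.
 */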
/*
 * Attach MLD when PF_INET6 is attached to an interface.
 */
struct mld_ifinfo *
mld_domifattach(struct ifnet *ifp, int how)
{
        struct mld_ifinfo *mli;

        MLD_PRINTF(("%s: called for ifp %p(%s%d)\n",
            __func__, ifp, ifp->if_name, ifp->if_unit));

        mli = mli_alloc(how);
        if (mli == NULL)
                return (NULL);

        MLD_LOCK();

        MLI_LOCK(mli);
        mli_initvar(mli, ifp, 0);
        mli->mli_debug |= IFD_ATTACHED;
        MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
        MLI_ADDREF_LOCKED(mli); /* hold a reference for caller */
        MLI_UNLOCK(mli);

        LIST_INSERT_HEAD(&mli_head, mli, mli_link);

        MLD_UNLOCK();

        MLD_PRINTF(("allocate mld_ifinfo for ifp %p(%s%d)\n",
             ifp, ifp->if_name, ifp->if_unit));

        return (mli);
}
/*
 * Attach MLD when PF_INET6 is reattached to an interface.  Caller is
 * expected to have an outstanding reference to the mli.
 */
void
mld_domifreattach(struct mld_ifinfo *mli)
{
        struct ifnet *ifp;

        MLD_LOCK();

        MLI_LOCK(mli);
        VERIFY(!(mli->mli_debug & IFD_ATTACHED));
        ifp = mli->mli_ifp;
        VERIFY(ifp != NULL);
        mli_initvar(mli, ifp, 1);
        mli->mli_debug |= IFD_ATTACHED;
        MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
        MLI_UNLOCK(mli);

        LIST_INSERT_HEAD(&mli_head, mli, mli_link);

        MLD_UNLOCK();

        MLD_PRINTF(("reattached mld_ifinfo for ifp %p(%s%d)\n",
             ifp, ifp->if_name, ifp->if_unit));
}
/*
 * Hook for domifdetach.
 */
void
mld_domifdetach(struct ifnet *ifp)
{
        MLD_PRINTF(("%s: called for ifp %p(%s%d)\n",
            __func__, ifp, ifp->if_name, ifp->if_unit));

        MLD_LOCK();
        mli_delete(ifp);
        MLD_UNLOCK();
}
/*
 * Called at interface detach time.  Note that we only flush all deferred
 * responses and record releases; all remaining inm records and their source
 * entries related to this interface are left intact, in order to handle
 * the reattach case.
 */
static void
mli_delete(const struct ifnet *ifp)
{
        struct mld_ifinfo *mli, *tmli;

        MLD_LOCK_ASSERT_HELD();

        LIST_FOREACH_SAFE(mli, &mli_head, mli_link, tmli) {
                MLI_LOCK(mli);
                if (mli->mli_ifp == ifp) {
                        /*
                         * Free deferred General Query responses.
                         */
                        IF_DRAIN(&mli->mli_gq);
                        IF_DRAIN(&mli->mli_v1q);
                        VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
                        mli->mli_debug &= ~IFD_ATTACHED;
                        MLI_UNLOCK(mli);

                        LIST_REMOVE(mli, mli_link);
                        MLI_REMREF(mli); /* release mli_head reference */
                        return;
                }
                MLI_UNLOCK(mli);
        }
        panic("%s: mld_ifinfo not found for ifp %p\n", __func__,  ifp);
}
static void
mli_initvar(struct mld_ifinfo *mli, struct ifnet *ifp, int reattach)
{
        MLI_LOCK_ASSERT_HELD(mli);

        mli->mli_ifp = ifp;
        mli->mli_version = MLD_VERSION_2;
        mli->mli_flags = 0;
        mli->mli_rv = MLD_RV_INIT;
        mli->mli_qi = MLD_QI_INIT;
        mli->mli_qri = MLD_QRI_INIT;
        mli->mli_uri = MLD_URI_INIT;

        /* ifnet is not yet attached; no need to hold ifnet lock */
        if (!(ifp->if_flags & IFF_MULTICAST))
                mli->mli_flags |= MLIF_SILENT;
        if (mld_use_allow)
                mli->mli_flags |= MLIF_USEALLOW;
        if (!reattach)
                SLIST_INIT(&mli->mli_relinmhead);

        /*
         * Responses to general queries are subject to bounds.
         */
        mli->mli_gq.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS;
        mli->mli_v1q.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS;
}
static struct mld_ifinfo *
mli_alloc(int how)
{
        struct mld_ifinfo *mli;

        mli = (how == M_WAITOK) ? zalloc(mli_zone) : zalloc_noblock(mli_zone);
        if (mli != NULL) {
                bzero(mli, mli_size);
                lck_mtx_init(&mli->mli_lock, mld_mtx_grp, mld_mtx_attr);
                mli->mli_debug |= IFD_ALLOC;
        }
        return (mli);
}
static void
mli_free(struct mld_ifinfo *mli)
{
        MLI_LOCK(mli);
        if (mli->mli_debug & IFD_ATTACHED) {
                panic("%s: attached mli=%p is being freed", __func__, mli);
                /* NOTREACHED */
        } else if (mli->mli_ifp != NULL) {
                panic("%s: ifp not NULL for mli=%p", __func__, mli);
                /* NOTREACHED */
        } else if (!(mli->mli_debug & IFD_ALLOC)) {
                panic("%s: mli %p cannot be freed", __func__, mli);
                /* NOTREACHED */
        } else if (mli->mli_refcnt != 0) {
                panic("%s: non-zero refcnt mli=%p", __func__, mli);
                /* NOTREACHED */
        }
        mli->mli_debug &= ~IFD_ALLOC;
        MLI_UNLOCK(mli);

        lck_mtx_destroy(&mli->mli_lock, mld_mtx_grp);
        zfree(mli_zone, mli);
}
void
mli_addref(struct mld_ifinfo *mli, int locked)
{
        if (!locked)
                MLI_LOCK(mli);
        else
                MLI_LOCK_ASSERT_HELD(mli);

        if (++mli->mli_refcnt == 0) {
                panic("%s: mli=%p wraparound refcnt", __func__, mli);
                /* NOTREACHED */
        }
        if (!locked)
                MLI_UNLOCK(mli);
}
void
mli_remref(struct mld_ifinfo *mli)
{
        struct ifnet *ifp;

        MLI_LOCK(mli);

        if (mli->mli_refcnt == 0) {
                panic("%s: mli=%p negative refcnt", __func__, mli);
                /* NOTREACHED */
        }

        --mli->mli_refcnt;
        if (mli->mli_refcnt > 0) {
                MLI_UNLOCK(mli);
                return;
        }

        ifp = mli->mli_ifp;
        mli->mli_ifp = NULL;
        IF_DRAIN(&mli->mli_gq);
        IF_DRAIN(&mli->mli_v1q);
        VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
        MLI_UNLOCK(mli);

        MLD_PRINTF(("%s: freeing mld_ifinfo for ifp %p(%s%d)\n",
            __func__, ifp, ifp->if_name, ifp->if_unit));

        mli_free(mli);
}
/*
 * Process a received MLDv1 general or address-specific query.
 * Assumes that the query header has been pulled up to sizeof(mld_hdr).
 *
 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
 * mld_addr. This is OK as we own the mbuf chain.
 */
static int
mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
    /*const*/ struct mld_hdr *mld)
{
        struct mld_ifinfo       *mli;
        struct in6_multi        *inm;
        int                      is_general_query;
        uint16_t                 timer;

        is_general_query = 0;

        if (!mld_v1enable) {
                MLD_PRINTF(("ignore v1 query %s on ifp %p(%s%d)\n",
                    ip6_sprintf(&mld->mld_addr),
                    ifp, ifp->if_name, ifp->if_unit));
                return (0);
        }

        /*
         * RFC3810 Section 6.2: MLD queries must originate from
         * a router's link-local address.
         */
        if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
                MLD_PRINTF(("ignore v1 query src %s on ifp %p(%s%d)\n",
                    ip6_sprintf(&ip6->ip6_src),
                    ifp, ifp->if_name, ifp->if_unit));
                return (0);
        }

        /*
         * Do address field validation upfront before we accept
         * the query.
         */
        if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
                /*
                 * MLDv1 General Query.
                 * If this was not sent to the all-nodes group, ignore it.
                 */
                struct in6_addr          dst;

                dst = ip6->ip6_dst;
                in6_clearscope(&dst);
                if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes))
                        return (EINVAL);
                is_general_query = 1;
        } else {
                /*
                 * Embed scope ID of receiving interface in MLD query for
                 * lookup whilst we don't hold other locks.
                 */
                in6_setscope(&mld->mld_addr, ifp, NULL);
        }

        /*
         * Switch to MLDv1 host compatibility mode.
         */
        mli = MLD_IFINFO(ifp);
        VERIFY(mli != NULL);

        MLI_LOCK(mli);
        mld_set_version(mli, MLD_VERSION_1);
        MLI_UNLOCK(mli);

        timer = (ntohs(mld->mld_maxdelay) * PR_SLOWHZ) / MLD_TIMER_SCALE;
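        /*
         * mld_maxdelay is expressed in milliseconds; dividing by
         * MLD_TIMER_SCALE converts it into PR_SLOWHZ timeout ticks.
         */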
        if (timer == 0)
                timer = 1;

        if (is_general_query) {
                struct in6_multistep step;

                MLD_PRINTF(("process v1 general query on ifp %p(%s%d)\n",
                    ifp, ifp->if_name, ifp->if_unit));
                /*
                 * For each reporting group joined on this
                 * interface, kick the report timer.
                 */
                in6_multihead_lock_shared();
                IN6_FIRST_MULTI(step, inm);
                while (inm != NULL) {
                        IN6M_LOCK(inm);
                        if (inm->in6m_ifp == ifp)
                                mld_v1_update_group(inm, timer);
                        IN6M_UNLOCK(inm);
                        IN6_NEXT_MULTI(step, inm);
                }
                in6_multihead_lock_done();
        } else {
                /*
                 * MLDv1 Group-Specific Query.
                 * If this is a group-specific MLDv1 query, we need only
                 * look up the single group to process it.
                 */
                in6_multihead_lock_shared();
                IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
                in6_multihead_lock_done();

                if (inm != NULL) {
                        IN6M_LOCK(inm);
                        MLD_PRINTF(("process v1 query %s on ifp %p(%s%d)\n",
                            ip6_sprintf(&mld->mld_addr),
                            ifp, ifp->if_name, ifp->if_unit));
                        mld_v1_update_group(inm, timer);
                        IN6M_UNLOCK(inm);
                        IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
                }
                /* XXX Clear embedded scope ID as userland won't expect it. */
                in6_clearscope(&mld->mld_addr);
        }

        return (0);
}
/*
 * Update the report timer on a group in response to an MLDv1 query.
 *
 * If we are becoming the reporting member for this group, start the timer.
 * If we already are the reporting member for this group, and timer is
 * below the threshold, reset it.
 *
 * We may be updating the group for the first time since we switched
 * to MLDv2. If we are, then we must clear any recorded source lists,
 * and transition to REPORTING state; the group timer is overloaded
 * for group and group-source query responses.
 *
 * Unlike MLDv2, the delay per group should be jittered
 * to avoid bursts of MLDv1 reports.
 */
static void
mld_v1_update_group(struct in6_multi *inm, const int timer)
{
        IN6M_LOCK_ASSERT_HELD(inm);

        MLD_PRINTF(("%s: %s/%s%d timer=%d\n", __func__,
            ip6_sprintf(&inm->in6m_addr),
            inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit, timer));

        switch (inm->in6m_state) {
        case MLD_NOT_MEMBER:
        case MLD_SILENT_MEMBER:
                break;
        case MLD_REPORTING_MEMBER:
                if (inm->in6m_timer != 0 &&
                    inm->in6m_timer <= timer) {
                        MLD_PRINTF(("%s: REPORTING and timer running, "
                            "skipping.\n", __func__));
                        break;
                }
                /* FALLTHROUGH */
        case MLD_SG_QUERY_PENDING_MEMBER:
        case MLD_G_QUERY_PENDING_MEMBER:
        case MLD_IDLE_MEMBER:
        case MLD_LAZY_MEMBER:
        case MLD_AWAKENING_MEMBER:
                MLD_PRINTF(("%s: ->REPORTING\n", __func__));
                inm->in6m_state = MLD_REPORTING_MEMBER;
                inm->in6m_timer = MLD_RANDOM_DELAY(timer);
                current_state_timers_running6 = 1;
                break;
        case MLD_SLEEPING_MEMBER:
                MLD_PRINTF(("%s: ->AWAKENING\n", __func__));
                inm->in6m_state = MLD_AWAKENING_MEMBER;
                break;
        case MLD_LEAVING_MEMBER:
                break;
        }
}
/*
 * Process a received MLDv2 general, group-specific or
 * group-and-source-specific query.
 *
 * Assumes that the query header has been pulled up to sizeof(mldv2_query).
 *
 * Return 0 if successful, otherwise an appropriate error code is returned.
 */
static int
mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
    struct mbuf *m, const int off, const int icmp6len)
{
        struct mld_ifinfo       *mli;
        struct mldv2_query      *mld;
        struct in6_multi        *inm;
        uint32_t                 maxdelay, nsrc, qqi;
        int                      is_general_query;
        uint16_t                 timer;
        uint8_t                  qrv;

        is_general_query = 0;

        /*
         * RFC3810 Section 6.2: MLD queries must originate from
         * a router's link-local address.
         */
        if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
                MLD_PRINTF(("ignore v2 query src %s on ifp %p(%s%d)\n",
                    ip6_sprintf(&ip6->ip6_src),
                    ifp, ifp->if_name, ifp->if_unit));
                return (0);
        }

        MLD_PRINTF(("input v2 query on ifp %p(%s%d)\n", ifp, ifp->if_name,
            ifp->if_unit));

        mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off);

        maxdelay = ntohs(mld->mld_maxdelay);    /* in units of milliseconds */
        if (maxdelay >= 32768) {
                maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) <<
                           (MLD_MRC_EXP(maxdelay) + 3);
        }
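        /*
         * Maximum Response Codes of 32768 and above arrive in the RFC 3810
         * mantissa/exponent form decoded above:
         * maxdelay = (mant | 0x1000) << (exp + 3).
         */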
        timer = (maxdelay * PR_SLOWHZ) / MLD_TIMER_SCALE;
        if (timer == 0)
                timer = 1;

        qrv = MLD_QRV(mld->mld_misc);
        if (qrv < 2) {
                MLD_PRINTF(("%s: clamping qrv %d to %d\n", __func__,
                    qrv, MLD_RV_INIT));
                qrv = MLD_RV_INIT;
        }

        qqi = mld->mld_qqi;
        if (qqi >= 128) {
                qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
                     (MLD_QQIC_EXP(mld->mld_qqi) + 3);
        }
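        /*
         * As with the Maximum Response Code, QQIC values with the high bit
         * set use the RFC 3810 mantissa/exponent encoding.
         */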
        nsrc = ntohs(mld->mld_numsrc);
        if (nsrc > MLD_MAX_GS_SOURCES)
                return (EMSGSIZE);
        if (icmp6len < sizeof(struct mldv2_query) +
            (nsrc * sizeof(struct in6_addr)))
                return (EMSGSIZE);

        /*
         * Do further input validation upfront to avoid resetting timers
         * should we need to discard this query.
         */
        if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
                /*
                 * General Queries SHOULD be directed to ff02::1.
                 * A general query with a source list has undefined
                 * behaviour; discard it.
                 */
                struct in6_addr          dst;

                dst = ip6->ip6_dst;
                in6_clearscope(&dst);
                if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes) ||
                    nsrc > 0)
                        return (EINVAL);
                is_general_query = 1;
        } else {
                /*
                 * Embed scope ID of receiving interface in MLD query for
                 * lookup whilst we don't hold other locks (due to KAME
                 * locking lameness). We own this mbuf chain just now.
                 */
                in6_setscope(&mld->mld_addr, ifp, NULL);
        }

        mli = MLD_IFINFO(ifp);
        VERIFY(mli != NULL);

        MLI_LOCK(mli);
        /*
         * Discard the v2 query if we're in Compatibility Mode.
         * The RFC is pretty clear that hosts need to stay in MLDv1 mode
         * until the Old Version Querier Present timer expires.
         */
        if (mli->mli_version != MLD_VERSION_2) {
                MLI_UNLOCK(mli);
                return (0);
        }

        mld_set_version(mli, MLD_VERSION_2);
        mli->mli_rv = qrv;
        mli->mli_qi = qqi;
        mli->mli_qri = maxdelay;

        MLD_PRINTF(("%s: qrv %d qi %d maxdelay %d\n", __func__, qrv, qqi,
            maxdelay));

        if (is_general_query) {
                /*
                 * MLDv2 General Query.
                 *
                 * Schedule a current-state report on this ifp for
                 * all groups, possibly containing source lists.
                 *
                 * If there is a pending General Query response
                 * scheduled earlier than the selected delay, do
                 * not schedule any other reports.
                 * Otherwise, reset the interface timer.
                 */
                MLD_PRINTF(("process v2 general query on ifp %p(%s%d)\n",
                    ifp, ifp->if_name, ifp->if_unit));
                if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
                        mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
                        interface_timers_running6 = 1;
                }
                MLI_UNLOCK(mli);
        } else {
                MLI_UNLOCK(mli);
                /*
                 * MLDv2 Group-specific or Group-and-source-specific Query.
                 *
                 * Group-source-specific queries are throttled on
                 * a per-group basis to defeat denial-of-service attempts.
                 * Queries for groups we are not a member of on this
                 * link are simply ignored.
                 */
                in6_multihead_lock_shared();
                IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
                in6_multihead_lock_done();
                if (inm == NULL)
                        return (0);

                IN6M_LOCK(inm);
                /* TODO: need ratecheck equivalent */
                if (nsrc > 0) {
                        if (!ratecheck(&inm->in6m_lastgsrtv,
                            &mld_gsrdelay)) {
                                MLD_PRINTF(("%s: GS query throttled.\n",
                                    __func__));
                                IN6M_UNLOCK(inm);
                                IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
                                return (0);
                        }
                }
                MLD_PRINTF(("process v2 group query on ifp %p(%s%d)\n",
                     ifp, ifp->if_name, ifp->if_unit));
                /*
                 * If there is a pending General Query response
                 * scheduled sooner than the selected delay, no
                 * further report need be scheduled.
                 * Otherwise, prepare to respond to the
                 * group-specific or group-and-source query.
                 */
                MLI_LOCK(mli);
                if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
                        MLI_UNLOCK(mli);
                        mld_v2_process_group_query(inm, timer, m, off);
                } else {
                        MLI_UNLOCK(mli);
                }
                IN6M_UNLOCK(inm);
                IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
                /* XXX Clear embedded scope ID as userland won't expect it. */
                in6_clearscope(&mld->mld_addr);
        }

        return (0);
}
/*
 * Process a received MLDv2 group-specific or group-and-source-specific
 * query.
 * Return <0 if any error occurred. Currently this is ignored.
 */
static int
mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0,
    const int off)
{
        struct mldv2_query      *mld;
        int                      retval;
        uint16_t                 nsrc;

        IN6M_LOCK_ASSERT_HELD(inm);

        retval = 0;
        mld = (struct mldv2_query *)(mtod(m0, uint8_t *) + off);

        switch (inm->in6m_state) {
        case MLD_NOT_MEMBER:
        case MLD_SILENT_MEMBER:
        case MLD_SLEEPING_MEMBER:
        case MLD_LAZY_MEMBER:
        case MLD_AWAKENING_MEMBER:
        case MLD_IDLE_MEMBER:
        case MLD_LEAVING_MEMBER:
                return (retval);
        case MLD_REPORTING_MEMBER:
        case MLD_G_QUERY_PENDING_MEMBER:
        case MLD_SG_QUERY_PENDING_MEMBER:
                break;
        }

        nsrc = ntohs(mld->mld_numsrc);

        /*
         * Deal with group-specific queries upfront.
         * If any group query is already pending, purge any recorded
         * source-list state if it exists, and schedule a query response
         * for this group-specific query.
         */
        if (nsrc == 0) {
                if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
                    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
                        in6m_clear_recorded(inm);
                        timer = min(inm->in6m_timer, timer);
                }
                inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
                inm->in6m_timer = MLD_RANDOM_DELAY(timer);
                current_state_timers_running6 = 1;
                return (retval);
        }

        /*
         * Deal with the case where a group-and-source-specific query has
         * been received but a group-specific query is already pending.
         */
        if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
                timer = min(inm->in6m_timer, timer);
                inm->in6m_timer = MLD_RANDOM_DELAY(timer);
                current_state_timers_running6 = 1;
                return (retval);
        }

        /*
         * Finally, deal with the case where a group-and-source-specific
         * query has been received, where a response to a previous g-s-r
         * query exists, or none exists.
         * In this case, we need to parse the source-list which the Querier
         * has provided us with and check if we have any source list filter
         * entries at T1 for these sources. If we do not, there is no need
         * to schedule a report and the query may be dropped.
         * If we do, we must record them and schedule a current-state
         * report for those sources.
         */
        if (inm->in6m_nsrc > 0) {
                struct mbuf             *m;
                uint8_t                 *sp;
                int                      i, nrecorded;
                int                      soff;

                m = m0;
                soff = off + sizeof(struct mldv2_query);
                nrecorded = 0;
                for (i = 0; i < nsrc; i++) {
                        sp = mtod(m, uint8_t *) + soff;
                        retval = in6m_record_source(inm,
                            (const struct in6_addr *)sp);
                        if (retval < 0)
                                break;
                        nrecorded += retval;
                        soff += sizeof(struct in6_addr);
                        if (soff >= m->m_len) {
                                soff = soff - m->m_len;
                                m = m->m_next;
                                if (m == NULL)
                                        break;
                        }
                }
                if (nrecorded > 0) {
                        MLD_PRINTF(( "%s: schedule response to SG query\n",
                            __func__));
                        inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
                        inm->in6m_timer = MLD_RANDOM_DELAY(timer);
                        current_state_timers_running6 = 1;
                }
        }

        return (retval);
}
/*
 * Process a received MLDv1 host membership report.
 * Assumes mld points to mld_hdr in pulled up mbuf chain.
 *
 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
 * mld_addr. This is OK as we own the mbuf chain.
 */
static int
mld_v1_input_report(struct ifnet *ifp, const struct ip6_hdr *ip6,
    /*const*/ struct mld_hdr *mld)
{
        struct in6_addr          src, dst;
        struct in6_ifaddr       *ia;
        struct in6_multi        *inm;

        if (!mld_v1enable) {
                MLD_PRINTF(("ignore v1 report %s on ifp %p(%s%d)\n",
                    ip6_sprintf(&mld->mld_addr),
                    ifp, ifp->if_name, ifp->if_unit));
                return (0);
        }

        if (ifp->if_flags & IFF_LOOPBACK)
                return (0);

        /*
         * MLDv1 reports must originate from a host's link-local address,
         * or the unspecified address (when booting).
         */
        src = ip6->ip6_src;
        in6_clearscope(&src);
        if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
                MLD_PRINTF(("ignore v1 report src %s on ifp %p(%s%d)\n",
                    ip6_sprintf(&ip6->ip6_src),
                    ifp, ifp->if_name, ifp->if_unit));
                return (EINVAL);
        }

        /*
         * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
         * group, and must be directed to the group itself.
         */
        dst = ip6->ip6_dst;
        in6_clearscope(&dst);
        if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
            !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
                MLD_PRINTF(("ignore v1 report dst %s on ifp %p(%s%d)\n",
                    ip6_sprintf(&ip6->ip6_dst),
                    ifp, ifp->if_name, ifp->if_unit));
                return (EINVAL);
        }

        /*
         * Make sure we don't hear our own membership report, as fast
         * leave requires knowing that we are the only member of a
         * group. Assume we used the link-local address if available,
         * otherwise look for ::.
         *
         * XXX Note that scope ID comparison is needed for the address
         * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
         * performed for the on-wire address.
         */
        ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
        if (ia != NULL) {
                IFA_LOCK(&ia->ia_ifa);
                if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia)))){
                        IFA_UNLOCK(&ia->ia_ifa);
                        IFA_REMREF(&ia->ia_ifa);
                        return (0);
                }
                IFA_UNLOCK(&ia->ia_ifa);
                IFA_REMREF(&ia->ia_ifa);
        } else if (IN6_IS_ADDR_UNSPECIFIED(&src)) {
                return (0);
        }

        MLD_PRINTF(("process v1 report %s on ifp %p(%s%d)\n",
            ip6_sprintf(&mld->mld_addr), ifp, ifp->if_name, ifp->if_unit));

        /*
         * Embed scope ID of receiving interface in MLD query for lookup
         * whilst we don't hold other locks (due to KAME locking lameness).
         */
        if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
                in6_setscope(&mld->mld_addr, ifp, NULL);

        /*
         * MLDv1 report suppression.
         * If we are a member of this group, and our membership should be
         * reported, and our group timer is pending or about to be reset,
         * stop our group timer by transitioning to the 'lazy' state.
         */
        in6_multihead_lock_shared();
        IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
        in6_multihead_lock_done();

        if (inm != NULL) {
                struct mld_ifinfo *mli;

                IN6M_LOCK(inm);
                mli = inm->in6m_mli;
                VERIFY(mli != NULL);

                /*
                 * If we are in MLDv2 host mode, do not allow the
                 * other host's MLDv1 report to suppress our reports.
                 */
                MLI_LOCK(mli);
                if (mli->mli_version == MLD_VERSION_2) {
                        MLI_UNLOCK(mli);
                        IN6M_UNLOCK(inm);
                        IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
                        goto out;
                }
                MLI_UNLOCK(mli);

                inm->in6m_timer = 0;

                switch (inm->in6m_state) {
                case MLD_NOT_MEMBER:
                case MLD_SILENT_MEMBER:
                case MLD_SLEEPING_MEMBER:
                        break;
                case MLD_REPORTING_MEMBER:
                case MLD_IDLE_MEMBER:
                case MLD_AWAKENING_MEMBER:
                        MLD_PRINTF(("report suppressed for %s on ifp %p(%s%d)\n",
                            ip6_sprintf(&mld->mld_addr),
                            ifp, ifp->if_name, ifp->if_unit));
                        /* FALLTHROUGH */
                case MLD_LAZY_MEMBER:
                        inm->in6m_state = MLD_LAZY_MEMBER;
                        break;
                case MLD_G_QUERY_PENDING_MEMBER:
                case MLD_SG_QUERY_PENDING_MEMBER:
                case MLD_LEAVING_MEMBER:
                        break;
                }
                IN6M_UNLOCK(inm);
                IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
        }

out:
        /* XXX Clear embedded scope ID as userland won't expect it. */
        in6_clearscope(&mld->mld_addr);

        return (0);
}
/*
 * MLD input path.
 *
 * Assume query messages which fit in a single ICMPv6 message header
 * have been pulled up.
 * Assume that userland will want to see the message, even if it
 * otherwise fails kernel input validation; do not free it.
 * Pullup may however free the mbuf chain m if it fails.
 *
 * Return IPPROTO_DONE if we freed m. Otherwise, return 0.
 */
int
mld_input(struct mbuf *m, int off, int icmp6len)
{
        struct ifnet    *ifp;
        struct ip6_hdr  *ip6;
        struct mld_hdr  *mld;
        int              mldlen;

        MLD_PRINTF(("%s: called w/mbuf (%p,%d)\n", __func__, m, off));

        ifp = m->m_pkthdr.rcvif;

        ip6 = mtod(m, struct ip6_hdr *);

        /* Pullup to appropriate size. */
        mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
        if (mld->mld_type == MLD_LISTENER_QUERY &&
            icmp6len >= sizeof(struct mldv2_query)) {
                mldlen = sizeof(struct mldv2_query);
        } else {
                mldlen = sizeof(struct mld_hdr);
        }
        IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen);
        if (mld == NULL) {
                icmp6stat.icp6s_badlen++;
                return (IPPROTO_DONE);
        }

        /*
         * Userland needs to see all of this traffic for implementing
         * the endpoint discovery portion of multicast routing.
         */
        switch (mld->mld_type) {
        case MLD_LISTENER_QUERY:
                icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
                if (icmp6len == sizeof(struct mld_hdr)) {
                        if (mld_v1_input_query(ifp, ip6, mld) != 0)
                                return (0);
                } else if (icmp6len >= sizeof(struct mldv2_query)) {
                        if (mld_v2_input_query(ifp, ip6, m, off,
                            icmp6len) != 0)
                                return (0);
                }
                break;
        case MLD_LISTENER_REPORT:
                icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
                if (mld_v1_input_report(ifp, ip6, mld) != 0)
                        return (0);
                break;
        case MLDV2_LISTENER_REPORT:
                icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
                break;
        case MLD_LISTENER_DONE:
                icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
                break;
        default:
                break;
        }

        return (0);
}
/*
 * MLD6 slowtimo handler.
 * Combines both the slow and fast timer into one.  We lose some
 * responsiveness, but it allows the system to avoid having a pr_fasttimo,
 * thus allowing for power savings.
 */
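/*
 * All timer counters below are kept in PR_SLOWHZ ticks; with the classic
 * BSD value of PR_SLOWHZ == 2, one tick corresponds to 500 ms.
 */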
void
mld_slowtimo(void)
{
        struct ifqueue           scq;   /* State-change packets */
        struct ifqueue           qrq;   /* Query response packets */
        struct ifnet            *ifp;
        struct mld_ifinfo       *mli;
        struct in6_multi        *inm;
        int                      uri_fasthz = 0;

        MLD_LOCK();

        LIST_FOREACH(mli, &mli_head, mli_link) {
                MLI_LOCK(mli);
                mld_v1_process_querier_timers(mli);
                MLI_UNLOCK(mli);
        }

        /*
         * Quick check to see if any work needs to be done, in order to
         * minimize the overhead of fasttimo processing.
         */
        if (!current_state_timers_running6 &&
            !interface_timers_running6 &&
            !state_change_timers_running6)
                goto out_locked;

        /*
         * MLDv2 General Query response timer processing.
         */
        if (interface_timers_running6) {
                MLD_PRINTF(("%s: interface timers running\n", __func__));
                interface_timers_running6 = 0;
                LIST_FOREACH(mli, &mli_head, mli_link) {
                        MLI_LOCK(mli);
                        if (mli->mli_v2_timer == 0) {
                                /* Do nothing. */
                        } else if (--mli->mli_v2_timer == 0) {
                                mld_v2_dispatch_general_query(mli);
                        } else {
                                interface_timers_running6 = 1;
                        }
                        MLI_UNLOCK(mli);
                }
        }

        if (!current_state_timers_running6 &&
            !state_change_timers_running6)
                goto out_locked;

        current_state_timers_running6 = 0;
        state_change_timers_running6 = 0;

        MLD_PRINTF(("%s: state change timers running\n", __func__));

        memset(&qrq, 0, sizeof(struct ifqueue));
        qrq.ifq_maxlen = MLD_MAX_G_GS_PACKETS;

        memset(&scq, 0, sizeof(struct ifqueue));
        scq.ifq_maxlen = MLD_MAX_STATE_CHANGE_PACKETS;

        /*
         * MLD host report and state-change timer processing.
         * Note: Processing a v2 group timer may remove a node.
         */
        LIST_FOREACH(mli, &mli_head, mli_link) {
                struct in6_multistep step;

                MLI_LOCK(mli);
                ifp = mli->mli_ifp;
                uri_fasthz = MLD_RANDOM_DELAY(mli->mli_uri * PR_SLOWHZ);
                MLI_UNLOCK(mli);

                in6_multihead_lock_shared();
                IN6_FIRST_MULTI(step, inm);
                while (inm != NULL) {
                        IN6M_LOCK(inm);
                        if (inm->in6m_ifp != ifp)
                                goto next;

                        MLI_LOCK(mli);
                        switch (mli->mli_version) {
                        case MLD_VERSION_1:
                                mld_v1_process_group_timer(inm,
                                    mli->mli_version);
                                break;
                        case MLD_VERSION_2:
                                mld_v2_process_group_timers(mli, &qrq,
                                    &scq, inm, uri_fasthz);
                                break;
                        }
                        MLI_UNLOCK(mli);
next:
                        IN6M_UNLOCK(inm);
                        IN6_NEXT_MULTI(step, inm);
                }
                in6_multihead_lock_done();

                MLI_LOCK(mli);
                if (mli->mli_version == MLD_VERSION_1) {
                        mld_dispatch_queue(mli, &mli->mli_v1q, 0);
                } else if (mli->mli_version == MLD_VERSION_2) {
                        MLI_UNLOCK(mli);
                        mld_dispatch_queue(NULL, &qrq, 0);
                        mld_dispatch_queue(NULL, &scq, 0);
                        VERIFY(qrq.ifq_len == 0);
                        VERIFY(scq.ifq_len == 0);
                        MLI_LOCK(mli);
                }
                /*
                 * In case there are still any pending membership reports
                 * which didn't get drained at version change time.
                 */
                IF_DRAIN(&mli->mli_v1q);
                /*
                 * Release all deferred inm records, and drain any locally
                 * enqueued packets; do it even if the current MLD version
                 * for the link is no longer MLDv2, in order to handle the
                 * version change case.
                 */
                mld_flush_relq(mli);
                VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
                MLI_UNLOCK(mli);
        }

out_locked:
        MLD_UNLOCK();
}
/*
 * Free the in6_multi reference(s) for this MLD lifecycle.
 *
 * Caller must be holding mli_lock.
 */
static void
mld_flush_relq(struct mld_ifinfo *mli)
{
        struct in6_multi *inm;

again:
        MLI_LOCK_ASSERT_HELD(mli);
        inm = SLIST_FIRST(&mli->mli_relinmhead);
        if (inm != NULL) {
                int lastref;

                SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
                MLI_UNLOCK(mli);

                in6_multihead_lock_exclusive();
                IN6M_LOCK(inm);
                VERIFY(inm->in6m_nrelecnt != 0);
                inm->in6m_nrelecnt--;
                lastref = in6_multi_detach(inm);
                VERIFY(!lastref || (!(inm->in6m_debug & IFD_ATTACHED) &&
                    inm->in6m_reqcnt == 0));
                IN6M_UNLOCK(inm);
                in6_multihead_lock_done();
                /* from mli_relinmhead */
                IN6M_REMREF(inm);
                /* from in6_multihead_list */
                if (lastref)
                        IN6M_REMREF(inm);

                MLI_LOCK(mli);
                goto again;
        }
}
/*
 * Update host report group timer.
 * Will update the global pending timer flags.
 */
static void
mld_v1_process_group_timer(struct in6_multi *inm, const int mld_version)
{
#pragma unused(mld_version)
        int report_timer_expired;

        IN6M_LOCK_ASSERT_HELD(inm);
        MLI_LOCK_ASSERT_HELD(inm->in6m_mli);

        if (inm->in6m_timer == 0) {
                report_timer_expired = 0;
        } else if (--inm->in6m_timer == 0) {
                report_timer_expired = 1;
        } else {
                current_state_timers_running6 = 1;
                return;
        }

        switch (inm->in6m_state) {
        case MLD_NOT_MEMBER:
        case MLD_SILENT_MEMBER:
        case MLD_IDLE_MEMBER:
        case MLD_LAZY_MEMBER:
        case MLD_SLEEPING_MEMBER:
        case MLD_AWAKENING_MEMBER:
                break;
        case MLD_REPORTING_MEMBER:
                if (report_timer_expired) {
                        inm->in6m_state = MLD_IDLE_MEMBER;
                        (void) mld_v1_transmit_report(inm,
                             MLD_LISTENER_REPORT);
                        IN6M_LOCK_ASSERT_HELD(inm);
                        MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
                }
                break;
        case MLD_G_QUERY_PENDING_MEMBER:
        case MLD_SG_QUERY_PENDING_MEMBER:
        case MLD_LEAVING_MEMBER:
                break;
        }
}
/*
 * Update a group's timers for MLDv2.
 * Will update the global pending timer flags.
 * Note: Unlocked read from mli.
 */
static void
mld_v2_process_group_timers(struct mld_ifinfo *mli,
    struct ifqueue *qrq, struct ifqueue *scq,
    struct in6_multi *inm, const int uri_fasthz)
{
        int query_response_timer_expired;
        int state_change_retransmit_timer_expired;
        int retval;

        IN6M_LOCK_ASSERT_HELD(inm);
        MLI_LOCK_ASSERT_HELD(mli);
        VERIFY(mli == inm->in6m_mli);

        query_response_timer_expired = 0;
        state_change_retransmit_timer_expired = 0;

        /*
         * During a transition from compatibility mode back to MLDv2,
         * a group record in REPORTING state may still have its group
         * timer active. This is a no-op in this function; it is easier
         * to deal with it here than to complicate the slow-timeout path.
         */
        if (inm->in6m_timer == 0) {
                query_response_timer_expired = 0;
        } else if (--inm->in6m_timer == 0) {
                query_response_timer_expired = 1;
        } else {
                current_state_timers_running6 = 1;
        }

        if (inm->in6m_sctimer == 0) {
                state_change_retransmit_timer_expired = 0;
        } else if (--inm->in6m_sctimer == 0) {
                state_change_retransmit_timer_expired = 1;
        } else {
                state_change_timers_running6 = 1;
        }

        /* We are in fasttimo, so be quick about it. */
        if (!state_change_retransmit_timer_expired &&
            !query_response_timer_expired)
                return;

        switch (inm->in6m_state) {
        case MLD_NOT_MEMBER:
        case MLD_SILENT_MEMBER:
        case MLD_SLEEPING_MEMBER:
        case MLD_LAZY_MEMBER:
        case MLD_AWAKENING_MEMBER:
        case MLD_IDLE_MEMBER:
                break;
        case MLD_G_QUERY_PENDING_MEMBER:
        case MLD_SG_QUERY_PENDING_MEMBER:
                /*
                 * Respond to a previously pending Group-Specific
                 * or Group-and-Source-Specific query by enqueueing
                 * the appropriate Current-State report for
                 * immediate transmission.
                 */
                if (query_response_timer_expired) {
                        retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
                            (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER),
                            0);
                        MLD_PRINTF(("%s: enqueue record = %d\n",
                            __func__, retval));
                        inm->in6m_state = MLD_REPORTING_MEMBER;
                        in6m_clear_recorded(inm);
                }
                /* FALLTHROUGH */
        case MLD_REPORTING_MEMBER:
        case MLD_LEAVING_MEMBER:
                if (state_change_retransmit_timer_expired) {
                        /*
                         * State-change retransmission timer fired.
                         * If there are any further pending retransmissions,
                         * set the global pending state-change flag, and
                         * reset the timer.
                         */
                        if (--inm->in6m_scrv > 0) {
                                inm->in6m_sctimer = uri_fasthz;
                                state_change_timers_running6 = 1;
                        }
                        /*
                         * Retransmit the previously computed state-change
                         * report. If there are no further pending
                         * retransmissions, the mbuf queue will be consumed.
                         * Update T0 state to T1 as we have now sent
                         * a state-change.
                         */
                        (void) mld_v2_merge_state_changes(inm, scq);

                        in6m_commit(inm);
                        MLD_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__,
                            ip6_sprintf(&inm->in6m_addr),
                            inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));

                        /*
                         * If we are leaving the group for good, make sure
                         * we release MLD's reference to it.
                         * This release must be deferred using a SLIST,
                         * as we are called from a loop which traverses
                         * the in_ifmultiaddr TAILQ.
                         */
                        if (inm->in6m_state == MLD_LEAVING_MEMBER &&
                            inm->in6m_scrv == 0) {
                                inm->in6m_state = MLD_NOT_MEMBER;
                                /*
                                 * A reference has already been held in
                                 * mld_final_leave() for this inm, so
                                 * no need to hold another one.  We also
                                 * bumped up its request count then, so
                                 * that it stays in in6_multihead.  Both
                                 * of them will be released when it is
                                 * dequeued later on.
                                 */
                                VERIFY(inm->in6m_nrelecnt != 0);
                                SLIST_INSERT_HEAD(&mli->mli_relinmhead,
                                    inm, in6m_nrele);
                        }
                }
                break;
        }
}
/*
 * Switch to a different version on the given interface,
 * as per Section 9.12 of RFC 3810.
 */
static void
mld_set_version(struct mld_ifinfo *mli, const int mld_version)
{
        int old_version_timer;

        MLI_LOCK_ASSERT_HELD(mli);

        MLD_PRINTF(("%s: switching to v%d on ifp %p(%s%d)\n", __func__,
            mld_version, mli->mli_ifp, mli->mli_ifp->if_name,
            mli->mli_ifp->if_unit));

        if (mld_version == MLD_VERSION_1) {
                /*
                 * Compute the "Older Version Querier Present" timer as per
                 * Section 9.12.
                 */
                old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
                old_version_timer *= PR_SLOWHZ;
                mli->mli_v1_timer = old_version_timer;
1768         if (mli
->mli_v1_timer 
> 0 && mli
->mli_version 
!= MLD_VERSION_1
) { 
1769                 mli
->mli_version 
= MLD_VERSION_1
; 
1770                 mld_v2_cancel_link_timers(mli
); 
1773         MLI_LOCK_ASSERT_HELD(mli
); 
/*
 * Cancel pending MLDv2 timers for the given link and all groups
 * joined on it; state-change, general-query, and group-query timers.
 */
static void
mld_v2_cancel_link_timers(struct mld_ifinfo *mli)
{
	struct ifnet		*ifp;
	struct in6_multi	*inm;
	struct in6_multistep	step;

	MLI_LOCK_ASSERT_HELD(mli);

	MLD_PRINTF(("%s: cancel v2 timers on ifp %p(%s%d)\n", __func__,
	    mli->mli_ifp, mli->mli_ifp->if_name, mli->mli_ifp->if_unit));

	/*
	 * Fast-track this potentially expensive operation
	 * by checking all the global 'timer pending' flags.
	 */
	if (!interface_timers_running6 &&
	    !state_change_timers_running6 &&
	    !current_state_timers_running6)
		return;

	mli->mli_v2_timer = 0;
	ifp = mli->mli_ifp;
	MLI_UNLOCK(mli);

	in6_multihead_lock_shared();
	IN6_FIRST_MULTI(step, inm);
	while (inm != NULL) {
		IN6M_LOCK(inm);
		if (inm->in6m_ifp != ifp)
			goto next;

		switch (inm->in6m_state) {
		case MLD_NOT_MEMBER:
		case MLD_SILENT_MEMBER:
		case MLD_IDLE_MEMBER:
		case MLD_LAZY_MEMBER:
		case MLD_SLEEPING_MEMBER:
		case MLD_AWAKENING_MEMBER:
			break;
		case MLD_LEAVING_MEMBER:
			/*
			 * If we are leaving the group and switching
			 * version, we need to release the final
			 * reference held for issuing the INCLUDE {}.
			 * During mld_final_leave(), we bumped up both the
			 * request and reference counts.  Since we cannot
			 * call in6_multi_detach() here, defer this task to
			 * the timer routine.
			 */
			VERIFY(inm->in6m_nrelecnt != 0);
			MLI_LOCK(mli);
			SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
			    in6m_nrele);
			MLI_UNLOCK(mli);
			/* FALLTHROUGH */
		case MLD_G_QUERY_PENDING_MEMBER:
		case MLD_SG_QUERY_PENDING_MEMBER:
			in6m_clear_recorded(inm);
			/* FALLTHROUGH */
		case MLD_REPORTING_MEMBER:
			inm->in6m_sctimer = 0;
			inm->in6m_timer = 0;
			inm->in6m_state = MLD_REPORTING_MEMBER;
			/*
			 * Free any pending MLDv2 state-change records.
			 */
			IF_DRAIN(&inm->in6m_scq);
			break;
		}
next:
		IN6M_UNLOCK(inm);
		IN6_NEXT_MULTI(step, inm);
	}
	in6_multihead_lock_done();

	MLI_LOCK(mli);
}
/*
 * Update the Older Version Querier Present timers for a link.
 * See Section 9.12 of RFC 3810.
 */
static void
mld_v1_process_querier_timers(struct mld_ifinfo *mli)
{
	MLI_LOCK_ASSERT_HELD(mli);

	if (mli->mli_version != MLD_VERSION_2 && --mli->mli_v1_timer == 0) {
		/*
		 * MLDv1 Querier Present timer expired; revert to MLDv2.
		 */
		MLD_PRINTF(("%s: transition from v%d -> v%d on %p(%s%d)\n",
		    __func__, mli->mli_version, MLD_VERSION_2,
		    mli->mli_ifp, mli->mli_ifp->if_name,
		    mli->mli_ifp->if_unit));
		mli->mli_version = MLD_VERSION_2;
	}
}
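/*
 * Sketch of the expected interplay (inferred from Section 9.12 of
 * RFC 3810 rather than shown in this excerpt): each MLDv1 query heard
 * on the link re-arms mli_v1_timer via mld_set_version(), so the
 * decrement above only reaches zero once no MLDv1 querier has been
 * heard for a full Older Version Querier Present interval, at which
 * point the link reverts to MLDv2 operation.
 */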
/*
 * Transmit an MLDv1 report immediately.
 */
static int
mld_v1_transmit_report(struct in6_multi *in6m, const int type)
{
	struct ifnet		*ifp;
	struct in6_ifaddr	*ia;
	struct ip6_hdr		*ip6;
	struct mbuf		*mh, *md;
	struct mld_hdr		*mld;
	int			 error = 0;

	IN6M_LOCK_ASSERT_HELD(in6m);
	MLI_LOCK_ASSERT_HELD(in6m->in6m_mli);

	ifp = in6m->in6m_ifp;
	/* ia may be NULL if link-local address is tentative. */
	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);

	MGETHDR(mh, M_DONTWAIT, MT_HEADER);
	if (mh == NULL) {
		if (ia != NULL)
			IFA_REMREF(&ia->ia_ifa);
		return (ENOMEM);
	}
	MGET(md, M_DONTWAIT, MT_DATA);
	if (md == NULL) {
		if (ia != NULL)
			IFA_REMREF(&ia->ia_ifa);
		m_free(mh);
		return (ENOMEM);
	}
	mh->m_next = md;

	/*
	 * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so
	 * that ether_output() does not need to allocate another mbuf
	 * for the header in the most common case.
	 */
	MH_ALIGN(mh, sizeof(struct ip6_hdr));
	mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr);
	mh->m_len = sizeof(struct ip6_hdr);

	ip6 = mtod(mh, struct ip6_hdr *);
	ip6->ip6_flow = 0;
	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
	ip6->ip6_vfc |= IPV6_VERSION;
	ip6->ip6_nxt = IPPROTO_ICMPV6;
	if (ia != NULL)
		IFA_LOCK(&ia->ia_ifa);
	ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
	if (ia != NULL) {
		IFA_UNLOCK(&ia->ia_ifa);
		IFA_REMREF(&ia->ia_ifa);
		ia = NULL;
	}
	ip6->ip6_dst = in6m->in6m_addr;

	md->m_len = sizeof(struct mld_hdr);
	mld = mtod(md, struct mld_hdr *);
	mld->mld_type = type;
	mld->mld_code = 0;
	mld->mld_cksum = 0;
	mld->mld_maxdelay = 0;
	mld->mld_reserved = 0;
	mld->mld_addr = in6m->in6m_addr;
	in6_clearscope(&mld->mld_addr);
	mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
	    sizeof(struct ip6_hdr), sizeof(struct mld_hdr));

	mh->m_flags |= M_MLDV1;

	/*
	 * Due to the fact that at this point we are possibly holding
	 * in6_multihead_lock in shared or exclusive mode, we can't call
	 * mld_dispatch_packet() here since that will eventually call
	 * ip6_output(), which will try to lock in6_multihead_lock and cause
	 * a deadlock.
	 * Instead we defer the work to the mld_slowtimo() thread, thus
	 * avoiding unlocking in_multihead_lock here.
	 */
	if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) {
		MLD_PRINTF(("%s: v1 outbound queue full\n", __func__));
		error = ENOMEM;
		m_freem(mh);
	} else {
		IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh);
	}

	return (error);
}
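/*
 * For reference, the wire image built above is a two-mbuf chain (layout
 * recovered from the code; lengths follow from the structures used):
 *
 *	mh: [ struct ip6_hdr ]	40 bytes, leading space via MH_ALIGN()
 *	md: [ struct mld_hdr ]	24 bytes (ICMPv6 header + group address)
 *
 * The checksum covers only the ICMPv6 (MLD) portion, and the M_MLDV1
 * flag tells mld_dispatch_packet() to skip MLDv2 encapsulation later.
 */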
/*
 * Process a state change from the upper layer for the given IPv6 group.
 *
 * Each socket holds a reference on the in6_multi in its own ip_moptions.
 * The socket layer will have made the necessary updates to the group
 * state, it is now up to MLD to issue a state change report if there
 * has been any change between T0 (when the last state-change was issued)
 * and T1 (now).
 *
 * We use the MLDv2 state machine at group level. The MLD module
 * however makes the decision as to which MLD protocol version to speak.
 * A state change *from* INCLUDE {} always means an initial join.
 * A state change *to* INCLUDE {} always means a final leave.
 *
 * If delay is non-zero, and the state change is an initial multicast
 * join, the state change report will be delayed by 'delay' ticks
 * in units of PR_FASTHZ if MLDv1 is active on the link; otherwise
 * the initial MLDv2 state change report will be delayed by whichever
 * is sooner, a pending state-change timer or delay itself.
 */
int
mld_change_state(struct in6_multi *inm, const int delay)
{
	struct mld_ifinfo	*mli;
	struct ifnet		*ifp;
	int			 error = 0;

	IN6M_LOCK_ASSERT_HELD(inm);
	VERIFY(inm->in6m_mli != NULL);
	MLI_LOCK_ASSERT_NOTHELD(inm->in6m_mli);

	/*
	 * Try to detect if the upper layer just asked us to change state
	 * for an interface which has now gone away.
	 */
	VERIFY(inm->in6m_ifma != NULL);
	ifp = inm->in6m_ifma->ifma_ifp;
	/*
	 * Sanity check that netinet6's notion of ifp is the same as net's.
	 */
	VERIFY(inm->in6m_ifp == ifp);

	mli = MLD_IFINFO(ifp);
	VERIFY(mli != NULL);

	/*
	 * If we detect a state transition to or from MCAST_UNDEFINED
	 * for this group, then we are starting or finishing an MLD
	 * life cycle for this group.
	 */
	if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) {
		MLD_PRINTF(("%s: inm transition %d -> %d\n", __func__,
		    inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode));
		if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
			MLD_PRINTF(("%s: initial join\n", __func__));
			error = mld_initial_join(inm, mli, delay);
			goto out;
		} else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
			MLD_PRINTF(("%s: final leave\n", __func__));
			mld_final_leave(inm, mli);
			goto out;
		}
	} else {
		MLD_PRINTF(("%s: filter set change\n", __func__));
	}

	error = mld_handle_state_change(inm, mli);
out:
	return (error);
}
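/*
 * A compact restatement of the dispatch above (illustrative only):
 *
 *	t0 fmode	t1 fmode	action
 *	---------	---------	--------------------------
 *	UNDEFINED	IN/EXCLUDE	mld_initial_join()
 *	IN/EXCLUDE	UNDEFINED	mld_final_leave()
 *	unchanged	unchanged	mld_handle_state_change()
 */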
/*
 * Perform the initial join for an MLD group.
 *
 * When joining a group:
 *  If the group should have its MLD traffic suppressed, do nothing.
 *  MLDv1 starts sending MLDv1 host membership reports.
 *  MLDv2 will schedule an MLDv2 state-change report containing the
 *  initial state of the membership.
 *
 * If the delay argument is non-zero, then we must delay sending the
 * initial state change for delay ticks (in units of PR_FASTHZ).
 */
static int
mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli,
    const int delay)
{
	struct ifnet		*ifp;
	struct ifqueue		*ifq;
	int			 error, retval, syncstates;
	int			 odelay;

	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_NOTHELD(mli);

	MLD_PRINTF(("%s: initial join %s on ifp %p(%s%d)\n",
	    __func__, ip6_sprintf(&inm->in6m_addr),
	    inm->in6m_ifp, inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));

	error = 0;
	syncstates = 1;

	ifp = inm->in6m_ifp;

	MLI_LOCK(mli);
	VERIFY(mli->mli_ifp == ifp);

	/*
	 * Groups joined on loopback or marked as 'not reported',
	 * enter the MLD_SILENT_MEMBER state and
	 * are never reported in any protocol exchanges.
	 * All other groups enter the appropriate state machine
	 * for the version in use on this link.
	 * A link marked as MLIF_SILENT causes MLD to be completely
	 * disabled for the link.
	 */
	if ((ifp->if_flags & IFF_LOOPBACK) ||
	    (mli->mli_flags & MLIF_SILENT) ||
	    !mld_is_addr_reported(&inm->in6m_addr)) {
		MLD_PRINTF(("%s: not kicking state machine for silent group\n",
		    __func__));
		inm->in6m_state = MLD_SILENT_MEMBER;
		inm->in6m_timer = 0;
	} else {
		/*
		 * Deal with overlapping in6_multi lifecycle.
		 * If this group was LEAVING, then make sure
		 * we drop the reference we picked up to keep the
		 * group around for the final INCLUDE {} enqueue.
		 * Since we cannot call in6_multi_detach() here,
		 * defer this task to the timer routine.
		 */
		if (mli->mli_version == MLD_VERSION_2 &&
		    inm->in6m_state == MLD_LEAVING_MEMBER) {
			VERIFY(inm->in6m_nrelecnt != 0);
			SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
			    in6m_nrele);
		}

		inm->in6m_state = MLD_REPORTING_MEMBER;

		switch (mli->mli_version) {
		case MLD_VERSION_1:
			/*
			 * If a delay was provided, only use it if
			 * it is greater than the delay normally
			 * used for an MLDv1 state change report,
			 * and delay sending the initial MLDv1 report
			 * by not transitioning to the IDLE state.
			 */
			odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI * PR_SLOWHZ);
			if (delay) {
				inm->in6m_timer = max(delay, odelay);
				current_state_timers_running6 = 1;
			} else {
				inm->in6m_state = MLD_IDLE_MEMBER;
				error = mld_v1_transmit_report(inm,
				     MLD_LISTENER_REPORT);

				IN6M_LOCK_ASSERT_HELD(inm);
				MLI_LOCK_ASSERT_HELD(mli);

				if (error == 0) {
					inm->in6m_timer = odelay;
					current_state_timers_running6 = 1;
				}
			}
			break;

		case MLD_VERSION_2:
			/*
			 * Defer update of T0 to T1, until the first copy
			 * of the state change has been transmitted.
			 */
			syncstates = 0;

			/*
			 * Immediately enqueue a State-Change Report for
			 * this interface, freeing any previous reports.
			 * Don't kick the timers if there is nothing to do,
			 * or if an error occurred.
			 */
			ifq = &inm->in6m_scq;
			IF_DRAIN(ifq);
			retval = mld_v2_enqueue_group_record(ifq, inm, 1,
			    0, 0, (mli->mli_flags & MLIF_USEALLOW));
			MLD_PRINTF(("%s: enqueue record = %d\n",
			    __func__, retval));
			if (retval <= 0) {
				error = retval * -1;
				break;
			}

			/*
			 * Schedule transmission of pending state-change
			 * report up to RV times for this link. The timer
			 * will fire at the next mld_fasttimo (~200ms),
			 * giving us an opportunity to merge the reports.
			 *
			 * If a delay was provided to this function, only
			 * use this delay if sooner than the existing one.
			 */
			VERIFY(mli->mli_rv > 1);
			inm->in6m_scrv = mli->mli_rv;
			if (delay) {
				if (inm->in6m_sctimer > 1) {
					inm->in6m_sctimer =
					    min(inm->in6m_sctimer, delay);
				} else
					inm->in6m_sctimer = delay;
			} else
				inm->in6m_sctimer = 1;
			state_change_timers_running6 = 1;

			error = 0;
			break;
		}
	}
	MLI_UNLOCK(mli);

	/*
	 * Only update the T0 state if state change is atomic,
	 * i.e. we don't need to wait for a timer to fire before we
	 * can consider the state change to have been communicated.
	 */
	if (syncstates) {
		in6m_commit(inm);
		MLD_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__,
		    ip6_sprintf(&inm->in6m_addr),
		    inm->in6m_ifp->if_name, ifp->if_unit));
	}

	return (error);
}
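/*
 * Worked example for the MLDv1 arm above (the concrete numbers are an
 * assumption for illustration, not values from this file): the random
 * draw yields odelay ticks somewhere in [1, MLD_V1_MAX_RI * PR_SLOWHZ].
 * If the caller passed delay = 100 ticks and the draw gives odelay = 40,
 * the report timer is armed at max(100, 40) = 100 ticks; with no caller
 * delay, the report is transmitted immediately and odelay is kept only
 * as the follow-up timer on success.
 */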
/*
 * Issue an intermediate state change during the life-cycle.
 */
static int
mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli)
{
	struct ifnet		*ifp;
	int			 retval;

	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_NOTHELD(mli);

	MLD_PRINTF(("%s: state change for %s on ifp %p(%s%d)\n",
	    __func__, ip6_sprintf(&inm->in6m_addr),
	    inm->in6m_ifp, inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));

	ifp = inm->in6m_ifp;

	MLI_LOCK(mli);
	VERIFY(mli->mli_ifp == ifp);

	if ((ifp->if_flags & IFF_LOOPBACK) ||
	    (mli->mli_flags & MLIF_SILENT) ||
	    !mld_is_addr_reported(&inm->in6m_addr) ||
	    (mli->mli_version != MLD_VERSION_2)) {
		MLI_UNLOCK(mli);
		if (!mld_is_addr_reported(&inm->in6m_addr)) {
			MLD_PRINTF(("%s: not kicking state machine for silent "
			    "group\n", __func__));
		}
		MLD_PRINTF(("%s: nothing to do\n", __func__));
		in6m_commit(inm);
		MLD_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__,
		    ip6_sprintf(&inm->in6m_addr),
		    inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
		return (0);
	}

	IF_DRAIN(&inm->in6m_scq);

	retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
	    (mli->mli_flags & MLIF_USEALLOW));
	MLD_PRINTF(("%s: enqueue record = %d\n", __func__, retval));
	if (retval <= 0) {
		MLI_UNLOCK(mli);
		return (-retval);
	}
	/*
	 * If record(s) were enqueued, start the state-change
	 * report timer for this group.
	 */
	inm->in6m_scrv = mli->mli_rv;
	inm->in6m_sctimer = 1;
	state_change_timers_running6 = 1;
	MLI_UNLOCK(mli);

	return (0);
}

/*
 * Perform the final leave for a multicast address.
 *
 * When leaving a group:
 *  MLDv1 sends a DONE message, if and only if we are the reporter.
 *  MLDv2 enqueues a state-change report containing a transition
 *  to INCLUDE {} for immediate transmission.
 */
static void
mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli)
{
	int syncstates = 1;

	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_NOTHELD(mli);

	MLD_PRINTF(("%s: final leave %s on ifp %p(%s%d)\n",
	    __func__, ip6_sprintf(&inm->in6m_addr),
	    inm->in6m_ifp, inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_LEAVING_MEMBER:
		/* Already leaving or left; do nothing. */
		MLD_PRINTF(("%s: not kicking state machine for silent group\n",
		    __func__));
		break;
	case MLD_REPORTING_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:
		MLI_LOCK(mli);
		if (mli->mli_version == MLD_VERSION_1) {
			if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
			    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
				panic("%s: MLDv2 state reached, not MLDv2 "
				    "mode\n", __func__);
				/* NOTREACHED */
			}
			mld_v1_transmit_report(inm, MLD_LISTENER_DONE);

			IN6M_LOCK_ASSERT_HELD(inm);
			MLI_LOCK_ASSERT_HELD(mli);

			inm->in6m_state = MLD_NOT_MEMBER;
		} else if (mli->mli_version == MLD_VERSION_2) {
			/*
			 * Stop group timer and all pending reports.
			 * Immediately enqueue a state-change report
			 * TO_IN {} to be sent on the next fast timeout,
			 * giving us an opportunity to merge reports.
			 */
			IF_DRAIN(&inm->in6m_scq);
			inm->in6m_timer = 0;
			inm->in6m_scrv = mli->mli_rv;
			MLD_PRINTF(("%s: Leaving %s/%s%d with %d "
			    "pending retransmissions.\n", __func__,
			    ip6_sprintf(&inm->in6m_addr),
			    inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit,
			    inm->in6m_scrv));
			if (inm->in6m_scrv == 0) {
				inm->in6m_state = MLD_NOT_MEMBER;
				inm->in6m_sctimer = 0;
			} else {
				int retval;
				/*
				 * Stick around in the in6_multihead list;
				 * the final detach will be issued by
				 * mld_v2_process_group_timers() when
				 * the retransmit timer expires.
				 */
				IN6M_ADDREF_LOCKED(inm);
				VERIFY(inm->in6m_debug & IFD_ATTACHED);
				inm->in6m_reqcnt++;
				VERIFY(inm->in6m_reqcnt >= 1);
				inm->in6m_nrelecnt++;
				VERIFY(inm->in6m_nrelecnt != 0);

				retval = mld_v2_enqueue_group_record(
				    &inm->in6m_scq, inm, 1, 0, 0,
				    (mli->mli_flags & MLIF_USEALLOW));
				KASSERT(retval != 0,
				    ("%s: enqueue record = %d\n", __func__,
				     retval));

				inm->in6m_state = MLD_LEAVING_MEMBER;
				inm->in6m_sctimer = 1;
				state_change_timers_running6 = 1;
				syncstates = 0;
			}
		}
		MLI_UNLOCK(mli);
		break;
	case MLD_LAZY_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_AWAKENING_MEMBER:
		/* Our reports are suppressed; do nothing. */
		break;
	}

	if (syncstates) {
		in6m_commit(inm);
		MLD_PRINTF(("%s: T1 -> T0 for %s/%s%d\n", __func__,
		    ip6_sprintf(&inm->in6m_addr),
		    inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
		inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
		MLD_PRINTF(("%s: T1 now MCAST_UNDEFINED for %p/%s%d\n",
		    __func__, &inm->in6m_addr, inm->in6m_ifp->if_name,
		    inm->in6m_ifp->if_unit));
	}
}
/*
 * Enqueue an MLDv2 group record to the given output queue.
 *
 * If is_state_change is zero, a current-state record is appended.
 * If is_state_change is non-zero, a state-change report is appended.
 *
 * If is_group_query is non-zero, an mbuf packet chain is allocated.
 * If is_group_query is zero, and if there is a packet with free space
 * at the tail of the queue, it will be appended to, providing there
 * is enough free space.
 * Otherwise a new mbuf packet chain is allocated.
 *
 * If is_source_query is non-zero, each source is checked to see if
 * it was recorded for a Group-Source query, and will be omitted if
 * it is not both in-mode and recorded.
 *
 * If use_block_allow is non-zero, state change reports for initial join
 * and final leave, on an inclusive mode group with a source list, will be
 * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively.
 *
 * The function will attempt to allocate leading space in the packet
 * for the IPv6+ICMP headers to be prepended without fragmenting the chain.
 *
 * If successful the size of all data appended to the queue is returned,
 * otherwise an error code less than zero is returned, or zero if
 * no record(s) were appended.
 */
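/*
 * Example invocations as they appear elsewhere in this file (shown here
 * for orientation; the argument order is ifq, inm, is_state_change,
 * is_group_query, is_source_query, use_block_allow):
 *
 *	// current-state response to a pending group/group-source query
 *	retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
 *	    (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER), 0);
 *
 *	// state-change report for a join/leave or filter change
 *	retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
 *	    (mli->mli_flags & MLIF_USEALLOW));
 */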
static int
mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm,
    const int is_state_change, const int is_group_query,
    const int is_source_query, const int use_block_allow)
{
	struct mldv2_record	 mr;
	struct mldv2_record	*pmr;
	struct ifnet		*ifp;
	struct ip6_msource	*ims, *nims;
	struct mbuf		*m0, *m, *md;
	int			 error, is_filter_list_change;
	int			 minrec0len, m0srcs, msrcs, nbytes, off;
	int			 record_has_sources;
	int			 now;
	int			 type;
	uint8_t			 mode;

	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_HELD(inm->in6m_mli);

	error = 0;
	ifp = inm->in6m_ifp;
	is_filter_list_change = 0;
	m = NULL;
	m0 = NULL;
	m0srcs = 0;
	msrcs = 0;
	nbytes = 0;
	nims = NULL;
	record_has_sources = 1;
	pmr = NULL;
	type = MLD_DO_NOTHING;
	mode = inm->in6m_st[1].iss_fmode;

	/*
	 * If we did not transition out of ASM mode during t0->t1,
	 * and there are no source nodes to process, we can skip
	 * the generation of source records.
	 */
	if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
	    inm->in6m_nsrc == 0)
		record_has_sources = 0;

	if (is_state_change) {
		/*
		 * Queue a state change record.
		 * If the mode did not change, and there are non-ASM
		 * listeners or source filters present,
		 * we potentially need to issue two records for the group.
		 * If there are ASM listeners, and there was no filter
		 * mode transition of any kind, do nothing.
		 *
		 * If we are transitioning to MCAST_UNDEFINED, we need
		 * not send any sources. A transition to/from this state is
		 * considered inclusive with some special treatment.
		 *
		 * If we are rewriting initial joins/leaves to use
		 * ALLOW/BLOCK, and the group's membership is inclusive,
		 * we need to send sources in all cases.
		 */
		if (mode != inm->in6m_st[0].iss_fmode) {
			if (mode == MCAST_EXCLUDE) {
				MLD_PRINTF(("%s: change to EXCLUDE\n",
				    __func__));
				type = MLD_CHANGE_TO_EXCLUDE_MODE;
			} else {
				MLD_PRINTF(("%s: change to INCLUDE\n",
				    __func__));
				if (use_block_allow) {
					/*
					 * XXX
					 * Here we're interested in state
					 * edges either direction between
					 * MCAST_UNDEFINED and MCAST_INCLUDE.
					 * Perhaps we should just check
					 * the group state, rather than
					 * the filter mode.
					 */
					if (mode == MCAST_UNDEFINED) {
						type = MLD_BLOCK_OLD_SOURCES;
					} else {
						type = MLD_ALLOW_NEW_SOURCES;
					}
				} else {
					type = MLD_CHANGE_TO_INCLUDE_MODE;
					if (mode == MCAST_UNDEFINED)
						record_has_sources = 0;
				}
			}
		} else {
			if (record_has_sources) {
				is_filter_list_change = 1;
			} else {
				type = MLD_DO_NOTHING;
			}
		}
	} else {
		/*
		 * Queue a current state record.
		 */
		if (mode == MCAST_EXCLUDE) {
			type = MLD_MODE_IS_EXCLUDE;
		} else if (mode == MCAST_INCLUDE) {
			type = MLD_MODE_IS_INCLUDE;
			VERIFY(inm->in6m_st[1].iss_asm == 0);
		}
	}

	/*
	 * Generate the filter list changes using a separate function.
	 */
	if (is_filter_list_change)
		return (mld_v2_enqueue_filter_change(ifq, inm));

	if (type == MLD_DO_NOTHING) {
		MLD_PRINTF(("%s: nothing to do for %s/%s%d\n",
		    __func__, ip6_sprintf(&inm->in6m_addr),
		    inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));
		return (0);
	}

	/*
	 * If any sources are present, we must be able to fit at least
	 * one in the trailing space of the tail packet's mbuf,
	 * ideally more.
	 */
	minrec0len = sizeof(struct mldv2_record);
	if (record_has_sources)
		minrec0len += sizeof(struct in6_addr);
	MLD_PRINTF(("%s: queueing %s for %s/%s%d\n", __func__,
	    mld_rec_type_to_str(type),
	    ip6_sprintf(&inm->in6m_addr),
	    inm->in6m_ifp->if_name, inm->in6m_ifp->if_unit));

	/*
	 * Check if we have a packet in the tail of the queue for this
	 * group into which the first group record for this group will fit.
	 * Otherwise allocate a new packet.
	 * Always allocate leading space for IP6+RA+ICMPV6+REPORT.
	 * Note: Group records for G/GSR query responses MUST be sent
	 * in their own packet.
	 */
	m0 = ifq->ifq_tail;
	if (!is_group_query &&
	    m0 != NULL &&
	    (m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
	    (m0->m_pkthdr.len + minrec0len) <
	     (ifp->if_mtu - MLD_MTUSPACE)) {
		m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
			    sizeof(struct mldv2_record)) /
			    sizeof(struct in6_addr);
		m = m0;
		MLD_PRINTF(("%s: use existing packet\n", __func__));
	} else {
		if (IF_QFULL(ifq)) {
			MLD_PRINTF(("%s: outbound queue full\n", __func__));
			return (-ENOMEM);
		}
		m = NULL;
		m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
		    sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
		if (!is_state_change && !is_group_query)
			m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (-ENOMEM);

		MLD_PRINTF(("%s: allocated first packet\n", __func__));
	}

	/*
	 * Append group record.
	 * If we have sources, we don't know how many yet.
	 */
	memset(&mr, 0, sizeof(mr));
	mr.mr_type = type;
	mr.mr_addr = inm->in6m_addr;
	in6_clearscope(&mr.mr_addr);
	if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
		if (m != m0)
			m_freem(m);
		MLD_PRINTF(("%s: m_append() failed.\n", __func__));
		return (-ENOMEM);
	}
	nbytes += sizeof(struct mldv2_record);

	/*
	 * Append as many sources as will fit in the first packet.
	 * If we are appending to a new packet, the chain allocation
	 * may potentially use clusters; use m_getptr() in this case.
	 * If we are appending to an existing packet, we need to obtain
	 * a pointer to the group record after m_append(), in case a new
	 * mbuf was allocated.
	 *
	 * Only append sources which are in-mode at t1. If we are
	 * transitioning to MCAST_UNDEFINED state on the group, and
	 * use_block_allow is zero, do not include source entries.
	 * Otherwise, we need to include this source in the report.
	 *
	 * Only report recorded sources in our filter set when responding
	 * to a group-source query.
	 */
	if (record_has_sources) {
		if (m == m0) {
			md = m_last(m);
			pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
			    md->m_len - nbytes);
		} else {
			md = m_getptr(m, 0, &off);
			pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
			    off);
		}
		msrcs = 0;
		RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs,
		    nims) {
			MLD_PRINTF(("%s: visit node %s\n", __func__,
			    ip6_sprintf(&ims->im6s_addr)));
			now = im6s_get_mode(inm, ims, 1);
			MLD_PRINTF(("%s: node is %d\n", __func__, now));
			if ((now != mode) ||
			    (now == mode &&
			     (!use_block_allow && mode == MCAST_UNDEFINED))) {
				MLD_PRINTF(("%s: skip node\n", __func__));
				continue;
			}
			if (is_source_query && ims->im6s_stp == 0) {
				MLD_PRINTF(("%s: skip unrecorded node\n",
				    __func__));
				continue;
			}
			MLD_PRINTF(("%s: append node\n", __func__));
			if (!m_append(m, sizeof(struct in6_addr),
			    (void *)&ims->im6s_addr)) {
				if (m != m0)
					m_freem(m);
				MLD_PRINTF(("%s: m_append() failed.\n",
				    __func__));
				return (-ENOMEM);
			}
			nbytes += sizeof(struct in6_addr);
			++msrcs;
			if (msrcs == m0srcs)
				break;
		}
		MLD_PRINTF(("%s: msrcs is %d this packet\n", __func__,
		    msrcs));
		pmr->mr_numsrc = htons(msrcs);
		nbytes += (msrcs * sizeof(struct in6_addr));
	}

	if (is_source_query && msrcs == 0) {
		MLD_PRINTF(("%s: no recorded sources to report\n", __func__));
		if (m != m0)
			m_freem(m);
		return (0);
	}

	/*
	 * We are good to go with first packet.
	 */
	if (m != m0) {
		MLD_PRINTF(("%s: enqueueing first packet\n", __func__));
		m->m_pkthdr.vt_nrecs = 1;
		m->m_pkthdr.rcvif = ifp;
		IF_ENQUEUE(ifq, m);
	} else {
		m->m_pkthdr.vt_nrecs++;
	}
	/*
	 * No further work needed if no source list in packet(s).
	 */
	if (!record_has_sources)
		return (nbytes);

	/*
	 * Whilst sources remain to be announced, we need to allocate
	 * a new packet and fill out as many sources as will fit.
	 * Always try for a cluster first.
	 */
	while (nims != NULL) {
		if (IF_QFULL(ifq)) {
			MLD_PRINTF(("%s: outbound queue full\n", __func__));
			return (-ENOMEM);
		}
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (-ENOMEM);
		md = m_getptr(m, 0, &off);
		pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
		MLD_PRINTF(("%s: allocated next packet\n", __func__));

		if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
			if (m != m0)
				m_freem(m);
			MLD_PRINTF(("%s: m_append() failed.\n", __func__));
			return (-ENOMEM);
		}
		m->m_pkthdr.vt_nrecs = 1;
		nbytes += sizeof(struct mldv2_record);

		m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
		    sizeof(struct mldv2_record)) / sizeof(struct in6_addr);

		msrcs = 0;
		RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
			MLD_PRINTF(("%s: visit node %s\n",
			    __func__, ip6_sprintf(&ims->im6s_addr)));
			now = im6s_get_mode(inm, ims, 1);
			if ((now != mode) ||
			    (now == mode &&
			     (!use_block_allow && mode == MCAST_UNDEFINED))) {
				MLD_PRINTF(("%s: skip node\n", __func__));
				continue;
			}
			if (is_source_query && ims->im6s_stp == 0) {
				MLD_PRINTF(("%s: skip unrecorded node\n",
				    __func__));
				continue;
			}
			MLD_PRINTF(("%s: append node\n", __func__));
			if (!m_append(m, sizeof(struct in6_addr),
			    (void *)&ims->im6s_addr)) {
				if (m != m0)
					m_freem(m);
				MLD_PRINTF(("%s: m_append() failed.\n",
				    __func__));
				return (-ENOMEM);
			}
			++msrcs;
			if (msrcs == m0srcs)
				break;
		}
		pmr->mr_numsrc = htons(msrcs);
		nbytes += (msrcs * sizeof(struct in6_addr));

		MLD_PRINTF(("%s: enqueueing next packet\n", __func__));
		m->m_pkthdr.rcvif = ifp;
		IF_ENQUEUE(ifq, m);
	}

	return (nbytes);
}
/*
 * Type used to mark record pass completion.
 * We exploit the fact we can cast to this easily from the
 * current filter modes on each ip_msource node.
 */
typedef enum {
	REC_NONE = 0x00,	/* MCAST_UNDEFINED */
	REC_ALLOW = 0x01,	/* MCAST_INCLUDE */
	REC_BLOCK = 0x02,	/* MCAST_EXCLUDE */
	REC_FULL = REC_ALLOW | REC_BLOCK
} rectype_t;
/*
 * Enqueue an MLDv2 filter list change to the given output queue.
 *
 * Source list filter state is held in an RB-tree. When the filter list
 * for a group is changed without changing its mode, we need to compute
 * the deltas between T0 and T1 for each source in the filter set,
 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
 *
 * As we may potentially queue two record types, and the entire R-B tree
 * needs to be walked at once, we break this out into its own function
 * so we can generate a tightly packed queue of packets.
 *
 * XXX This could be written to only use one tree walk, although that makes
 * serializing into the mbuf chains a bit harder. For now we do two walks
 * which makes things easier on us, and it may or may not be harder on
 * the L2 cache.
 *
 * If successful the size of all data appended to the queue is returned,
 * otherwise an error code less than zero is returned, or zero if
 * no record(s) were appended.
 */
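/*
 * Concrete delta example (illustrative): a group whose filter stays in
 * INCLUDE mode but whose source list changes from {A, B} at t0 to
 * {B, C} at t1 produces two records on the queue:
 *
 *	ALLOW_NEW_SOURCES { C }		source newly in-mode at t1
 *	BLOCK_OLD_SOURCES { A }		source no longer in-mode at t1
 *
 * B is unchanged between t0 and t1 and is skipped. The rectype_t cast
 * noted above works because MCAST_INCLUDE and MCAST_EXCLUDE map to
 * REC_ALLOW and REC_BLOCK respectively; a source whose t1 mode is
 * MCAST_UNDEFINED maps to REC_NONE and is inverted in the walk below
 * via (~mode & REC_FULL), so a source dropped from an EXCLUDE-mode
 * group is reported as ALLOW_NEW.
 */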
static int
mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm)
{
	static const int MINRECLEN =
	    sizeof(struct mldv2_record) + sizeof(struct in6_addr);
	struct ifnet		*ifp;
	struct mldv2_record	 mr;
	struct mldv2_record	*pmr;
	struct ip6_msource	*ims, *nims;
	struct mbuf		*m, *m0, *md;
	int			 m0srcs, nbytes, npbytes, off, rsrcs, schanged;
	int			 nallow, nblock;
	uint8_t			 mode, now, then;
	rectype_t		 crt, drt, nrt;

	IN6M_LOCK_ASSERT_HELD(inm);

	if (inm->in6m_nsrc == 0 ||
	    (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
		return (0);

	ifp = inm->in6m_ifp;			/* interface */
	mode = inm->in6m_st[1].iss_fmode;	/* filter mode at t1 */
	crt = REC_NONE;	/* current group record type */
	drt = REC_NONE;	/* mask of completed group record types */
	nrt = REC_NONE;	/* record type for current node */
	m0srcs = 0;	/* # source which will fit in current mbuf chain */
	npbytes = 0;	/* # of bytes appended this packet */
	nbytes = 0;	/* # of bytes appended to group's state-change queue */
	rsrcs = 0;	/* # sources encoded in current record */
	schanged = 0;	/* # nodes encoded in overall filter change */
	nallow = 0;	/* # of source entries in ALLOW_NEW */
	nblock = 0;	/* # of source entries in BLOCK_OLD */
	nims = NULL;	/* next tree node pointer */

	/*
	 * For each possible filter record mode.
	 * The first kind of source we encounter tells us which
	 * is the first kind of record we start appending.
	 * If a node transitioned to UNDEFINED at t1, its mode is treated
	 * as the inverse of the group's filter mode.
	 */
	while (drt != REC_FULL) {
		do {
			m0 = ifq->ifq_tail;
			if (m0 != NULL &&
			    (m0->m_pkthdr.vt_nrecs + 1 <=
			     MLD_V2_REPORT_MAXRECS) &&
			    (m0->m_pkthdr.len + MINRECLEN) <
			     (ifp->if_mtu - MLD_MTUSPACE)) {
				m = m0;
				m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
					    sizeof(struct mldv2_record)) /
					    sizeof(struct in6_addr);
				MLD_PRINTF(("%s: use previous packet\n",
				    __func__));
			} else {
				m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
				if (m == NULL)
					m = m_gethdr(M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					MLD_PRINTF(("%s: m_get*() failed\n",
					    __func__));
					return (-ENOMEM);
				}
				m->m_pkthdr.vt_nrecs = 0;
				m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
				    sizeof(struct mldv2_record)) /
				    sizeof(struct in6_addr);
				npbytes = 0;
				MLD_PRINTF(("%s: allocated new packet\n",
				    __func__));
			}
			/*
			 * Append the MLD group record header to the
			 * current packet's data area.
			 * Recalculate pointer to free space for next
			 * group record, in case m_append() allocated
			 * a new mbuf or cluster.
			 */
			memset(&mr, 0, sizeof(mr));
			mr.mr_addr = inm->in6m_addr;
			in6_clearscope(&mr.mr_addr);
			if (!m_append(m, sizeof(mr), (void *)&mr)) {
				if (m != m0)
					m_freem(m);
				MLD_PRINTF(("%s: m_append() failed\n",
				    __func__));
				return (-ENOMEM);
			}
			npbytes += sizeof(struct mldv2_record);
			if (m != m0) {
				/* new packet; offset in chain */
				md = m_getptr(m, npbytes -
				    sizeof(struct mldv2_record), &off);
				pmr = (struct mldv2_record *)(mtod(md,
				    uint8_t *) + off);
			} else {
				/* current packet; offset from last append */
				md = m_last(m);
				pmr = (struct mldv2_record *)(mtod(md,
				    uint8_t *) + md->m_len -
				    sizeof(struct mldv2_record));
			}
			/*
			 * Begin walking the tree for this record type
			 * pass, or continue from where we left off
			 * previously if we had to allocate a new packet.
			 * Only report deltas in-mode at t1.
			 * We need not report included sources as allowed
			 * if we are in inclusive mode on the group,
			 * however the converse is not true.
			 */
			rsrcs = 0;
			if (nims == NULL) {
				nims = RB_MIN(ip6_msource_tree,
				    &inm->in6m_srcs);
			}
			RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
				MLD_PRINTF(("%s: visit node %s\n", __func__,
				    ip6_sprintf(&ims->im6s_addr)));
				now = im6s_get_mode(inm, ims, 1);
				then = im6s_get_mode(inm, ims, 0);
				MLD_PRINTF(("%s: mode: t0 %d, t1 %d\n",
				    __func__, then, now));
				if (now == then) {
					MLD_PRINTF(("%s: skip unchanged\n",
					    __func__));
					continue;
				}
				if (mode == MCAST_EXCLUDE &&
				    now == MCAST_INCLUDE) {
					MLD_PRINTF(("%s: skip IN src on EX "
					    "group\n", __func__));
					continue;
				}
				nrt = (rectype_t)now;
				if (nrt == REC_NONE)
					nrt = (rectype_t)(~mode & REC_FULL);
				if (schanged++ == 0) {
					crt = nrt;
				} else if (crt != nrt)
					continue;
				if (!m_append(m, sizeof(struct in6_addr),
				    (void *)&ims->im6s_addr)) {
					if (m != m0)
						m_freem(m);
					MLD_PRINTF(("%s: m_append() failed\n",
					    __func__));
					return (-ENOMEM);
				}
				nallow += !!(crt == REC_ALLOW);
				nblock += !!(crt == REC_BLOCK);
				if (++rsrcs == m0srcs)
					break;
			}
			/*
			 * If we did not append any tree nodes on this
			 * pass, back out of allocations.
			 */
			if (rsrcs == 0) {
				npbytes -= sizeof(struct mldv2_record);
				if (m != m0) {
					MLD_PRINTF(("%s: m_free(m)\n",
					    __func__));
					m_freem(m);
				} else {
					MLD_PRINTF(("%s: m_adj(m, -mr)\n",
					    __func__));
					m_adj(m, -((int)sizeof(
					    struct mldv2_record)));
				}
				continue;
			}
			npbytes += (rsrcs * sizeof(struct in6_addr));
			if (crt == REC_ALLOW)
				pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
			else if (crt == REC_BLOCK)
				pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
			pmr->mr_numsrc = htons(rsrcs);
			/*
			 * Count the new group record, and enqueue this
			 * packet if it wasn't already queued.
			 */
			m->m_pkthdr.vt_nrecs++;
			m->m_pkthdr.rcvif = ifp;
			if (m != m0)
				IF_ENQUEUE(ifq, m);
			nbytes += npbytes;
		} while (nims != NULL);
		drt |= crt;
		crt = (~crt & REC_FULL);
	}

	MLD_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__,
	    nallow, nblock));

	return (nbytes);
}
static int
mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
{
	struct mbuf	*m;		/* pending state-change */
	struct mbuf	*m0;		/* copy of pending state-change */
	struct mbuf	*mt;		/* last state-change in packet */
	struct ifqueue	*gq;
	int		 docopy, domerge;
	u_int		 recslen;

	IN6M_LOCK_ASSERT_HELD(inm);

	docopy = 0;
	domerge = 0;
	recslen = 0;

	/*
	 * If there are further pending retransmissions, make a writable
	 * copy of each queued state-change message before merging.
	 */
	if (inm->in6m_scrv > 0)
		docopy = 1;

	gq = &inm->in6m_scq;
	if (gq->ifq_head == NULL) {
		MLD_PRINTF(("%s: WARNING: queue for inm %p is empty\n",
		    __func__, inm));
	}

	/*
	 * Use IF_REMQUEUE() instead of IF_DEQUEUE() below, since the
	 * packet might not always be at the head of the ifqueue.
	 */
	m = gq->ifq_head;
	while (m != NULL) {
		/*
		 * Only merge the report into the current packet if
		 * there is sufficient space to do so; an MLDv2 report
		 * packet may only contain 65,535 group records.
		 * Always use a simple mbuf chain concatenation to do this,
		 * as large state changes for single groups may have
		 * allocated clusters.
		 */
		domerge = 0;
		mt = ifscq->ifq_tail;
		if (mt != NULL) {
			recslen = m_length(m);

			if ((mt->m_pkthdr.vt_nrecs +
			    m->m_pkthdr.vt_nrecs <=
			    MLD_V2_REPORT_MAXRECS) &&
			    (mt->m_pkthdr.len + recslen <=
			    (inm->in6m_ifp->if_mtu - MLD_MTUSPACE)))
				domerge = 1;
		}

		if (!domerge && IF_QFULL(gq)) {
			MLD_PRINTF(("%s: outbound queue full, skipping whole "
			    "packet %p\n", __func__, m));
			mt = m->m_nextpkt;
			if (!docopy)
				m_freem(m);
			m = mt;
			continue;
		}

		if (!docopy) {
			MLD_PRINTF(("%s: dequeueing %p\n", __func__, m));
			m0 = m;
			m = m->m_nextpkt;
			IF_REMQUEUE(gq, m0);
			m0->m_nextpkt = NULL;
		} else {
			MLD_PRINTF(("%s: copying %p\n", __func__, m));
			m0 = m_dup(m, M_NOWAIT);
			if (m0 == NULL)
				return (ENOMEM);
			m0->m_nextpkt = NULL;
			m = m->m_nextpkt;
		}

		if (!domerge) {
			MLD_PRINTF(("%s: queueing %p to ifscq %p)\n",
			    __func__, m0, ifscq));
			m0->m_pkthdr.rcvif = inm->in6m_ifp;
			IF_ENQUEUE(ifscq, m0);
		} else {
			struct mbuf *mtl;	/* last mbuf of packet mt */

			MLD_PRINTF(("%s: merging %p with ifscq tail %p)\n",
			    __func__, m0, mt));

			mtl = m_last(mt);
			m0->m_flags &= ~M_PKTHDR;
			mt->m_pkthdr.len += recslen;
			mt->m_pkthdr.vt_nrecs +=
			    m0->m_pkthdr.vt_nrecs;

			mtl->m_next = m0;
		}
	}

	return (0);
}
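/*
 * The merge test above admits a pending report into the tail packet only
 * when both limits hold: the combined record count stays within
 * MLD_V2_REPORT_MAXRECS (an MLDv2 report carries a 16-bit record count,
 * hence at most 65,535 records) and the combined length stays within
 * if_mtu - MLD_MTUSPACE, leaving room for the IPv6, hop-by-hop and
 * ICMPv6 headers that mld_v2_encap_report() will prepend later.
 */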
/*
 * Respond to a pending MLDv2 General Query.
 */
static void
mld_v2_dispatch_general_query(struct mld_ifinfo *mli)
{
	struct ifnet		*ifp;
	struct in6_multi	*inm;
	struct in6_multistep	step;
	int			 retval;

	MLI_LOCK_ASSERT_HELD(mli);

	VERIFY(mli->mli_version == MLD_VERSION_2);

	ifp = mli->mli_ifp;
	MLI_UNLOCK(mli);

	in6_multihead_lock_shared();
	IN6_FIRST_MULTI(step, inm);
	while (inm != NULL) {
		IN6M_LOCK(inm);
		if (inm->in6m_ifp != ifp)
			goto next;

		switch (inm->in6m_state) {
		case MLD_NOT_MEMBER:
		case MLD_SILENT_MEMBER:
			break;
		case MLD_REPORTING_MEMBER:
		case MLD_IDLE_MEMBER:
		case MLD_LAZY_MEMBER:
		case MLD_SLEEPING_MEMBER:
		case MLD_AWAKENING_MEMBER:
			inm->in6m_state = MLD_REPORTING_MEMBER;
			MLI_LOCK(mli);
			retval = mld_v2_enqueue_group_record(&mli->mli_gq,
			    inm, 0, 0, 0, 0);
			MLI_UNLOCK(mli);
			MLD_PRINTF(("%s: enqueue record = %d\n",
			    __func__, retval));
			break;
		case MLD_G_QUERY_PENDING_MEMBER:
		case MLD_SG_QUERY_PENDING_MEMBER:
		case MLD_LEAVING_MEMBER:
			break;
		}
next:
		IN6M_UNLOCK(inm);
		IN6_NEXT_MULTI(step, inm);
	}
	in6_multihead_lock_done();

	MLI_LOCK(mli);
	mld_dispatch_queue(mli, &mli->mli_gq, MLD_MAX_RESPONSE_BURST);
	MLI_LOCK_ASSERT_HELD(mli);

	/*
	 * Slew transmission of bursts over 500ms intervals.
	 */
	if (mli->mli_gq.ifq_head != NULL) {
		mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
		    MLD_RESPONSE_BURST_INTERVAL);
		interface_timers_running6 = 1;
	}
}
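/*
 * Pacing sketch (hedged): mld_dispatch_queue() drains at most
 * MLD_MAX_RESPONSE_BURST packets per call; if packets remain queued,
 * the random re-arm above spreads the next burst across the
 * MLD_RESPONSE_BURST_INTERVAL window, so a large general-query response
 * goes out as a series of paced bursts rather than a single spike.
 */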
/*
 * Transmit the next pending message in the output queue.
 *
 * Must not be called with in6m_lock or mli_lock held.
 */
static void
mld_dispatch_packet(struct mbuf *m)
{
	struct ip6_moptions	*im6o;
	struct ifnet		*ifp;
	struct ifnet		*oifp = NULL;
	struct mbuf		*m0;
	struct mbuf		*md;
	struct ip6_hdr		*ip6;
	struct mld_hdr		*mld;
	int			 error;
	int			 off;
	int			 type;

	MLD_PRINTF(("%s: transmit %p\n", __func__, m));

	/*
	 * Check if the ifnet is still attached.
	 */
	ifp = m->m_pkthdr.rcvif;
	if (ifp == NULL || !ifnet_is_attached(ifp, 0)) {
		MLD_PRINTF(("%s: dropped %p as ifindex %u went away.\n",
		    __func__, m, (u_int)if_index));
		m_freem(m);
		ip6stat.ip6s_noroute++;
		return;
	}

	im6o = ip6_allocmoptions(M_WAITOK);
	if (im6o == NULL) {
		m_freem(m);
		return;
	}

	im6o->im6o_multicast_hlim  = 1;
#if MROUTING
	im6o->im6o_multicast_loop = (ip6_mrouter != NULL);
#else
	im6o->im6o_multicast_loop = 0;
#endif
	im6o->im6o_multicast_ifp = ifp;

	if (m->m_flags & M_MLDV1) {
		m0 = m;
	} else {
		m0 = mld_v2_encap_report(ifp, m);
		if (m0 == NULL) {
			MLD_PRINTF(("%s: dropped %p\n", __func__, m));
			/*
			 * mld_v2_encap_report() has already freed our mbuf.
			 */
			IM6O_REMREF(im6o);
			ip6stat.ip6s_odropped++;
			return;
		}
	}

	m->m_flags &= ~(M_PROTOFLAGS);
	m0->m_pkthdr.rcvif = lo_ifp;

	ip6 = mtod(m0, struct ip6_hdr *);
#if 0
	(void) in6_setscope(&ip6->ip6_dst, ifp, NULL);	/* XXX LOR */
#else
	/*
	 * XXX XXX Break some KPI rules to prevent an LOR which would
	 * occur if we called in6_setscope() at transmission.
	 * See comments at top of file.
	 */
	MLD_EMBEDSCOPE(&ip6->ip6_dst, ifp->if_index);
#endif

	/*
	 * Retrieve the ICMPv6 type before handoff to ip6_output(),
	 * so we can bump the stats.
	 */
	md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
	mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off);
	type = mld->mld_type;

	error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, im6o,
	    &oifp, NULL);

	IM6O_REMREF(im6o);

	if (error) {
		MLD_PRINTF(("%s: ip6_output(%p) = %d\n", __func__, m0, error));
		if (oifp != NULL)
			ifnet_release(oifp);
		return;
	}

	icmp6stat.icp6s_outhist[type]++;
	if (oifp != NULL) {
		icmp6_ifstat_inc(oifp, ifs6_out_msg);
		switch (type) {
		case MLD_LISTENER_REPORT:
		case MLDV2_LISTENER_REPORT:
			icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
			break;
		case MLD_LISTENER_DONE:
			icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
			break;
		}
		ifnet_release(oifp);
	}
}
/*
 * Encapsulate an MLDv2 report.
 *
 * KAME IPv6 requires that hop-by-hop options be passed separately,
 * and that the IPv6 header be prepended in a separate mbuf.
 *
 * Returns a pointer to the new mbuf chain head, or NULL if the
 * allocation failed.
 */
static struct mbuf *
mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
{
	struct mbuf		*mh;
	struct mldv2_report	*mld;
	struct ip6_hdr		*ip6;
	struct in6_ifaddr	*ia;
	int			 mldreclen;

	VERIFY(m->m_flags & M_PKTHDR);

	/*
	 * RFC3590: OK to send as :: or tentative during DAD.
	 */
	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
	if (ia == NULL)
		MLD_PRINTF(("%s: warning: ia is NULL\n", __func__));

	MGETHDR(mh, M_DONTWAIT, MT_HEADER);
	if (mh == NULL) {
		if (ia != NULL)
			IFA_REMREF(&ia->ia_ifa);
		m_freem(m);
		return (NULL);
	}
	MH_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));

	mldreclen = m_length(m);
	MLD_PRINTF(("%s: mldreclen is %d\n", __func__, mldreclen));

	mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report);
	mh->m_pkthdr.len = sizeof(struct ip6_hdr) +
	    sizeof(struct mldv2_report) + mldreclen;

	ip6 = mtod(mh, struct ip6_hdr *);
	ip6->ip6_flow = 0;
	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
	ip6->ip6_vfc |= IPV6_VERSION;
	ip6->ip6_nxt = IPPROTO_ICMPV6;
	if (ia != NULL)
		IFA_LOCK(&ia->ia_ifa);
	ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
	if (ia != NULL) {
		IFA_UNLOCK(&ia->ia_ifa);
		IFA_REMREF(&ia->ia_ifa);
		ia = NULL;
	}
	ip6->ip6_dst = in6addr_linklocal_allv2routers;
	/* scope ID will be set in netisr */

	mld = (struct mldv2_report *)(ip6 + 1);
	mld->mld_type = MLDV2_LISTENER_REPORT;
	mld->mld_code = 0;
	mld->mld_cksum = 0;
	mld->mld_v2_reserved = 0;
	mld->mld_v2_numrecs = htons(m->m_pkthdr.vt_nrecs);
	m->m_pkthdr.vt_nrecs = 0;
	m->m_flags &= ~M_PKTHDR;

	mh->m_next = m;
	mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
	    sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
	return (mh);
}
static const char *
mld_rec_type_to_str(const int type)
{
	switch (type) {
		case MLD_CHANGE_TO_EXCLUDE_MODE:
			return "TO_EX";
		case MLD_CHANGE_TO_INCLUDE_MODE:
			return "TO_IN";
		case MLD_MODE_IS_EXCLUDE:
			return "MODE_EX";
		case MLD_MODE_IS_INCLUDE:
			return "MODE_IN";
		case MLD_ALLOW_NEW_SOURCES:
			return "ALLOW_NEW";
		case MLD_BLOCK_OLD_SOURCES:
			return "BLOCK_OLD";
		default:
			break;
	}
	return "unknown";
}
void
mld_init(void)
{
	MLD_PRINTF(("%s: initializing\n", __func__));

	/* Setup lock group and attribute for mld_mtx */
	mld_mtx_grp_attr = lck_grp_attr_alloc_init();
	mld_mtx_grp = lck_grp_alloc_init("mld_mtx", mld_mtx_grp_attr);
	mld_mtx_attr = lck_attr_alloc_init();
	lck_mtx_init(&mld_mtx, mld_mtx_grp, mld_mtx_attr);

	ip6_initpktopts(&mld_po);
	mld_po.ip6po_hlim = 1;
	mld_po.ip6po_hbh = &mld_ra.hbh;
	mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
	mld_po.ip6po_flags = IP6PO_DONTFRAG;
	LIST_INIT(&mli_head);

	mli_size = sizeof (struct mld_ifinfo);
	mli_zone = zinit(mli_size, MLI_ZONE_MAX * mli_size,
	    0, MLI_ZONE_NAME);
	if (mli_zone == NULL) {
		panic("%s: failed allocating %s", __func__, MLI_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(mli_zone, Z_EXPAND, TRUE);
	zone_change(mli_zone, Z_CALLERACCT, FALSE);
}