/*
 * Copyright (c) 2010-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 2009 Bruce Simpson.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * IPv6 multicast socket, group, and socket option processing module.
 * Normative references: RFC 2292, RFC 3492, RFC 3542, RFC 3678, RFC 3810.
 */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/mcache.h>

#include <kern/zalloc.h>

#include <pexpert/pexpert.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/net_api_stats.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/ip6_var.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <netinet6/nd6.h>
#include <netinet6/mld6_var.h>
#include <netinet6/scope6_var.h>

static void	im6f_commit(struct in6_mfilter *);
static int	im6f_get_source(struct in6_mfilter *imf,
    const struct sockaddr_in6 *psin,
    struct in6_msource **);
static struct in6_msource *
im6f_graft(struct in6_mfilter *, const uint8_t,
    const struct sockaddr_in6 *);
static int	im6f_prune(struct in6_mfilter *, const struct sockaddr_in6 *);
static void	im6f_rollback(struct in6_mfilter *);
static void	im6f_reap(struct in6_mfilter *);
static int	im6o_grow(struct ip6_moptions *);
static size_t	im6o_match_group(const struct ip6_moptions *,
    const struct ifnet *, const struct sockaddr_in6 *);
static struct in6_msource *
im6o_match_source(const struct ip6_moptions *,
    const size_t, const struct sockaddr_in6 *);
static void	im6s_merge(struct ip6_msource *ims,
    const struct in6_msource *lims, const int rollback);
static int	in6_mc_get(struct ifnet *, const struct in6_addr *,
    struct in6_multi **);
static int	in6m_get_source(struct in6_multi *inm,
    const struct in6_addr *addr, const int noalloc,
    struct ip6_msource **pims);
static int	in6m_is_ifp_detached(const struct in6_multi *);
static int	in6m_merge(struct in6_multi *, /*const*/ struct in6_mfilter *);
static void	in6m_reap(struct in6_multi *);
static struct ip6_moptions *
in6p_findmoptions(struct inpcb *);
static int	in6p_get_source_filters(struct inpcb *, struct sockopt *);
static int	in6p_lookup_v4addr(struct ipv6_mreq *, struct ip_mreq *);
static int	in6p_join_group(struct inpcb *, struct sockopt *);
static int	in6p_leave_group(struct inpcb *, struct sockopt *);
static struct ifnet *
in6p_lookup_mcast_ifp(const struct inpcb *,
    const struct sockaddr_in6 *);
static int	in6p_block_unblock_source(struct inpcb *, struct sockopt *);
static int	in6p_set_multicast_if(struct inpcb *, struct sockopt *);
static int	in6p_set_source_filters(struct inpcb *, struct sockopt *);
static int	sysctl_ip6_mcast_filters SYSCTL_HANDLER_ARGS;
static __inline__ int ip6_msource_cmp(const struct ip6_msource *,
    const struct ip6_msource *);

SYSCTL_DECL(_net_inet6_ip6);	/* XXX Not in any common header. */

SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, mcast, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IPv6 multicast");

static unsigned long in6_mcast_maxgrpsrc = IPV6_MAX_GROUP_SRC_FILTER;
SYSCTL_LONG(_net_inet6_ip6_mcast, OID_AUTO, maxgrpsrc,
    CTLFLAG_RW | CTLFLAG_LOCKED, &in6_mcast_maxgrpsrc,
    "Max source filters per group");

static unsigned long in6_mcast_maxsocksrc = IPV6_MAX_SOCK_SRC_FILTER;
SYSCTL_LONG(_net_inet6_ip6_mcast, OID_AUTO, maxsocksrc,
    CTLFLAG_RW | CTLFLAG_LOCKED, &in6_mcast_maxsocksrc,
    "Max source filters per socket");

int in6_mcast_loop = IPV6_DEFAULT_MULTICAST_LOOP;
SYSCTL_INT(_net_inet6_ip6_mcast, OID_AUTO, loop, CTLFLAG_RW | CTLFLAG_LOCKED,
    &in6_mcast_loop, 0, "Loopback multicast datagrams by default");

SYSCTL_NODE(_net_inet6_ip6_mcast, OID_AUTO, filters,
    CTLFLAG_RD | CTLFLAG_LOCKED, sysctl_ip6_mcast_filters,
    "Per-interface stack-wide source filters");

RB_GENERATE_PREV(ip6_msource_tree, ip6_msource, im6s_link, ip6_msource_cmp);

#define IN6M_TRACE_HIST_SIZE	32	/* size of trace history */

/* For gdb */
__private_extern__ unsigned int in6m_trace_hist_size = IN6M_TRACE_HIST_SIZE;

struct in6_multi_dbg {
	struct in6_multi	in6m;			/* in6_multi */
	u_int16_t		in6m_refhold_cnt;	/* # of ref */
	u_int16_t		in6m_refrele_cnt;	/* # of rele */
	/*
	 * Circular lists of in6m_addref and in6m_remref callers.
	 */
	ctrace_t		in6m_refhold[IN6M_TRACE_HIST_SIZE];
	ctrace_t		in6m_refrele[IN6M_TRACE_HIST_SIZE];
	/*
	 * Trash list linkage
	 */
	TAILQ_ENTRY(in6_multi_dbg) in6m_trash_link;
};

/* List of trash in6_multi entries protected by in6m_trash_lock */
static TAILQ_HEAD(, in6_multi_dbg) in6m_trash_head;
static decl_lck_mtx_data(, in6m_trash_lock);

#if DEBUG
static unsigned int in6m_debug = 1;	/* debugging (enabled) */
#else
static unsigned int in6m_debug;		/* debugging (disabled) */
#endif /* !DEBUG */
static struct zone *in6m_zone;		/* zone for in6_multi */
#define IN6M_ZONE_NAME	"in6_multi"	/* zone name */

static ZONE_DECLARE(imm_zone, "in6_multi_mship",
    sizeof(struct in6_multi_mship), ZC_ZFREE_CLEARMEM);

static ZONE_DECLARE(ip6ms_zone, "ip6_msource",
    sizeof(struct ip6_msource), ZC_ZFREE_CLEARMEM);

static ZONE_DECLARE(in6ms_zone, "in6_msource",
    sizeof(struct in6_msource), ZC_ZFREE_CLEARMEM);

/* Lock group and attribute for in6_multihead_lock lock */
static lck_attr_t *in6_multihead_lock_attr;
static lck_grp_t *in6_multihead_lock_grp;
static lck_grp_attr_t *in6_multihead_lock_grp_attr;

static decl_lck_rw_data(, in6_multihead_lock);
struct in6_multihead in6_multihead;

static struct in6_multi *in6_multi_alloc(zalloc_flags_t);
static void in6_multi_free(struct in6_multi *);
static void in6_multi_attach(struct in6_multi *);
static struct in6_multi_mship *in6_multi_mship_alloc(zalloc_flags_t);
static void in6_multi_mship_free(struct in6_multi_mship *);
static void in6m_trace(struct in6_multi *, int);

static struct ip6_msource *ip6ms_alloc(zalloc_flags_t);
static void ip6ms_free(struct ip6_msource *);
static struct in6_msource *in6ms_alloc(zalloc_flags_t);
static void in6ms_free(struct in6_msource *);

/*
 * IPv6 source tree comparison function.
 *
 * An ordered predicate is necessary; bcmp() is not documented to return
 * an indication of order, memcmp() is, and is an ISO C99 requirement.
 */
static __inline int
ip6_msource_cmp(const struct ip6_msource *a, const struct ip6_msource *b)
{
	return memcmp(&a->im6s_addr, &b->im6s_addr, sizeof(struct in6_addr));
}

/*
 * Inline function which wraps assertions for a valid ifp.
 */
static __inline__ int
in6m_is_ifp_detached(const struct in6_multi *inm)
{
	VERIFY(inm->in6m_ifma != NULL);
	VERIFY(inm->in6m_ifp == inm->in6m_ifma->ifma_ifp);

	return !ifnet_is_attached(inm->in6m_ifp, 0);
}

/*
 * Initialize an in6_mfilter structure to a known state at t0, t1
 * with an empty source filter list.
 */
static __inline__ void
im6f_init(struct in6_mfilter *imf, const uint8_t st0, const uint8_t st1)
{
	memset(imf, 0, sizeof(struct in6_mfilter));
	RB_INIT(&imf->im6f_sources);
	imf->im6f_st[0] = st0;
	imf->im6f_st[1] = st1;
}

/*
 * Resize the ip6_moptions vector to the next power-of-two minus 1.
 */
static int
im6o_grow(struct ip6_moptions *imo)
{
	struct in6_multi	**nmships;
	struct in6_multi	**omships;
	struct in6_mfilter	*nmfilters;
	struct in6_mfilter	*omfilters;
	size_t			idx;
	size_t			oldmax;
	size_t			newmax;

	IM6O_LOCK_ASSERT_HELD(imo);

	nmships = NULL;
	nmfilters = NULL;
	omships = imo->im6o_membership;
	omfilters = imo->im6o_mfilters;
	oldmax = imo->im6o_max_memberships;
	newmax = ((oldmax + 1) * 2) - 1;

	if (newmax > IPV6_MAX_MEMBERSHIPS) {
		return ETOOMANYREFS;
	}

	if ((nmships = (struct in6_multi **)_REALLOC(omships,
	    sizeof(struct in6_multi *) * newmax, M_IP6MOPTS,
	    M_WAITOK | M_ZERO)) == NULL) {
		return ENOMEM;
	}

	imo->im6o_membership = nmships;

	if ((nmfilters = (struct in6_mfilter *)_REALLOC(omfilters,
	    sizeof(struct in6_mfilter) * newmax, M_IN6MFILTER,
	    M_WAITOK | M_ZERO)) == NULL) {
		return ENOMEM;
	}

	imo->im6o_mfilters = nmfilters;

	/* Initialize newly allocated source filter heads. */
	for (idx = oldmax; idx < newmax; idx++) {
		im6f_init(&nmfilters[idx], MCAST_UNDEFINED, MCAST_EXCLUDE);
	}

	imo->im6o_max_memberships = (u_short)newmax;

	return 0;
}
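
/*
 * Editor's illustrative note (a sketch, not part of the original source):
 * starting from IPV6_MIN_MEMBERSHIPS, im6o_grow() walks the membership
 * vector through sizes of the form 2^n - 1, for example:
 *
 *	oldmax = 15  ->  newmax = ((15 + 1) * 2) - 1 = 31
 *	oldmax = 31  ->  newmax = ((31 + 1) * 2) - 1 = 63
 *
 * Growth stops with ETOOMANYREFS once newmax would exceed
 * IPV6_MAX_MEMBERSHIPS.
 */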

/*
 * Find an IPv6 multicast group entry for this ip6_moptions instance
 * which matches the specified group, and optionally an interface.
 * Return its index into the array, or -1 if not found.
 */
static size_t
im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp,
    const struct sockaddr_in6 *group)
{
	const struct sockaddr_in6 *gsin6;
	struct in6_multi *pinm;
	int		  idx;
	int		  nmships;

	IM6O_LOCK_ASSERT_HELD(__DECONST(struct ip6_moptions *, imo));

	gsin6 = group;

	/* The im6o_membership array may be lazy allocated. */
	if (imo->im6o_membership == NULL || imo->im6o_num_memberships == 0) {
		return -1;
	}

	nmships = imo->im6o_num_memberships;
	for (idx = 0; idx < nmships; idx++) {
		pinm = imo->im6o_membership[idx];
		if (pinm == NULL) {
			continue;
		}
		IN6M_LOCK(pinm);
		if ((ifp == NULL || (pinm->in6m_ifp == ifp)) &&
		    IN6_ARE_ADDR_EQUAL(&pinm->in6m_addr,
		    &gsin6->sin6_addr)) {
			IN6M_UNLOCK(pinm);
			break;
		}
		IN6M_UNLOCK(pinm);
	}
	if (idx >= nmships) {
		idx = -1;
	}

	return idx;
}

/*
 * Find an IPv6 multicast source entry for this imo which matches
 * the given group index for this socket, and source address.
 *
 * XXX TODO: The scope ID, if present in src, is stripped before
 * any comparison. We SHOULD enforce scope/zone checks where the source
 * filter entry has a link scope.
 *
 * NOTE: This does not check if the entry is in-mode, merely if
 * it exists, which may not be the desired behaviour.
 */
static struct in6_msource *
im6o_match_source(const struct ip6_moptions *imo, const size_t gidx,
    const struct sockaddr_in6 *src)
{
	struct ip6_msource	 find;
	struct in6_mfilter	*imf;
	struct ip6_msource	*ims;
	const struct sockaddr_in6 *psa;

	IM6O_LOCK_ASSERT_HELD(__DECONST(struct ip6_moptions *, imo));

	VERIFY(src->sin6_family == AF_INET6);
	VERIFY(gidx != (size_t)-1 && gidx < imo->im6o_num_memberships);

	/* The im6o_mfilters array may be lazy allocated. */
	if (imo->im6o_mfilters == NULL) {
		return NULL;
	}
	imf = &imo->im6o_mfilters[gidx];

	psa = src;
	find.im6s_addr = psa->sin6_addr;
	in6_clearscope(&find.im6s_addr);	/* XXX */
	ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find);

	return (struct in6_msource *)ims;
}

/*
 * Perform filtering for multicast datagrams on a socket by group and source.
 *
 * Returns 0 if a datagram should be allowed through, or various error codes
 * if the socket was not a member of the group, or the source was muted, etc.
 */
int
im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp,
    const struct sockaddr_in6 *group, const struct sockaddr_in6 *src)
{
	size_t gidx;
	struct in6_msource *ims;
	int mode;

	IM6O_LOCK_ASSERT_HELD(__DECONST(struct ip6_moptions *, imo));
	VERIFY(ifp != NULL);

	gidx = im6o_match_group(imo, ifp, group);
	if (gidx == (size_t)-1) {
		return MCAST_NOTGMEMBER;
	}

	/*
	 * Check if the source was included in an (S,G) join.
	 * Allow reception on exclusive memberships by default,
	 * reject reception on inclusive memberships by default.
	 * Exclude source only if an in-mode exclude filter exists.
	 * Include source only if an in-mode include filter exists.
	 * NOTE: We are comparing group state here at MLD t1 (now)
	 * with socket-layer t0 (since last downcall).
	 */
	mode = imo->im6o_mfilters[gidx].im6f_st[1];
	ims = im6o_match_source(imo, gidx, src);

	if ((ims == NULL && mode == MCAST_INCLUDE) ||
	    (ims != NULL && ims->im6sl_st[0] != mode)) {
		return MCAST_NOTSMEMBER;
	}

	return MCAST_PASS;
}
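
/*
 * Editor's illustrative restatement (not part of the original source) of
 * the source check in im6o_mc_filter() above, with mode taken from
 * im6f_st[1] and ims from im6o_match_source():
 *
 *	ims == NULL && mode == MCAST_INCLUDE	->  MCAST_NOTSMEMBER
 *	ims == NULL && mode == MCAST_EXCLUDE	->  MCAST_PASS
 *	ims != NULL && im6sl_st[0] != mode	->  MCAST_NOTSMEMBER
 *	ims != NULL && im6sl_st[0] == mode	->  MCAST_PASS
 */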

/*
 * Find and return a reference to an in6_multi record for (ifp, group),
 * and bump its reference count.
 * If one does not exist, try to allocate it, and update link-layer multicast
 * filters on ifp to listen for group.
 * Assumes the IN6_MULTI lock is held across the call.
 * Return 0 if successful, otherwise return an appropriate error code.
 */
static int
in6_mc_get(struct ifnet *ifp, const struct in6_addr *group,
    struct in6_multi **pinm)
{
	struct sockaddr_in6	 gsin6;
	struct ifmultiaddr	*ifma;
	struct in6_multi	*inm;
	int			 error;

	*pinm = NULL;

	in6_multihead_lock_shared();
	IN6_LOOKUP_MULTI(group, ifp, inm);
	if (inm != NULL) {
		IN6M_LOCK(inm);
		VERIFY(inm->in6m_reqcnt >= 1);
		inm->in6m_reqcnt++;
		VERIFY(inm->in6m_reqcnt != 0);
		*pinm = inm;
		IN6M_UNLOCK(inm);
		in6_multihead_lock_done();
		/*
		 * We already joined this group; return the in6m
		 * with a refcount held (via lookup) for caller.
		 */
		return 0;
	}
	in6_multihead_lock_done();

	memset(&gsin6, 0, sizeof(gsin6));
	gsin6.sin6_family = AF_INET6;
	gsin6.sin6_len = sizeof(struct sockaddr_in6);
	gsin6.sin6_addr = *group;

	/*
	 * Check if a link-layer group is already associated
	 * with this network-layer group on the given ifnet.
	 */
	error = if_addmulti(ifp, (struct sockaddr *)&gsin6, &ifma);
	if (error != 0) {
		return error;
	}

	/*
	 * See comments in in6m_remref() for access to ifma_protospec.
	 */
	in6_multihead_lock_exclusive();
	IFMA_LOCK(ifma);
	if ((inm = ifma->ifma_protospec) != NULL) {
		VERIFY(ifma->ifma_addr != NULL);
		VERIFY(ifma->ifma_addr->sa_family == AF_INET6);
		IN6M_ADDREF(inm);		/* for caller */
		IFMA_UNLOCK(ifma);
		IN6M_LOCK(inm);
		VERIFY(inm->in6m_ifma == ifma);
		VERIFY(inm->in6m_ifp == ifp);
		VERIFY(IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, group));
		if (inm->in6m_debug & IFD_ATTACHED) {
			VERIFY(inm->in6m_reqcnt >= 1);
			inm->in6m_reqcnt++;
			VERIFY(inm->in6m_reqcnt != 0);
			*pinm = inm;
			IN6M_UNLOCK(inm);
			in6_multihead_lock_done();
			IFMA_REMREF(ifma);
			/*
			 * We lost the race with another thread doing
			 * in6_mc_get(); since this group has already
			 * been joined, return the inm with a refcount
			 * held for caller.
			 */
			return 0;
		}
		/*
		 * We lost the race with another thread doing in6_delmulti();
		 * the inm referring to the ifma has been detached, thus we
		 * reattach it back to the in6_multihead list, and return the
		 * inm with a refcount held for the caller.
		 */
		in6_multi_attach(inm);
		VERIFY((inm->in6m_debug &
		    (IFD_ATTACHED | IFD_TRASHED)) == IFD_ATTACHED);
		*pinm = inm;
		IN6M_UNLOCK(inm);
		in6_multihead_lock_done();
		IFMA_REMREF(ifma);
		return 0;
	}
	IFMA_UNLOCK(ifma);

	/*
	 * A new in6_multi record is needed; allocate and initialize it.
	 * We DO NOT perform an MLD join as the in6_ layer may need to
	 * push an initial source list down to MLD to support SSM.
	 *
	 * The initial source filter state is INCLUDE, {} as per the RFC.
	 * Pending state-changes per group are subject to a bounds check.
	 */
	inm = in6_multi_alloc(Z_WAITOK);

	IN6M_LOCK(inm);
	inm->in6m_addr = *group;
	inm->in6m_ifp = ifp;
	inm->in6m_mli = MLD_IFINFO(ifp);
	VERIFY(inm->in6m_mli != NULL);
	MLI_ADDREF(inm->in6m_mli);
	inm->in6m_ifma = ifma;		/* keep refcount from if_addmulti() */
	inm->in6m_state = MLD_NOT_MEMBER;
	/*
	 * Pending state-changes per group are subject to a bounds check.
	 */
	inm->in6m_scq.ifq_maxlen = MLD_MAX_STATE_CHANGES;
	inm->in6m_st[0].iss_fmode = MCAST_UNDEFINED;
	inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
	RB_INIT(&inm->in6m_srcs);
	*pinm = inm;
	in6_multi_attach(inm);
	VERIFY((inm->in6m_debug &
	    (IFD_ATTACHED | IFD_TRASHED)) == IFD_ATTACHED);
	IN6M_ADDREF_LOCKED(inm);	/* for caller */
	IN6M_UNLOCK(inm);

	IFMA_LOCK(ifma);
	VERIFY(ifma->ifma_protospec == NULL);
	ifma->ifma_protospec = inm;
	IFMA_UNLOCK(ifma);
	in6_multihead_lock_done();

	return 0;
}
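
/*
 * Editor's usage sketch (illustrative only, not part of the original
 * source): an in-kernel caller holding the IN6_MULTI lock would obtain
 * and later release a group record roughly as follows.
 *
 *	struct in6_multi *inm = NULL;
 *	int error;
 *
 *	error = in6_mc_get(ifp, &mcaddr, &inm);	// mcaddr: hypothetical group
 *	if (error == 0) {
 *		// ... use inm; the reference was taken for the caller ...
 *		IN6M_REMREF(inm);
 *	}
 */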

/*
 * Clear recorded source entries for a group.
 * Used by the MLD code. Caller must hold the IN6_MULTI lock.
 * FIXME: Should reap.
 */
void
in6m_clear_recorded(struct in6_multi *inm)
{
	struct ip6_msource	*ims;

	IN6M_LOCK_ASSERT_HELD(inm);

	RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) {
		if (ims->im6s_stp) {
			ims->im6s_stp = 0;
			--inm->in6m_st[1].iss_rec;
		}
	}
	VERIFY(inm->in6m_st[1].iss_rec == 0);
}

/*
 * Record a source as pending for a Source-Group MLDv2 query.
 * This lives here as it modifies the shared tree.
 *
 * inm is the group descriptor.
 * addr is the address of the source to record, in network byte order.
 *
 * If the net.inet6.mld.sgalloc sysctl is non-zero, we will
 * lazy-allocate a source node in response to an SG query.
 * Otherwise, no allocation is performed. This saves some memory
 * with the trade-off that the source will not be reported to the
 * router if joined in the window between the query response and
 * the group actually being joined on the local host.
 *
 * VIMAGE: XXX: Currently the mld_sgalloc feature has been removed.
 * This turns off the allocation of a recorded source entry if
 * the group has not been joined.
 *
 * Return 0 if the source didn't exist or was already marked as recorded.
 * Return 1 if the source was marked as recorded by this function.
 * Return <0 if any error occurred (negated errno code).
 */
int
in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr)
{
	struct ip6_msource	 find;
	struct ip6_msource	*ims, *nims;

	IN6M_LOCK_ASSERT_HELD(inm);

	find.im6s_addr = *addr;
	ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find);
	if (ims && ims->im6s_stp) {
		return 0;
	}
	if (ims == NULL) {
		if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) {
			return -ENOSPC;
		}
		nims = ip6ms_alloc(Z_WAITOK);
		nims->im6s_addr = find.im6s_addr;
		RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims);
		++inm->in6m_nsrc;
		ims = nims;
	}

	/*
	 * Mark the source as recorded and update the recorded
	 * source count.
	 */
	++ims->im6s_stp;
	++inm->in6m_st[1].iss_rec;

	return 1;
}
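
/*
 * Editor's illustrative note (not part of the original source): during an
 * MLDv2 Source-and-Group query, the MLD layer typically records the queried
 * sources while holding the group lock and clears the marks once the
 * corresponding report has been built, e.g.:
 *
 *	IN6M_LOCK(inm);
 *	(void) in6m_record_source(inm, &src_addr);	// src_addr: hypothetical
 *	// ... build and enqueue the pending report ...
 *	in6m_clear_recorded(inm);
 *	IN6M_UNLOCK(inm);
 */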

/*
 * Return a pointer to an in6_msource owned by an in6_mfilter,
 * given its source address.
 * Lazy-allocate if needed. If this is a new entry its filter state is
 * undefined at t0.
 *
 * imf is the filter set being modified.
 * addr is the source address.
 *
 * Caller is expected to be holding im6o_lock.
 */
static int
im6f_get_source(struct in6_mfilter *imf, const struct sockaddr_in6 *psin,
    struct in6_msource **plims)
{
	struct ip6_msource	 find;
	struct ip6_msource	*ims;
	struct in6_msource	*lims;
	int			 error;

	error = 0;
	ims = NULL;
	lims = NULL;

	find.im6s_addr = psin->sin6_addr;
	ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find);
	lims = (struct in6_msource *)ims;
	if (lims == NULL) {
		if (imf->im6f_nsrc == in6_mcast_maxsocksrc) {
			return ENOSPC;
		}
		lims = in6ms_alloc(Z_WAITOK);
		lims->im6s_addr = find.im6s_addr;
		lims->im6sl_st[0] = MCAST_UNDEFINED;
		RB_INSERT(ip6_msource_tree, &imf->im6f_sources,
		    (struct ip6_msource *)lims);
		++imf->im6f_nsrc;
	}

	*plims = lims;

	return error;
}

/*
 * Graft a source entry into an existing socket-layer filter set,
 * maintaining any required invariants and checking allocations.
 *
 * The source is marked as being in the new filter mode at t1.
 *
 * Return the pointer to the new node, otherwise return NULL.
 *
 * Caller is expected to be holding im6o_lock.
 */
static struct in6_msource *
im6f_graft(struct in6_mfilter *imf, const uint8_t st1,
    const struct sockaddr_in6 *psin)
{
	struct in6_msource	*lims;

	lims = in6ms_alloc(Z_WAITOK);
	lims->im6s_addr = psin->sin6_addr;
	lims->im6sl_st[0] = MCAST_UNDEFINED;
	lims->im6sl_st[1] = st1;
	RB_INSERT(ip6_msource_tree, &imf->im6f_sources,
	    (struct ip6_msource *)lims);
	++imf->im6f_nsrc;

	return lims;
}

/*
 * Prune a source entry from an existing socket-layer filter set,
 * maintaining any required invariants and checking allocations.
 *
 * The source is marked as being left at t1; it is not freed.
 *
 * Return 0 if no error occurred, otherwise return an errno value.
 *
 * Caller is expected to be holding im6o_lock.
 */
static int
im6f_prune(struct in6_mfilter *imf, const struct sockaddr_in6 *psin)
{
	struct ip6_msource	 find;
	struct ip6_msource	*ims;
	struct in6_msource	*lims;

	find.im6s_addr = psin->sin6_addr;
	ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find);
	if (ims == NULL) {
		return ENOENT;
	}
	lims = (struct in6_msource *)ims;
	lims->im6sl_st[1] = MCAST_UNDEFINED;
	return 0;
}
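
/*
 * Editor's illustrative sketch (not part of the original source): the
 * delta-based block/unblock path below pairs these helpers, grafting a
 * source in the current exclude mode to block it and pruning it to
 * unblock, e.g.:
 *
 *	// block (fmode is MCAST_EXCLUDE):
 *	ims = im6f_graft(imf, fmode, ssa);
 *	// unblock:
 *	error = im6f_prune(imf, ssa);
 *
 * followed by im6f_commit(imf) on success or im6f_rollback(imf) on failure.
 */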

/*
 * Revert socket-layer filter set deltas at t1 to t0 state.
 *
 * Caller is expected to be holding im6o_lock.
 */
static void
im6f_rollback(struct in6_mfilter *imf)
{
	struct ip6_msource	*ims, *tims;
	struct in6_msource	*lims;

	RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) {
		lims = (struct in6_msource *)ims;
		if (lims->im6sl_st[0] == lims->im6sl_st[1]) {
			/* no change at t1 */
			continue;
		} else if (lims->im6sl_st[0] != MCAST_UNDEFINED) {
			/* revert change to existing source at t1 */
			lims->im6sl_st[1] = lims->im6sl_st[0];
		} else {
			/* revert source added at t1 */
			MLD_PRINTF(("%s: free in6ms 0x%llx\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(lims)));
			RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims);
			in6ms_free(lims);
			imf->im6f_nsrc--;
		}
	}
	imf->im6f_st[1] = imf->im6f_st[0];
}

/*
 * Mark socket-layer filter set as INCLUDE {} at t1.
 *
 * Caller is expected to be holding im6o_lock.
 */
void
im6f_leave(struct in6_mfilter *imf)
{
	struct ip6_msource	*ims;
	struct in6_msource	*lims;

	RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) {
		lims = (struct in6_msource *)ims;
		lims->im6sl_st[1] = MCAST_UNDEFINED;
	}
	imf->im6f_st[1] = MCAST_INCLUDE;
}

/*
 * Mark socket-layer filter set deltas as committed.
 *
 * Caller is expected to be holding im6o_lock.
 */
static void
im6f_commit(struct in6_mfilter *imf)
{
	struct ip6_msource	*ims;
	struct in6_msource	*lims;

	RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) {
		lims = (struct in6_msource *)ims;
		lims->im6sl_st[0] = lims->im6sl_st[1];
	}
	imf->im6f_st[0] = imf->im6f_st[1];
}

/*
 * Reap unreferenced sources from socket-layer filter set.
 *
 * Caller is expected to be holding im6o_lock.
 */
static void
im6f_reap(struct in6_mfilter *imf)
{
	struct ip6_msource	*ims, *tims;
	struct in6_msource	*lims;

	RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) {
		lims = (struct in6_msource *)ims;
		if ((lims->im6sl_st[0] == MCAST_UNDEFINED) &&
		    (lims->im6sl_st[1] == MCAST_UNDEFINED)) {
			MLD_PRINTF(("%s: free in6ms 0x%llx\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(lims)));
			RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims);
			in6ms_free(lims);
			imf->im6f_nsrc--;
		}
	}
}

/*
 * Purge socket-layer filter set.
 *
 * Caller is expected to be holding im6o_lock.
 */
void
im6f_purge(struct in6_mfilter *imf)
{
	struct ip6_msource	*ims, *tims;
	struct in6_msource	*lims;

	RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) {
		lims = (struct in6_msource *)ims;
		MLD_PRINTF(("%s: free in6ms 0x%llx\n", __func__,
		    (uint64_t)VM_KERNEL_ADDRPERM(lims)));
		RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims);
		in6ms_free(lims);
		imf->im6f_nsrc--;
	}
	imf->im6f_st[0] = imf->im6f_st[1] = MCAST_UNDEFINED;
	VERIFY(RB_EMPTY(&imf->im6f_sources));
}
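
/*
 * Editor's illustrative note (not part of the original source): socket-layer
 * filter updates in this file follow a two-phase pattern built from the
 * helpers above; changes are staged at t1, pushed to MLD, and then either
 * committed or rolled back, roughly:
 *
 *	error = in6m_merge(inm, imf);
 *	if (error == 0)
 *		error = mld_change_state(inm, &mtp, 0);
 *	if (error)
 *		im6f_rollback(imf);
 *	else
 *		im6f_commit(imf);
 *	im6f_reap(imf);
 */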

/*
 * Look up a source filter entry for a multicast group.
 *
 * inm is the group descriptor to work with.
 * addr is the IPv6 address to look up.
 * noalloc may be non-zero to suppress allocation of sources.
 * *pims will be set to the address of the retrieved or allocated source.
 *
 * Return 0 if successful, otherwise return a non-zero error code.
 */
static int
in6m_get_source(struct in6_multi *inm, const struct in6_addr *addr,
    const int noalloc, struct ip6_msource **pims)
{
	struct ip6_msource	 find;
	struct ip6_msource	*ims, *nims;

	IN6M_LOCK_ASSERT_HELD(inm);

	find.im6s_addr = *addr;
	ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find);
	if (ims == NULL && !noalloc) {
		if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) {
			return ENOSPC;
		}
		nims = ip6ms_alloc(Z_WAITOK);
		nims->im6s_addr = *addr;
		RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims);
		++inm->in6m_nsrc;
		ims = nims;
		MLD_PRINTF(("%s: allocated %s as 0x%llx\n", __func__,
		    ip6_sprintf(addr), (uint64_t)VM_KERNEL_ADDRPERM(ims)));
	}

	*pims = ims;
	return 0;
}

/*
 * Helper function to derive the filter mode on a source entry
 * from its internal counters. Predicates are:
 *	A source is only excluded if all listeners exclude it.
 *	A source is only included if no listeners exclude it,
 *	and at least one listener includes it.
 * May be used by ifmcstat(8).
 */
uint8_t
im6s_get_mode(const struct in6_multi *inm, const struct ip6_msource *ims,
    uint8_t t)
{
	IN6M_LOCK_ASSERT_HELD(__DECONST(struct in6_multi *, inm));

	t = !!t;
	if (inm->in6m_st[t].iss_ex > 0 &&
	    inm->in6m_st[t].iss_ex == ims->im6s_st[t].ex) {
		return MCAST_EXCLUDE;
	} else if (ims->im6s_st[t].in > 0 && ims->im6s_st[t].ex == 0) {
		return MCAST_INCLUDE;
	}
	return MCAST_UNDEFINED;
}
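
/*
 * Editor's worked example (illustrative only, not part of the original
 * source): with three exclusive listeners on the group
 * (inm->in6m_st[t].iss_ex == 3), im6s_get_mode() yields:
 *
 *	ims->im6s_st[t].ex == 3, .in == 0  ->  MCAST_EXCLUDE
 *	    (every exclusive listener excludes this source)
 *	ims->im6s_st[t].ex == 2, .in == 0  ->  MCAST_UNDEFINED
 *	ims->im6s_st[t].ex == 0, .in == 1  ->  MCAST_INCLUDE
 */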

/*
 * Merge socket-layer source into MLD-layer source.
 * If rollback is non-zero, perform the inverse of the merge.
 */
static void
im6s_merge(struct ip6_msource *ims, const struct in6_msource *lims,
    const int rollback)
{
	int n = rollback ? -1 : 1;

	if (lims->im6sl_st[0] == MCAST_EXCLUDE) {
		MLD_PRINTF(("%s: t1 ex -= %d on %s\n", __func__, n,
		    ip6_sprintf(&lims->im6s_addr)));
		ims->im6s_st[1].ex -= n;
	} else if (lims->im6sl_st[0] == MCAST_INCLUDE) {
		MLD_PRINTF(("%s: t1 in -= %d on %s\n", __func__, n,
		    ip6_sprintf(&lims->im6s_addr)));
		ims->im6s_st[1].in -= n;
	}

	if (lims->im6sl_st[1] == MCAST_EXCLUDE) {
		MLD_PRINTF(("%s: t1 ex += %d on %s\n", __func__, n,
		    ip6_sprintf(&lims->im6s_addr)));
		ims->im6s_st[1].ex += n;
	} else if (lims->im6sl_st[1] == MCAST_INCLUDE) {
		MLD_PRINTF(("%s: t1 in += %d on %s\n", __func__, n,
		    ip6_sprintf(&lims->im6s_addr)));
		ims->im6s_st[1].in += n;
	}
}
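
/*
 * Editor's worked example (illustrative only, not part of the original
 * source): a socket moving a source from INCLUDE at t0 to EXCLUDE at t1
 * (im6sl_st[0] == MCAST_INCLUDE, im6sl_st[1] == MCAST_EXCLUDE) yields,
 * with rollback == 0:
 *
 *	ims->im6s_st[1].in -= 1;	// drop the old include reference
 *	ims->im6s_st[1].ex += 1;	// add the new exclude reference
 *
 * Calling im6s_merge() again with rollback != 0 applies the exact inverse.
 */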

/*
 * Atomically update the global in6_multi state, when a membership's
 * filter list is being updated in any way.
 *
 * imf is the per-inpcb-membership group filter pointer.
 * A fake imf may be passed for in-kernel consumers.
 *
 * XXX This is a candidate for a set-symmetric-difference style loop
 * which would eliminate the repeated lookup from root of ims nodes,
 * as they share the same key space.
 *
 * If any error occurred this function will back out of refcounts
 * and return a non-zero value.
 */
static int
in6m_merge(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
{
	struct ip6_msource	*ims, *nims = NULL;
	struct in6_msource	*lims;
	int			 schanged, error;
	int			 nsrc0, nsrc1;

	IN6M_LOCK_ASSERT_HELD(inm);

	schanged = 0;
	error = 0;
	nsrc1 = nsrc0 = 0;

	/*
	 * Update the source filters first, as this may fail.
	 * Maintain count of in-mode filters at t0, t1. These are
	 * used to work out if we transition into ASM mode or not.
	 * Maintain a count of source filters whose state was
	 * actually modified by this operation.
	 */
	RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) {
		lims = (struct in6_msource *)ims;
		if (lims->im6sl_st[0] == imf->im6f_st[0]) {
			nsrc0++;
		}
		if (lims->im6sl_st[1] == imf->im6f_st[1]) {
			nsrc1++;
		}
		if (lims->im6sl_st[0] == lims->im6sl_st[1]) {
			continue;
		}
		error = in6m_get_source(inm, &lims->im6s_addr, 0, &nims);
		++schanged;
		if (error) {
			break;
		}
		im6s_merge(nims, lims, 0);
	}
	if (error) {
		struct ip6_msource *bims;

		RB_FOREACH_REVERSE_FROM(ims, ip6_msource_tree, nims) {
			lims = (struct in6_msource *)ims;
			if (lims->im6sl_st[0] == lims->im6sl_st[1]) {
				continue;
			}
			(void) in6m_get_source(inm, &lims->im6s_addr, 1, &bims);
			if (bims == NULL) {
				continue;
			}
			im6s_merge(bims, lims, 1);
		}
		goto out_reap;
	}

	MLD_PRINTF(("%s: imf filters in-mode: %d at t0, %d at t1\n",
	    __func__, nsrc0, nsrc1));

	/* Handle transition between INCLUDE {n} and INCLUDE {} on socket. */
	if (imf->im6f_st[0] == imf->im6f_st[1] &&
	    imf->im6f_st[1] == MCAST_INCLUDE) {
		if (nsrc1 == 0) {
			MLD_PRINTF(("%s: --in on inm at t1\n", __func__));
			--inm->in6m_st[1].iss_in;
		}
	}

	/* Handle filter mode transition on socket. */
	if (imf->im6f_st[0] != imf->im6f_st[1]) {
		MLD_PRINTF(("%s: imf transition %d to %d\n",
		    __func__, imf->im6f_st[0], imf->im6f_st[1]));

		if (imf->im6f_st[0] == MCAST_EXCLUDE) {
			MLD_PRINTF(("%s: --ex on inm at t1\n", __func__));
			--inm->in6m_st[1].iss_ex;
		} else if (imf->im6f_st[0] == MCAST_INCLUDE) {
			MLD_PRINTF(("%s: --in on inm at t1\n", __func__));
			--inm->in6m_st[1].iss_in;
		}

		if (imf->im6f_st[1] == MCAST_EXCLUDE) {
			MLD_PRINTF(("%s: ex++ on inm at t1\n", __func__));
			inm->in6m_st[1].iss_ex++;
		} else if (imf->im6f_st[1] == MCAST_INCLUDE && nsrc1 > 0) {
			MLD_PRINTF(("%s: in++ on inm at t1\n", __func__));
			inm->in6m_st[1].iss_in++;
		}
	}

	/*
	 * Track inm filter state in terms of listener counts.
	 * If there are any exclusive listeners, stack-wide
	 * membership is exclusive.
	 * Otherwise, if only inclusive listeners, stack-wide is inclusive.
	 * If no listeners remain, state is undefined at t1,
	 * and the MLD lifecycle for this group should finish.
	 */
	if (inm->in6m_st[1].iss_ex > 0) {
		MLD_PRINTF(("%s: transition to EX\n", __func__));
		inm->in6m_st[1].iss_fmode = MCAST_EXCLUDE;
	} else if (inm->in6m_st[1].iss_in > 0) {
		MLD_PRINTF(("%s: transition to IN\n", __func__));
		inm->in6m_st[1].iss_fmode = MCAST_INCLUDE;
	} else {
		MLD_PRINTF(("%s: transition to UNDEF\n", __func__));
		inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
	}

	/* Decrement ASM listener count on transition out of ASM mode. */
	if (imf->im6f_st[0] == MCAST_EXCLUDE && nsrc0 == 0) {
		if ((imf->im6f_st[1] != MCAST_EXCLUDE) ||
		    (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 > 0)) {
			MLD_PRINTF(("%s: --asm on inm at t1\n", __func__));
			--inm->in6m_st[1].iss_asm;
		}
	}

	/* Increment ASM listener count on transition to ASM mode. */
	if (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 == 0) {
		MLD_PRINTF(("%s: asm++ on inm at t1\n", __func__));
		inm->in6m_st[1].iss_asm++;
	}

	MLD_PRINTF(("%s: merged imf 0x%llx to inm 0x%llx\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(imf),
	    (uint64_t)VM_KERNEL_ADDRPERM(inm)));
	in6m_print(inm);

out_reap:
	if (schanged > 0) {
		MLD_PRINTF(("%s: sources changed; reaping\n", __func__));
		in6m_reap(inm);
	}
	return error;
}

/*
 * Mark an in6_multi's filter set deltas as committed.
 * Called by MLD after a state change has been enqueued.
 */
void
in6m_commit(struct in6_multi *inm)
{
	struct ip6_msource	*ims;

	IN6M_LOCK_ASSERT_HELD(inm);

	MLD_PRINTF(("%s: commit inm 0x%llx\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(inm)));
	MLD_PRINTF(("%s: pre commit:\n", __func__));
	in6m_print(inm);

	RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) {
		ims->im6s_st[0] = ims->im6s_st[1];
	}
	inm->in6m_st[0] = inm->in6m_st[1];
}

/*
 * Reap unreferenced nodes from an in6_multi's filter set.
 */
static void
in6m_reap(struct in6_multi *inm)
{
	struct ip6_msource	*ims, *tims;

	IN6M_LOCK_ASSERT_HELD(inm);

	RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) {
		if (ims->im6s_st[0].ex > 0 || ims->im6s_st[0].in > 0 ||
		    ims->im6s_st[1].ex > 0 || ims->im6s_st[1].in > 0 ||
		    ims->im6s_stp != 0) {
			continue;
		}
		MLD_PRINTF(("%s: free ims 0x%llx\n", __func__,
		    (uint64_t)VM_KERNEL_ADDRPERM(ims)));
		RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims);
		ip6ms_free(ims);
		inm->in6m_nsrc--;
	}
}

/*
 * Purge all source nodes from an in6_multi's filter set.
 */
void
in6m_purge(struct in6_multi *inm)
{
	struct ip6_msource	*ims, *tims;

	IN6M_LOCK_ASSERT_HELD(inm);

	RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) {
		MLD_PRINTF(("%s: free ims 0x%llx\n", __func__,
		    (uint64_t)VM_KERNEL_ADDRPERM(ims)));
		RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims);
		ip6ms_free(ims);
		inm->in6m_nsrc--;
	}
}

/*
 * Join a multicast address w/o sources.
 * KAME compatibility entry point.
 */
struct in6_multi_mship *
in6_joingroup(struct ifnet *ifp, struct in6_addr *mcaddr,
    int *errorp, int delay)
{
	struct in6_multi_mship *imm;
	int error;

	*errorp = 0;

	imm = in6_multi_mship_alloc(Z_WAITOK);

	error = in6_mc_join(ifp, mcaddr, NULL, &imm->i6mm_maddr, delay);
	if (error) {
		*errorp = error;
		in6_multi_mship_free(imm);
		return NULL;
	}

	return imm;
}

/*
 * Leave a multicast address w/o sources.
 * KAME compatibility entry point.
 */
int
in6_leavegroup(struct in6_multi_mship *imm)
{
	if (imm->i6mm_maddr != NULL) {
		in6_mc_leave(imm->i6mm_maddr, NULL);
		IN6M_REMREF(imm->i6mm_maddr);
		imm->i6mm_maddr = NULL;
	}
	in6_multi_mship_free(imm);
	return 0;
}
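
/*
 * Editor's usage sketch (illustrative only, not part of the original
 * source): a kernel consumer using the KAME-compatibility entry points
 * above to hold an ASM membership on an interface:
 *
 *	struct in6_multi_mship *imm;
 *	int error;
 *
 *	imm = in6_joingroup(ifp, &mcaddr, &error, 0);	// mcaddr: hypothetical
 *	if (imm != NULL) {
 *		// ... membership held ...
 *		(void) in6_leavegroup(imm);
 *	}
 */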

/*
 * Join a multicast group; real entry point.
 *
 * Only preserves atomicity at inm level.
 * NOTE: imf argument cannot be const due to sys/tree.h limitations.
 *
 * If the MLD downcall fails, the group is not joined, and an error
 * code is returned.
 */
int
in6_mc_join(struct ifnet *ifp, const struct in6_addr *mcaddr,
    /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm,
    const int delay)
{
	struct in6_mfilter	 timf;
	struct in6_multi	*inm = NULL;
	int			 error = 0;
	struct mld_tparams	 mtp;

	/*
	 * Sanity: Check scope zone ID was set for ifp, if and
	 * only if group is scoped to an interface.
	 */
	VERIFY(IN6_IS_ADDR_MULTICAST(mcaddr));
	if (IN6_IS_ADDR_MC_LINKLOCAL(mcaddr) ||
	    IN6_IS_ADDR_MC_INTFACELOCAL(mcaddr)) {
		VERIFY(mcaddr->s6_addr16[1] != 0);
	}

	MLD_PRINTF(("%s: join %s on 0x%llx(%s))\n", __func__,
	    ip6_sprintf(mcaddr), (uint64_t)VM_KERNEL_ADDRPERM(ifp),
	    if_name(ifp)));

	bzero(&mtp, sizeof(mtp));
	*pinm = NULL;

	/*
	 * If no imf was specified (i.e. kernel consumer),
	 * fake one up and assume it is an ASM join.
	 */
	if (imf == NULL) {
		im6f_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE);
		imf = &timf;
	}

	error = in6_mc_get(ifp, mcaddr, &inm);
	if (error) {
		MLD_PRINTF(("%s: in6_mc_get() failure\n", __func__));
		return error;
	}

	MLD_PRINTF(("%s: merge inm state\n", __func__));

	IN6M_LOCK(inm);
	error = in6m_merge(inm, imf);
	if (error) {
		MLD_PRINTF(("%s: failed to merge inm state\n", __func__));
		goto out_in6m_release;
	}

	MLD_PRINTF(("%s: doing mld downcall\n", __func__));
	error = mld_change_state(inm, &mtp, delay);
	if (error) {
		MLD_PRINTF(("%s: failed to update source\n", __func__));
		im6f_rollback(imf);
		goto out_in6m_release;
	}

out_in6m_release:
	if (error) {
		MLD_PRINTF(("%s: dropping ref on 0x%llx\n", __func__,
		    (uint64_t)VM_KERNEL_ADDRPERM(inm)));
		IN6M_UNLOCK(inm);
		IN6M_REMREF(inm);
	} else {
		IN6M_UNLOCK(inm);
		*pinm = inm;	/* keep refcount from in6_mc_get() */
	}

	/* schedule timer now that we've dropped the lock(s) */
	mld_set_timeout(&mtp);

	return error;
}

/*
 * Leave a multicast group; real entry point.
 * All source filters will be expunged.
 *
 * Only preserves atomicity at inm level.
 *
 * Holding the write lock for the INP which contains imf
 * is highly advisable. We can't assert for it as imf does not
 * contain a back-pointer to the owning inp.
 *
 * Note: This is not the same as in6m_release(*) as this function also
 * makes a state change downcall into MLD.
 */
int
in6_mc_leave(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
{
	struct in6_mfilter	 timf;
	int			 error, lastref;
	struct mld_tparams	 mtp;

	bzero(&mtp, sizeof(mtp));
	error = 0;

	IN6M_LOCK_ASSERT_NOTHELD(inm);

	in6_multihead_lock_exclusive();
	IN6M_LOCK(inm);

	MLD_PRINTF(("%s: leave inm 0x%llx, %s/%s%d, imf 0x%llx\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(inm), ip6_sprintf(&inm->in6m_addr),
	    (in6m_is_ifp_detached(inm) ? "null" : inm->in6m_ifp->if_name),
	    inm->in6m_ifp->if_unit, (uint64_t)VM_KERNEL_ADDRPERM(imf)));

	/*
	 * If no imf was specified (i.e. kernel consumer),
	 * fake one up and assume it is an ASM join.
	 */
	if (imf == NULL) {
		im6f_init(&timf, MCAST_EXCLUDE, MCAST_UNDEFINED);
		imf = &timf;
	}

	/*
	 * Begin state merge transaction at MLD layer.
	 *
	 * As this particular invocation should not cause any memory
	 * to be allocated, and there is no opportunity to roll back
	 * the transaction, it MUST NOT fail.
	 */
	MLD_PRINTF(("%s: merge inm state\n", __func__));

	error = in6m_merge(inm, imf);
	KASSERT(error == 0, ("%s: failed to merge inm state\n", __func__));

	MLD_PRINTF(("%s: doing mld downcall\n", __func__));
	error = mld_change_state(inm, &mtp, 0);
#if MLD_DEBUG
	if (error) {
		MLD_PRINTF(("%s: failed mld downcall\n", __func__));
	}
#endif
	lastref = in6_multi_detach(inm);
	VERIFY(!lastref || (!(inm->in6m_debug & IFD_ATTACHED) &&
	    inm->in6m_reqcnt == 0));
	IN6M_UNLOCK(inm);
	in6_multihead_lock_done();

	if (lastref) {
		IN6M_REMREF(inm);	/* for in6_multihead list */
	}
	/* schedule timer now that we've dropped the lock(s) */
	mld_set_timeout(&mtp);

	return error;
}

/*
 * Block or unblock an ASM multicast source on an inpcb.
 * This implements the delta-based API described in RFC 3678.
 *
 * The delta-based API applies only to exclusive-mode memberships.
 * An MLD downcall will be performed.
 *
 * Return 0 if successful, otherwise return an appropriate error code.
 */
static int
in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
{
	struct group_source_req	 gsr;
	struct sockaddr_in6	*gsa, *ssa;
	struct ifnet		*ifp;
	struct in6_mfilter	*imf;
	struct ip6_moptions	*imo;
	struct in6_msource	*ims;
	struct in6_multi	*inm;
	size_t			 idx;
	uint8_t			 fmode;
	int			 error, doblock;
	struct mld_tparams	 mtp;

	bzero(&mtp, sizeof(mtp));
	ifp = NULL;
	error = 0;
	doblock = 0;

	memset(&gsr, 0, sizeof(struct group_source_req));
	gsa = (struct sockaddr_in6 *)&gsr.gsr_group;
	ssa = (struct sockaddr_in6 *)&gsr.gsr_source;

	switch (sopt->sopt_name) {
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
		error = sooptcopyin(sopt, &gsr,
		    sizeof(struct group_source_req),
		    sizeof(struct group_source_req));
		if (error) {
			return error;
		}

		if (gsa->sin6_family != AF_INET6 ||
		    gsa->sin6_len != sizeof(struct sockaddr_in6)) {
			return EINVAL;
		}

		if (ssa->sin6_family != AF_INET6 ||
		    ssa->sin6_len != sizeof(struct sockaddr_in6)) {
			return EINVAL;
		}

		ifnet_head_lock_shared();
		if (gsr.gsr_interface == 0 ||
		    (u_int)if_index < gsr.gsr_interface) {
			ifnet_head_done();
			return EADDRNOTAVAIL;
		}

		ifp = ifindex2ifnet[gsr.gsr_interface];
		ifnet_head_done();

		if (ifp == NULL) {
			return EADDRNOTAVAIL;
		}

		if (sopt->sopt_name == MCAST_BLOCK_SOURCE) {
			doblock = 1;
		}
		break;

	default:
		MLD_PRINTF(("%s: unknown sopt_name %d\n",
		    __func__, sopt->sopt_name));
		return EOPNOTSUPP;
	}

	if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr)) {
		return EINVAL;
	}

	(void) in6_setscope(&gsa->sin6_addr, ifp, NULL);

	/*
	 * Check if we are actually a member of this group.
	 */
	imo = in6p_findmoptions(inp);
	if (imo == NULL) {
		return ENOMEM;
	}

	IM6O_LOCK(imo);
	idx = im6o_match_group(imo, ifp, gsa);
	if (idx == (size_t)-1 || imo->im6o_mfilters == NULL) {
		error = EADDRNOTAVAIL;
		goto out_imo_locked;
	}

	VERIFY(imo->im6o_mfilters != NULL);
	imf = &imo->im6o_mfilters[idx];
	inm = imo->im6o_membership[idx];

	/*
	 * Attempting to use the delta-based API on a
	 * non-exclusive-mode membership is an error.
	 */
	fmode = imf->im6f_st[0];
	if (fmode != MCAST_EXCLUDE) {
		error = EINVAL;
		goto out_imo_locked;
	}

	/*
	 * Deal with error cases up-front:
	 *  Asked to block, but already blocked; or
	 *  Asked to unblock, but nothing to unblock.
	 * If adding a new block entry, allocate it.
	 */
	ims = im6o_match_source(imo, idx, ssa);
	if ((ims != NULL && doblock) || (ims == NULL && !doblock)) {
		MLD_PRINTF(("%s: source %s %spresent\n", __func__,
		    ip6_sprintf(&ssa->sin6_addr),
		    doblock ? "" : "not "));
		error = EADDRNOTAVAIL;
		goto out_imo_locked;
	}

	/*
	 * Begin state merge transaction at socket layer.
	 */
	if (doblock) {
		MLD_PRINTF(("%s: %s source\n", __func__, "block"));
		ims = im6f_graft(imf, fmode, ssa);
		if (ims == NULL) {
			error = ENOMEM;
		}
	} else {
		MLD_PRINTF(("%s: %s source\n", __func__, "allow"));
		error = im6f_prune(imf, ssa);
	}

	if (error) {
		MLD_PRINTF(("%s: merge imf state failed\n", __func__));
		goto out_im6f_rollback;
	}

	/*
	 * Begin state merge transaction at MLD layer.
	 */
	IN6M_LOCK(inm);
	MLD_PRINTF(("%s: merge inm state\n", __func__));
	error = in6m_merge(inm, imf);
	if (error) {
		MLD_PRINTF(("%s: failed to merge inm state\n", __func__));
		IN6M_UNLOCK(inm);
		goto out_im6f_rollback;
	}

	MLD_PRINTF(("%s: doing mld downcall\n", __func__));
	error = mld_change_state(inm, &mtp, 0);
	IN6M_UNLOCK(inm);
#if MLD_DEBUG
	if (error) {
		MLD_PRINTF(("%s: failed mld downcall\n", __func__));
	}
#endif

out_im6f_rollback:
	if (error) {
		im6f_rollback(imf);
	} else {
		im6f_commit(imf);
	}

	im6f_reap(imf);

out_imo_locked:
	IM6O_UNLOCK(imo);
	IM6O_REMREF(imo);	/* from in6p_findmoptions() */

	/* schedule timer now that we've dropped the lock(s) */
	mld_set_timeout(&mtp);

	return error;
}
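
/*
 * Editor's illustrative userland sketch (not part of the original source):
 * the delta-based API handled above is driven from a socket roughly as
 * follows (error handling omitted); the group must already be joined in
 * exclusive mode.
 *
 *	struct group_source_req gsr = {};
 *	gsr.gsr_interface = ifindex;		// hypothetical interface index
 *	memcpy(&gsr.gsr_group, &group_sin6, sizeof(group_sin6));
 *	memcpy(&gsr.gsr_source, &source_sin6, sizeof(source_sin6));
 *	(void) setsockopt(s, IPPROTO_IPV6, MCAST_BLOCK_SOURCE,
 *	    &gsr, sizeof(gsr));
 *	// ... later, to allow the source again:
 *	(void) setsockopt(s, IPPROTO_IPV6, MCAST_UNBLOCK_SOURCE,
 *	    &gsr, sizeof(gsr));
 */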

/*
 * Given an inpcb, return its multicast options structure pointer.  Accepts
 * an unlocked inpcb pointer, but will return it locked.  May sleep.
 */
static struct ip6_moptions *
in6p_findmoptions(struct inpcb *inp)
{
	struct ip6_moptions	 *imo;
	struct in6_multi	**immp;
	struct in6_mfilter	 *imfp;
	size_t			  idx;

	if ((imo = inp->in6p_moptions) != NULL) {
		IM6O_ADDREF(imo);	/* for caller */
		return imo;
	}

	imo = ip6_allocmoptions(Z_WAITOK);
	if (imo == NULL) {
		return NULL;
	}

	immp = _MALLOC(sizeof(*immp) * IPV6_MIN_MEMBERSHIPS, M_IP6MOPTS,
	    M_WAITOK | M_ZERO);
	if (immp == NULL) {
		IM6O_REMREF(imo);
		return NULL;
	}

	imfp = _MALLOC(sizeof(struct in6_mfilter) * IPV6_MIN_MEMBERSHIPS,
	    M_IN6MFILTER, M_WAITOK | M_ZERO);
	if (imfp == NULL) {
		_FREE(immp, M_IP6MOPTS);
		IM6O_REMREF(imo);
		return NULL;
	}

	imo->im6o_multicast_ifp = NULL;
	imo->im6o_multicast_hlim = (u_char)ip6_defmcasthlim;
	imo->im6o_multicast_loop = (u_char)in6_mcast_loop;
	imo->im6o_num_memberships = 0;
	imo->im6o_max_memberships = IPV6_MIN_MEMBERSHIPS;
	imo->im6o_membership = immp;

	/* Initialize per-group source filters. */
	for (idx = 0; idx < IPV6_MIN_MEMBERSHIPS; idx++) {
		im6f_init(&imfp[idx], MCAST_UNDEFINED, MCAST_EXCLUDE);
	}

	imo->im6o_mfilters = imfp;
	inp->in6p_moptions = imo; /* keep reference from ip6_allocmoptions() */
	IM6O_ADDREF(imo);	/* for caller */

	return imo;
}

/*
 * Atomically get source filters on a socket for an IPv6 multicast group.
 * Called with INP lock held; returns with lock released.
 */
static int
in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
{
	struct __msfilterreq64	 msfr = {}, msfr64;
	struct __msfilterreq32	 msfr32;
	struct sockaddr_in6	*gsa;
	struct ifnet		*ifp;
	struct ip6_moptions	*imo;
	struct in6_mfilter	*imf;
	struct ip6_msource	*ims;
	struct in6_msource	*lims;
	struct sockaddr_in6	*psin;
	struct sockaddr_storage	*ptss;
	struct sockaddr_storage	*tss;
	int			 error;
	size_t			 idx, nsrcs, ncsrcs;
	user_addr_t		 tmp_ptr;

	imo = inp->in6p_moptions;
	VERIFY(imo != NULL);

	if (IS_64BIT_PROCESS(current_proc())) {
		error = sooptcopyin(sopt, &msfr64,
		    sizeof(struct __msfilterreq64),
		    sizeof(struct __msfilterreq64));
		if (error) {
			return error;
		}
		/* we never use msfr.msfr_srcs; */
		memcpy(&msfr, &msfr64, sizeof(msfr64));
	} else {
		error = sooptcopyin(sopt, &msfr32,
		    sizeof(struct __msfilterreq32),
		    sizeof(struct __msfilterreq32));
		if (error) {
			return error;
		}
		/* we never use msfr.msfr_srcs; */
		memcpy(&msfr, &msfr32, sizeof(msfr32));
	}

	if (msfr.msfr_group.ss_family != AF_INET6 ||
	    msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) {
		return EINVAL;
	}

	gsa = (struct sockaddr_in6 *)&msfr.msfr_group;
	if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr)) {
		return EINVAL;
	}

	ifnet_head_lock_shared();
	if (msfr.msfr_ifindex == 0 || (u_int)if_index < msfr.msfr_ifindex) {
		ifnet_head_done();
		return EADDRNOTAVAIL;
	}
	ifp = ifindex2ifnet[msfr.msfr_ifindex];
	ifnet_head_done();

	if (ifp == NULL) {
		return EADDRNOTAVAIL;
	}

	if ((size_t) msfr.msfr_nsrcs >
	    UINT32_MAX / sizeof(struct sockaddr_storage)) {
		msfr.msfr_nsrcs = UINT32_MAX / sizeof(struct sockaddr_storage);
	}

	if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) {
		msfr.msfr_nsrcs = (uint32_t)in6_mcast_maxsocksrc;
	}

	(void)in6_setscope(&gsa->sin6_addr, ifp, NULL);

	IM6O_LOCK(imo);
	/*
	 * Lookup group on the socket.
	 */
	idx = im6o_match_group(imo, ifp, gsa);
	if (idx == (size_t)-1 || imo->im6o_mfilters == NULL) {
		IM6O_UNLOCK(imo);
		return EADDRNOTAVAIL;
	}
	imf = &imo->im6o_mfilters[idx];

	/*
	 * Ignore memberships which are in limbo.
	 */
	if (imf->im6f_st[1] == MCAST_UNDEFINED) {
		IM6O_UNLOCK(imo);
		return EAGAIN;
	}
	msfr.msfr_fmode = imf->im6f_st[1];

	/*
	 * If the user specified a buffer, copy out the source filter
	 * entries to userland gracefully.
	 * We only copy out the number of entries which userland
	 * has asked for, but we always tell userland how big the
	 * buffer really needs to be.
	 */
	tss = NULL;

	if (IS_64BIT_PROCESS(current_proc())) {
		tmp_ptr = (user_addr_t)msfr64.msfr_srcs;
	} else {
		tmp_ptr = CAST_USER_ADDR_T(msfr32.msfr_srcs);
	}

	if (tmp_ptr != USER_ADDR_NULL && msfr.msfr_nsrcs > 0) {
		tss = _MALLOC((size_t) msfr.msfr_nsrcs * sizeof(*tss),
		    M_TEMP, M_WAITOK | M_ZERO);
		if (tss == NULL) {
			IM6O_UNLOCK(imo);
			return ENOBUFS;
		}
	}

	/*
	 * Count number of sources in-mode at t0.
	 * If buffer space exists and remains, copy out source entries.
	 */
	nsrcs = msfr.msfr_nsrcs;
	ncsrcs = 0;
	ptss = tss;
	RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) {
		lims = (struct in6_msource *)ims;
		if (lims->im6sl_st[0] == MCAST_UNDEFINED ||
		    lims->im6sl_st[0] != imf->im6f_st[0]) {
			continue;
		}
		if (tss != NULL && nsrcs > 0) {
			psin = (struct sockaddr_in6 *)ptss;
			psin->sin6_family = AF_INET6;
			psin->sin6_len = sizeof(struct sockaddr_in6);
			psin->sin6_addr = lims->im6s_addr;
			psin->sin6_port = 0;
			--nsrcs;
			++ptss;
			++ncsrcs;
		}
	}

	IM6O_UNLOCK(imo);

	if (tss != NULL) {
		error = copyout(tss, tmp_ptr, ncsrcs * sizeof(*tss));
		FREE(tss, M_TEMP);
		if (error) {
			return error;
		}
	}

	msfr.msfr_nsrcs = (uint32_t)ncsrcs;
	if (IS_64BIT_PROCESS(current_proc())) {
		msfr64.msfr_ifindex = msfr.msfr_ifindex;
		msfr64.msfr_fmode = msfr.msfr_fmode;
		msfr64.msfr_nsrcs = msfr.msfr_nsrcs;
		memcpy(&msfr64.msfr_group, &msfr.msfr_group,
		    sizeof(struct sockaddr_storage));
		error = sooptcopyout(sopt, &msfr64,
		    sizeof(struct __msfilterreq64));
	} else {
		msfr32.msfr_ifindex = msfr.msfr_ifindex;
		msfr32.msfr_fmode = msfr.msfr_fmode;
		msfr32.msfr_nsrcs = msfr.msfr_nsrcs;
		memcpy(&msfr32.msfr_group, &msfr.msfr_group,
		    sizeof(struct sockaddr_storage));
		error = sooptcopyout(sopt, &msfr32,
		    sizeof(struct __msfilterreq32));
	}

	return error;
}
1805
1806/*
1807 * Return the IPv6 multicast options in response to user getsockopt().
1808 */
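/*
 * Illustrative sketch (hypothetical descriptor "s"): a typical caller reads
 * one of these options with, e.g.,
 *
 *	u_int hlim;
 *	socklen_t len = sizeof(hlim);
 *	getsockopt(s, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, &hlim, &len);
 *
 * matching the u_int-sized copyout performed for each case below.
 */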
1809int
1810ip6_getmoptions(struct inpcb *inp, struct sockopt *sopt)
1811{
0a7de745
A
1812 struct ip6_moptions *im6o;
1813 int error;
1814 u_int optval;
6d2010ae
A
1815
1816 im6o = inp->in6p_moptions;
1817 /*
1818 * If the socket is not of type SOCK_RAW or SOCK_DGRAM,
1819 * or is a divert socket, reject it.
1820 */
39236c6e
A
1821 if (SOCK_PROTO(inp->inp_socket) == IPPROTO_DIVERT ||
1822 (SOCK_TYPE(inp->inp_socket) != SOCK_RAW &&
1823 SOCK_TYPE(inp->inp_socket) != SOCK_DGRAM)) {
0a7de745 1824 return EOPNOTSUPP;
6d2010ae
A
1825 }
1826
1827 error = 0;
1828 switch (sopt->sopt_name) {
1829 case IPV6_MULTICAST_IF:
0a7de745 1830 if (im6o != NULL) {
6d2010ae 1831 IM6O_LOCK(im6o);
0a7de745 1832 }
6d2010ae
A
1833 if (im6o == NULL || im6o->im6o_multicast_ifp == NULL) {
1834 optval = 0;
1835 } else {
1836 optval = im6o->im6o_multicast_ifp->if_index;
1837 }
0a7de745 1838 if (im6o != NULL) {
6d2010ae 1839 IM6O_UNLOCK(im6o);
0a7de745 1840 }
6d2010ae
A
1841 error = sooptcopyout(sopt, &optval, sizeof(u_int));
1842 break;
1843
1844 case IPV6_MULTICAST_HOPS:
1845 if (im6o == NULL) {
1846 optval = ip6_defmcasthlim;
1847 } else {
1848 IM6O_LOCK(im6o);
1849 optval = im6o->im6o_multicast_hlim;
1850 IM6O_UNLOCK(im6o);
1851 }
1852 error = sooptcopyout(sopt, &optval, sizeof(u_int));
1853 break;
1854
1855 case IPV6_MULTICAST_LOOP:
1856 if (im6o == NULL) {
1857 optval = in6_mcast_loop; /* XXX VIMAGE */
1858 } else {
1859 IM6O_LOCK(im6o);
1860 optval = im6o->im6o_multicast_loop;
1861 IM6O_UNLOCK(im6o);
1862 }
1863 error = sooptcopyout(sopt, &optval, sizeof(u_int));
1864 break;
1865
1866 case IPV6_MSFILTER:
1867 if (im6o == NULL) {
1868 error = EADDRNOTAVAIL;
1869 } else {
1870 error = in6p_get_source_filters(inp, sopt);
1871 }
1872 break;
1873
1874 default:
1875 error = ENOPROTOOPT;
1876 break;
1877 }
1878
0a7de745 1879 return error;
6d2010ae
A
1880}
1881
1882/*
1883 * Look up the ifnet to use for a multicast group membership,
1884 * given the address of an IPv6 group.
1885 *
1886 * This routine exists to support legacy IPv6 multicast applications.
1887 *
1888 * If inp is non-NULL and is bound to an interface, use this socket's
1889 * inp_boundif for any required routing table lookup.
1890 *
1891 * If the route lookup fails, return NULL.
1892 *
1893 * FUTURE: Support multiple forwarding tables for IPv6.
1894 *
1895 * Returns NULL if no ifp could be found.
1896 */
1897static struct ifnet *
1898in6p_lookup_mcast_ifp(const struct inpcb *in6p,
1899 const struct sockaddr_in6 *gsin6)
1900{
0a7de745
A
1901 struct route_in6 ro6;
1902 struct ifnet *ifp;
1903 unsigned int ifscope = IFSCOPE_NONE;
6d2010ae
A
1904
1905 VERIFY(in6p == NULL || (in6p->inp_vflag & INP_IPV6));
1906 VERIFY(gsin6->sin6_family == AF_INET6);
0a7de745 1907 if (IN6_IS_ADDR_MULTICAST(&gsin6->sin6_addr) == 0) {
6d2010ae 1908 return NULL;
0a7de745 1909 }
6d2010ae 1910
0a7de745 1911 if (in6p != NULL && (in6p->inp_flags & INP_BOUND_IF)) {
316670eb 1912 ifscope = in6p->inp_boundifp->if_index;
0a7de745 1913 }
6d2010ae
A
1914
1915 ifp = NULL;
1916 memset(&ro6, 0, sizeof(struct route_in6));
1917 memcpy(&ro6.ro_dst, gsin6, sizeof(struct sockaddr_in6));
1918 rtalloc_scoped_ign((struct route *)&ro6, 0, ifscope);
1919 if (ro6.ro_rt != NULL) {
1920 ifp = ro6.ro_rt->rt_ifp;
1921 VERIFY(ifp != NULL);
6d2010ae 1922 }
39236c6e 1923 ROUTE_RELEASE(&ro6);
6d2010ae 1924
0a7de745 1925 return ifp;
6d2010ae
A
1926}
1927
1928/*
1929 * Since ipv6_mreq contains an ifindex and ip_mreq contains an AF_INET
1930 * address, we need to look up the AF_INET address when translating an
1931 * ipv6_mreq structure into an ip_mreq structure.
1932 * This is used when userland performs multicast setsockopt() on AF_INET6
1933 * sockets with AF_INET multicast addresses (IPv4-mapped IPv6 addresses).
1934 */
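/*
 * Worked example (sketch; the addresses are hypothetical): a join on the
 * v4-mapped group ::ffff:239.1.1.1 arrives as a struct ipv6_mreq; the
 * caller copies 239.1.1.1 out of s6_addr32[3] into imr_multiaddr, and this
 * routine fills imr_interface with the primary IPv4 address of the
 * interface named by ipv6mr_interface.
 */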
1935static int
1936in6p_lookup_v4addr(struct ipv6_mreq *mreq, struct ip_mreq *v4mreq)
1937{
1938 struct ifnet *ifp;
1939 struct ifaddr *ifa;
1940 struct sockaddr_in *sin;
1941
1942 ifnet_head_lock_shared();
1943 if (mreq->ipv6mr_interface > (unsigned int)if_index) {
1944 ifnet_head_done();
0a7de745
A
1945 return EADDRNOTAVAIL;
1946 } else {
6d2010ae 1947 ifp = ifindex2ifnet[mreq->ipv6mr_interface];
0a7de745 1948 }
6d2010ae 1949 ifnet_head_done();
0a7de745
A
1950 if (ifp == NULL) {
1951 return EADDRNOTAVAIL;
1952 }
6d2010ae 1953 ifa = ifa_ifpgetprimary(ifp, AF_INET);
0a7de745
A
1954 if (ifa == NULL) {
1955 return EADDRNOTAVAIL;
1956 }
316670eb 1957 sin = (struct sockaddr_in *)(uintptr_t)(size_t)ifa->ifa_addr;
6d2010ae
A
1958 v4mreq->imr_interface.s_addr = sin->sin_addr.s_addr;
1959 IFA_REMREF(ifa);
1960
0a7de745 1961 return 0;
6d2010ae
A
1962}
1963
1964/*
1965 * Join an IPv6 multicast group, possibly with a source.
1966 *
1967 * FIXME: The KAME use of the unspecified address (::)
1968 * to join *all* multicast groups is currently unsupported.
1969 */
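/*
 * Illustrative userland call that reaches this function (sketch only;
 * "s", "en0", and the group/source addresses are hypothetical):
 *
 *	struct group_source_req gsr = {};
 *	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
 *	gsr.gsr_interface = if_nametoindex("en0");
 *	grp->sin6_family = AF_INET6;
 *	grp->sin6_len = sizeof(struct sockaddr_in6);
 *	inet_pton(AF_INET6, "ff3e::8000:1234", &grp->sin6_addr);
 *	... fill gsr.gsr_source with the unicast source likewise ...
 *	setsockopt(s, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP, &gsr, sizeof(gsr));
 *
 * IPV6_JOIN_GROUP with a struct ipv6_mreq takes the any-source path in the
 * switch below.
 */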
1970static int
1971in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
1972{
0a7de745
A
1973 struct group_source_req gsr;
1974 struct sockaddr_in6 *gsa, *ssa;
1975 struct ifnet *ifp;
1976 struct in6_mfilter *imf;
1977 struct ip6_moptions *imo;
1978 struct in6_multi *inm = NULL;
1979 struct in6_msource *lims = NULL;
1980 size_t idx;
1981 int error, is_new;
1982 uint32_t scopeid = 0;
1983 struct mld_tparams mtp;
1984
1985 bzero(&mtp, sizeof(mtp));
6d2010ae
A
1986 ifp = NULL;
1987 imf = NULL;
1988 error = 0;
1989 is_new = 0;
1990
1991 memset(&gsr, 0, sizeof(struct group_source_req));
5ba3f43e
A
1992 gsa = (struct sockaddr_in6 *)&gsr.gsr_group;
1993 ssa = (struct sockaddr_in6 *)&gsr.gsr_source;
6d2010ae
A
1994
1995 /*
1996 * Chew everything into struct group_source_req.
1997 * Overwrite the port field if present, as the sockaddr
1998 * being copied in may be matched with a binary comparison.
1999 * Ignore passed-in scope ID.
2000 */
2001 switch (sopt->sopt_name) {
2002 case IPV6_JOIN_GROUP: {
2003 struct ipv6_mreq mreq;
6d2010ae
A
2004
2005 error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq),
2006 sizeof(struct ipv6_mreq));
0a7de745
A
2007 if (error) {
2008 return error;
2009 }
6d2010ae
A
2010 if (IN6_IS_ADDR_V4MAPPED(&mreq.ipv6mr_multiaddr)) {
2011 struct ip_mreq v4mreq;
2012 struct sockopt v4sopt;
2013
2014 v4mreq.imr_multiaddr.s_addr =
2015 mreq.ipv6mr_multiaddr.s6_addr32[3];
0a7de745 2016 if (mreq.ipv6mr_interface == 0) {
6d2010ae 2017 v4mreq.imr_interface.s_addr = INADDR_ANY;
0a7de745 2018 } else {
6d2010ae 2019 error = in6p_lookup_v4addr(&mreq, &v4mreq);
0a7de745
A
2020 }
2021 if (error) {
2022 return error;
2023 }
6d2010ae 2024 v4sopt.sopt_dir = SOPT_SET;
0a7de745 2025 v4sopt.sopt_level = sopt->sopt_level;
6d2010ae
A
2026 v4sopt.sopt_name = IP_ADD_MEMBERSHIP;
2027 v4sopt.sopt_val = CAST_USER_ADDR_T(&v4mreq);
2028 v4sopt.sopt_valsize = sizeof(v4mreq);
2029 v4sopt.sopt_p = kernproc;
2030
0a7de745 2031 return inp_join_group(inp, &v4sopt);
6d2010ae 2032 }
5ba3f43e
A
2033 gsa->sin6_family = AF_INET6;
2034 gsa->sin6_len = sizeof(struct sockaddr_in6);
2035 gsa->sin6_addr = mreq.ipv6mr_multiaddr;
6d2010ae 2036
0a7de745 2037 /* Only allow IPv6 multicast addresses */
5ba3f43e 2038 if (IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr) == 0) {
0a7de745 2039 return EINVAL;
6d2010ae
A
2040 }
2041
2042 if (mreq.ipv6mr_interface == 0) {
5ba3f43e 2043 ifp = in6p_lookup_mcast_ifp(inp, gsa);
6d2010ae
A
2044 } else {
2045 ifnet_head_lock_shared();
2046 if ((u_int)if_index < mreq.ipv6mr_interface) {
2047 ifnet_head_done();
0a7de745
A
2048 return EADDRNOTAVAIL;
2049 }
6d2010ae
A
2050 ifp = ifindex2ifnet[mreq.ipv6mr_interface];
2051 ifnet_head_done();
2052 }
39236c6e
A
2053 MLD_PRINTF(("%s: ipv6mr_interface = %d, ifp = 0x%llx\n",
2054 __func__, mreq.ipv6mr_interface,
2055 (uint64_t)VM_KERNEL_ADDRPERM(ifp)));
6d2010ae
A
2056 break;
2057 }
2058
2059 case MCAST_JOIN_GROUP:
2060 case MCAST_JOIN_SOURCE_GROUP:
2061 if (sopt->sopt_name == MCAST_JOIN_GROUP) {
2062 error = sooptcopyin(sopt, &gsr,
2063 sizeof(struct group_req),
2064 sizeof(struct group_req));
2065 } else if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) {
2066 error = sooptcopyin(sopt, &gsr,
2067 sizeof(struct group_source_req),
2068 sizeof(struct group_source_req));
2069 }
0a7de745
A
2070 if (error) {
2071 return error;
2072 }
6d2010ae 2073
5ba3f43e 2074 if (gsa->sin6_family != AF_INET6 ||
0a7de745
A
2075 gsa->sin6_len != sizeof(struct sockaddr_in6)) {
2076 return EINVAL;
2077 }
6d2010ae
A
2078
2079 if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) {
5ba3f43e 2080 if (ssa->sin6_family != AF_INET6 ||
0a7de745
A
2081 ssa->sin6_len != sizeof(struct sockaddr_in6)) {
2082 return EINVAL;
2083 }
2084 if (IN6_IS_ADDR_MULTICAST(&ssa->sin6_addr)) {
2085 return EINVAL;
2086 }
6d2010ae
A
2087 /*
2088 * TODO: Validate embedded scope ID in source
2089 * list entry against passed-in ifp, if and only
2090 * if source list filter entry is iface or node local.
2091 */
5ba3f43e
A
2092 in6_clearscope(&ssa->sin6_addr);
2093 ssa->sin6_port = 0;
2094 ssa->sin6_scope_id = 0;
6d2010ae
A
2095 }
2096
2097 ifnet_head_lock_shared();
2098 if (gsr.gsr_interface == 0 ||
2099 (u_int)if_index < gsr.gsr_interface) {
2100 ifnet_head_done();
0a7de745 2101 return EADDRNOTAVAIL;
6d2010ae
A
2102 }
2103 ifp = ifindex2ifnet[gsr.gsr_interface];
2104 ifnet_head_done();
2105 break;
2106
2107 default:
2108 MLD_PRINTF(("%s: unknown sopt_name %d\n",
2109 __func__, sopt->sopt_name));
0a7de745 2110 return EOPNOTSUPP;
6d2010ae
A
2111 }
2112
0a7de745
A
2113 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr)) {
2114 return EINVAL;
2115 }
6d2010ae 2116
0a7de745
A
2117 if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) {
2118 return EADDRNOTAVAIL;
2119 }
6d2010ae 2120
5ba3f43e
A
2121 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_mcast_join_total);
2122 /*
2123 * TBD: revisit the criteria for non-OS initiated joins
2124 */
2125 if (inp->inp_lport == htons(5353)) {
2126 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_mcast_join_os_total);
2127 }
2128
2129 gsa->sin6_port = 0;
2130 gsa->sin6_scope_id = 0;
6d2010ae
A
2131
2132 /*
2133 * Always set the scope zone ID on memberships created from userland.
2134 * Use the passed-in ifp to do this.
2135 */
5ba3f43e 2136 (void)in6_setscope(&gsa->sin6_addr, ifp, &scopeid);
6d2010ae
A
2137 /*
2138 * Some addresses are not valid without an embedded scopeid.
2139 * This check must be present because otherwise we will later hit
2140 * a VERIFY() in in6_mc_join().
2141 */
5ba3f43e
A
2142 if ((IN6_IS_ADDR_MC_LINKLOCAL(&gsa->sin6_addr) ||
2143 IN6_IS_ADDR_MC_INTFACELOCAL(&gsa->sin6_addr)) &&
0a7de745
A
2144 (scopeid == 0 || gsa->sin6_addr.s6_addr16[1] == 0)) {
2145 return EINVAL;
2146 }
6d2010ae
A
2147
2148 imo = in6p_findmoptions(inp);
0a7de745
A
2149 if (imo == NULL) {
2150 return ENOMEM;
2151 }
6d2010ae
A
2152
2153 IM6O_LOCK(imo);
5ba3f43e 2154 idx = im6o_match_group(imo, ifp, gsa);
6d2010ae
A
2155 if (idx == (size_t)-1) {
2156 is_new = 1;
2157 } else {
2158 inm = imo->im6o_membership[idx];
2159 imf = &imo->im6o_mfilters[idx];
5ba3f43e 2160 if (ssa->sin6_family != AF_UNSPEC) {
6d2010ae
A
2161 /*
2162 * MCAST_JOIN_SOURCE_GROUP on an exclusive membership
2163 * is an error. On an existing inclusive membership,
2164 * it just adds the source to the filter list.
2165 */
2166 if (imf->im6f_st[1] != MCAST_INCLUDE) {
2167 error = EINVAL;
2168 goto out_imo_locked;
2169 }
2170 /*
2171 * Throw out duplicates.
2172 *
2173 * XXX FIXME: This makes a naive assumption that
2174 * even if entries exist for *ssa in this imf,
2175 * they will be rejected as dupes, even if they
2176 * are not valid in the current mode (in-mode).
2177 *
2178 * in6_msource is transactioned just as for anything
2179 * else in SSM -- but note naive use of im6f_graft()
2180 * below for allocating new filter entries.
2181 *
2182 * This is only an issue if someone mixes the
2183 * full-state SSM API with the delta-based API,
2184 * which is discouraged in the relevant RFCs.
2185 */
5ba3f43e 2186 lims = im6o_match_source(imo, idx, ssa);
6d2010ae 2187 if (lims != NULL /*&&
0a7de745 2188 * lims->im6sl_st[1] == MCAST_INCLUDE*/) {
6d2010ae
A
2189 error = EADDRNOTAVAIL;
2190 goto out_imo_locked;
2191 }
2192 } else {
2193 /*
2194 * MCAST_JOIN_GROUP on an existing exclusive
2195 * membership is an error; return EADDRINUSE
2196 * to preserve 4.4BSD API idempotence, and
2197 * avoid tedious detour to code below.
2198 * NOTE: This is bending RFC 3678 a bit.
2199 *
2200 * On an existing inclusive membership, this is also
2201 * an error; if you want to change filter mode,
2202 * you must use the userland API setsourcefilter().
2203 * XXX We don't reject this for imf in UNDEFINED
2204 * state at t1, because allocation of a filter
2205 * is atomic with allocation of a membership.
2206 */
2207 error = EINVAL;
2208 /* See comments above for EADDRINUSE */
0a7de745 2209 if (imf->im6f_st[1] == MCAST_EXCLUDE) {
6d2010ae 2210 error = EADDRINUSE;
0a7de745 2211 }
6d2010ae
A
2212 goto out_imo_locked;
2213 }
2214 }
2215
2216 /*
2217 * Begin state merge transaction at socket layer.
2218 */
2219
2220 if (is_new) {
2221 if (imo->im6o_num_memberships == imo->im6o_max_memberships) {
f427ee49 2222 error = im6o_grow(imo);
0a7de745 2223 if (error) {
6d2010ae 2224 goto out_imo_locked;
0a7de745 2225 }
6d2010ae
A
2226 }
2227 /*
2228 * Allocate the new slot upfront so we can deal with
2229 * grafting the new source filter in same code path
2230 * as for join-source on existing membership.
2231 */
2232 idx = imo->im6o_num_memberships;
2233 imo->im6o_membership[idx] = NULL;
2234 imo->im6o_num_memberships++;
2235 VERIFY(imo->im6o_mfilters != NULL);
2236 imf = &imo->im6o_mfilters[idx];
2237 VERIFY(RB_EMPTY(&imf->im6f_sources));
2238 }
2239
2240 /*
2241 * Graft new source into filter list for this inpcb's
2242 * membership of the group. The in6_multi may not have
2243 * been allocated yet if this is a new membership, however,
2244 * the in_mfilter slot will be allocated and must be initialized.
2245 *
2246 * Note: Grafting of exclusive mode filters doesn't happen
2247 * in this path.
2248 * XXX: Should check for non-NULL lims (node exists but may
2249 * not be in-mode) for interop with full-state API.
2250 */
5ba3f43e 2251 if (ssa->sin6_family != AF_UNSPEC) {
6d2010ae
A
2252 /* Membership starts in IN mode */
2253 if (is_new) {
2254 MLD_PRINTF(("%s: new join w/source\n", __func__));
0a7de745 2255 im6f_init(imf, MCAST_UNDEFINED, MCAST_INCLUDE);
6d2010ae
A
2256 } else {
2257 MLD_PRINTF(("%s: %s source\n", __func__, "allow"));
2258 }
5ba3f43e 2259 lims = im6f_graft(imf, MCAST_INCLUDE, ssa);
6d2010ae
A
2260 if (lims == NULL) {
2261 MLD_PRINTF(("%s: merge imf state failed\n",
2262 __func__));
2263 error = ENOMEM;
2264 goto out_im6o_free;
2265 }
2266 } else {
2267 /* No address specified; Membership starts in EX mode */
2268 if (is_new) {
2269 MLD_PRINTF(("%s: new join w/o source\n", __func__));
2270 im6f_init(imf, MCAST_UNDEFINED, MCAST_EXCLUDE);
2271 }
2272 }
2273
2274 /*
2275 * Begin state merge transaction at MLD layer.
2276 */
2277
2278 if (is_new) {
39037602
A
2279 /*
2280 * See inp_join_group() for why we need to unlock
2281 */
2282 IM6O_ADDREF_LOCKED(imo);
2283 IM6O_UNLOCK(imo);
2284 socket_unlock(inp->inp_socket, 0);
2285
6d2010ae 2286 VERIFY(inm == NULL);
5ba3f43e 2287 error = in6_mc_join(ifp, &gsa->sin6_addr, imf, &inm, 0);
6d2010ae 2288 VERIFY(inm != NULL || error != 0);
39037602
A
2289
2290 socket_lock(inp->inp_socket, 0);
2291 IM6O_REMREF(imo);
2292 IM6O_LOCK(imo);
2293
0a7de745 2294 if (error) {
6d2010ae 2295 goto out_im6o_free;
0a7de745 2296 }
6d2010ae
A
2297 imo->im6o_membership[idx] = inm; /* from in6_mc_join() */
2298 } else {
2299 MLD_PRINTF(("%s: merge inm state\n", __func__));
2300 IN6M_LOCK(inm);
2301 error = in6m_merge(inm, imf);
2302 if (error) {
2303 MLD_PRINTF(("%s: failed to merge inm state\n",
2304 __func__));
2305 IN6M_UNLOCK(inm);
2306 goto out_im6f_rollback;
2307 }
2308 MLD_PRINTF(("%s: doing mld downcall\n", __func__));
39236c6e 2309 error = mld_change_state(inm, &mtp, 0);
6d2010ae
A
2310 IN6M_UNLOCK(inm);
2311 if (error) {
2312 MLD_PRINTF(("%s: failed mld downcall\n",
2313 __func__));
2314 goto out_im6f_rollback;
2315 }
2316 }
2317
2318out_im6f_rollback:
2319 if (error) {
2320 im6f_rollback(imf);
0a7de745 2321 if (is_new) {
6d2010ae 2322 im6f_purge(imf);
0a7de745 2323 } else {
6d2010ae 2324 im6f_reap(imf);
0a7de745 2325 }
6d2010ae
A
2326 } else {
2327 im6f_commit(imf);
2328 }
2329
2330out_im6o_free:
2331 if (error && is_new) {
2332 VERIFY(inm == NULL);
2333 imo->im6o_membership[idx] = NULL;
2334 --imo->im6o_num_memberships;
2335 }
2336
2337out_imo_locked:
2338 IM6O_UNLOCK(imo);
0a7de745 2339 IM6O_REMREF(imo); /* from in6p_findmoptions() */
39236c6e
A
2340
2341 /* schedule timer now that we've dropped the lock(s) */
2342 mld_set_timeout(&mtp);
2343
0a7de745 2344 return error;
6d2010ae
A
2345}
2346
2347/*
2348 * Leave an IPv6 multicast group on an inpcb, possibly with a source.
2349 */
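/*
 * Illustrative sketch (not part of this file; "s", "grp", and "idx" are
 * hypothetical): the any-source leave normally arrives as
 *
 *	struct ipv6_mreq mreq;
 *	mreq.ipv6mr_multiaddr = grp;
 *	mreq.ipv6mr_interface = idx;
 *	setsockopt(s, IPPROTO_IPV6, IPV6_LEAVE_GROUP, &mreq, sizeof(mreq));
 *
 * where grp is a struct in6_addr and idx an interface index, while
 * MCAST_LEAVE_SOURCE_GROUP removes a single source from an inclusive
 * membership instead of dropping the whole membership.
 */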
2350static int
2351in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
2352{
0a7de745
A
2353 struct ipv6_mreq mreq;
2354 struct group_source_req gsr;
2355 struct sockaddr_in6 *gsa, *ssa;
2356 struct ifnet *ifp;
2357 struct in6_mfilter *imf;
2358 struct ip6_moptions *imo;
2359 struct in6_msource *ims;
2360 struct in6_multi *inm = NULL;
2361 uint32_t ifindex = 0;
2362 size_t idx;
2363 int error, is_final;
2364 struct mld_tparams mtp;
2365
2366 bzero(&mtp, sizeof(mtp));
6d2010ae
A
2367 ifp = NULL;
2368 error = 0;
2369 is_final = 1;
2370
2371 memset(&gsr, 0, sizeof(struct group_source_req));
5ba3f43e
A
2372 gsa = (struct sockaddr_in6 *)&gsr.gsr_group;
2373 ssa = (struct sockaddr_in6 *)&gsr.gsr_source;
6d2010ae
A
2374
2375 /*
2376 * Chew everything passed in up into a struct group_source_req
2377 * as that is easier to process.
2378 * Note: Any embedded scope ID in the multicast group passed
2379 * in by userland is ignored, the interface index is the recommended
2380 * mechanism to specify an interface; see below.
2381 */
2382 switch (sopt->sopt_name) {
2383 case IPV6_LEAVE_GROUP: {
6d2010ae
A
2384 error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq),
2385 sizeof(struct ipv6_mreq));
0a7de745
A
2386 if (error) {
2387 return error;
2388 }
6d2010ae
A
2389 if (IN6_IS_ADDR_V4MAPPED(&mreq.ipv6mr_multiaddr)) {
2390 struct ip_mreq v4mreq;
2391 struct sockopt v4sopt;
2392
2393 v4mreq.imr_multiaddr.s_addr =
2394 mreq.ipv6mr_multiaddr.s6_addr32[3];
0a7de745 2395 if (mreq.ipv6mr_interface == 0) {
6d2010ae 2396 v4mreq.imr_interface.s_addr = INADDR_ANY;
0a7de745 2397 } else {
6d2010ae 2398 error = in6p_lookup_v4addr(&mreq, &v4mreq);
0a7de745
A
2399 }
2400 if (error) {
2401 return error;
2402 }
6d2010ae 2403 v4sopt.sopt_dir = SOPT_SET;
0a7de745 2404 v4sopt.sopt_level = sopt->sopt_level;
6d2010ae
A
2405 v4sopt.sopt_name = IP_DROP_MEMBERSHIP;
2406 v4sopt.sopt_val = CAST_USER_ADDR_T(&v4mreq);
2407 v4sopt.sopt_valsize = sizeof(v4mreq);
2408 v4sopt.sopt_p = kernproc;
2409
0a7de745 2410 return inp_leave_group(inp, &v4sopt);
6d2010ae 2411 }
5ba3f43e
A
2412 gsa->sin6_family = AF_INET6;
2413 gsa->sin6_len = sizeof(struct sockaddr_in6);
2414 gsa->sin6_addr = mreq.ipv6mr_multiaddr;
2415 gsa->sin6_port = 0;
2416 gsa->sin6_scope_id = 0;
6d2010ae 2417 ifindex = mreq.ipv6mr_interface;
0a7de745 2418 /* Only allow IPv6 multicast addresses */
5ba3f43e 2419 if (IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr) == 0) {
0a7de745 2420 return EINVAL;
6d2010ae
A
2421 }
2422 break;
2423 }
2424
2425 case MCAST_LEAVE_GROUP:
2426 case MCAST_LEAVE_SOURCE_GROUP:
2427 if (sopt->sopt_name == MCAST_LEAVE_GROUP) {
2428 error = sooptcopyin(sopt, &gsr,
2429 sizeof(struct group_req),
2430 sizeof(struct group_req));
2431 } else if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) {
2432 error = sooptcopyin(sopt, &gsr,
2433 sizeof(struct group_source_req),
2434 sizeof(struct group_source_req));
2435 }
0a7de745
A
2436 if (error) {
2437 return error;
2438 }
6d2010ae 2439
5ba3f43e 2440 if (gsa->sin6_family != AF_INET6 ||
0a7de745
A
2441 gsa->sin6_len != sizeof(struct sockaddr_in6)) {
2442 return EINVAL;
2443 }
6d2010ae 2444 if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) {
5ba3f43e 2445 if (ssa->sin6_family != AF_INET6 ||
0a7de745
A
2446 ssa->sin6_len != sizeof(struct sockaddr_in6)) {
2447 return EINVAL;
2448 }
2449 if (IN6_IS_ADDR_MULTICAST(&ssa->sin6_addr)) {
2450 return EINVAL;
2451 }
6d2010ae
A
2452 /*
2453 * TODO: Validate embedded scope ID in source
2454 * list entry against passed-in ifp, if and only
2455 * if source list filter entry is iface or node local.
2456 */
5ba3f43e 2457 in6_clearscope(&ssa->sin6_addr);
6d2010ae 2458 }
5ba3f43e
A
2459 gsa->sin6_port = 0;
2460 gsa->sin6_scope_id = 0;
6d2010ae
A
2461 ifindex = gsr.gsr_interface;
2462 break;
2463
2464 default:
2465 MLD_PRINTF(("%s: unknown sopt_name %d\n",
2466 __func__, sopt->sopt_name));
0a7de745 2467 return EOPNOTSUPP;
6d2010ae
A
2468 }
2469
0a7de745
A
2470 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr)) {
2471 return EINVAL;
2472 }
6d2010ae
A
2473
2474 /*
2475 * Validate interface index if provided. If no interface index
2476 * was provided separately, attempt to look the membership up
2477 * from the default scope as a last resort to disambiguate
2478 * the membership we are being asked to leave.
2479 * XXX SCOPE6 lock potentially taken here.
2480 */
2481 if (ifindex != 0) {
2482 ifnet_head_lock_shared();
2483 if ((u_int)if_index < ifindex) {
2484 ifnet_head_done();
0a7de745 2485 return EADDRNOTAVAIL;
6d2010ae
A
2486 }
2487 ifp = ifindex2ifnet[ifindex];
2488 ifnet_head_done();
0a7de745
A
2489 if (ifp == NULL) {
2490 return EADDRNOTAVAIL;
2491 }
5ba3f43e 2492 (void) in6_setscope(&gsa->sin6_addr, ifp, NULL);
6d2010ae 2493 } else {
5ba3f43e 2494 error = sa6_embedscope(gsa, ip6_use_defzone);
0a7de745
A
2495 if (error) {
2496 return EADDRNOTAVAIL;
2497 }
6d2010ae
A
2498 /*
2499 * Some badly behaved applications don't pass an ifindex
2500 * or a scope ID, which is an API violation. In this case,
2501 * perform a lookup as per a v6 join.
2502 *
2503 * XXX For now, stomp on zone ID for the corner case.
2504 * This is not the 'KAME way', but we need to see the ifp
2505 * directly until such time as this implementation is
2506 * refactored, assuming the scope IDs are the way to go.
2507 */
5ba3f43e 2508 ifindex = ntohs(gsa->sin6_addr.s6_addr16[1]);
6d2010ae
A
2509 if (ifindex == 0) {
2510 MLD_PRINTF(("%s: warning: no ifindex, looking up "
2511 "ifp for group %s.\n", __func__,
5ba3f43e
A
2512 ip6_sprintf(&gsa->sin6_addr)));
2513 ifp = in6p_lookup_mcast_ifp(inp, gsa);
6d2010ae 2514 } else {
0a7de745
A
2515 if (!IF_INDEX_IN_RANGE(ifindex)) {
2516 return EADDRNOTAVAIL;
2517 }
6d2010ae
A
2518 ifnet_head_lock_shared();
2519 ifp = ifindex2ifnet[ifindex];
2520 ifnet_head_done();
2521 }
0a7de745
A
2522 if (ifp == NULL) {
2523 return EADDRNOTAVAIL;
2524 }
6d2010ae
A
2525 }
2526
2527 VERIFY(ifp != NULL);
39236c6e
A
2528 MLD_PRINTF(("%s: ifp = 0x%llx\n", __func__,
2529 (uint64_t)VM_KERNEL_ADDRPERM(ifp)));
6d2010ae
A
2530
2531 /*
2532 * Find the membership in the membership array.
2533 */
2534 imo = in6p_findmoptions(inp);
0a7de745
A
2535 if (imo == NULL) {
2536 return ENOMEM;
2537 }
6d2010ae
A
2538
2539 IM6O_LOCK(imo);
5ba3f43e 2540 idx = im6o_match_group(imo, ifp, gsa);
6d2010ae
A
2541 if (idx == (size_t)-1) {
2542 error = EADDRNOTAVAIL;
2543 goto out_locked;
2544 }
2545 inm = imo->im6o_membership[idx];
2546 imf = &imo->im6o_mfilters[idx];
2547
0a7de745 2548 if (ssa->sin6_family != AF_UNSPEC) {
6d2010ae 2549 is_final = 0;
0a7de745 2550 }
6d2010ae
A
2551
2552 /*
2553 * Begin state merge transaction at socket layer.
2554 */
2555
2556 /*
2557 * If we were instructed only to leave a given source, do so.
2558 * MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships.
2559 */
2560 if (is_final) {
2561 im6f_leave(imf);
2562 } else {
2563 if (imf->im6f_st[0] == MCAST_EXCLUDE) {
2564 error = EADDRNOTAVAIL;
2565 goto out_locked;
2566 }
5ba3f43e 2567 ims = im6o_match_source(imo, idx, ssa);
6d2010ae 2568 if (ims == NULL) {
39236c6e 2569 MLD_PRINTF(("%s: source %s %spresent\n", __func__,
5ba3f43e 2570 ip6_sprintf(&ssa->sin6_addr),
6d2010ae
A
2571 "not "));
2572 error = EADDRNOTAVAIL;
2573 goto out_locked;
2574 }
2575 MLD_PRINTF(("%s: %s source\n", __func__, "block"));
5ba3f43e 2576 error = im6f_prune(imf, ssa);
6d2010ae
A
2577 if (error) {
2578 MLD_PRINTF(("%s: merge imf state failed\n",
2579 __func__));
2580 goto out_locked;
2581 }
2582 }
2583
2584 /*
2585 * Begin state merge transaction at MLD layer.
2586 */
2587
2588 if (is_final) {
2589 /*
2590 * Give up the multicast address record to which
2591 * the membership points. Reference held in im6o
2592 * will be released below.
2593 */
2594 (void) in6_mc_leave(inm, imf);
2595 } else {
2596 MLD_PRINTF(("%s: merge inm state\n", __func__));
2597 IN6M_LOCK(inm);
2598 error = in6m_merge(inm, imf);
2599 if (error) {
2600 MLD_PRINTF(("%s: failed to merge inm state\n",
2601 __func__));
2602 IN6M_UNLOCK(inm);
2603 goto out_im6f_rollback;
2604 }
2605
2606 MLD_PRINTF(("%s: doing mld downcall\n", __func__));
39236c6e 2607 error = mld_change_state(inm, &mtp, 0);
6d2010ae
A
2608 if (error) {
2609 MLD_PRINTF(("%s: failed mld downcall\n", __func__));
2610 }
2611 IN6M_UNLOCK(inm);
2612 }
2613
2614out_im6f_rollback:
0a7de745 2615 if (error) {
6d2010ae 2616 im6f_rollback(imf);
0a7de745 2617 } else {
6d2010ae 2618 im6f_commit(imf);
0a7de745 2619 }
6d2010ae
A
2620
2621 im6f_reap(imf);
2622
2623 if (is_final) {
2624 /* Remove the gap in the membership array. */
2625 VERIFY(inm == imo->im6o_membership[idx]);
2626 imo->im6o_membership[idx] = NULL;
39037602
A
2627
2628 /*
2629 * See inp_join_group() for why we need to unlock
2630 */
2631 IM6O_ADDREF_LOCKED(imo);
2632 IM6O_UNLOCK(imo);
2633 socket_unlock(inp->inp_socket, 0);
2634
6d2010ae 2635 IN6M_REMREF(inm);
39037602
A
2636
2637 socket_lock(inp->inp_socket, 0);
2638 IM6O_REMREF(imo);
2639 IM6O_LOCK(imo);
2640
6d2010ae 2641 for (++idx; idx < imo->im6o_num_memberships; ++idx) {
0a7de745
A
2642 imo->im6o_membership[idx - 1] = imo->im6o_membership[idx];
2643 imo->im6o_mfilters[idx - 1] = imo->im6o_mfilters[idx];
6d2010ae
A
2644 }
2645 imo->im6o_num_memberships--;
2646 }
2647
2648out_locked:
2649 IM6O_UNLOCK(imo);
0a7de745 2650 IM6O_REMREF(imo); /* from in6p_findmoptions() */
39236c6e
A
2651
2652 /* schedule timer now that we've dropped the lock(s) */
2653 mld_set_timeout(&mtp);
2654
0a7de745 2655 return error;
6d2010ae
A
2656}
2657
2658/*
2659 * Select the interface for transmitting IPv6 multicast datagrams.
2660 *
2661 * Either an instance of struct in6_addr or an instance of struct ipv6_mreqn
2662 * may be passed to this socket option. An address of in6addr_any or an
2663 * interface index of 0 is used to remove a previous selection.
2664 * When no interface is selected, one is chosen for every send.
2665 */
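/*
 * Illustrative sketch (hypothetical descriptor "s"): userland selects the
 * outgoing interface by index, e.g.
 *
 *	u_int idx = if_nametoindex("en0");
 *	setsockopt(s, IPPROTO_IPV6, IPV6_MULTICAST_IF, &idx, sizeof(idx));
 *
 * matching the sizeof(u_int) check below.
 */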
2666static int
2667in6p_set_multicast_if(struct inpcb *inp, struct sockopt *sopt)
2668{
0a7de745
A
2669 struct ifnet *ifp;
2670 struct ip6_moptions *imo;
2671 u_int ifindex;
2672 int error;
6d2010ae 2673
0a7de745
A
2674 if (sopt->sopt_valsize != sizeof(u_int)) {
2675 return EINVAL;
2676 }
6d2010ae
A
2677
2678 error = sooptcopyin(sopt, &ifindex, sizeof(u_int), sizeof(u_int));
0a7de745
A
2679 if (error) {
2680 return error;
2681 }
6d2010ae
A
2682
2683 ifnet_head_lock_shared();
2684 if ((u_int)if_index < ifindex) {
2685 ifnet_head_done();
0a7de745 2686 return EINVAL;
6d2010ae
A
2687 }
2688
2689 ifp = ifindex2ifnet[ifindex];
2690 ifnet_head_done();
0a7de745
A
2691 if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) {
2692 return EADDRNOTAVAIL;
2693 }
6d2010ae
A
2694
2695 imo = in6p_findmoptions(inp);
0a7de745
A
2696 if (imo == NULL) {
2697 return ENOMEM;
2698 }
6d2010ae
A
2699
2700 IM6O_LOCK(imo);
2701 imo->im6o_multicast_ifp = ifp;
2702 IM6O_UNLOCK(imo);
0a7de745 2703 IM6O_REMREF(imo); /* from in6p_findmoptions() */
6d2010ae 2704
0a7de745 2705 return 0;
6d2010ae
A
2706}
2707
2708/*
2709 * Atomically set source filters on a socket for an IPv6 multicast group.
2710 *
2711 */
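/*
 * Illustrative sketch (not part of this file): the full-state set that
 * lands here is typically issued through the setsourcefilter() wrapper
 * mentioned elsewhere in this file, which builds an __msfilterreq-style
 * request (msfr_ifindex, msfr_group, msfr_fmode, msfr_nsrcs, msfr_srcs)
 * and hands it down as setsockopt(s, IPPROTO_IPV6, IPV6_MSFILTER, ...).
 */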
2712static int
2713in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
2714{
0a7de745
A
2715 struct __msfilterreq64 msfr = {}, msfr64;
2716 struct __msfilterreq32 msfr32;
2717 struct sockaddr_in6 *gsa;
2718 struct ifnet *ifp;
2719 struct in6_mfilter *imf;
2720 struct ip6_moptions *imo;
2721 struct in6_multi *inm;
2722 size_t idx;
2723 int error;
2724 user_addr_t tmp_ptr;
2725 struct mld_tparams mtp;
2726
2727 bzero(&mtp, sizeof(mtp));
6d2010ae
A
2728
2729 if (IS_64BIT_PROCESS(current_proc())) {
2730 error = sooptcopyin(sopt, &msfr64,
2731 sizeof(struct __msfilterreq64),
2732 sizeof(struct __msfilterreq64));
0a7de745
A
2733 if (error) {
2734 return error;
2735 }
6d2010ae 2736 /* we never use msfr.msfr_srcs; */
d9a64523 2737 memcpy(&msfr, &msfr64, sizeof(msfr64));
6d2010ae
A
2738 } else {
2739 error = sooptcopyin(sopt, &msfr32,
2740 sizeof(struct __msfilterreq32),
2741 sizeof(struct __msfilterreq32));
0a7de745
A
2742 if (error) {
2743 return error;
2744 }
6d2010ae 2745 /* we never use msfr.msfr_srcs; */
d9a64523 2746 memcpy(&msfr, &msfr32, sizeof(msfr32));
6d2010ae
A
2747 }
2748
316670eb 2749 if ((size_t) msfr.msfr_nsrcs >
0a7de745 2750 UINT32_MAX / sizeof(struct sockaddr_storage)) {
39236c6e 2751 msfr.msfr_nsrcs = UINT32_MAX / sizeof(struct sockaddr_storage);
0a7de745 2752 }
316670eb 2753
0a7de745
A
2754 if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) {
2755 return ENOBUFS;
2756 }
6d2010ae
A
2757
2758 if (msfr.msfr_fmode != MCAST_EXCLUDE &&
0a7de745
A
2759 msfr.msfr_fmode != MCAST_INCLUDE) {
2760 return EINVAL;
2761 }
6d2010ae
A
2762
2763 if (msfr.msfr_group.ss_family != AF_INET6 ||
0a7de745
A
2764 msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) {
2765 return EINVAL;
2766 }
6d2010ae 2767
5ba3f43e 2768 gsa = (struct sockaddr_in6 *)&msfr.msfr_group;
0a7de745
A
2769 if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr)) {
2770 return EINVAL;
2771 }
6d2010ae 2772
0a7de745 2773 gsa->sin6_port = 0; /* ignore port */
6d2010ae
A
2774
2775 ifnet_head_lock_shared();
2776 if (msfr.msfr_ifindex == 0 || (u_int)if_index < msfr.msfr_ifindex) {
2777 ifnet_head_done();
0a7de745 2778 return EADDRNOTAVAIL;
6d2010ae
A
2779 }
2780 ifp = ifindex2ifnet[msfr.msfr_ifindex];
2781 ifnet_head_done();
0a7de745
A
2782 if (ifp == NULL) {
2783 return EADDRNOTAVAIL;
2784 }
6d2010ae 2785
5ba3f43e 2786 (void)in6_setscope(&gsa->sin6_addr, ifp, NULL);
6d2010ae
A
2787
2788 /*
2789 * Take the INP write lock.
2790 * Check if this socket is a member of this group.
2791 */
2792 imo = in6p_findmoptions(inp);
0a7de745
A
2793 if (imo == NULL) {
2794 return ENOMEM;
2795 }
6d2010ae
A
2796
2797 IM6O_LOCK(imo);
5ba3f43e 2798 idx = im6o_match_group(imo, ifp, gsa);
6d2010ae
A
2799 if (idx == (size_t)-1 || imo->im6o_mfilters == NULL) {
2800 error = EADDRNOTAVAIL;
2801 goto out_imo_locked;
2802 }
2803 inm = imo->im6o_membership[idx];
2804 imf = &imo->im6o_mfilters[idx];
2805
2806 /*
2807 * Begin state merge transaction at socket layer.
2808 */
2809
f427ee49 2810 imf->im6f_st[1] = (uint8_t)msfr.msfr_fmode;
6d2010ae
A
2811
2812 /*
2813 * Apply any new source filters, if present.
2814 * Make a kernel copy of the user-space source vector so
2815 * that we can bring it in with a single copyin. This
2816 * allows us to deal with page faults up-front.
2817 */
2818 if (msfr.msfr_nsrcs > 0) {
0a7de745
A
2819 struct in6_msource *lims;
2820 struct sockaddr_in6 *psin;
2821 struct sockaddr_storage *kss, *pkss;
2822 unsigned int i;
6d2010ae 2823
0a7de745 2824 if (IS_64BIT_PROCESS(current_proc())) {
f427ee49 2825 tmp_ptr = (user_addr_t)msfr64.msfr_srcs;
0a7de745 2826 } else {
6d2010ae 2827 tmp_ptr = CAST_USER_ADDR_T(msfr32.msfr_srcs);
0a7de745 2828 }
6d2010ae
A
2829
2830 MLD_PRINTF(("%s: loading %lu source list entries\n",
2831 __func__, (unsigned long)msfr.msfr_nsrcs));
316670eb 2832 kss = _MALLOC((size_t) msfr.msfr_nsrcs * sizeof(*kss),
6d2010ae
A
2833 M_TEMP, M_WAITOK);
2834 if (kss == NULL) {
2835 error = ENOMEM;
2836 goto out_imo_locked;
2837 }
2838
2839 error = copyin(tmp_ptr, kss,
316670eb 2840 (size_t) msfr.msfr_nsrcs * sizeof(*kss));
6d2010ae
A
2841 if (error) {
2842 FREE(kss, M_TEMP);
2843 goto out_imo_locked;
2844 }
2845
2846 /*
2847 * Mark all source filters as UNDEFINED at t1.
2848 * Restore new group filter mode, as im6f_leave()
2849 * will set it to INCLUDE.
2850 */
2851 im6f_leave(imf);
f427ee49 2852 imf->im6f_st[1] = (uint8_t)msfr.msfr_fmode;
6d2010ae
A
2853
2854 /*
2855 * Update socket layer filters at t1, lazy-allocating
2856 * new entries. This saves a bunch of memory at the
2857 * cost of one RB_FIND() per source entry; duplicate
2858 * entries in the msfr_srcs vector are ignored.
2859 * If we encounter an error, rollback transaction.
2860 *
2861 * XXX This too could be replaced with a set-symmetric
2862 * difference like loop to avoid walking from root
2863 * every time, as the key space is common.
2864 */
2865 for (i = 0, pkss = kss; i < msfr.msfr_nsrcs; i++, pkss++) {
2866 psin = (struct sockaddr_in6 *)pkss;
2867 if (psin->sin6_family != AF_INET6) {
2868 error = EAFNOSUPPORT;
2869 break;
2870 }
2871 if (psin->sin6_len != sizeof(struct sockaddr_in6)) {
2872 error = EINVAL;
2873 break;
2874 }
2875 if (IN6_IS_ADDR_MULTICAST(&psin->sin6_addr)) {
2876 error = EINVAL;
2877 break;
2878 }
2879 /*
2880 * TODO: Validate embedded scope ID in source
2881 * list entry against passed-in ifp, if and only
2882 * if source list filter entry is iface or node local.
2883 */
2884 in6_clearscope(&psin->sin6_addr);
2885 error = im6f_get_source(imf, psin, &lims);
0a7de745 2886 if (error) {
6d2010ae 2887 break;
0a7de745 2888 }
6d2010ae
A
2889 lims->im6sl_st[1] = imf->im6f_st[1];
2890 }
2891 FREE(kss, M_TEMP);
2892 }
2893
0a7de745 2894 if (error) {
6d2010ae 2895 goto out_im6f_rollback;
0a7de745 2896 }
6d2010ae
A
2897
2898 /*
2899 * Begin state merge transaction at MLD layer.
2900 */
2901 IN6M_LOCK(inm);
2902 MLD_PRINTF(("%s: merge inm state\n", __func__));
2903 error = in6m_merge(inm, imf);
2904 if (error) {
2905 MLD_PRINTF(("%s: failed to merge inm state\n", __func__));
2906 IN6M_UNLOCK(inm);
2907 goto out_im6f_rollback;
2908 }
2909
2910 MLD_PRINTF(("%s: doing mld downcall\n", __func__));
39236c6e 2911 error = mld_change_state(inm, &mtp, 0);
6d2010ae
A
2912 IN6M_UNLOCK(inm);
2913#if MLD_DEBUG
0a7de745 2914 if (error) {
6d2010ae 2915 MLD_PRINTF(("%s: failed mld downcall\n", __func__));
0a7de745 2916 }
6d2010ae
A
2917#endif
2918
2919out_im6f_rollback:
0a7de745 2920 if (error) {
6d2010ae 2921 im6f_rollback(imf);
0a7de745 2922 } else {
6d2010ae 2923 im6f_commit(imf);
0a7de745 2924 }
6d2010ae
A
2925
2926 im6f_reap(imf);
2927
2928out_imo_locked:
2929 IM6O_UNLOCK(imo);
0a7de745 2930 IM6O_REMREF(imo); /* from in6p_findmoptions() */
6d2010ae 2931
39236c6e
A
2932 /* schedule timer now that we've dropped the lock(s) */
2933 mld_set_timeout(&mtp);
2934
0a7de745 2935 return error;
6d2010ae
A
2936}
2937
2938/*
2939 * Set the IPv6 multicast options in response to user setsockopt().
2940 *
2941 * Many of the socket options handled in this function duplicate the
2942 * functionality of socket options in the regular unicast API. However,
2943 * it is not possible to merge the duplicate code, because the idempotence
2944 * of the IPv6 multicast part of the BSD Sockets API must be preserved;
2945 * the effects of these options must be treated as separate and distinct.
2946 *
2947 */
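/*
 * Illustrative sketch (hypothetical descriptor "s"): e.g. limiting
 * multicast transmissions to the local link with
 *
 *	int hlim = 1;
 *	setsockopt(s, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, &hlim, sizeof(hlim));
 *
 * Note that IPV6_MULTICAST_HOPS takes an int here, while
 * IPV6_MULTICAST_LOOP below takes a u_int restricted to 0 or 1.
 */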
2948int
2949ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt)
2950{
0a7de745
A
2951 struct ip6_moptions *im6o;
2952 int error;
6d2010ae
A
2953
2954 error = 0;
2955
2956 /*
2957 * If the socket is not of type SOCK_RAW or SOCK_DGRAM,
2958 * or is a divert socket, reject it.
2959 */
39236c6e
A
2960 if (SOCK_PROTO(inp->inp_socket) == IPPROTO_DIVERT ||
2961 (SOCK_TYPE(inp->inp_socket) != SOCK_RAW &&
0a7de745
A
2962 SOCK_TYPE(inp->inp_socket) != SOCK_DGRAM)) {
2963 return EOPNOTSUPP;
2964 }
6d2010ae
A
2965
2966 switch (sopt->sopt_name) {
2967 case IPV6_MULTICAST_IF:
2968 error = in6p_set_multicast_if(inp, sopt);
2969 break;
2970
2971 case IPV6_MULTICAST_HOPS: {
2972 int hlim;
2973
2974 if (sopt->sopt_valsize != sizeof(int)) {
2975 error = EINVAL;
2976 break;
2977 }
2978 error = sooptcopyin(sopt, &hlim, sizeof(hlim), sizeof(int));
0a7de745 2979 if (error) {
6d2010ae 2980 break;
0a7de745 2981 }
cb323159 2982 if (hlim < -1 || hlim > IPV6_MAXHLIM) {
6d2010ae
A
2983 error = EINVAL;
2984 break;
2985 } else if (hlim == -1) {
2986 hlim = ip6_defmcasthlim;
2987 }
2988 im6o = in6p_findmoptions(inp);
2989 if (im6o == NULL) {
2990 error = ENOMEM;
2991 break;
2992 }
2993 IM6O_LOCK(im6o);
f427ee49 2994 im6o->im6o_multicast_hlim = (u_char)hlim;
6d2010ae 2995 IM6O_UNLOCK(im6o);
0a7de745 2996 IM6O_REMREF(im6o); /* from in6p_findmoptions() */
6d2010ae
A
2997 break;
2998 }
2999
3000 case IPV6_MULTICAST_LOOP: {
3001 u_int loop;
3002
3003 /*
3004 * Set the loopback flag for outgoing multicast packets.
3005 * Must be zero or one.
3006 */
3007 if (sopt->sopt_valsize != sizeof(u_int)) {
3008 error = EINVAL;
3009 break;
3010 }
3011 error = sooptcopyin(sopt, &loop, sizeof(u_int), sizeof(u_int));
0a7de745 3012 if (error) {
6d2010ae 3013 break;
0a7de745 3014 }
6d2010ae
A
3015 if (loop > 1) {
3016 error = EINVAL;
3017 break;
3018 }
3019 im6o = in6p_findmoptions(inp);
3020 if (im6o == NULL) {
3021 error = ENOMEM;
3022 break;
3023 }
3024 IM6O_LOCK(im6o);
f427ee49 3025 im6o->im6o_multicast_loop = (u_char)loop;
6d2010ae 3026 IM6O_UNLOCK(im6o);
0a7de745 3027 IM6O_REMREF(im6o); /* from in6p_findmoptions() */
6d2010ae
A
3028 break;
3029 }
3030
3031 case IPV6_JOIN_GROUP:
3032 case MCAST_JOIN_GROUP:
3033 case MCAST_JOIN_SOURCE_GROUP:
3034 error = in6p_join_group(inp, sopt);
3035 break;
3036
3037 case IPV6_LEAVE_GROUP:
3038 case MCAST_LEAVE_GROUP:
3039 case MCAST_LEAVE_SOURCE_GROUP:
3040 error = in6p_leave_group(inp, sopt);
3041 break;
3042
3043 case MCAST_BLOCK_SOURCE:
3044 case MCAST_UNBLOCK_SOURCE:
3045 error = in6p_block_unblock_source(inp, sopt);
3046 break;
3047
3048 case IPV6_MSFILTER:
3049 error = in6p_set_source_filters(inp, sopt);
3050 break;
3051
3052 default:
3053 error = EOPNOTSUPP;
3054 break;
3055 }
3056
0a7de745 3057 return error;
6d2010ae
A
3058}
3059/*
3060 * Expose MLD's multicast filter mode and source list(s) to userland,
3061 * keyed by (ifindex, group).
3062 * The filter mode is written out as a uint32_t, followed by
3063 * 0..n of struct in6_addr.
3064 * For use by ifmcstat(8).
3065 */
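/*
 * Illustrative MIB layout (sketch): the handler expects name[] to contain
 * five ints -- name[0] the interface index, name[1..4] the 128-bit group
 * address -- so a reader such as ifmcstat(8) issues sysctl() with that
 * 5-element name under the node this handler is registered on and parses
 * a uint32_t filter mode followed by 0..n struct in6_addr sources.
 */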
3066static int
3067sysctl_ip6_mcast_filters SYSCTL_HANDLER_ARGS
3068{
3069#pragma unused(oidp)
3070
0a7de745
A
3071 struct in6_addr mcaddr;
3072 struct in6_addr src;
3073 struct ifnet *ifp;
3074 struct in6_multi *inm;
3075 struct in6_multistep step;
3076 struct ip6_msource *ims;
3077 int *name;
3078 int retval = 0;
3079 u_int namelen;
3080 uint32_t fmode, ifindex;
6d2010ae
A
3081
3082 name = (int *)arg1;
3083 namelen = arg2;
3084
0a7de745
A
3085 if (req->newptr != USER_ADDR_NULL) {
3086 return EPERM;
3087 }
6d2010ae
A
3088
3089 /* int: ifindex + 4 * 32 bits of IPv6 address */
0a7de745
A
3090 if (namelen != 5) {
3091 return EINVAL;
3092 }
6d2010ae
A
3093
3094 ifindex = name[0];
3095 ifnet_head_lock_shared();
3096 if (ifindex <= 0 || ifindex > (u_int)if_index) {
3097 MLD_PRINTF(("%s: ifindex %u out of range\n",
3098 __func__, ifindex));
3099 ifnet_head_done();
0a7de745 3100 return ENOENT;
6d2010ae
A
3101 }
3102
3103 memcpy(&mcaddr, &name[1], sizeof(struct in6_addr));
3104 if (!IN6_IS_ADDR_MULTICAST(&mcaddr)) {
3105 MLD_PRINTF(("%s: group %s is not multicast\n",
3106 __func__, ip6_sprintf(&mcaddr)));
3107 ifnet_head_done();
0a7de745 3108 return EINVAL;
6d2010ae
A
3109 }
3110
3111 ifp = ifindex2ifnet[ifindex];
3112 ifnet_head_done();
3113 if (ifp == NULL) {
3114 MLD_PRINTF(("%s: no ifp for ifindex %u\n", __func__, ifindex));
0a7de745 3115 return ENOENT;
6d2010ae
A
3116 }
3117 /*
3118 * Internal MLD lookups require that scope/zone ID is set.
3119 */
3120 (void)in6_setscope(&mcaddr, ifp, NULL);
3121
3122 in6_multihead_lock_shared();
3123 IN6_FIRST_MULTI(step, inm);
3124 while (inm != NULL) {
3125 IN6M_LOCK(inm);
0a7de745 3126 if (inm->in6m_ifp != ifp) {
6d2010ae 3127 goto next;
0a7de745 3128 }
6d2010ae 3129
0a7de745 3130 if (!IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, &mcaddr)) {
6d2010ae 3131 goto next;
0a7de745 3132 }
6d2010ae
A
3133
3134 fmode = inm->in6m_st[1].iss_fmode;
3135 retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t));
3136 if (retval != 0) {
3137 IN6M_UNLOCK(inm);
0a7de745 3138 break; /* abort */
6d2010ae
A
3139 }
3140 RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) {
39236c6e
A
3141 MLD_PRINTF(("%s: visit node 0x%llx\n", __func__,
3142 (uint64_t)VM_KERNEL_ADDRPERM(ims)));
6d2010ae
A
3143 /*
3144 * Only copy-out sources which are in-mode.
3145 */
3146 if (fmode != im6s_get_mode(inm, ims, 1)) {
3147 MLD_PRINTF(("%s: skip non-in-mode\n",
3148 __func__));
3149 continue; /* process next source */
3150 }
3151 src = ims->im6s_addr;
3152 retval = SYSCTL_OUT(req, &src, sizeof(struct in6_addr));
0a7de745
A
3153 if (retval != 0) {
3154 break; /* process next inm */
3155 }
6d2010ae
A
3156 }
3157next:
3158 IN6M_UNLOCK(inm);
3159 IN6_NEXT_MULTI(step, inm);
3160 }
3161 in6_multihead_lock_done();
3162
0a7de745 3163 return retval;
6d2010ae
A
3164}
3165
3166void
3167in6_multi_init(void)
3168{
0a7de745 3169 PE_parse_boot_argn("ifa_debug", &in6m_debug, sizeof(in6m_debug));
6d2010ae
A
3170
3171 /* Setup lock group and attribute for in6_multihead */
3172 in6_multihead_lock_grp_attr = lck_grp_attr_alloc_init();
3173 in6_multihead_lock_grp = lck_grp_alloc_init("in6_multihead",
3174 in6_multihead_lock_grp_attr);
3175 in6_multihead_lock_attr = lck_attr_alloc_init();
3176 lck_rw_init(&in6_multihead_lock, in6_multihead_lock_grp,
3177 in6_multihead_lock_attr);
3178
3179 lck_mtx_init(&in6m_trash_lock, in6_multihead_lock_grp,
3180 in6_multihead_lock_attr);
3181 TAILQ_INIT(&in6m_trash_head);
3182
f427ee49 3183 vm_size_t in6m_size = (in6m_debug == 0) ? sizeof(struct in6_multi) :
0a7de745 3184 sizeof(struct in6_multi_dbg);
f427ee49 3185 in6m_zone = zone_create(IN6M_ZONE_NAME, in6m_size, ZC_ZFREE_CLEARMEM);
6d2010ae
A
3186}
3187
3188static struct in6_multi *
f427ee49 3189in6_multi_alloc(zalloc_flags_t how)
6d2010ae
A
3190{
3191 struct in6_multi *in6m;
3192
f427ee49 3193 in6m = zalloc_flags(in6m_zone, how | Z_ZERO);
6d2010ae 3194 if (in6m != NULL) {
6d2010ae
A
3195 lck_mtx_init(&in6m->in6m_lock, in6_multihead_lock_grp,
3196 in6_multihead_lock_attr);
3197 in6m->in6m_debug |= IFD_ALLOC;
3198 if (in6m_debug != 0) {
3199 in6m->in6m_debug |= IFD_DEBUG;
3200 in6m->in6m_trace = in6m_trace;
3201 }
3202 }
0a7de745 3203 return in6m;
6d2010ae
A
3204}
3205
3206static void
3207in6_multi_free(struct in6_multi *in6m)
3208{
3209 IN6M_LOCK(in6m);
3210 if (in6m->in6m_debug & IFD_ATTACHED) {
3211 panic("%s: attached in6m=%p is being freed", __func__, in6m);
3212 /* NOTREACHED */
3213 } else if (in6m->in6m_ifma != NULL) {
3214 panic("%s: ifma not NULL for in6m=%p", __func__, in6m);
3215 /* NOTREACHED */
3216 } else if (!(in6m->in6m_debug & IFD_ALLOC)) {
3217 panic("%s: in6m %p cannot be freed", __func__, in6m);
3218 /* NOTREACHED */
3219 } else if (in6m->in6m_refcount != 0) {
3220 panic("%s: non-zero refcount in6m=%p", __func__, in6m);
3221 /* NOTREACHED */
3222 } else if (in6m->in6m_reqcnt != 0) {
3223 panic("%s: non-zero reqcnt in6m=%p", __func__, in6m);
3224 /* NOTREACHED */
3225 }
3226
3227 /* Free any pending MLDv2 state-change records */
3228 IF_DRAIN(&in6m->in6m_scq);
3229
3230 in6m->in6m_debug &= ~IFD_ALLOC;
3231 if ((in6m->in6m_debug & (IFD_DEBUG | IFD_TRASHED)) ==
3232 (IFD_DEBUG | IFD_TRASHED)) {
3233 lck_mtx_lock(&in6m_trash_lock);
3234 TAILQ_REMOVE(&in6m_trash_head, (struct in6_multi_dbg *)in6m,
3235 in6m_trash_link);
3236 lck_mtx_unlock(&in6m_trash_lock);
3237 in6m->in6m_debug &= ~IFD_TRASHED;
3238 }
3239 IN6M_UNLOCK(in6m);
3240
3241 lck_mtx_destroy(&in6m->in6m_lock, in6_multihead_lock_grp);
3242 zfree(in6m_zone, in6m);
3243}
3244
3245static void
3246in6_multi_attach(struct in6_multi *in6m)
3247{
3248 in6_multihead_lock_assert(LCK_RW_ASSERT_EXCLUSIVE);
3249 IN6M_LOCK_ASSERT_HELD(in6m);
3250
3251 if (in6m->in6m_debug & IFD_ATTACHED) {
3252 panic("%s: Attempt to attach an already attached in6m=%p",
3253 __func__, in6m);
3254 /* NOTREACHED */
6d2010ae
A
3255 }
3256
3257 in6m->in6m_reqcnt++;
3258 VERIFY(in6m->in6m_reqcnt == 1);
3259 IN6M_ADDREF_LOCKED(in6m);
3260 in6m->in6m_debug |= IFD_ATTACHED;
3261 /*
3262 * Reattach case: If debugging is enabled, take it
3263 * out of the trash list and clear IFD_TRASHED.
3264 */
3265 if ((in6m->in6m_debug & (IFD_DEBUG | IFD_TRASHED)) ==
3266 (IFD_DEBUG | IFD_TRASHED)) {
3267 /* Become a regular mutex, just in case */
3268 IN6M_CONVERT_LOCK(in6m);
3269 lck_mtx_lock(&in6m_trash_lock);
3270 TAILQ_REMOVE(&in6m_trash_head, (struct in6_multi_dbg *)in6m,
3271 in6m_trash_link);
3272 lck_mtx_unlock(&in6m_trash_lock);
3273 in6m->in6m_debug &= ~IFD_TRASHED;
3274 }
3275
3276 LIST_INSERT_HEAD(&in6_multihead, in6m, in6m_entry);
3277}
3278
3279int
3280in6_multi_detach(struct in6_multi *in6m)
3281{
3282 in6_multihead_lock_assert(LCK_RW_ASSERT_EXCLUSIVE);
3283 IN6M_LOCK_ASSERT_HELD(in6m);
3284
3285 if (in6m->in6m_reqcnt == 0) {
3286 panic("%s: in6m=%p negative reqcnt", __func__, in6m);
3287 /* NOTREACHED */
3288 }
3289
3290 --in6m->in6m_reqcnt;
0a7de745
A
3291 if (in6m->in6m_reqcnt > 0) {
3292 return 0;
3293 }
6d2010ae
A
3294
3295 if (!(in6m->in6m_debug & IFD_ATTACHED)) {
3296 panic("%s: Attempt to detach an unattached record in6m=%p",
3297 __func__, in6m);
3298 /* NOTREACHED */
3299 } else if (in6m->in6m_debug & IFD_TRASHED) {
3300 panic("%s: in6m %p is already in trash list", __func__, in6m);
3301 /* NOTREACHED */
3302 }
3303
3304 /*
3305 * NOTE: Caller calls IFMA_REMREF
3306 */
3307 in6m->in6m_debug &= ~IFD_ATTACHED;
3308 LIST_REMOVE(in6m, in6m_entry);
3309
3310 if (in6m->in6m_debug & IFD_DEBUG) {
3311 /* Become a regular mutex, just in case */
3312 IN6M_CONVERT_LOCK(in6m);
3313 lck_mtx_lock(&in6m_trash_lock);
3314 TAILQ_INSERT_TAIL(&in6m_trash_head,
3315 (struct in6_multi_dbg *)in6m, in6m_trash_link);
3316 lck_mtx_unlock(&in6m_trash_lock);
3317 in6m->in6m_debug |= IFD_TRASHED;
3318 }
3319
0a7de745 3320 return 1;
6d2010ae
A
3321}
3322
3323void
3324in6m_addref(struct in6_multi *in6m, int locked)
3325{
0a7de745 3326 if (!locked) {
6d2010ae 3327 IN6M_LOCK_SPIN(in6m);
0a7de745 3328 } else {
6d2010ae 3329 IN6M_LOCK_ASSERT_HELD(in6m);
0a7de745 3330 }
6d2010ae
A
3331
3332 if (++in6m->in6m_refcount == 0) {
3333 panic("%s: in6m=%p wraparound refcnt", __func__, in6m);
3334 /* NOTREACHED */
3335 } else if (in6m->in6m_trace != NULL) {
3336 (*in6m->in6m_trace)(in6m, TRUE);
3337 }
0a7de745 3338 if (!locked) {
6d2010ae 3339 IN6M_UNLOCK(in6m);
0a7de745 3340 }
6d2010ae
A
3341}
3342
3343void
3344in6m_remref(struct in6_multi *in6m, int locked)
3345{
3346 struct ifmultiaddr *ifma;
3347 struct mld_ifinfo *mli;
3348
0a7de745 3349 if (!locked) {
6d2010ae 3350 IN6M_LOCK_SPIN(in6m);
0a7de745 3351 } else {
6d2010ae 3352 IN6M_LOCK_ASSERT_HELD(in6m);
0a7de745 3353 }
6d2010ae
A
3354
3355 if (in6m->in6m_refcount == 0 || (in6m->in6m_refcount == 1 && locked)) {
3356 panic("%s: in6m=%p negative refcnt", __func__, in6m);
3357 /* NOTREACHED */
3358 } else if (in6m->in6m_trace != NULL) {
3359 (*in6m->in6m_trace)(in6m, FALSE);
3360 }
3361
3362 --in6m->in6m_refcount;
3363 if (in6m->in6m_refcount > 0) {
0a7de745 3364 if (!locked) {
6d2010ae 3365 IN6M_UNLOCK(in6m);
0a7de745 3366 }
6d2010ae
A
3367 return;
3368 }
3369
3370 /*
3371 * Synchronization with in6_mc_get(). In the event the in6m has been
3372 * detached, the underlying ifma would still be in the if_multiaddrs
3373 * list, and thus can be looked up via if_addmulti(). At that point,
3374 * the only way to find this in6m is via ifma_protospec. To avoid
3375 * race conditions between the last in6m_remref() of that in6m and its
3376 * use via ifma_protospec, in6_multihead lock is used for serialization.
3377 * In order to avoid violating the lock order, we must drop in6m_lock
3378 * before acquiring in6_multihead lock. To prevent the in6m from being
3379 * freed prematurely, we hold an extra reference.
3380 */
3381 ++in6m->in6m_refcount;
3382 IN6M_UNLOCK(in6m);
3383 in6_multihead_lock_shared();
3384 IN6M_LOCK_SPIN(in6m);
3385 --in6m->in6m_refcount;
3386 if (in6m->in6m_refcount > 0) {
3387 /* We've lost the race, so abort since in6m is still in use */
3388 IN6M_UNLOCK(in6m);
3389 in6_multihead_lock_done();
3390 /* If it was locked, return it as such */
0a7de745 3391 if (locked) {
6d2010ae 3392 IN6M_LOCK(in6m);
0a7de745 3393 }
6d2010ae
A
3394 return;
3395 }
3396 in6m_purge(in6m);
3397 ifma = in6m->in6m_ifma;
3398 in6m->in6m_ifma = NULL;
3399 in6m->in6m_ifp = NULL;
3400 mli = in6m->in6m_mli;
3401 in6m->in6m_mli = NULL;
3402 IN6M_UNLOCK(in6m);
3403 IFMA_LOCK_SPIN(ifma);
3404 ifma->ifma_protospec = NULL;
3405 IFMA_UNLOCK(ifma);
3406 in6_multihead_lock_done();
3407
3408 in6_multi_free(in6m);
3409 if_delmulti_ifma(ifma);
3410 /* Release reference held to the underlying ifmultiaddr */
3411 IFMA_REMREF(ifma);
3412
0a7de745 3413 if (mli != NULL) {
6d2010ae 3414 MLI_REMREF(mli);
0a7de745 3415 }
6d2010ae
A
3416}
3417
3418static void
3419in6m_trace(struct in6_multi *in6m, int refhold)
3420{
3421 struct in6_multi_dbg *in6m_dbg = (struct in6_multi_dbg *)in6m;
3422 ctrace_t *tr;
3423 u_int32_t idx;
3424 u_int16_t *cnt;
3425
3426 if (!(in6m->in6m_debug & IFD_DEBUG)) {
3427 panic("%s: in6m %p has no debug structure", __func__, in6m);
3428 /* NOTREACHED */
3429 }
3430 if (refhold) {
3431 cnt = &in6m_dbg->in6m_refhold_cnt;
3432 tr = in6m_dbg->in6m_refhold;
3433 } else {
3434 cnt = &in6m_dbg->in6m_refrele_cnt;
3435 tr = in6m_dbg->in6m_refrele;
3436 }
3437
3438 idx = atomic_add_16_ov(cnt, 1) % IN6M_TRACE_HIST_SIZE;
3439 ctrace_record(&tr[idx]);
3440}
3441
3442static struct in6_multi_mship *
f427ee49 3443in6_multi_mship_alloc(zalloc_flags_t how)
6d2010ae 3444{
f427ee49 3445 return zalloc_flags(imm_zone, how | Z_ZERO);
6d2010ae
A
3446}
3447
3448static void
3449in6_multi_mship_free(struct in6_multi_mship *imm)
3450{
3451 if (imm->i6mm_maddr != NULL) {
3452 panic("%s: i6mm_maddr not NULL for imm=%p", __func__, imm);
3453 /* NOTREACHED */
3454 }
3455 zfree(imm_zone, imm);
3456}
3457
3458void
3459in6_multihead_lock_exclusive(void)
3460{
3461 lck_rw_lock_exclusive(&in6_multihead_lock);
3462}
3463
3464void
3465in6_multihead_lock_shared(void)
3466{
3467 lck_rw_lock_shared(&in6_multihead_lock);
3468}
3469
3470void
3471in6_multihead_lock_assert(int what)
3472{
5ba3f43e
A
3473#if !MACH_ASSERT
3474#pragma unused(what)
3475#endif
3476 LCK_RW_ASSERT(&in6_multihead_lock, what);
6d2010ae
A
3477}
3478
3479void
3480in6_multihead_lock_done(void)
3481{
3482 lck_rw_done(&in6_multihead_lock);
3483}
3484
3485static struct ip6_msource *
f427ee49 3486ip6ms_alloc(zalloc_flags_t how)
6d2010ae 3487{
f427ee49 3488 return zalloc_flags(ip6ms_zone, how | Z_ZERO);
6d2010ae
A
3489}
3490
3491static void
3492ip6ms_free(struct ip6_msource *i6ms)
3493{
3494 zfree(ip6ms_zone, i6ms);
3495}
3496
3497static struct in6_msource *
f427ee49 3498in6ms_alloc(zalloc_flags_t how)
6d2010ae 3499{
f427ee49 3500 return zalloc_flags(in6ms_zone, how | Z_ZERO);
6d2010ae
A
3501}
3502
3503static void
3504in6ms_free(struct in6_msource *in6ms)
3505{
3506 zfree(in6ms_zone, in6ms);
3507}
3508
3509#ifdef MLD_DEBUG
3510
3511 static const char *in6m_modestrs[] = { "un", "in", "ex" };
3512
3513static const char *
3514in6m_mode_str(const int mode)
3515{
0a7de745
A
3516 if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE) {
3517 return in6m_modestrs[mode];
3518 }
3519 return "??";
6d2010ae
A
3520}
3521
3522static const char *in6m_statestrs[] = {
3523 "not-member\n",
3524 "silent\n",
39236c6e 3525 "reporting\n",
6d2010ae
A
3526 "idle\n",
3527 "lazy\n",
3528 "sleeping\n",
3529 "awakening\n",
3530 "query-pending\n",
3531 "sg-query-pending\n",
3532 "leaving"
3533};
3534
3535static const char *
3536in6m_state_str(const int state)
3537{
0a7de745
A
3538 if (state >= MLD_NOT_MEMBER && state <= MLD_LEAVING_MEMBER) {
3539 return in6m_statestrs[state];
3540 }
3541 return "??";
6d2010ae
A
3542}
3543
3544/*
3545 * Dump an in6_multi structure to the console.
3546 */
3547void
3548in6m_print(const struct in6_multi *inm)
3549{
3550 int t;
3551
39236c6e 3552 IN6M_LOCK_ASSERT_HELD(__DECONST(struct in6_multi *, inm));
6d2010ae 3553
0a7de745 3554 if (mld_debug == 0) {
6d2010ae 3555 return;
0a7de745 3556 }
6d2010ae 3557
39236c6e
A
3558 printf("%s: --- begin in6m 0x%llx ---\n", __func__,
3559 (uint64_t)VM_KERNEL_ADDRPERM(inm));
3560 printf("addr %s ifp 0x%llx(%s) ifma 0x%llx\n",
6d2010ae 3561 ip6_sprintf(&inm->in6m_addr),
39236c6e
A
3562 (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp),
3563 if_name(inm->in6m_ifp),
3564 (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifma));
6d2010ae
A
3565 printf("timer %u state %s refcount %u scq.len %u\n",
3566 inm->in6m_timer,
3567 in6m_state_str(inm->in6m_state),
3568 inm->in6m_refcount,
3569 inm->in6m_scq.ifq_len);
39236c6e
A
3570 printf("mli 0x%llx nsrc %lu sctimer %u scrv %u\n",
3571 (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_mli),
6d2010ae
A
3572 inm->in6m_nsrc,
3573 inm->in6m_sctimer,
3574 inm->in6m_scrv);
3575 for (t = 0; t < 2; t++) {
3576 printf("t%d: fmode %s asm %u ex %u in %u rec %u\n", t,
3577 in6m_mode_str(inm->in6m_st[t].iss_fmode),
3578 inm->in6m_st[t].iss_asm,
3579 inm->in6m_st[t].iss_ex,
3580 inm->in6m_st[t].iss_in,
3581 inm->in6m_st[t].iss_rec);
3582 }
39236c6e
A
3583 printf("%s: --- end in6m 0x%llx ---\n", __func__,
3584 (uint64_t)VM_KERNEL_ADDRPERM(inm));
6d2010ae
A
3585}
3586
0a7de745 3587#else
6d2010ae
A
3588
3589void
3590in6m_print(__unused const struct in6_multi *inm)
3591{
6d2010ae
A
3592}
3593
3594#endif