X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/3903760236c30e3b5ace7a4eefac3a269d68957c..c3c9b80d004dbbfdf763edeb97968c6997e3b45b:/bsd/netinet6/mld6.c?ds=sidebyside diff --git a/bsd/netinet6/mld6.c b/bsd/netinet6/mld6.c index 6cbcaef13..a056a9cc6 100644 --- a/bsd/netinet6/mld6.c +++ b/bsd/netinet6/mld6.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2016 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -166,61 +166,61 @@ static decl_lck_mtx_data(, mld_mtx); SLIST_HEAD(mld_in6m_relhead, in6_multi); -static void mli_initvar(struct mld_ifinfo *, struct ifnet *, int); -static struct mld_ifinfo *mli_alloc(int); -static void mli_free(struct mld_ifinfo *); -static void mli_delete(const struct ifnet *, struct mld_in6m_relhead *); -static void mld_dispatch_packet(struct mbuf *); -static void mld_final_leave(struct in6_multi *, struct mld_ifinfo *, - struct mld_tparams *); -static int mld_handle_state_change(struct in6_multi *, struct mld_ifinfo *, - struct mld_tparams *); -static int mld_initial_join(struct in6_multi *, struct mld_ifinfo *, - struct mld_tparams *, const int); +static void mli_initvar(struct mld_ifinfo *, struct ifnet *, int); +static struct mld_ifinfo *mli_alloc(zalloc_flags_t); +static void mli_free(struct mld_ifinfo *); +static void mli_delete(const struct ifnet *, struct mld_in6m_relhead *); +static void mld_dispatch_packet(struct mbuf *); +static void mld_final_leave(struct in6_multi *, struct mld_ifinfo *, + struct mld_tparams *); +static int mld_handle_state_change(struct in6_multi *, struct mld_ifinfo *, + struct mld_tparams *); +static int mld_initial_join(struct in6_multi *, struct mld_ifinfo *, + struct mld_tparams *, const int); #ifdef MLD_DEBUG -static const char * mld_rec_type_to_str(const int); +static const char * mld_rec_type_to_str(const int); #endif -static uint32_t mld_set_version(struct mld_ifinfo *, const int); -static void mld_flush_relq(struct mld_ifinfo *, struct mld_in6m_relhead *); -static void mld_dispatch_queue(struct mld_ifinfo *, struct ifqueue *, int); -static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *, - /*const*/ struct mld_hdr *); -static int mld_v1_input_report(struct ifnet *, struct mbuf *, - const struct ip6_hdr *, /*const*/ struct mld_hdr *); -static void mld_v1_process_group_timer(struct in6_multi *, const int); -static void mld_v1_process_querier_timers(struct mld_ifinfo *); -static int 
mld_v1_transmit_report(struct in6_multi *, const int); -static uint32_t mld_v1_update_group(struct in6_multi *, const int); -static void mld_v2_cancel_link_timers(struct mld_ifinfo *); -static uint32_t mld_v2_dispatch_general_query(struct mld_ifinfo *); +static uint32_t mld_set_version(struct mld_ifinfo *, const int); +static void mld_flush_relq(struct mld_ifinfo *, struct mld_in6m_relhead *); +static void mld_dispatch_queue_locked(struct mld_ifinfo *, struct ifqueue *, int); +static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *, + /*const*/ struct mld_hdr *); +static int mld_v1_input_report(struct ifnet *, struct mbuf *, + const struct ip6_hdr *, /*const*/ struct mld_hdr *); +static void mld_v1_process_group_timer(struct in6_multi *, const int); +static void mld_v1_process_querier_timers(struct mld_ifinfo *); +static int mld_v1_transmit_report(struct in6_multi *, const uint8_t); +static uint32_t mld_v1_update_group(struct in6_multi *, const int); +static void mld_v2_cancel_link_timers(struct mld_ifinfo *); +static uint32_t mld_v2_dispatch_general_query(struct mld_ifinfo *); static struct mbuf * - mld_v2_encap_report(struct ifnet *, struct mbuf *); -static int mld_v2_enqueue_filter_change(struct ifqueue *, - struct in6_multi *); -static int mld_v2_enqueue_group_record(struct ifqueue *, - struct in6_multi *, const int, const int, const int, - const int); -static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *, - struct mbuf *, const int, const int); -static int mld_v2_merge_state_changes(struct in6_multi *, - struct ifqueue *); -static void mld_v2_process_group_timers(struct mld_ifinfo *, - struct ifqueue *, struct ifqueue *, - struct in6_multi *, const int); -static int mld_v2_process_group_query(struct in6_multi *, - int, struct mbuf *, const int); -static int sysctl_mld_gsr SYSCTL_HANDLER_ARGS; -static int sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS; -static int sysctl_mld_v2enable SYSCTL_HANDLER_ARGS; - -static int mld_timeout_run; /* MLD timer is scheduled to run */ +mld_v2_encap_report(struct ifnet *, struct mbuf *); +static int mld_v2_enqueue_filter_change(struct ifqueue *, + struct in6_multi *); +static int mld_v2_enqueue_group_record(struct ifqueue *, + struct in6_multi *, const int, const int, const int, + const int); +static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *, + struct mbuf *, const int, const int); +static int mld_v2_merge_state_changes(struct in6_multi *, + struct ifqueue *); +static void mld_v2_process_group_timers(struct mld_ifinfo *, + struct ifqueue *, struct ifqueue *, + struct in6_multi *, const int); +static int mld_v2_process_group_query(struct in6_multi *, + int, struct mbuf *, const int); +static int sysctl_mld_gsr SYSCTL_HANDLER_ARGS; +static int sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS; +static int sysctl_mld_v2enable SYSCTL_HANDLER_ARGS; + +static int mld_timeout_run; /* MLD timer is scheduled to run */ static void mld_timeout(void *); static void mld_sched_timeout(void); /* * Normative references: RFC 2710, RFC 3590, RFC 3810. */ -static struct timeval mld_gsrdelay = {10, 0}; +static struct timeval mld_gsrdelay = {.tv_sec = 10, .tv_usec = 0}; static LIST_HEAD(, mld_ifinfo) mli_head; static int querier_present_timers_running6; @@ -228,38 +228,36 @@ static int interface_timers_running6; static int state_change_timers_running6; static int current_state_timers_running6; +static unsigned int mld_mli_list_genid; /* * Subsystem lock macros. 
*/ -#define MLD_LOCK() \ +#define MLD_LOCK() \ lck_mtx_lock(&mld_mtx) -#define MLD_LOCK_ASSERT_HELD() \ - lck_mtx_assert(&mld_mtx, LCK_MTX_ASSERT_OWNED) -#define MLD_LOCK_ASSERT_NOTHELD() \ - lck_mtx_assert(&mld_mtx, LCK_MTX_ASSERT_NOTOWNED) -#define MLD_UNLOCK() \ +#define MLD_LOCK_ASSERT_HELD() \ + LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_OWNED) +#define MLD_LOCK_ASSERT_NOTHELD() \ + LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_NOTOWNED) +#define MLD_UNLOCK() \ lck_mtx_unlock(&mld_mtx) -#define MLD_ADD_DETACHED_IN6M(_head, _in6m) { \ - SLIST_INSERT_HEAD(_head, _in6m, in6m_dtle); \ +#define MLD_ADD_DETACHED_IN6M(_head, _in6m) { \ + SLIST_INSERT_HEAD(_head, _in6m, in6m_dtle); \ } -#define MLD_REMOVE_DETACHED_IN6M(_head) { \ - struct in6_multi *_in6m, *_inm_tmp; \ - SLIST_FOREACH_SAFE(_in6m, _head, in6m_dtle, _inm_tmp) { \ - SLIST_REMOVE(_head, _in6m, in6_multi, in6m_dtle); \ - IN6M_REMREF(_in6m); \ - } \ - VERIFY(SLIST_EMPTY(_head)); \ +#define MLD_REMOVE_DETACHED_IN6M(_head) { \ + struct in6_multi *_in6m, *_inm_tmp; \ + SLIST_FOREACH_SAFE(_in6m, _head, in6m_dtle, _inm_tmp) { \ + SLIST_REMOVE(_head, _in6m, in6_multi, in6m_dtle); \ + IN6M_REMREF(_in6m); \ + } \ + VERIFY(SLIST_EMPTY(_head)); \ } -#define MLI_ZONE_MAX 64 /* maximum elements in zone */ -#define MLI_ZONE_NAME "mld_ifinfo" /* zone name */ - -static unsigned int mli_size; /* size of zone element */ -static struct zone *mli_zone; /* zone for mld_ifinfo */ +static ZONE_DECLARE(mli_zone, "mld_ifinfo", + sizeof(struct mld_ifinfo), ZC_ZFREE_CLEARMEM); -SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */ +SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */ SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IPv6 Multicast Listener Discovery"); @@ -269,53 +267,53 @@ SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay, "Rate limit for MLDv2 Group-and-Source queries in seconds"); SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_LOCKED, - sysctl_mld_ifinfo, "Per-interface MLDv2 state"); + sysctl_mld_ifinfo, "Per-interface MLDv2 state"); -static int mld_v1enable = 1; +static int mld_v1enable = 1; SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_v1enable, 0, "Enable fallback to MLDv1"); -static int mld_v2enable = 1; +static int mld_v2enable = 1; SYSCTL_PROC(_net_inet6_mld, OID_AUTO, v2enable, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &mld_v2enable, 0, sysctl_mld_v2enable, "I", "Enable MLDv2 (debug purposes only)"); -static int mld_use_allow = 1; +static int mld_use_allow = 1; SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves"); #ifdef MLD_DEBUG int mld_debug = 0; SYSCTL_INT(_net_inet6_mld, OID_AUTO, - debug, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_debug, 0, ""); + debug, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_debug, 0, ""); #endif /* * Packed Router Alert option structure declaration. */ struct mld_raopt { - struct ip6_hbh hbh; - struct ip6_opt pad; - struct ip6_opt_router ra; + struct ip6_hbh hbh; + struct ip6_opt pad; + struct ip6_opt_router ra; } __packed; /* * Router Alert hop-by-hop option header. 
*/ static struct mld_raopt mld_ra = { - .hbh = { 0, 0 }, - .pad = { .ip6o_type = IP6OPT_PADN, 0 }, + .hbh = { .ip6h_nxt = 0, .ip6h_len = 0 }, + .pad = { .ip6o_type = IP6OPT_PADN, .ip6o_len = 0 }, .ra = { - .ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT, - .ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2), - .ip6or_value = {((IP6OPT_RTALERT_MLD >> 8) & 0xFF), - (IP6OPT_RTALERT_MLD & 0xFF) } + .ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT, + .ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2), + .ip6or_value = {((IP6OPT_RTALERT_MLD >> 8) & 0xFF), + (IP6OPT_RTALERT_MLD & 0xFF) } } }; static struct ip6_pktopts mld_po; /* Store MLDv2 record count in the module private scratch space */ -#define vt_nrecs pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0] +#define vt_nrecs pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0] static __inline void mld_save_context(struct mbuf *m, struct ifnet *ifp) @@ -336,7 +334,7 @@ mld_scrub_context(struct mbuf *m) static __inline struct ifnet * mld_restore_context(struct mbuf *m) { - return (m->m_pkthdr.rcvif); + return m->m_pkthdr.rcvif; } /* @@ -351,11 +349,12 @@ sysctl_mld_gsr SYSCTL_HANDLER_ARGS MLD_LOCK(); - i = mld_gsrdelay.tv_sec; + i = (int)mld_gsrdelay.tv_sec; error = sysctl_handle_int(oidp, &i, 0, req); - if (error || !req->newptr) + if (error || !req->newptr) { goto out_locked; + } if (i < -1 || i >= 60) { error = EINVAL; @@ -366,7 +365,7 @@ sysctl_mld_gsr SYSCTL_HANDLER_ARGS out_locked: MLD_UNLOCK(); - return (error); + return error; } /* * Expose struct mld_ifinfo to userland, keyed by ifindex. @@ -377,21 +376,23 @@ static int sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS { #pragma unused(oidp) - int *name; - int error; - u_int namelen; - struct ifnet *ifp; - struct mld_ifinfo *mli; - struct mld_ifinfo_u mli_u; + int *name; + int error; + u_int namelen; + struct ifnet *ifp; + struct mld_ifinfo *mli; + struct mld_ifinfo_u mli_u; name = (int *)arg1; namelen = arg2; - if (req->newptr != USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } - if (namelen != 1) - return (EINVAL); + if (namelen != 1) { + return EINVAL; + } MLD_LOCK(); @@ -405,10 +406,11 @@ sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS ifnet_head_lock_shared(); ifp = ifindex2ifnet[name[0]]; ifnet_head_done(); - if (ifp == NULL) + if (ifp == NULL) { goto out_locked; + } - bzero(&mli_u, sizeof (mli_u)); + bzero(&mli_u, sizeof(mli_u)); LIST_FOREACH(mli, &mli_head, mli_link) { MLI_LOCK(mli); @@ -428,13 +430,13 @@ sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS mli_u.mli_uri = mli->mli_uri; MLI_UNLOCK(mli); - error = SYSCTL_OUT(req, &mli_u, sizeof (mli_u)); + error = SYSCTL_OUT(req, &mli_u, sizeof(mli_u)); break; } out_locked: MLD_UNLOCK(); - return (error); + return error; } static int @@ -444,15 +446,16 @@ sysctl_mld_v2enable SYSCTL_HANDLER_ARGS int error; int i; struct mld_ifinfo *mli; - struct mld_tparams mtp = { 0, 0, 0, 0 }; + struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 }; MLD_LOCK(); i = mld_v2enable; error = sysctl_handle_int(oidp, &i, 0, req); - if (error || !req->newptr) + if (error || !req->newptr) { goto out_locked; + } if (i < 0 || i > 1) { error = EINVAL; @@ -465,13 +468,15 @@ sysctl_mld_v2enable SYSCTL_HANDLER_ARGS * the MLD version back to v2. Otherwise, we have to explicitly * downgrade. Note that this functionality is to be used for debugging. 
*/ - if (mld_v2enable == 1) + if (mld_v2enable == 1) { goto out_locked; + } LIST_FOREACH(mli, &mli_head, mli_link) { MLI_LOCK(mli); - if (mld_set_version(mli, MLD_VERSION_1) > 0) + if (mld_set_version(mli, MLD_VERSION_1) > 0) { mtp.qpt = 1; + } MLI_UNLOCK(mli); } @@ -480,40 +485,60 @@ out_locked: mld_set_timeout(&mtp); - return (error); + return error; } /* * Dispatch an entire queue of pending packet chains. * * Must not be called with in6m_lock held. + * XXX This routine unlocks MLD global lock and also mli locks. + * Make sure that the calling routine takes reference on the mli + * before calling this routine. + * Also if we are traversing mli_head, remember to check for + * mli list generation count and restart the loop if generation count + * has changed. */ static void -mld_dispatch_queue(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit) +mld_dispatch_queue_locked(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit) { struct mbuf *m; - if (mli != NULL) + MLD_LOCK_ASSERT_HELD(); + + if (mli != NULL) { MLI_LOCK_ASSERT_HELD(mli); + } for (;;) { IF_DEQUEUE(ifq, m); - if (m == NULL) + if (m == NULL) { break; + } MLD_PRINTF(("%s: dispatch 0x%llx from 0x%llx\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifq), (uint64_t)VM_KERNEL_ADDRPERM(m))); - if (mli != NULL) + + if (mli != NULL) { MLI_UNLOCK(mli); + } + MLD_UNLOCK(); + mld_dispatch_packet(m); - if (mli != NULL) + + MLD_LOCK(); + if (mli != NULL) { MLI_LOCK(mli); - if (--limit == 0) + } + + if (--limit == 0) { break; + } } - if (mli != NULL) + if (mli != NULL) { MLI_LOCK_ASSERT_HELD(mli); + } } /* @@ -531,27 +556,28 @@ mld_dispatch_queue(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit) static __inline__ int mld_is_addr_reported(const struct in6_addr *addr) { - VERIFY(IN6_IS_ADDR_MULTICAST(addr)); - if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL) - return (0); + if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL) { + return 0; + } - if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) { + if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL && !IN6_IS_ADDR_UNICAST_BASED_MULTICAST(addr)) { struct in6_addr tmp = *addr; in6_clearscope(&tmp); - if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes)) - return (0); + if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes)) { + return 0; + } } - return (1); + return 1; } /* * Attach MLD when PF_INET6 is attached to an interface. 
*/ struct mld_ifinfo * -mld_domifattach(struct ifnet *ifp, int how) +mld_domifattach(struct ifnet *ifp, zalloc_flags_t how) { struct mld_ifinfo *mli; @@ -559,8 +585,9 @@ mld_domifattach(struct ifnet *ifp, int how) (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); mli = mli_alloc(how); - if (mli == NULL) - return (NULL); + if (mli == NULL) { + return NULL; + } MLD_LOCK(); @@ -575,13 +602,14 @@ mld_domifattach(struct ifnet *ifp, int how) ifnet_lock_done(ifp); LIST_INSERT_HEAD(&mli_head, mli, mli_link); + mld_mli_list_genid++; MLD_UNLOCK(); MLD_PRINTF(("%s: allocate mld_ifinfo for ifp 0x%llx(%s)\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); - return (mli); + return mli; } /* @@ -608,6 +636,7 @@ mld_domifreattach(struct mld_ifinfo *mli) ifnet_lock_done(ifp); LIST_INSERT_HEAD(&mli_head, mli, mli_link); + mld_mli_list_genid++; MLD_UNLOCK(); @@ -621,7 +650,7 @@ mld_domifreattach(struct mld_ifinfo *mli) void mld_domifdetach(struct ifnet *ifp) { - SLIST_HEAD(, in6_multi) in6m_dthead; + SLIST_HEAD(, in6_multi) in6m_dthead; SLIST_INIT(&in6m_dthead); @@ -664,6 +693,7 @@ mli_delete(const struct ifnet *ifp, struct mld_in6m_relhead *in6m_dthead) LIST_REMOVE(mli, mli_link); MLI_REMREF(mli); /* release mli_head reference */ + mld_mli_list_genid++; return; } MLI_UNLOCK(mli); @@ -680,10 +710,11 @@ mld6_initsilent(struct ifnet *ifp, struct mld_ifinfo *mli) MLI_LOCK_ASSERT_NOTHELD(mli); MLI_LOCK(mli); if (!(ifp->if_flags & IFF_MULTICAST) && - (ifp->if_eflags & (IFEF_IPV6_ND6ALT|IFEF_LOCALNET_PRIVATE))) + (ifp->if_eflags & (IFEF_IPV6_ND6ALT | IFEF_LOCALNET_PRIVATE))) { mli->mli_flags |= MLIF_SILENT; - else + } else { mli->mli_flags &= ~MLIF_SILENT; + } MLI_UNLOCK(mli); } @@ -693,20 +724,23 @@ mli_initvar(struct mld_ifinfo *mli, struct ifnet *ifp, int reattach) MLI_LOCK_ASSERT_HELD(mli); mli->mli_ifp = ifp; - if (mld_v2enable) + if (mld_v2enable) { mli->mli_version = MLD_VERSION_2; - else + } else { mli->mli_version = MLD_VERSION_1; + } mli->mli_flags = 0; mli->mli_rv = MLD_RV_INIT; mli->mli_qi = MLD_QI_INIT; mli->mli_qri = MLD_QRI_INIT; mli->mli_uri = MLD_URI_INIT; - if (mld_use_allow) + if (mld_use_allow) { mli->mli_flags |= MLIF_USEALLOW; - if (!reattach) + } + if (!reattach) { SLIST_INIT(&mli->mli_relinmhead); + } /* * Responses to general queries are subject to bounds. @@ -716,17 +750,14 @@ mli_initvar(struct mld_ifinfo *mli, struct ifnet *ifp, int reattach) } static struct mld_ifinfo * -mli_alloc(int how) +mli_alloc(zalloc_flags_t how) { - struct mld_ifinfo *mli; - - mli = (how == M_WAITOK) ? 
zalloc(mli_zone) : zalloc_noblock(mli_zone); + struct mld_ifinfo *mli = zalloc_flags(mli_zone, how | Z_ZERO); if (mli != NULL) { - bzero(mli, mli_size); lck_mtx_init(&mli->mli_lock, mld_mtx_grp, mld_mtx_attr); mli->mli_debug |= IFD_ALLOC; } - return (mli); + return mli; } static void @@ -756,23 +787,25 @@ mli_free(struct mld_ifinfo *mli) void mli_addref(struct mld_ifinfo *mli, int locked) { - if (!locked) + if (!locked) { MLI_LOCK_SPIN(mli); - else + } else { MLI_LOCK_ASSERT_HELD(mli); + } if (++mli->mli_refcnt == 0) { panic("%s: mli=%p wraparound refcnt", __func__, mli); /* NOTREACHED */ } - if (!locked) + if (!locked) { MLI_UNLOCK(mli); + } } void mli_remref(struct mld_ifinfo *mli) { - SLIST_HEAD(, in6_multi) in6m_dthead; + SLIST_HEAD(, in6_multi) in6m_dthead; struct ifnet *ifp; MLI_LOCK_SPIN(mli); @@ -817,11 +850,11 @@ static int mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, /*const*/ struct mld_hdr *mld) { - struct mld_ifinfo *mli; - struct in6_multi *inm; - int err = 0, is_general_query; - uint16_t timer; - struct mld_tparams mtp = { 0, 0, 0, 0 }; + struct mld_ifinfo *mli; + struct in6_multi *inm; + int err = 0, is_general_query; + uint16_t timer; + struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 }; MLD_LOCK_ASSERT_NOTHELD(); @@ -854,7 +887,7 @@ mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, * MLDv1 General Query. * If this was not sent to the all-nodes group, ignore it. */ - struct in6_addr dst; + struct in6_addr dst; dst = ip6->ip6_dst; in6_clearscope(&dst); @@ -868,7 +901,7 @@ mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, * Embed scope ID of receiving interface in MLD query for * lookup whilst we don't hold other locks. */ - in6_setscope(&mld->mld_addr, ifp, NULL); + (void)in6_setscope(&mld->mld_addr, ifp, NULL); } /* @@ -882,8 +915,9 @@ mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, MLI_UNLOCK(mli); timer = ntohs(mld->mld_maxdelay) / MLD_TIMER_SCALE; - if (timer == 0) + if (timer == 0) { timer = 1; + } if (is_general_query) { struct in6_multistep step; @@ -898,8 +932,9 @@ mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, IN6_FIRST_MULTI(step, inm); while (inm != NULL) { IN6M_LOCK(inm); - if (inm->in6m_ifp == ifp) + if (inm->in6m_ifp == ifp) { mtp.cst += mld_v1_update_group(inm, timer); + } IN6M_UNLOCK(inm); IN6_NEXT_MULTI(step, inm); } @@ -930,7 +965,7 @@ mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, done: mld_set_timeout(&mtp); - return (err); + return err; } /* @@ -943,7 +978,7 @@ done: * We may be updating the group for the first time since we switched * to MLDv2. If we are, then we must clear any recorded source lists, * and transition to REPORTING state; the group timer is overloaded - * for group and group-source query responses. + * for group and group-source query responses. * * Unlike MLDv2, the delay per group should be jittered * to avoid bursts of MLDv1 reports. 
@@ -968,7 +1003,7 @@ mld_v1_update_group(struct in6_multi *inm, const int timer) "skipping.\n", __func__)); break; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case MLD_SG_QUERY_PENDING_MEMBER: case MLD_G_QUERY_PENDING_MEMBER: case MLD_IDLE_MEMBER: @@ -986,7 +1021,7 @@ mld_v1_update_group(struct in6_multi *inm, const int timer) break; } - return (inm->in6m_timer); + return inm->in6m_timer; } /* @@ -1001,14 +1036,13 @@ static int mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, struct mbuf *m, const int off, const int icmp6len) { - struct mld_ifinfo *mli; - struct mldv2_query *mld; - struct in6_multi *inm; - uint32_t maxdelay, nsrc, qqi; - int err = 0, is_general_query; - uint16_t timer; - uint8_t qrv; - struct mld_tparams mtp = { 0, 0, 0, 0 }; + struct mld_ifinfo *mli; + struct mldv2_query *mld; + struct in6_multi *inm; + uint32_t maxdelay, nsrc, qqi, timer; + int err = 0, is_general_query; + uint8_t qrv; + struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 }; MLD_LOCK_ASSERT_NOTHELD(); @@ -1037,14 +1071,15 @@ mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off); - maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */ - if (maxdelay >= 32768) { - maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) << - (MLD_MRC_EXP(maxdelay) + 3); + maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */ + if (maxdelay > SHRT_MAX) { + maxdelay = (MLD_MRC_MANT((uint16_t)maxdelay) | 0x1000) << + (MLD_MRC_EXP((uint16_t)maxdelay) + 3); } timer = maxdelay / MLD_TIMER_SCALE; - if (timer == 0) + if (timer == 0) { timer = 1; + } qrv = MLD_QRV(mld->mld_misc); if (qrv < 2) { @@ -1056,7 +1091,7 @@ mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, qqi = mld->mld_qqi; if (qqi >= 128) { qqi = MLD_QQIC_MANT(mld->mld_qqi) << - (MLD_QQIC_EXP(mld->mld_qqi) + 3); + (MLD_QQIC_EXP(mld->mld_qqi) + 3); } nsrc = ntohs(mld->mld_numsrc); @@ -1090,7 +1125,7 @@ mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, * lookup whilst we don't hold other locks (due to KAME * locking lameness). We own this mbuf chain just now. 
*/ - in6_setscope(&mld->mld_addr, ifp, NULL); + (void)in6_setscope(&mld->mld_addr, ifp, NULL); } mli = MLD_IFINFO(ifp); @@ -1146,8 +1181,9 @@ mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, in6_multihead_lock_shared(); IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm); in6_multihead_lock_done(); - if (inm == NULL) + if (inm == NULL) { goto done; + } IN6M_LOCK(inm); if (nsrc > 0) { @@ -1189,7 +1225,7 @@ done: } mld_set_timeout(&mtp); - return (err); + return err; } /* @@ -1201,9 +1237,9 @@ static int mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0, const int off) { - struct mldv2_query *mld; - int retval; - uint16_t nsrc; + struct mldv2_query *mld; + int retval; + uint16_t nsrc; IN6M_LOCK_ASSERT_HELD(inm); @@ -1218,7 +1254,7 @@ mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0, case MLD_AWAKENING_MEMBER: case MLD_IDLE_MEMBER: case MLD_LEAVING_MEMBER: - return (retval); + return retval; case MLD_REPORTING_MEMBER: case MLD_G_QUERY_PENDING_MEMBER: case MLD_SG_QUERY_PENDING_MEMBER: @@ -1241,7 +1277,7 @@ mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0, } inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER; inm->in6m_timer = MLD_RANDOM_DELAY(timer); - return (retval); + return retval; } /* @@ -1251,7 +1287,7 @@ mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0, if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) { timer = min(inm->in6m_timer, timer); inm->in6m_timer = MLD_RANDOM_DELAY(timer); - return (retval); + return retval; } /* @@ -1266,10 +1302,10 @@ mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0, * report for those sources. */ if (inm->in6m_nsrc > 0) { - struct mbuf *m; - uint8_t *sp; - int i, nrecorded; - int soff; + struct mbuf *m; + uint8_t *sp; + int i, nrecorded; + int soff; m = m0; soff = off + sizeof(struct mldv2_query); @@ -1278,26 +1314,28 @@ mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0, sp = mtod(m, uint8_t *) + soff; retval = in6m_record_source(inm, (const struct in6_addr *)(void *)sp); - if (retval < 0) + if (retval < 0) { break; + } nrecorded += retval; soff += sizeof(struct in6_addr); if (soff >= m->m_len) { soff = soff - m->m_len; m = m->m_next; - if (m == NULL) + if (m == NULL) { break; + } } } if (nrecorded > 0) { - MLD_PRINTF(( "%s: schedule response to SG query\n", + MLD_PRINTF(("%s: schedule response to SG query\n", __func__)); inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER; inm->in6m_timer = MLD_RANDOM_DELAY(timer); } } - return (retval); + return retval; } /* @@ -1311,20 +1349,21 @@ static int mld_v1_input_report(struct ifnet *ifp, struct mbuf *m, const struct ip6_hdr *ip6, /*const*/ struct mld_hdr *mld) { - struct in6_addr src, dst; - struct in6_ifaddr *ia; - struct in6_multi *inm; + struct in6_addr src, dst; + struct in6_ifaddr *ia; + struct in6_multi *inm; if (!mld_v1enable) { MLD_PRINTF(("%s: ignore v1 report %s on ifp 0x%llx(%s)\n", __func__, ip6_sprintf(&mld->mld_addr), (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); - return (0); + return 0; } if ((ifp->if_flags & IFF_LOOPBACK) || - (m->m_pkthdr.pkt_flags & PKTF_LOOP)) - return (0); + (m->m_pkthdr.pkt_flags & PKTF_LOOP)) { + return 0; + } /* * MLDv1 reports must originate from a host's link-local address, @@ -1336,7 +1375,7 @@ mld_v1_input_report(struct ifnet *ifp, struct mbuf *m, MLD_PRINTF(("%s: ignore v1 query src %s on ifp 0x%llx(%s)\n", __func__, ip6_sprintf(&ip6->ip6_src), (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); - 
return (EINVAL); + return EINVAL; } /* @@ -1350,7 +1389,7 @@ mld_v1_input_report(struct ifnet *ifp, struct mbuf *m, MLD_PRINTF(("%s: ignore v1 query dst %s on ifp 0x%llx(%s)\n", __func__, ip6_sprintf(&ip6->ip6_dst), (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); - return (EINVAL); + return EINVAL; } /* @@ -1363,18 +1402,18 @@ mld_v1_input_report(struct ifnet *ifp, struct mbuf *m, * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be * performed for the on-wire address. */ - ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST); + ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST); if (ia != NULL) { IFA_LOCK(&ia->ia_ifa); - if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia)))){ + if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia)))) { IFA_UNLOCK(&ia->ia_ifa); IFA_REMREF(&ia->ia_ifa); - return (0); + return 0; } IFA_UNLOCK(&ia->ia_ifa); IFA_REMREF(&ia->ia_ifa); } else if (IN6_IS_ADDR_UNSPECIFIED(&src)) { - return (0); + return 0; } MLD_PRINTF(("%s: process v1 report %s on ifp 0x%llx(%s)\n", @@ -1385,8 +1424,9 @@ mld_v1_input_report(struct ifnet *ifp, struct mbuf *m, * Embed scope ID of receiving interface in MLD query for lookup * whilst we don't hold other locks (due to KAME locking lameness). */ - if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) - in6_setscope(&mld->mld_addr, ifp, NULL); + if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) { + (void)in6_setscope(&mld->mld_addr, ifp, NULL); + } /* * MLDv1 report suppression. @@ -1432,6 +1472,7 @@ mld_v1_input_report(struct ifnet *ifp, struct mbuf *m, "ifp 0x%llx(%s)\n", __func__, ip6_sprintf(&mld->mld_addr), (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); + OS_FALLTHROUGH; case MLD_LAZY_MEMBER: inm->in6m_state = MLD_LAZY_MEMBER; break; @@ -1448,7 +1489,7 @@ out: /* XXX Clear embedded scope ID as userland won't expect it. */ in6_clearscope(&mld->mld_addr); - return (0); + return 0; } /* @@ -1465,18 +1506,16 @@ out: int mld_input(struct mbuf *m, int off, int icmp6len) { - struct ifnet *ifp; - struct ip6_hdr *ip6; - struct mld_hdr *mld; - int mldlen; + struct ifnet *ifp = NULL; + struct ip6_hdr *ip6 = NULL; + struct mld_hdr *mld = NULL; + int mldlen = 0; MLD_PRINTF(("%s: called w/mbuf (0x%llx,%d)\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(m), off)); ifp = m->m_pkthdr.rcvif; - ip6 = mtod(m, struct ip6_hdr *); - /* Pullup to appropriate size. 
*/ mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off); if (mld->mld_type == MLD_LISTENER_QUERY && @@ -1485,11 +1524,14 @@ mld_input(struct mbuf *m, int off, int icmp6len) } else { mldlen = sizeof(struct mld_hdr); } + // check if mldv2_query/mld_hdr fits in the first mbuf + IP6_EXTHDR_CHECK(m, off, mldlen, return IPPROTO_DONE); IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen); if (mld == NULL) { icmp6stat.icp6s_badlen++; - return (IPPROTO_DONE); + return IPPROTO_DONE; } + ip6 = mtod(m, struct ip6_hdr *); /* * Userland needs to see all of this traffic for implementing @@ -1499,18 +1541,21 @@ mld_input(struct mbuf *m, int off, int icmp6len) case MLD_LISTENER_QUERY: icmp6_ifstat_inc(ifp, ifs6_in_mldquery); if (icmp6len == sizeof(struct mld_hdr)) { - if (mld_v1_input_query(ifp, ip6, mld) != 0) - return (0); + if (mld_v1_input_query(ifp, ip6, mld) != 0) { + return 0; + } } else if (icmp6len >= sizeof(struct mldv2_query)) { if (mld_v2_input_query(ifp, ip6, m, off, - icmp6len) != 0) - return (0); + icmp6len) != 0) { + return 0; + } } break; case MLD_LISTENER_REPORT: icmp6_ifstat_inc(ifp, ifs6_in_mldreport); - if (mld_v1_input_report(ifp, m, ip6, mld) != 0) - return (0); + if (mld_v1_input_report(ifp, m, ip6, mld) != 0) { + return 0; + } break; case MLDV2_LISTENER_REPORT: icmp6_ifstat_inc(ifp, ifs6_in_mldreport); @@ -1522,7 +1567,7 @@ mld_input(struct mbuf *m, int off, int icmp6len) break; } - return (0); + return 0; } /* @@ -1537,14 +1582,18 @@ mld_set_timeout(struct mld_tparams *mtp) if (mtp->qpt != 0 || mtp->it != 0 || mtp->cst != 0 || mtp->sct != 0) { MLD_LOCK(); - if (mtp->qpt != 0) + if (mtp->qpt != 0) { querier_present_timers_running6 = 1; - if (mtp->it != 0) + } + if (mtp->it != 0) { interface_timers_running6 = 1; - if (mtp->cst != 0) + } + if (mtp->cst != 0) { current_state_timers_running6 = 1; - if (mtp->sct != 0) + } + if (mtp->sct != 0) { state_change_timers_running6 = 1; + } mld_sched_timeout(); MLD_UNLOCK(); } @@ -1557,13 +1606,15 @@ static void mld_timeout(void *arg) { #pragma unused(arg) - struct ifqueue scq; /* State-change packets */ - struct ifqueue qrq; /* Query response packets */ - struct ifnet *ifp; - struct mld_ifinfo *mli; - struct in6_multi *inm; - int uri_sec = 0; - SLIST_HEAD(, in6_multi) in6m_dthead; + struct ifqueue scq; /* State-change packets */ + struct ifqueue qrq; /* Query response packets */ + struct ifnet *ifp; + struct mld_ifinfo *mli; + struct in6_multi *inm; + int uri_sec = 0; + unsigned int genid = mld_mli_list_genid; + + SLIST_HEAD(, in6_multi) in6m_dthead; SLIST_INIT(&in6m_dthead); @@ -1588,8 +1639,9 @@ mld_timeout(void *arg) LIST_FOREACH(mli, &mli_head, mli_link) { MLI_LOCK(mli); mld_v1_process_querier_timers(mli); - if (mli->mli_v1_timer > 0) + if (mli->mli_v1_timer > 0) { querier_present_timers_running6 = 1; + } MLI_UNLOCK(mli); } } @@ -1600,27 +1652,64 @@ mld_timeout(void *arg) if (interface_timers_running6) { MLD_PRINTF(("%s: interface timers running\n", __func__)); interface_timers_running6 = 0; - LIST_FOREACH(mli, &mli_head, mli_link) { + mli = LIST_FIRST(&mli_head); + + while (mli != NULL) { + if (mli->mli_flags & MLIF_PROCESSED) { + mli = LIST_NEXT(mli, mli_link); + continue; + } + MLI_LOCK(mli); if (mli->mli_version != MLD_VERSION_2) { MLI_UNLOCK(mli); + mli = LIST_NEXT(mli, mli_link); continue; } + /* + * XXX The logic below ends up calling + * mld_dispatch_packet which can unlock mli + * and the global MLD lock. + * Therefore grab a reference on MLI and also + * check for generation count to see if we should + * iterate the list again. 
+ */ + MLI_ADDREF_LOCKED(mli); + if (mli->mli_v2_timer == 0) { /* Do nothing. */ } else if (--mli->mli_v2_timer == 0) { - if (mld_v2_dispatch_general_query(mli) > 0) + if (mld_v2_dispatch_general_query(mli) > 0) { interface_timers_running6 = 1; + } } else { interface_timers_running6 = 1; } + mli->mli_flags |= MLIF_PROCESSED; MLI_UNLOCK(mli); + MLI_REMREF(mli); + + if (genid != mld_mli_list_genid) { + MLD_PRINTF(("%s: MLD information list changed " + "in the middle of iteration! Restart iteration.\n", + __func__)); + mli = LIST_FIRST(&mli_head); + genid = mld_mli_list_genid; + } else { + mli = LIST_NEXT(mli, mli_link); + } } + + LIST_FOREACH(mli, &mli_head, mli_link) + mli->mli_flags &= ~MLIF_PROCESSED; } + + if (!current_state_timers_running6 && - !state_change_timers_running6) + !state_change_timers_running6) { goto out_locked; + } current_state_timers_running6 = 0; state_change_timers_running6 = 0; @@ -1637,9 +1726,16 @@ mld_timeout(void *arg) * MLD host report and state-change timer processing. * Note: Processing a v2 group timer may remove a node. */ - LIST_FOREACH(mli, &mli_head, mli_link) { + mli = LIST_FIRST(&mli_head); + + while (mli != NULL) { struct in6_multistep step; + if (mli->mli_flags & MLIF_PROCESSED) { + mli = LIST_NEXT(mli, mli_link); + continue; + } + MLI_LOCK(mli); ifp = mli->mli_ifp; uri_sec = MLD_RANDOM_DELAY(mli->mli_uri); @@ -1649,8 +1745,9 @@ mld_timeout(void *arg) IN6_FIRST_MULTI(step, inm); while (inm != NULL) { IN6M_LOCK(inm); - if (inm->in6m_ifp != ifp) + if (inm->in6m_ifp != ifp) { goto next; + } MLI_LOCK(mli); switch (mli->mli_version) { @@ -1670,13 +1767,22 @@ next: } in6_multihead_lock_done(); + /* + * XXX The logic below ends up calling + * mld_dispatch_packet which can unlock mli + * and the global MLD lock. + * Therefore grab a reference on MLI and also + * check for generation count to see if we should + * iterate the list again. + */ MLI_LOCK(mli); + MLI_ADDREF_LOCKED(mli); if (mli->mli_version == MLD_VERSION_1) { - mld_dispatch_queue(mli, &mli->mli_v1q, 0); + mld_dispatch_queue_locked(mli, &mli->mli_v1q, 0); } else if (mli->mli_version == MLD_VERSION_2) { MLI_UNLOCK(mli); - mld_dispatch_queue(NULL, &qrq, 0); - mld_dispatch_queue(NULL, &scq, 0); + mld_dispatch_queue_locked(NULL, &qrq, 0); + mld_dispatch_queue_locked(NULL, &scq, 0); VERIFY(qrq.ifq_len == 0); VERIFY(scq.ifq_len == 0); MLI_LOCK(mli); @@ -1694,12 +1800,27 @@ next: */ mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead); VERIFY(SLIST_EMPTY(&mli->mli_relinmhead)); + mli->mli_flags |= MLIF_PROCESSED; MLI_UNLOCK(mli); + MLI_REMREF(mli); IF_DRAIN(&qrq); IF_DRAIN(&scq); + + if (genid != mld_mli_list_genid) { + MLD_PRINTF(("%s: MLD information list changed " + "in the middle of iteration! Restart iteration.\n", + __func__)); + mli = LIST_FIRST(&mli_head); + genid = mld_mli_list_genid; + } else { + mli = LIST_NEXT(mli, mli_link); + } } + LIST_FOREACH(mli, &mli_head, mli_link) + mli->mli_flags &= ~MLIF_PROCESSED; + out_locked: /* re-arm the timer if there's work to do */ mld_timeout_run = 0; @@ -1806,7 +1927,7 @@ mld_v1_process_group_timer(struct in6_multi *inm, const int mld_version) if (report_timer_expired) { inm->in6m_state = MLD_IDLE_MEMBER; (void) mld_v1_transmit_report(inm, - MLD_LISTENER_REPORT); + MLD_LISTENER_REPORT); IN6M_LOCK_ASSERT_HELD(inm); MLI_LOCK_ASSERT_HELD(inm->in6m_mli); } @@ -1865,8 +1986,9 @@ mld_v2_process_group_timers(struct mld_ifinfo *mli, /* We are in timer callback, so be quick about it. 
*/ if (!state_change_retransmit_timer_expired && - !query_response_timer_expired) + !query_response_timer_expired) { return; + } switch (inm->in6m_state) { case MLD_NOT_MEMBER: @@ -1895,7 +2017,7 @@ mld_v2_process_group_timers(struct mld_ifinfo *mli, inm->in6m_state = MLD_REPORTING_MEMBER; in6m_clear_recorded(inm); } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case MLD_REPORTING_MEMBER: case MLD_LEAVING_MEMBER: if (state_change_retransmit_timer_expired) { @@ -1906,7 +2028,7 @@ mld_v2_process_group_timers(struct mld_ifinfo *mli, * reset the timer. */ if (--inm->in6m_scrv > 0) { - inm->in6m_sctimer = uri_sec; + inm->in6m_sctimer = (uint16_t)uri_sec; state_change_timers_running6 = 1; /* caller will schedule timer */ } @@ -1983,7 +2105,7 @@ mld_set_version(struct mld_ifinfo *mli, const int mld_version) MLI_LOCK_ASSERT_HELD(mli); - return (mli->mli_v1_timer); + return mli->mli_v1_timer; } /* @@ -1998,9 +2120,9 @@ mld_set_version(struct mld_ifinfo *mli, const int mld_version) static void mld_v2_cancel_link_timers(struct mld_ifinfo *mli) { - struct ifnet *ifp; - struct in6_multi *inm; - struct in6_multistep step; + struct ifnet *ifp; + struct in6_multi *inm; + struct in6_multistep step; MLI_LOCK_ASSERT_HELD(mli); @@ -2025,8 +2147,9 @@ mld_v2_cancel_link_timers(struct mld_ifinfo *mli) IN6_FIRST_MULTI(step, inm); while (inm != NULL) { IN6M_LOCK(inm); - if (inm->in6m_ifp != ifp) + if (inm->in6m_ifp != ifp) { goto next; + } switch (inm->in6m_state) { case MLD_NOT_MEMBER: @@ -2055,11 +2178,11 @@ mld_v2_cancel_link_timers(struct mld_ifinfo *mli) SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm, in6m_nrele); MLI_UNLOCK(mli); - /* FALLTHROUGH */ + OS_FALLTHROUGH; case MLD_G_QUERY_PENDING_MEMBER: case MLD_SG_QUERY_PENDING_MEMBER: in6m_clear_recorded(inm); - /* FALLTHROUGH */ + OS_FALLTHROUGH; case MLD_REPORTING_MEMBER: inm->in6m_state = MLD_REPORTING_MEMBER; break; @@ -2106,34 +2229,36 @@ mld_v1_process_querier_timers(struct mld_ifinfo *mli) * Transmit an MLDv1 report immediately. */ static int -mld_v1_transmit_report(struct in6_multi *in6m, const int type) +mld_v1_transmit_report(struct in6_multi *in6m, const uint8_t type) { - struct ifnet *ifp; - struct in6_ifaddr *ia; - struct ip6_hdr *ip6; - struct mbuf *mh, *md; - struct mld_hdr *mld; - int error = 0; + struct ifnet *ifp; + struct in6_ifaddr *ia; + struct ip6_hdr *ip6; + struct mbuf *mh, *md; + struct mld_hdr *mld; + int error = 0; IN6M_LOCK_ASSERT_HELD(in6m); MLI_LOCK_ASSERT_HELD(in6m->in6m_mli); ifp = in6m->in6m_ifp; /* ia may be NULL if link-local address is tentative. */ - ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST); + ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST); MGETHDR(mh, M_DONTWAIT, MT_HEADER); if (mh == NULL) { - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); - return (ENOMEM); + } + return ENOMEM; } MGET(md, M_DONTWAIT, MT_DATA); if (md == NULL) { m_free(mh); - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); - return (ENOMEM); + } + return ENOMEM; } mh->m_next = md; @@ -2151,8 +2276,9 @@ mld_v1_transmit_report(struct in6_multi *in6m, const int type) ip6->ip6_vfc &= ~IPV6_VERSION_MASK; ip6->ip6_vfc |= IPV6_VERSION; ip6->ip6_nxt = IPPROTO_ICMPV6; - if (ia != NULL) + if (ia != NULL) { IFA_LOCK(&ia->ia_ifa); + } ip6->ip6_src = ia ? 
ia->ia_addr.sin6_addr : in6addr_any; if (ia != NULL) { IFA_UNLOCK(&ia->ia_ifa); @@ -2185,16 +2311,16 @@ mld_v1_transmit_report(struct in6_multi *in6m, const int type) * Instead we defer the work to the mld_timeout() thread, thus * avoiding unlocking in_multihead_lock here. */ - if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) { - MLD_PRINTF(("%s: v1 outbound queue full\n", __func__)); - error = ENOMEM; - m_freem(mh); - } else { - IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh); + if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) { + MLD_PRINTF(("%s: v1 outbound queue full\n", __func__)); + error = ENOMEM; + m_freem(mh); + } else { + IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh); VERIFY(error == 0); } - return (error); + return error; } /* @@ -2226,7 +2352,7 @@ mld_change_state(struct in6_multi *inm, struct mld_tparams *mtp, int error = 0; VERIFY(mtp != NULL); - bzero(mtp, sizeof (*mtp)); + bzero(mtp, sizeof(*mtp)); IN6M_LOCK_ASSERT_HELD(inm); VERIFY(inm->in6m_mli != NULL); @@ -2269,7 +2395,7 @@ mld_change_state(struct in6_multi *inm, struct mld_tparams *mtp, error = mld_handle_state_change(inm, mli, mtp); out: - return (error); + return error; } /* @@ -2288,10 +2414,10 @@ static int mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli, struct mld_tparams *mtp, const int delay) { - struct ifnet *ifp; - struct ifqueue *ifq; - int error, retval, syncstates; - int odelay; + struct ifnet *ifp; + struct ifqueue *ifq; + int error, retval, syncstates; + int odelay; IN6M_LOCK_ASSERT_HELD(inm); MLI_LOCK_ASSERT_NOTHELD(mli); @@ -2324,7 +2450,7 @@ mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli, if ((ifp->if_flags & IFF_LOOPBACK) || (mli->mli_flags & MLIF_SILENT) || (IFNET_IS_CELLULAR(ifp) && - IN6_IS_ADDR_MC_LINKLOCAL(&inm->in6m_addr)) || + (IN6_IS_ADDR_MC_LINKLOCAL(&inm->in6m_addr) || IN6_IS_ADDR_MC_UNICAST_BASED_LINKLOCAL(&inm->in6m_addr))) || !mld_is_addr_reported(&inm->in6m_addr)) { MLD_PRINTF(("%s: not kicking state machine for silent group\n", __func__)); @@ -2364,7 +2490,7 @@ mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli, } else { inm->in6m_state = MLD_IDLE_MEMBER; error = mld_v1_transmit_report(inm, - MLD_LISTENER_REPORT); + MLD_LISTENER_REPORT); IN6M_LOCK_ASSERT_HELD(inm); MLI_LOCK_ASSERT_HELD(mli); @@ -2411,13 +2537,14 @@ mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli, * use this delay if sooner than the existing one. */ VERIFY(mli->mli_rv > 1); - inm->in6m_scrv = mli->mli_rv; + inm->in6m_scrv = (uint16_t)mli->mli_rv; if (delay) { if (inm->in6m_sctimer > 1) { inm->in6m_sctimer = - min(inm->in6m_sctimer, delay); - } else - inm->in6m_sctimer = delay; + MIN(inm->in6m_sctimer, (uint16_t)delay); + } else { + inm->in6m_sctimer = (uint16_t)delay; + } } else { inm->in6m_sctimer = 1; } @@ -2440,7 +2567,7 @@ mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli, if_name(inm->in6m_ifp))); } - return (error); + return error; } /* @@ -2450,8 +2577,8 @@ static int mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli, struct mld_tparams *mtp) { - struct ifnet *ifp; - int retval = 0; + struct ifnet *ifp; + int retval = 0; IN6M_LOCK_ASSERT_HELD(inm); MLI_LOCK_ASSERT_NOTHELD(mli); @@ -2502,13 +2629,13 @@ mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli, * If record(s) were enqueued, start the state-change * report timer for this group. 
*/ - inm->in6m_scrv = mli->mli_rv; + inm->in6m_scrv = (uint16_t)mli->mli_rv; inm->in6m_sctimer = 1; mtp->sct = 1; MLI_UNLOCK(mli); done: - return (retval); + return retval; } /* @@ -2571,7 +2698,7 @@ mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli, */ IF_DRAIN(&inm->in6m_scq); inm->in6m_timer = 0; - inm->in6m_scrv = mli->mli_rv; + inm->in6m_scrv = (uint16_t)mli->mli_rv; MLD_PRINTF(("%s: Leaving %s/%s with %d " "pending retransmissions.\n", __func__, ip6_sprintf(&inm->in6m_addr), @@ -2596,12 +2723,12 @@ mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli, VERIFY(inm->in6m_nrelecnt != 0); retval = mld_v2_enqueue_group_record( - &inm->in6m_scq, inm, 1, 0, 0, - (mli->mli_flags & MLIF_USEALLOW)); + &inm->in6m_scq, inm, 1, 0, 0, + (mli->mli_flags & MLIF_USEALLOW)); mtp->cst = (inm->in6m_scq.ifq_len > 0); KASSERT(retval != 0, ("%s: enqueue record = %d\n", __func__, - retval)); + retval)); inm->in6m_state = MLD_LEAVING_MEMBER; inm->in6m_sctimer = 1; @@ -2662,17 +2789,17 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, const int is_state_change, const int is_group_query, const int is_source_query, const int use_block_allow) { - struct mldv2_record mr; - struct mldv2_record *pmr; - struct ifnet *ifp; - struct ip6_msource *ims, *nims; - struct mbuf *m0, *m, *md; - int error, is_filter_list_change; - int minrec0len, m0srcs, msrcs, nbytes, off; - int record_has_sources; - int now; - int type; - uint8_t mode; + struct mldv2_record mr; + struct mldv2_record *pmr; + struct ifnet *ifp; + struct ip6_msource *ims, *nims; + struct mbuf *m0, *m, *md; + int error, is_filter_list_change; + int minrec0len, m0srcs, msrcs, nbytes, off; + int record_has_sources; + int now; + uint8_t type; + uint8_t mode; IN6M_LOCK_ASSERT_HELD(inm); MLI_LOCK_ASSERT_HELD(inm->in6m_mli); @@ -2689,7 +2816,7 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, record_has_sources = 1; pmr = NULL; type = MLD_DO_NOTHING; - mode = inm->in6m_st[1].iss_fmode; + mode = (uint8_t)inm->in6m_st[1].iss_fmode; /* * If we did not transition out of ASM mode during t0->t1, @@ -2697,8 +2824,9 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, * the generation of source records. */ if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 && - inm->in6m_nsrc == 0) + inm->in6m_nsrc == 0) { record_has_sources = 0; + } if (is_state_change) { /* @@ -2742,8 +2870,9 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, } } else { type = MLD_CHANGE_TO_INCLUDE_MODE; - if (mode == MCAST_UNDEFINED) + if (mode == MCAST_UNDEFINED) { record_has_sources = 0; + } } } } else { @@ -2768,14 +2897,15 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, /* * Generate the filter list changes using a separate function. */ - if (is_filter_list_change) - return (mld_v2_enqueue_filter_change(ifq, inm)); + if (is_filter_list_change) { + return mld_v2_enqueue_filter_change(ifq, inm); + } if (type == MLD_DO_NOTHING) { MLD_PRINTF(("%s: nothing to do for %s/%s\n", __func__, ip6_sprintf(&inm->in6m_addr), if_name(inm->in6m_ifp))); - return (0); + return 0; } /* @@ -2784,8 +2914,9 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, * ideally more. 
*/ minrec0len = sizeof(struct mldv2_record); - if (record_has_sources) + if (record_has_sources) { minrec0len += sizeof(struct in6_addr); + } MLD_PRINTF(("%s: queueing %s for %s/%s\n", __func__, mld_rec_type_to_str(type), ip6_sprintf(&inm->in6m_addr), @@ -2804,26 +2935,29 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, m0 != NULL && (m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) && (m0->m_pkthdr.len + minrec0len) < - (ifp->if_mtu - MLD_MTUSPACE)) { + (ifp->if_mtu - MLD_MTUSPACE)) { m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - - sizeof(struct mldv2_record)) / - sizeof(struct in6_addr); + sizeof(struct mldv2_record)) / + sizeof(struct in6_addr); m = m0; MLD_PRINTF(("%s: use existing packet\n", __func__)); } else { if (IF_QFULL(ifq)) { MLD_PRINTF(("%s: outbound queue full\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } m = NULL; m0srcs = (ifp->if_mtu - MLD_MTUSPACE - sizeof(struct mldv2_record)) / sizeof(struct in6_addr); - if (!is_state_change && !is_group_query) + if (!is_state_change && !is_group_query) { m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); - if (m == NULL) + } + if (m == NULL) { m = m_gethdr(M_DONTWAIT, MT_DATA); - if (m == NULL) - return (-ENOMEM); + } + if (m == NULL) { + return -ENOMEM; + } mld_save_context(m, ifp); @@ -2840,10 +2974,11 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, mr.mr_addr = inm->in6m_addr; in6_clearscope(&mr.mr_addr); if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) { - if (m != m0) + if (m != m0) { m_freem(m); + } MLD_PRINTF(("%s: m_append() failed.\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } nbytes += sizeof(struct mldv2_record); @@ -2882,7 +3017,7 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, MLD_PRINTF(("%s: node is %d\n", __func__, now)); if ((now != mode) || (now == mode && - (!use_block_allow && mode == MCAST_UNDEFINED))) { + (!use_block_allow && mode == MCAST_UNDEFINED))) { MLD_PRINTF(("%s: skip node\n", __func__)); continue; } @@ -2894,28 +3029,31 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, MLD_PRINTF(("%s: append node\n", __func__)); if (!m_append(m, sizeof(struct in6_addr), (void *)&ims->im6s_addr)) { - if (m != m0) + if (m != m0) { m_freem(m); + } MLD_PRINTF(("%s: m_append() failed.\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } nbytes += sizeof(struct in6_addr); ++msrcs; - if (msrcs == m0srcs) + if (msrcs == m0srcs) { break; + } } MLD_PRINTF(("%s: msrcs is %d this packet\n", __func__, msrcs)); - pmr->mr_numsrc = htons(msrcs); + pmr->mr_numsrc = htons((uint16_t)msrcs); nbytes += (msrcs * sizeof(struct in6_addr)); } if (is_source_query && msrcs == 0) { MLD_PRINTF(("%s: no recorded sources to report\n", __func__)); - if (m != m0) + if (m != m0) { m_freem(m); - return (0); + } + return 0; } /* @@ -2931,8 +3069,9 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, /* * No further work needed if no source list in packet(s). 
*/ - if (!record_has_sources) - return (nbytes); + if (!record_has_sources) { + return nbytes; + } /* * Whilst sources remain to be announced, we need to allocate @@ -2942,23 +3081,26 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, while (nims != NULL) { if (IF_QFULL(ifq)) { MLD_PRINTF(("%s: outbound queue full\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); - if (m == NULL) + if (m == NULL) { m = m_gethdr(M_DONTWAIT, MT_DATA); - if (m == NULL) - return (-ENOMEM); + } + if (m == NULL) { + return -ENOMEM; + } mld_save_context(m, ifp); md = m_getptr(m, 0, &off); pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off); MLD_PRINTF(("%s: allocated next packet\n", __func__)); if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) { - if (m != m0) + if (m != m0) { m_freem(m); + } MLD_PRINTF(("%s: m_append() failed.\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } m->m_pkthdr.vt_nrecs = 1; nbytes += sizeof(struct mldv2_record); @@ -2973,7 +3115,7 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, now = im6s_get_mode(inm, ims, 1); if ((now != mode) || (now == mode && - (!use_block_allow && mode == MCAST_UNDEFINED))) { + (!use_block_allow && mode == MCAST_UNDEFINED))) { MLD_PRINTF(("%s: skip node\n", __func__)); continue; } @@ -2985,24 +3127,26 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, MLD_PRINTF(("%s: append node\n", __func__)); if (!m_append(m, sizeof(struct in6_addr), (void *)&ims->im6s_addr)) { - if (m != m0) + if (m != m0) { m_freem(m); + } MLD_PRINTF(("%s: m_append() failed.\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } ++msrcs; - if (msrcs == m0srcs) + if (msrcs == m0srcs) { break; + } } - pmr->mr_numsrc = htons(msrcs); + pmr->mr_numsrc = htons((uint16_t)msrcs); nbytes += (msrcs * sizeof(struct in6_addr)); MLD_PRINTF(("%s: enqueueing next packet\n", __func__)); IF_ENQUEUE(ifq, m); } - return (nbytes); + return nbytes; } /* @@ -3011,9 +3155,9 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, * current filter modes on each ip_msource node. 
  */
 typedef enum {
-	REC_NONE = 0x00,	/* MCAST_UNDEFINED */
-	REC_ALLOW = 0x01,	/* MCAST_INCLUDE */
-	REC_BLOCK = 0x02,	/* MCAST_EXCLUDE */
+	REC_NONE = 0x00,	/* MCAST_UNDEFINED */
+	REC_ALLOW = 0x01,	/* MCAST_INCLUDE */
+	REC_BLOCK = 0x02,	/* MCAST_EXCLUDE */
 	REC_FULL = REC_ALLOW | REC_BLOCK
 } rectype_t;
 
@@ -3043,35 +3187,36 @@ mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm)
 {
 	static const int MINRECLEN = sizeof(struct mldv2_record) +
 	    sizeof(struct in6_addr);
-	struct ifnet		*ifp;
-	struct mldv2_record	mr;
-	struct mldv2_record	*pmr;
-	struct ip6_msource	*ims, *nims;
-	struct mbuf		*m, *m0, *md;
-	int			m0srcs, nbytes, npbytes, off, rsrcs, schanged;
-	int			nallow, nblock;
-	uint8_t			mode, now, then;
-	rectype_t		crt, drt, nrt;
+	struct ifnet		*ifp;
+	struct mldv2_record	mr;
+	struct mldv2_record	*pmr;
+	struct ip6_msource	*ims, *nims;
+	struct mbuf		*m, *m0, *md;
+	int			m0srcs, nbytes, npbytes, off, rsrcs, schanged;
+	int			nallow, nblock;
+	uint8_t			mode, now, then;
+	rectype_t		crt, drt, nrt;
 
 	IN6M_LOCK_ASSERT_HELD(inm);
 
 	if (inm->in6m_nsrc == 0 ||
-	    (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
-		return (0);
-
-	ifp = inm->in6m_ifp;			/* interface */
-	mode = inm->in6m_st[1].iss_fmode;	/* filter mode at t1 */
-	crt = REC_NONE;	/* current group record type */
-	drt = REC_NONE;	/* mask of completed group record types */
-	nrt = REC_NONE;	/* record type for current node */
-	m0srcs = 0;	/* # source which will fit in current mbuf chain */
-	npbytes = 0;	/* # of bytes appended this packet */
-	nbytes = 0;	/* # of bytes appended to group's state-change queue */
-	rsrcs = 0;	/* # sources encoded in current record */
-	schanged = 0;	/* # nodes encoded in overall filter change */
-	nallow = 0;	/* # of source entries in ALLOW_NEW */
-	nblock = 0;	/* # of source entries in BLOCK_OLD */
-	nims = NULL;	/* next tree node pointer */
+	    (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0)) {
+		return 0;
+	}
+
+	ifp = inm->in6m_ifp;			/* interface */
+	mode = (uint8_t)inm->in6m_st[1].iss_fmode;	/* filter mode at t1 */
+	crt = REC_NONE;	/* current group record type */
+	drt = REC_NONE;	/* mask of completed group record types */
+	nrt = REC_NONE;	/* record type for current node */
+	m0srcs = 0;	/* # source which will fit in current mbuf chain */
+	npbytes = 0;	/* # of bytes appended this packet */
+	nbytes = 0;	/* # of bytes appended to group's state-change queue */
+	rsrcs = 0;	/* # sources encoded in current record */
+	schanged = 0;	/* # nodes encoded in overall filter change */
+	nallow = 0;	/* # of source entries in ALLOW_NEW */
+	nblock = 0;	/* # of source entries in BLOCK_OLD */
+	nims = NULL;	/* next tree node pointer */
 
 	/*
 	 * For each possible filter record mode.
 	 */
@@ -3085,23 +3230,24 @@ mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm)
 		m0 = ifq->ifq_tail;
 		if (m0 != NULL &&
 		    (m0->m_pkthdr.vt_nrecs + 1 <=
-		    MLD_V2_REPORT_MAXRECS) &&
+		    MLD_V2_REPORT_MAXRECS) &&
 		    (m0->m_pkthdr.len + MINRECLEN) <
-		    (ifp->if_mtu - MLD_MTUSPACE)) {
+		    (ifp->if_mtu - MLD_MTUSPACE)) {
 			m = m0;
 			m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
-			    sizeof(struct mldv2_record)) /
-			    sizeof(struct in6_addr);
+			    sizeof(struct mldv2_record)) /
+			    sizeof(struct in6_addr);
 			MLD_PRINTF(("%s: use previous packet\n", __func__));
 		} else {
 			m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
-			if (m == NULL)
+			if (m == NULL) {
 				m = m_gethdr(M_DONTWAIT, MT_DATA);
+			}
 			if (m == NULL) {
 				MLD_PRINTF(("%s: m_get*() failed\n",
 				    __func__));
-				return (-ENOMEM);
+				return -ENOMEM;
 			}
 			m->m_pkthdr.vt_nrecs = 0;
 			mld_save_context(m, ifp);
@@ -3123,11 +3269,12 @@ mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm)
 		mr.mr_addr = inm->in6m_addr;
 		in6_clearscope(&mr.mr_addr);
 		if (!m_append(m, sizeof(mr), (void *)&mr)) {
-			if (m != m0)
+			if (m != m0) {
 				m_freem(m);
+			}
 			MLD_PRINTF(("%s: m_append() failed\n",
 			    __func__));
-			return (-ENOMEM);
+			return -ENOMEM;
 		}
 		npbytes += sizeof(struct mldv2_record);
 		if (m != m0) {
@@ -3176,24 +3323,28 @@ mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm)
 				continue;
 			}
 			nrt = (rectype_t)now;
-			if (nrt == REC_NONE)
+			if (nrt == REC_NONE) {
 				nrt = (rectype_t)(~mode & REC_FULL);
+			}
 			if (schanged++ == 0) {
 				crt = nrt;
-			} else if (crt != nrt)
+			} else if (crt != nrt) {
 				continue;
+			}
 			if (!m_append(m, sizeof(struct in6_addr),
 			    (void *)&ims->im6s_addr)) {
-				if (m != m0)
+				if (m != m0) {
 					m_freem(m);
+				}
 				MLD_PRINTF(("%s: m_append() failed\n",
 				    __func__));
-				return (-ENOMEM);
+				return -ENOMEM;
 			}
 			nallow += !!(crt == REC_ALLOW);
 			nblock += !!(crt == REC_BLOCK);
-			if (++rsrcs == m0srcs)
+			if (++rsrcs == m0srcs) {
 				break;
+			}
 		}
 
 		/*
		 * If we did not append any tree nodes on this
@@ -3209,23 +3360,25 @@ mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm)
 				MLD_PRINTF(("%s: m_adj(m, -mr)\n",
 				    __func__));
 				m_adj(m, -((int)sizeof(
-				    struct mldv2_record)));
+					struct mldv2_record)));
 			}
 			continue;
 		}
 		npbytes += (rsrcs * sizeof(struct in6_addr));
-		if (crt == REC_ALLOW)
+		if (crt == REC_ALLOW) {
 			pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
-		else if (crt == REC_BLOCK)
+		} else if (crt == REC_BLOCK) {
 			pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
-		pmr->mr_numsrc = htons(rsrcs);
+		}
+		pmr->mr_numsrc = htons((uint16_t)rsrcs);
 		/*
 		 * Count the new group record, and enqueue this
 		 * packet if it wasn't already queued.
 		 */
 		m->m_pkthdr.vt_nrecs++;
-		if (m != m0)
+		if (m != m0) {
 			IF_ENQUEUE(ifq, m);
+		}
 		nbytes += npbytes;
 	} while (nims != NULL);
 	drt |= crt;
@@ -3235,19 +3388,19 @@ mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm)
 	MLD_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__,
 	    nallow, nblock));
 
-	return (nbytes);
+	return nbytes;
 }
 
 static int
 mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
 {
-	struct ifqueue	*gq;
-	struct mbuf	*m;		/* pending state-change */
-	struct mbuf	*m0;		/* copy of pending state-change */
-	struct mbuf	*mt;		/* last state-change in packet */
-	struct mbuf	*n;
-	int		docopy, domerge;
-	u_int		recslen;
+	struct ifqueue	*gq;
+	struct mbuf	*m;		/* pending state-change */
+	struct mbuf	*m0;		/* copy of pending state-change */
+	struct mbuf	*mt;		/* last state-change in packet */
+	struct mbuf	*n;
+	int		docopy, domerge;
+	u_int		recslen;
 
 	IN6M_LOCK_ASSERT_HELD(inm);
 
@@ -3259,8 +3412,9 @@ mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
 	 * If there are further pending retransmissions, make a writable
 	 * copy of each queued state-change message before merging.
 	 */
-	if (inm->in6m_scrv > 0)
+	if (inm->in6m_scrv > 0) {
 		docopy = 1;
+	}
 
 	gq = &inm->in6m_scq;
 #ifdef MLD_DEBUG
@@ -3293,8 +3447,9 @@ mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
 		    m->m_pkthdr.vt_nrecs <=
 		    MLD_V2_REPORT_MAXRECS) &&
 		    (mt->m_pkthdr.len + recslen <=
-		    (inm->in6m_ifp->if_mtu - MLD_MTUSPACE)))
+		    (inm->in6m_ifp->if_mtu - MLD_MTUSPACE))) {
 			domerge = 1;
+		}
 		}
 
 		if (!domerge && IF_QFULL(gq)) {
@@ -3321,8 +3476,9 @@ mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
 			MLD_PRINTF(("%s: copying 0x%llx\n", __func__,
 			    (uint64_t)VM_KERNEL_ADDRPERM(m)));
 			m0 = m_dup(m, M_NOWAIT);
-			if (m0 == NULL)
-				return (ENOMEM);
+			if (m0 == NULL) {
+				return ENOMEM;
+			}
 			m0->m_nextpkt = NULL;
 			m = m->m_nextpkt;
 		}
@@ -3333,7 +3489,7 @@ mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
 			    (uint64_t)VM_KERNEL_ADDRPERM(ifscq)));
 			IF_ENQUEUE(ifscq, m0);
 		} else {
-			struct mbuf *mtl;	/* last mbuf of packet mt */
+			struct mbuf *mtl;	/* last mbuf of packet mt */
 
 			MLD_PRINTF(("%s: merging 0x%llx with ifscq tail "
 			    "0x%llx)\n", __func__,
@@ -3350,7 +3506,7 @@ mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
 		}
 	}
 
-	return (0);
+	return 0;
 }
 
 /*
@@ -3359,10 +3515,10 @@ mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
 static uint32_t
 mld_v2_dispatch_general_query(struct mld_ifinfo *mli)
 {
-	struct ifnet		*ifp;
-	struct in6_multi	*inm;
-	struct in6_multistep	step;
-	int			retval;
+	struct ifnet		*ifp;
+	struct in6_multi	*inm;
+	struct in6_multistep	step;
+	int			retval;
 
 	MLI_LOCK_ASSERT_HELD(mli);
 
@@ -3375,8 +3531,9 @@ mld_v2_dispatch_general_query(struct mld_ifinfo *mli)
 	IN6_FIRST_MULTI(step, inm);
 	while (inm != NULL) {
 		IN6M_LOCK(inm);
-		if (inm->in6m_ifp != ifp)
+		if (inm->in6m_ifp != ifp) {
 			goto next;
+		}
 
 		switch (inm->in6m_state) {
 		case MLD_NOT_MEMBER:
@@ -3407,7 +3564,7 @@ next:
 	in6_multihead_lock_done();
 
 	MLI_LOCK(mli);
-	mld_dispatch_queue(mli, &mli->mli_gq, MLD_MAX_RESPONSE_BURST);
+	mld_dispatch_queue_locked(mli, &mli->mli_gq, MLD_MAX_RESPONSE_BURST);
 	MLI_LOCK_ASSERT_HELD(mli);
 
 	/*
@@ -3415,10 +3572,10 @@ next:
 	 */
 	if (mli->mli_gq.ifq_head != NULL) {
 		mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
-		    MLD_RESPONSE_BURST_INTERVAL);
+			MLD_RESPONSE_BURST_INTERVAL);
 	}
 
-	return (mli->mli_v2_timer);
+	return mli->mli_v2_timer;
 }
 
 /*
@@ -3429,16 +3586,16 @@ next:
 static void
 mld_dispatch_packet(struct mbuf *m)
 {
-	struct ip6_moptions	*im6o;
-	struct ifnet		*ifp;
-	struct ifnet		*oifp = NULL;
-	struct mbuf		*m0;
-	struct mbuf		*md;
-	struct ip6_hdr		*ip6;
-	struct mld_hdr		*mld;
-	int			error;
-	int			off;
-	int			type;
+	struct ip6_moptions	*im6o;
+	struct ifnet		*ifp;
+	struct ifnet		*oifp = NULL;
+	struct mbuf		*m0;
+	struct mbuf		*md;
+	struct ip6_hdr		*ip6;
+	struct mld_hdr		*mld;
+	int			error;
+	int			off;
+	int			type;
 
 	MLD_PRINTF(("%s: transmit 0x%llx\n", __func__,
 	    (uint64_t)VM_KERNEL_ADDRPERM(m)));
@@ -3456,7 +3613,7 @@ mld_dispatch_packet(struct mbuf *m)
 		return;
 	}
 
-	im6o = ip6_allocmoptions(M_WAITOK);
+	im6o = ip6_allocmoptions(Z_WAITOK);
 	if (im6o == NULL) {
 		m_freem(m);
 		return;
@@ -3487,7 +3644,7 @@ mld_dispatch_packet(struct mbuf *m)
 	m0->m_pkthdr.rcvif = lo_ifp;
 
 	ip6 = mtod(m0, struct ip6_hdr *);
-	(void) in6_setscope(&ip6->ip6_dst, ifp, NULL);
+	(void)in6_setscope(&ip6->ip6_dst, ifp, NULL);
 
 	/*
 	 * Retrieve the ICMPv6 type before handoff to ip6_output(),
@@ -3498,8 +3655,8 @@ mld_dispatch_packet(struct mbuf *m)
 	type = mld->mld_type;
 
 	if (ifp->if_eflags & IFEF_TXSTART) {
-		/*
-		 * Use control service class if the outgoing
+		/*
+		 * Use control service class if the outgoing
 		 * interface supports transmit-start model.
 		 */
 		(void) m_set_service_class(m0, MBUF_SC_CTL);
@@ -3513,8 +3670,9 @@ mld_dispatch_packet(struct mbuf *m)
 	if (error) {
 		MLD_PRINTF(("%s: ip6_output(0x%llx) = %d\n", __func__,
 		    (uint64_t)VM_KERNEL_ADDRPERM(m0), error));
-		if (oifp != NULL)
+		if (oifp != NULL) {
 			ifnet_release(oifp);
+		}
 		return;
 	}
 
@@ -3546,27 +3704,29 @@ mld_dispatch_packet(struct mbuf *m)
 static struct mbuf *
 mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
 {
-	struct mbuf		*mh;
-	struct mldv2_report	*mld;
-	struct ip6_hdr		*ip6;
-	struct in6_ifaddr	*ia;
-	int			mldreclen;
+	struct mbuf		*mh;
+	struct mldv2_report	*mld;
+	struct ip6_hdr		*ip6;
+	struct in6_ifaddr	*ia;
+	int			mldreclen;
 
 	VERIFY(m->m_flags & M_PKTHDR);
 
 	/*
 	 * RFC3590: OK to send as :: or tentative during DAD.
 	 */
-	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
-	if (ia == NULL)
+	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST);
+	if (ia == NULL) {
 		MLD_PRINTF(("%s: warning: ia is NULL\n", __func__));
+	}
 
 	MGETHDR(mh, M_DONTWAIT, MT_HEADER);
 	if (mh == NULL) {
-		if (ia != NULL)
+		if (ia != NULL) {
 			IFA_REMREF(&ia->ia_ifa);
+		}
 		m_freem(m);
-		return (NULL);
+		return NULL;
 	}
 	MH_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));
@@ -3582,8 +3742,9 @@ mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
 	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
 	ip6->ip6_vfc |= IPV6_VERSION;
 	ip6->ip6_nxt = IPPROTO_ICMPV6;
-	if (ia != NULL)
+	if (ia != NULL) {
 		IFA_LOCK(&ia->ia_ifa);
+	}
 	ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
 	if (ia != NULL) {
 		IFA_UNLOCK(&ia->ia_ifa);
@@ -3605,7 +3766,7 @@ mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
 	mh->m_next = m;
 	mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
 	    sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
-	return (mh);
+	return mh;
 }
 
 #ifdef MLD_DEBUG
@@ -3613,20 +3774,20 @@ static const char *
 mld_rec_type_to_str(const int type)
 {
 	switch (type) {
-		case MLD_CHANGE_TO_EXCLUDE_MODE:
-			return "TO_EX";
-		case MLD_CHANGE_TO_INCLUDE_MODE:
-			return "TO_IN";
-		case MLD_MODE_IS_EXCLUDE:
-			return "MODE_EX";
-		case MLD_MODE_IS_INCLUDE:
-			return "MODE_IN";
-		case MLD_ALLOW_NEW_SOURCES:
-			return "ALLOW_NEW";
-		case MLD_BLOCK_OLD_SOURCES:
-			return "BLOCK_OLD";
-		default:
-			break;
+	case MLD_CHANGE_TO_EXCLUDE_MODE:
+		return "TO_EX";
+	case MLD_CHANGE_TO_INCLUDE_MODE:
+		return "TO_IN";
+	case MLD_MODE_IS_EXCLUDE:
+		return "MODE_EX";
+	case MLD_MODE_IS_INCLUDE:
+		return "MODE_IN";
+	case MLD_ALLOW_NEW_SOURCES:
+		return "ALLOW_NEW";
+	case MLD_BLOCK_OLD_SOURCES:
+		return "BLOCK_OLD";
+	default:
+		break;
 	}
 	return "unknown";
 }
@@ -3635,14 +3796,13 @@ mld_rec_type_to_str(const int type)
 void
 mld_init(void)
 {
-	MLD_PRINTF(("%s: initializing\n", __func__));
-	/* Setup lock group and attribute for mld_mtx */
-	mld_mtx_grp_attr = lck_grp_attr_alloc_init();
-	mld_mtx_grp = lck_grp_alloc_init("mld_mtx\n", mld_mtx_grp_attr);
-	mld_mtx_attr = lck_attr_alloc_init();
-	lck_mtx_init(&mld_mtx, mld_mtx_grp, mld_mtx_attr);
+	/* Setup lock group and attribute for mld_mtx */
+	mld_mtx_grp_attr = lck_grp_attr_alloc_init();
+	mld_mtx_grp = lck_grp_alloc_init("mld_mtx\n", mld_mtx_grp_attr);
+	mld_mtx_attr = lck_attr_alloc_init();
+	lck_mtx_init(&mld_mtx, mld_mtx_grp, mld_mtx_attr);
 
 	ip6_initpktopts(&mld_po);
 	mld_po.ip6po_hlim = 1;
@@ -3650,14 +3810,4 @@ mld_init(void)
 	mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
 	mld_po.ip6po_flags = IP6PO_DONTFRAG;
 	LIST_INIT(&mli_head);
-
-	mli_size = sizeof (struct mld_ifinfo);
-	mli_zone = zinit(mli_size, MLI_ZONE_MAX * mli_size,
-	    0, MLI_ZONE_NAME);
-	if (mli_zone == NULL) {
-		panic("%s: failed allocating %s", __func__, MLI_ZONE_NAME);
-		/* NOTREACHED */
-	}
-	zone_change(mli_zone, Z_EXPAND, TRUE);
-	zone_change(mli_zone, Z_CALLERACCT, FALSE);
 }