X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/c0fea4742e91338fffdcf79f86a7c1d5e2b97eb1..4d15aeb193b2c68f1d38666c317f8d3734f5f083:/bsd/netinet/raw_ip.c

diff --git a/bsd/netinet/raw_ip.c b/bsd/netinet/raw_ip.c
index 4028d1291..1f7ccb227 100644
--- a/bsd/netinet/raw_ip.c
+++ b/bsd/netinet/raw_ip.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
- *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * Copyright (c) 1982, 1986, 1988, 1993
@@ -53,22 +59,29 @@
  *
  *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
  */
+/*
+ * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
+ * support for mandatory and extensible security protections. This notice
+ * is included in support of clause 2.2 (b) of the Apple Public License,
+ * Version 2.0.
+ */

 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
 #include
+#include
+#include

-#if __FreeBSD__
-#include
-#endif
+#include

 #include
 #include
@@ -76,11 +89,15 @@
 #define _IP_VHL
 #include
 #include
+#include
 #include
 #include
 #include
 #include
-#include
+
+#if INET6
+#include
+#endif /* INET6 */

 #include

@@ -92,17 +109,25 @@
 #include
 #endif

-#if IPSEC
-extern int ipsec_bypass;
-extern lck_mtx_t *sadb_mutex;
-#endif
+#if CONFIG_MACF_NET
+#include
+#endif /* MAC_NET */
+
+int load_ipfw(void);
+int rip_detach(struct socket *);
+int rip_abort(struct socket *);
+int rip_disconnect(struct socket *);
+int rip_bind(struct socket *, struct sockaddr *, struct proc *);
+int rip_connect(struct socket *, struct sockaddr *, struct proc *);
+int rip_shutdown(struct socket *);

-extern u_long route_generation;
 struct inpcbhead ripcb;
 struct inpcbinfo ripcbinfo;

 /* control hooks for ipfw and dummynet */
+#if IPFIREWALL
 ip_fw_ctl_t *ip_fw_ctl_ptr;
+#endif /* IPFIREWALL */
 #if DUMMYNET
 ip_dn_ctl_t *ip_dn_ctl_ptr;
 #endif /* DUMMYNET */
@@ -121,61 +146,72 @@ ip_dn_ctl_t *ip_dn_ctl_ptr;
  * Initialize raw connection block q.
  */
 void
-rip_init()
+rip_init(struct protosw *pp, struct domain *dp)
 {
-	struct inpcbinfo *pcbinfo;
+#pragma unused(dp)
+	static int rip_initialized = 0;
+	struct inpcbinfo *pcbinfo;
+
+	VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED);
+
+	if (rip_initialized)
+		return;
+	rip_initialized = 1;

 	LIST_INIT(&ripcb);
-	ripcbinfo.listhead = &ripcb;
+	ripcbinfo.ipi_listhead = &ripcb;
 	/*
 	 * XXX We don't use the hash list for raw IP, but it's easier
 	 * to allocate a one entry hash list than it is to check all
-	 * over the place for hashbase == NULL.
+	 * over the place for ipi_hashbase == NULL.
 	 */
-	ripcbinfo.hashbase = hashinit(1, M_PCB, &ripcbinfo.hashmask);
-	ripcbinfo.porthashbase = hashinit(1, M_PCB, &ripcbinfo.porthashmask);
+	ripcbinfo.ipi_hashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_hashmask);
+	ripcbinfo.ipi_porthashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_porthashmask);

-	ripcbinfo.ipi_zone = (void *) zinit(sizeof(struct inpcb),
-		(4096 * sizeof(struct inpcb)),
-		4096, "ripzone");
+	ripcbinfo.ipi_zone = zinit(sizeof(struct inpcb),
+	    (4096 * sizeof(struct inpcb)), 4096, "ripzone");

 	pcbinfo = &ripcbinfo;
 	/*
 	 * allocate lock group attribute and group for udp pcb mutexes
 	 */
-	pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
+	pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init();
+	pcbinfo->ipi_lock_grp = lck_grp_alloc_init("ripcb", pcbinfo->ipi_lock_grp_attr);

-	pcbinfo->mtx_grp = lck_grp_alloc_init("ripcb", pcbinfo->mtx_grp_attr);
-
 	/*
 	 * allocate the lock attribute for udp pcb mutexes
 	 */
-	pcbinfo->mtx_attr = lck_attr_alloc_init();
-
-	if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL)
-		return;	/* pretty much dead if this fails... */
+	pcbinfo->ipi_lock_attr = lck_attr_alloc_init();
+	if ((pcbinfo->ipi_lock = lck_rw_alloc_init(pcbinfo->ipi_lock_grp,
+	    pcbinfo->ipi_lock_attr)) == NULL) {
+		panic("%s: unable to allocate PCB lock\n", __func__);
+		/* NOTREACHED */
+	}
+	in_pcbinfo_attach(&ripcbinfo);
 }

-static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET };
+static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET , 0, {0}, {0,0,0,0,0,0,0,0,} };
+
 /*
  * Setup generic address and protocol structures
  * for raw_input routine, then pass them along with
  * mbuf chain.
  */
 void
-rip_input(m, iphlen)
-	struct mbuf *m;
-	int iphlen;
+rip_input(struct mbuf *m, int iphlen)
 {
-	register struct ip *ip = mtod(m, struct ip *);
-	register struct inpcb *inp;
+	struct ip *ip = mtod(m, struct ip *);
+	struct inpcb *inp;
 	struct inpcb *last = 0;
 	struct mbuf *opts = 0;
-	int skipit;
+	int skipit = 0, ret = 0;
+	struct ifnet *ifp = m->m_pkthdr.rcvif;
+
+	/* Expect 32-bit aligned data pointer on strict-align platforms */
+	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

 	ripsrc.sin_addr = ip->ip_src;
-	lck_rw_lock_shared(ripcbinfo.mtx);
+	lck_rw_lock_shared(ripcbinfo.ipi_lock);
 	LIST_FOREACH(inp, &ripcb, inp_list) {
 #if INET6
 		if ((inp->inp_vflag & INP_IPV4) == 0)
@@ -189,43 +225,57 @@ rip_input(m, iphlen)
 		if (inp->inp_faddr.s_addr &&
 		    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
 			continue;
+		if (inp_restricted_recv(inp, ifp))
+			continue;
 		if (last) {
 			struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);
-
-#if IPSEC
-			/* check AH/ESP integrity. */
+			skipit = 0;
-			if (ipsec_bypass == 0 && n) {
-				lck_mtx_lock(sadb_mutex);
-				if (ipsec4_in_reject_so(n, last->inp_socket)) {
+
+#if NECP
+			if (n && !necp_socket_is_allowed_to_send_recv_v4(last, 0, 0,
+				&ip->ip_dst, &ip->ip_src, ifp, NULL, NULL)) {
+				m_freem(n);
+				/* do not inject data to pcb */
+				skipit = 1;
+			}
+#endif /* NECP */
+#if CONFIG_MACF_NET
+			if (n && skipit == 0) {
+				if (mac_inpcb_check_deliver(last, n, AF_INET,
+				    SOCK_RAW) != 0) {
 					m_freem(n);
-					ipsecstat.in_polvio++;
-					/* do not inject data to pcb */
 					skipit = 1;
 				}
-				lck_mtx_unlock(sadb_mutex);
-			}
-#endif /*IPSEC*/
+			}
+#endif
 			if (n && skipit == 0) {
 				int error = 0;
-				if (last->inp_flags & INP_CONTROLOPTS ||
-				    last->inp_socket->so_options & SO_TIMESTAMP)
-					ip_savecontrol(last, &opts, ip, n);
+				if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
+				    (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
+				    (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0) {
+					ret = ip_savecontrol(last, &opts, ip, n);
+					if (ret != 0) {
+						m_freem(n);
+						m_freem(opts);
+						last = inp;
+						continue;
+					}
+				}
 				if (last->inp_flags & INP_STRIPHDR) {
 					n->m_len -= iphlen;
 					n->m_pkthdr.len -= iphlen;
 					n->m_data += iphlen;
 				}
-// ###LOCK need to lock that socket?
+				so_recv_data_stat(last->inp_socket, m, 0);
 				if (sbappendaddr(&last->inp_socket->so_rcv,
 				    (struct sockaddr *)&ripsrc, n,
 				    opts, &error) != 0) {
 					sorwakeup(last->inp_socket);
-				}
-				else {
+				} else {
 					if (error) {
 						/* should notify about lost packet */
-						kprintf("rip_input can't append to socket\n");
+						ipstat.ips_raw_sappend_fail++;
 					}
 				}
 				opts = 0;
@@ -233,44 +283,61 @@ rip_input(m, iphlen)
 		}
 		last = inp;
 	}
-	lck_rw_done(ripcbinfo.mtx);
-#if IPSEC
-	/* check AH/ESP integrity. */
+	skipit = 0;
-	if (ipsec_bypass == 0 && last) {
-		lck_mtx_lock(sadb_mutex);
-		if (ipsec4_in_reject_so(m, last->inp_socket)) {
-			m_freem(m);
-			ipsecstat.in_polvio++;
-			ipstat.ips_delivered--;
-			/* do not inject data to pcb */
+#if NECP
+	if (last && !necp_socket_is_allowed_to_send_recv_v4(last, 0, 0,
+		&ip->ip_dst, &ip->ip_src, ifp, NULL, NULL)) {
+		m_freem(m);
+		OSAddAtomic(1, &ipstat.ips_delivered);
+		/* do not inject data to pcb */
+		skipit = 1;
+	}
+#endif /* NECP */
+#if CONFIG_MACF_NET
+	if (last && skipit == 0) {
+		if (mac_inpcb_check_deliver(last, m, AF_INET, SOCK_RAW) != 0) {
 			skipit = 1;
+			m_freem(m);
 		}
-		lck_mtx_unlock(sadb_mutex);
-	}
-#endif /*IPSEC*/
+	}
+#endif
 	if (skipit == 0) {
 		if (last) {
-			if (last->inp_flags & INP_CONTROLOPTS ||
-			    last->inp_socket->so_options & SO_TIMESTAMP)
-				ip_savecontrol(last, &opts, ip, m);
+			if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
+			    (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
+			    (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0) {
+				ret = ip_savecontrol(last, &opts, ip, m);
+				if (ret != 0) {
+					m_freem(m);
+					m_freem(opts);
+					goto unlock;
+				}
+			}
 			if (last->inp_flags & INP_STRIPHDR) {
 				m->m_len -= iphlen;
 				m->m_pkthdr.len -= iphlen;
 				m->m_data += iphlen;
 			}
+			so_recv_data_stat(last->inp_socket, m, 0);
 			if (sbappendaddr(&last->inp_socket->so_rcv,
 			    (struct sockaddr *)&ripsrc, m, opts, NULL) != 0) {
 				sorwakeup(last->inp_socket);
 			} else {
-				kprintf("rip_input(2) can't append to socket\n");
+				ipstat.ips_raw_sappend_fail++;
 			}
 		} else {
 			m_freem(m);
-			ipstat.ips_noproto++;
-			ipstat.ips_delivered--;
+			OSAddAtomic(1, &ipstat.ips_noproto);
+			OSAddAtomic(-1, &ipstat.ips_delivered);
 		}
 	}
+unlock:
+	/*
+	 * Keep the list locked because socket filter may force the socket lock
+	 * to be released when calling sbappendaddr() -- see rdar://7627704
+	 */
+	lck_rw_done(ripcbinfo.ipi_lock);
 }

 /*
@@ -278,14 +345,61 @@ rip_input(m, iphlen)
  * Tack on options user may have setup with control call.
  */
 int
-rip_output(m, so, dst)
-	register struct mbuf *m;
-	struct socket *so;
-	u_long dst;
+rip_output(
+	struct mbuf *m,
+	struct socket *so,
+	u_int32_t dst,
+	struct mbuf *control)
 {
-	register struct ip *ip;
-	register struct inpcb *inp = sotoinpcb(so);
+	struct ip *ip;
+	struct inpcb *inp = sotoinpcb(so);
 	int flags = (so->so_options & SO_DONTROUTE) | IP_ALLOWBROADCAST;
+	struct ip_out_args ipoa =
+	    { IFSCOPE_NONE, { 0 }, IPOAF_SELECT_SRCIF, 0, 0, 0 };
+	struct ip_moptions *imo;
+	int error = 0;
+	int sotc = SO_TC_UNSPEC;
+	int netsvctype = _NET_SERVICE_TYPE_UNSPEC;
+
+	if (control != NULL) {
+		sotc = so_tc_from_control(control, &netsvctype);
+
+		m_freem(control);
+		control = NULL;
+	}
+	if (sotc == SO_TC_UNSPEC) {
+		sotc = so->so_traffic_class;
+		netsvctype = so->so_netsvctype;
+	}
+
+	if (inp == NULL
+#if NECP
+	    || (necp_socket_should_use_flow_divert(inp))
+#endif /* NECP */
+	    ) {
+		if (m != NULL)
+			m_freem(m);
+		VERIFY(control == NULL);
+		return (inp == NULL ? EINVAL : EPROTOTYPE);
+	}
+
+	flags |= IP_OUTARGS;
+	/* If socket was bound to an ifindex, tell ip_output about it */
+	if (inp->inp_flags & INP_BOUND_IF) {
+		ipoa.ipoa_boundif = inp->inp_boundifp->if_index;
+		ipoa.ipoa_flags |= IPOAF_BOUND_IF;
+	}
+	if (INP_NO_CELLULAR(inp))
+		ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
+	if (INP_NO_EXPENSIVE(inp))
+		ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
+	if (INP_AWDL_UNRESTRICTED(inp))
+		ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
+	ipoa.ipoa_sotc = sotc;
+	ipoa.ipoa_netsvctype = netsvctype;
+
+	if (inp->inp_flowhash == 0)
+		inp->inp_flowhash = inp_calc_flowhash(inp);

 	/*
 	 * If the user handed us a complete IP packet, use it.
@@ -296,7 +410,9 @@ rip_output(m, so, dst)
 			m_freem(m);
 			return(EMSGSIZE);
 		}
-		M_PREPEND(m, sizeof(struct ip), M_WAIT);
+		M_PREPEND(m, sizeof(struct ip), M_WAIT, 1);
+		if (m == NULL)
+			return ENOBUFS;
 		ip = mtod(m, struct ip *);
 		ip->ip_tos = inp->inp_ip_tos;
 		ip->ip_off = 0;
@@ -321,60 +437,187 @@ rip_output(m, so, dst)
 			return EINVAL;
 		}
 		if (ip->ip_id == 0)
-#if RANDOM_IP_ID
 			ip->ip_id = ip_randomid();
-#else
-			ip->ip_id = htons(ip_id++);
-#endif
 		/* XXX prevent ip_output from overwriting header fields */
 		flags |= IP_RAWOUTPUT;
-		ipstat.ips_rawout++;
+		OSAddAtomic(1, &ipstat.ips_rawout);
 	}

+	if (inp->inp_laddr.s_addr != INADDR_ANY)
+		ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;
+
+#if NECP
+	{
+		necp_kernel_policy_id policy_id;
+		u_int32_t route_rule_id;
+
+		/*
+		 * We need a route to perform NECP route rule checks
+		 */
+		if (net_qos_policy_restricted != 0 &&
+		    ROUTE_UNUSABLE(&inp->inp_route)) {
+			struct sockaddr_in to;
+			struct sockaddr_in from;
+			struct in_addr laddr = ip->ip_src;
+
+			ROUTE_RELEASE(&inp->inp_route);
+
+			bzero(&from, sizeof(struct sockaddr_in));
+			from.sin_family = AF_INET;
+			from.sin_len = sizeof(struct sockaddr_in);
+			from.sin_addr = laddr;
+
+			bzero(&to, sizeof(struct sockaddr_in));
+			to.sin_family = AF_INET;
+			to.sin_len = sizeof(struct sockaddr_in);
+			to.sin_addr.s_addr = ip->ip_dst.s_addr;
+
+			if ((error = in_pcbladdr(inp, (struct sockaddr *)&to,
+			    &laddr, ipoa.ipoa_boundif, NULL, 1)) != 0) {
+				printf("%s in_pcbladdr(%p) error %d\n",
+				    __func__, inp, error);
+				m_freem(m);
+				return (error);
+			}
+
+			inp_update_necp_policy(inp, (struct sockaddr *)&from,
+			    (struct sockaddr *)&to, ipoa.ipoa_boundif);
+			inp->inp_policyresult.results.qos_marking_gencount = 0;
+		}
+
+		if (!necp_socket_is_allowed_to_send_recv_v4(inp, 0, 0,
+		    &ip->ip_src, &ip->ip_dst, NULL, &policy_id, &route_rule_id)) {
+			m_freem(m);
+			return(EHOSTUNREACH);
+		}
+
+		necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id);
+
+		if (net_qos_policy_restricted != 0) {
+			struct ifnet *rt_ifp = NULL;
+
+			if (inp->inp_route.ro_rt != NULL)
+				rt_ifp = inp->inp_route.ro_rt->rt_ifp;
+
+			printf("%s inp %p last_pid %u inp_boundifp %d inp_last_outifp %d rt_ifp %d route_rule_id %u\n",
+			    __func__, inp,
+			    inp->inp_socket != NULL ? inp->inp_socket->last_pid : -1,
+			    inp->inp_boundifp != NULL ? inp->inp_boundifp->if_index : -1,
+			    inp->inp_last_outifp != NULL ? inp->inp_last_outifp->if_index : -1,
+			    rt_ifp != NULL ? rt_ifp->if_index : -1,
+			    route_rule_id);
+			necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt,
+			    NULL, route_rule_id);
+		}
+	}
+#endif /* NECP */
+	if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED))
+		ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
+
 #if IPSEC
-	if (ipsec_bypass == 0 && ipsec_setsocket(m, so) != 0) {
+	if (inp->inp_sp != NULL && ipsec_setsocket(m, so) != 0) {
 		m_freem(m);
 		return ENOBUFS;
 	}
 #endif /*IPSEC*/

-	if (inp->inp_route.ro_rt && inp->inp_route.ro_rt->generation_id != route_generation) {
-		rtfree(inp->inp_route.ro_rt);
-		inp->inp_route.ro_rt = (struct rtentry *)0;
+	if (ROUTE_UNUSABLE(&inp->inp_route))
+		ROUTE_RELEASE(&inp->inp_route);
+
+	set_packet_service_class(m, so, sotc, 0);
+	m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
+	m->m_pkthdr.pkt_flowid = inp->inp_flowhash;
+	m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC |
+	    PKTF_FLOW_RAWSOCK);
+	m->m_pkthdr.pkt_proto = inp->inp_ip_p;
+
+#if CONFIG_MACF_NET
+	mac_mbuf_label_associate_inpcb(inp, m);
+#endif
+
+	imo = inp->inp_moptions;
+	if (imo != NULL)
+		IMO_ADDREF(imo);
+	/*
+	 * The domain lock is held across ip_output, so it is okay
+	 * to pass the PCB cached route pointer directly to IP and
+	 * the modules beneath it.
+	 */
+	// TODO: PASS DOWN ROUTE RULE ID
+	error = ip_output(m, inp->inp_options, &inp->inp_route, flags,
+	    imo, &ipoa);
+
+	if (imo != NULL)
+		IMO_REMREF(imo);
+
+	if (inp->inp_route.ro_rt != NULL) {
+		struct rtentry *rt = inp->inp_route.ro_rt;
+		struct ifnet *outif;
+
+		if ((rt->rt_flags & (RTF_MULTICAST|RTF_BROADCAST)) ||
+		    inp->inp_socket == NULL ||
+		    !(inp->inp_socket->so_state & SS_ISCONNECTED)) {
+			rt = NULL;	/* unusable */
+		}
+		/*
+		 * Always discard the cached route for unconnected
+		 * socket or if it is a multicast route.
+		 */
+		if (rt == NULL)
+			ROUTE_RELEASE(&inp->inp_route);
+
+		/*
+		 * If this is a connected socket and the destination
+		 * route is unicast, update outif with that of the
+		 * route interface used by IP.
+		 */
+		if (rt != NULL && (outif = rt->rt_ifp) != inp->inp_last_outifp)
+			inp->inp_last_outifp = outif;
+	} else {
+		ROUTE_RELEASE(&inp->inp_route);
 	}

-	return (ip_output_list(m, 0, inp->inp_options, &inp->inp_route, flags,
-	    inp->inp_moptions));
+	/*
+	 * If output interface was cellular/expensive, and this socket is
+	 * denied access to it, generate an event.
+	 */
+	if (error != 0 && (ipoa.ipoa_retflags & IPOARF_IFDENIED) &&
+	    (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp)))
+		soevent(so, (SO_FILT_HINT_LOCKED|SO_FILT_HINT_IFDENIED));
+
+	return (error);
 }

-extern int
-load_ipfw()
+#if IPFIREWALL
+int
+load_ipfw(void)
 {
 	kern_return_t err;
-
+
 	ipfw_init();
-
+
 #if DUMMYNET
 	if (!DUMMYNET_LOADED)
 		ip_dn_init();
 #endif /* DUMMYNET */
 	err = 0;
-
+
 	return err == 0 && ip_fw_ctl_ptr == NULL ? -1 : err;
 }
+#endif /* IPFIREWALL */

 /*
  * Raw IP socket option processing.
 */
 int
-rip_ctloutput(so, sopt)
-	struct socket *so;
-	struct sockopt *sopt;
+rip_ctloutput(struct socket *so, struct sockopt *sopt)
 {
 	struct inpcb *inp = sotoinpcb(so);
 	int error, optval;

-	if (sopt->sopt_level != IPPROTO_IP)
+	/* Allow at this level */
+	if (sopt->sopt_level != IPPROTO_IP &&
+	    !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH))
 		return (EINVAL);

 	error = 0;
@@ -387,11 +630,12 @@ rip_ctloutput(so, sopt)
 			error = sooptcopyout(sopt, &optval, sizeof optval);
 			break;

-	case IP_STRIPHDR:
-		optval = inp->inp_flags & INP_STRIPHDR;
-		error = sooptcopyout(sopt, &optval, sizeof optval);
-		break;
+		case IP_STRIPHDR:
+			optval = inp->inp_flags & INP_STRIPHDR;
+			error = sooptcopyout(sopt, &optval, sizeof optval);
+			break;

+#if IPFIREWALL
 		case IP_FW_ADD:
 		case IP_FW_GET:
 		case IP_OLD_FW_ADD:
@@ -403,9 +647,12 @@ rip_ctloutput(so, sopt)
 			else
 				error = ENOPROTOOPT;
 			break;
+#endif /* IPFIREWALL */

 #if DUMMYNET
 		case IP_DUMMYNET_GET:
+			if (!DUMMYNET_LOADED)
+				ip_dn_init();
 			if (DUMMYNET_LOADED)
 				error = ip_dn_ctl_ptr(sopt);
 			else
@@ -413,17 +660,6 @@ rip_ctloutput(so, sopt)
 			break ;
 #endif /* DUMMYNET */

-		case MRT_INIT:
-		case MRT_DONE:
-		case MRT_ADD_VIF:
-		case MRT_DEL_VIF:
-		case MRT_ADD_MFC:
-		case MRT_DEL_MFC:
-		case MRT_VERSION:
-		case MRT_ASSERT:
-			error = ip_mrouter_get(so, sopt);
-			break;
-
 		default:
 			error = ip_ctloutput(so, sopt);
 			break;
@@ -443,18 +679,18 @@ rip_ctloutput(so, sopt)
 				inp->inp_flags &= ~INP_HDRINCL;
 			break;

-	case IP_STRIPHDR:
-		error = sooptcopyin(sopt, &optval, sizeof optval,
-			sizeof optval);
-		if (error)
-			break;
-		if (optval)
-			inp->inp_flags |= INP_STRIPHDR;
-		else
-			inp->inp_flags &= ~INP_STRIPHDR;
-		break;
-
+		case IP_STRIPHDR:
+			error = sooptcopyin(sopt, &optval, sizeof optval,
+			    sizeof optval);
+			if (error)
+				break;
+			if (optval)
+				inp->inp_flags |= INP_STRIPHDR;
+			else
+				inp->inp_flags &= ~INP_STRIPHDR;
+			break;

+#if IPFIREWALL
 		case IP_FW_ADD:
 		case IP_FW_DEL:
 		case IP_FW_FLUSH:
@@ -472,11 +708,14 @@ rip_ctloutput(so, sopt)
 			else
 				error = ENOPROTOOPT;
 			break;
+#endif /* IPFIREWALL */

 #if DUMMYNET
 		case IP_DUMMYNET_CONFIGURE:
 		case IP_DUMMYNET_DEL:
 		case IP_DUMMYNET_FLUSH:
+			if (!DUMMYNET_LOADED)
+				ip_dn_init();
 			if (DUMMYNET_LOADED)
 				error = ip_dn_ctl_ptr(sopt);
 			else
@@ -484,32 +723,12 @@ rip_ctloutput(so, sopt)
 			break ;
 #endif

-		case IP_RSVP_ON:
-			error = ip_rsvp_init(so);
-			break;
-
-		case IP_RSVP_OFF:
-			error = ip_rsvp_done();
-			break;
-
-			/* XXX - should be combined */
-		case IP_RSVP_VIF_ON:
-			error = ip_rsvp_vif_init(so, sopt);
-			break;
-
-		case IP_RSVP_VIF_OFF:
-			error = ip_rsvp_vif_done(so, sopt);
-			break;
+		case SO_FLUSH:
+			if ((error = sooptcopyin(sopt, &optval, sizeof (optval),
+			    sizeof (optval))) != 0)
+				break;

-		case MRT_INIT:
-		case MRT_DONE:
-		case MRT_ADD_VIF:
-		case MRT_DEL_VIF:
-		case MRT_ADD_MFC:
-		case MRT_DEL_MFC:
-		case MRT_VERSION:
-		case MRT_ASSERT:
-			error = ip_mrouter_set(so, sopt);
+			error = inp_flush(inp, optval);
 			break;

 		default:
@@ -530,23 +749,29 @@ rip_ctloutput(so, sopt)
  * interface routes.
 */
 void
-rip_ctlinput(cmd, sa, vip)
-	int cmd;
-	struct sockaddr *sa;
-	void *vip;
+rip_ctlinput(
+	int cmd,
+	struct sockaddr *sa,
+	__unused void *vip)
 {
 	struct in_ifaddr *ia;
 	struct ifnet *ifp;
 	int err;
-	int flags;
+	int flags, done = 0;

 	switch (cmd) {
 	case PRC_IFDOWN:
-		lck_mtx_lock(rt_mtx);
+		lck_rw_lock_shared(in_ifaddr_rwlock);
 		for (ia = in_ifaddrhead.tqh_first; ia;
 		     ia = ia->ia_link.tqe_next) {
-			if (ia->ia_ifa.ifa_addr == sa
-			    && (ia->ia_flags & IFA_ROUTE)) {
+			IFA_LOCK(&ia->ia_ifa);
+			if (ia->ia_ifa.ifa_addr == sa &&
+			    (ia->ia_flags & IFA_ROUTE)) {
+				done = 1;
+				IFA_ADDREF_LOCKED(&ia->ia_ifa);
+				IFA_UNLOCK(&ia->ia_ifa);
+				lck_rw_done(in_ifaddr_rwlock);
+				lck_mtx_lock(rnh_lock);
 				/*
 				 * in_ifscrub kills the interface route.
 				 */
@@ -558,23 +783,38 @@ rip_ctlinput(cmd, sa, vip)
 				 * a routing process they will come back.
 				 */
 				in_ifadown(&ia->ia_ifa, 1);
+				lck_mtx_unlock(rnh_lock);
+				IFA_REMREF(&ia->ia_ifa);
 				break;
 			}
+			IFA_UNLOCK(&ia->ia_ifa);
 		}
-		lck_mtx_unlock(rt_mtx);
+		if (!done)
+			lck_rw_done(in_ifaddr_rwlock);
 		break;

 	case PRC_IFUP:
-		lck_mtx_lock(rt_mtx);
+		lck_rw_lock_shared(in_ifaddr_rwlock);
 		for (ia = in_ifaddrhead.tqh_first; ia;
 		     ia = ia->ia_link.tqe_next) {
-			if (ia->ia_ifa.ifa_addr == sa)
+			IFA_LOCK(&ia->ia_ifa);
+			if (ia->ia_ifa.ifa_addr == sa) {
+				/* keep it locked */
 				break;
+			}
+			IFA_UNLOCK(&ia->ia_ifa);
 		}
-		if (ia == 0 || (ia->ia_flags & IFA_ROUTE)) {
-			lck_mtx_unlock(rt_mtx);
+		if (ia == NULL || (ia->ia_flags & IFA_ROUTE) ||
+		    (ia->ia_ifa.ifa_debug & IFD_NOTREADY)) {
+			if (ia != NULL)
+				IFA_UNLOCK(&ia->ia_ifa);
+			lck_rw_done(in_ifaddr_rwlock);
 			return;
 		}
+		IFA_ADDREF_LOCKED(&ia->ia_ifa);
+		IFA_UNLOCK(&ia->ia_ifa);
+		lck_rw_done(in_ifaddr_rwlock);
+
 		flags = RTF_UP;
 		ifp = ia->ia_ifa.ifa_ifp;
@@ -582,45 +822,43 @@ rip_ctlinput(cmd, sa, vip)
 		    || (ifp->if_flags & IFF_POINTOPOINT))
 			flags |= RTF_HOST;

-		err = rtinit_locked(&ia->ia_ifa, RTM_ADD, flags);
-		lck_mtx_unlock(rt_mtx);
-		if (err == 0)
+		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
+		if (err == 0) {
+			IFA_LOCK_SPIN(&ia->ia_ifa);
 			ia->ia_flags |= IFA_ROUTE;
+			IFA_UNLOCK(&ia->ia_ifa);
+		}
+		IFA_REMREF(&ia->ia_ifa);
 		break;
 	}
 }

-u_long	rip_sendspace = RIPSNDQ;
-u_long	rip_recvspace = RIPRCVQ;
+u_int32_t rip_sendspace = RIPSNDQ;
+u_int32_t rip_recvspace = RIPRCVQ;

-SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW | CTLFLAG_LOCKED,
     &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
-SYSCTL_INT(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
     &rip_recvspace, 0, "Maximum incoming raw IP datagram size");
+SYSCTL_UINT(_net_inet_raw, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
+    &ripcbinfo.ipi_count, 0, "Number of active PCBs");

 static int
 rip_attach(struct socket *so, int proto, struct proc *p)
 {
 	struct inpcb *inp;
-	int error, s;
+	int error;

 	inp = sotoinpcb(so);
 	if (inp)
 		panic("rip_attach");
-#if __APPLE__
 	if ((so->so_state & SS_PRIV) == 0)
 		return (EPERM);
-#else
-	if (p && (error = suser(p)) != 0)
-		return error;
-#endif

 	error = soreserve(so, rip_sendspace, rip_recvspace);
 	if (error)
 		return error;
-	s = splnet();
 	error = in_pcballoc(so, &ripcbinfo, p);
-	splx(s);
 	if (error)
 		return error;
 	inp = (struct inpcb *)so->so_pcb;
@@ -638,11 +876,6 @@ rip_detach(struct socket *so)
 	inp = sotoinpcb(so);
 	if (inp == 0)
 		panic("rip_detach");
-	if (so == ip_mrouter)
-		ip_mrouter_done();
-	ip_rsvp_force_done(so);
-	if (so == ip_rsvpd)
-		ip_rsvp_done();
 	in_pcbdetach(inp);
 	return 0;
 }
@@ -665,33 +898,63 @@ rip_disconnect(struct socket *so)
 __private_extern__ int
 rip_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
 {
+#pragma unused(p)
 	struct inpcb *inp = sotoinpcb(so);
-	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
+	struct sockaddr_in sin;
 	struct ifaddr *ifa = NULL;
+	struct ifnet *outif = NULL;

-	if (nam->sa_len != sizeof(*addr))
-		return EINVAL;
+	if (inp == NULL
+#if NECP
+	    || (necp_socket_should_use_flow_divert(inp))
+#endif /* NECP */
+	    )
+		return (inp == NULL ? EINVAL : EPROTOTYPE);

-	if (TAILQ_EMPTY(&ifnet_head) || ((addr->sin_family != AF_INET) &&
-	    (addr->sin_family != AF_IMPLINK)) ||
-	    (addr->sin_addr.s_addr &&
-	     (ifa = ifa_ifwithaddr((struct sockaddr *)addr)) == 0)) {
-		return EADDRNOTAVAIL;
-	}
-	else if (ifa) {
-		ifafree(ifa);
-		ifa = NULL;
+	if (nam->sa_len != sizeof (struct sockaddr_in))
+		return (EINVAL);
+
+	/* Sanitized local copy for interface address searches */
+	bzero(&sin, sizeof (sin));
+	sin.sin_family = AF_INET;
+	sin.sin_len = sizeof (struct sockaddr_in);
+	sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;
+
+	if (TAILQ_EMPTY(&ifnet_head) ||
+	    (sin.sin_family != AF_INET && sin.sin_family != AF_IMPLINK) ||
+	    (sin.sin_addr.s_addr && (ifa = ifa_ifwithaddr(SA(&sin))) == 0)) {
+		return (EADDRNOTAVAIL);
+	} else if (ifa) {
+		/*
+		 * Opportunistically determine the outbound
+		 * interface that may be used; this may not
+		 * hold true if we end up using a route
+		 * going over a different interface, e.g.
+		 * when sending to a local address.  This
+		 * will get updated again after sending.
+		 */
+		IFA_LOCK(ifa);
+		outif = ifa->ifa_ifp;
+		IFA_UNLOCK(ifa);
+		IFA_REMREF(ifa);
 	}
-	inp->inp_laddr = addr->sin_addr;
-	return 0;
+	inp->inp_laddr = sin.sin_addr;
+	inp->inp_last_outifp = outif;
+	return (0);
 }

 __private_extern__ int
-rip_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
+rip_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
 {
 	struct inpcb *inp = sotoinpcb(so);
-	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
-
+	struct sockaddr_in *addr = (struct sockaddr_in *)(void *)nam;
+
+	if (inp == NULL
+#if NECP
+	    || (necp_socket_should_use_flow_divert(inp))
+#endif /* NECP */
+	    )
+		return (inp == NULL ? EINVAL : EPROTOTYPE);
 	if (nam->sa_len != sizeof(*addr))
 		return EINVAL;
 	if (TAILQ_EMPTY(&ifnet_head))
@@ -701,6 +964,7 @@ rip_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
 		return EAFNOSUPPORT;
 	inp->inp_faddr = addr->sin_addr;
 	soisconnected(so);
+
 	return 0;
 }

@@ -713,54 +977,90 @@ rip_shutdown(struct socket *so)
 __private_extern__ int
 rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
-	 struct mbuf *control, struct proc *p)
+    struct mbuf *control, struct proc *p)
 {
+#pragma unused(flags, p)
 	struct inpcb *inp = sotoinpcb(so);
-	register u_long dst;
+	u_int32_t dst;
+	int error = 0;
+
+	if (inp == NULL
+#if NECP
+	    || (necp_socket_should_use_flow_divert(inp) && (error = EPROTOTYPE))
+#endif /* NECP */
+	    ) {
+		if (inp == NULL)
+			error = EINVAL;
+		else
+			error = EPROTOTYPE;
+		goto bad;
+	}

 	if (so->so_state & SS_ISCONNECTED) {
-		if (nam) {
-			m_freem(m);
-			return EISCONN;
+		if (nam != NULL) {
+			error = EISCONN;
+			goto bad;
 		}
 		dst = inp->inp_faddr.s_addr;
 	} else {
 		if (nam == NULL) {
-			m_freem(m);
-			return ENOTCONN;
+			error = ENOTCONN;
+			goto bad;
 		}
-		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
+		dst = ((struct sockaddr_in *)(void *)nam)->sin_addr.s_addr;
 	}
-	return rip_output(m, so, dst);
+	return (rip_output(m, so, dst, control));
+
+bad:
+	VERIFY(error != 0);
+
+	if (m != NULL)
+		m_freem(m);
+	if (control != NULL)
+		m_freem(control);
+
+	return (error);
 }

 /* note: rip_unlock is called from different protos instead of the generic socket_unlock,
- * it will handle the socket dealloc on last reference
+ * it will handle the socket dealloc on last reference
  * */
 int
-rip_unlock(struct socket *so, int refcount, int debug)
+rip_unlock(struct socket *so, int refcount, void *debug)
 {
-	int lr_saved;
+	void *lr_saved;
 	struct inpcb *inp = sotoinpcb(so);

-	if (debug == 0)
-		lr_saved = (unsigned int) __builtin_return_address(0);
-	else lr_saved = debug;
+	if (debug == NULL)
+		lr_saved = __builtin_return_address(0);
+	else
+		lr_saved = debug;

 	if (refcount) {
-		if (so->so_usecount <= 0)
-			panic("rip_unlock: bad refoucnt so=%x val=%x\n", so, so->so_usecount);
+		if (so->so_usecount <= 0) {
+			panic("rip_unlock: bad refoucnt so=%p val=%x lrh= %s\n",
+			    so, so->so_usecount, solockhistory_nr(so));
+			/* NOTREACHED */
+		}
 		so->so_usecount--;
 		if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) {
 			/* cleanup after last reference */
 			lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
-			lck_rw_lock_exclusive(ripcbinfo.mtx);
+			lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
+			if (inp->inp_state != INPCB_STATE_DEAD) {
+#if INET6
+				if (SOCK_CHECK_DOM(so, PF_INET6))
+					in6_pcbdetach(inp);
+				else
+#endif /* INET6 */
+					in_pcbdetach(inp);
+			}
 			in_pcbdispose(inp);
-			lck_rw_done(ripcbinfo.mtx);
+			lck_rw_done(ripcbinfo.ipi_lock);
 			return(0);
 		}
 	}
-	so->unlock_lr[so->next_unlock_lr] = (u_int *)lr_saved;
+	so->unlock_lr[so->next_unlock_lr] = lr_saved;
 	so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
 	lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
 	return(0);
@@ -769,7 +1069,8 @@ rip_unlock(struct socket *so, int refcount, int debug)
 static int
 rip_pcblist SYSCTL_HANDLER_ARGS
 {
-	int error, i, n, s;
+#pragma unused(oidp, arg1, arg2)
+	int error, i, n;
 	struct inpcb *inp, **inp_list;
 	inp_gen_t gencnt;
 	struct xinpgen xig;
@@ -778,17 +1079,17 @@ rip_pcblist SYSCTL_HANDLER_ARGS
 	 * The process of preparing the TCB list is too time-consuming and
 	 * resource-intensive to repeat twice on every request.
 	 */
-	lck_rw_lock_exclusive(ripcbinfo.mtx);
+	lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
 	if (req->oldptr == USER_ADDR_NULL) {
 		n = ripcbinfo.ipi_count;
 		req->oldidx = 2 * (sizeof xig)
 			+ (n + n/8) * sizeof(struct xinpcb);
-		lck_rw_done(ripcbinfo.mtx);
+		lck_rw_done(ripcbinfo.ipi_lock);
 		return 0;
 	}

 	if (req->newptr != USER_ADDR_NULL) {
-		lck_rw_done(ripcbinfo.mtx);
+		lck_rw_done(ripcbinfo.ipi_lock);
 		return EPERM;
 	}

@@ -797,7 +1098,7 @@ rip_pcblist SYSCTL_HANDLER_ARGS
 	 */
 	gencnt = ripcbinfo.ipi_gencnt;
 	n = ripcbinfo.ipi_count;
-
+
 	bzero(&xig, sizeof(xig));
 	xig.xig_len = sizeof xig;
 	xig.xig_count = n;
@@ -805,24 +1106,24 @@ rip_pcblist SYSCTL_HANDLER_ARGS
 	xig.xig_sogen = so_gencnt;
 	error = SYSCTL_OUT(req, &xig, sizeof xig);
 	if (error) {
-		lck_rw_done(ripcbinfo.mtx);
+		lck_rw_done(ripcbinfo.ipi_lock);
 		return error;
 	}
	/*
	 * We are done if there is no pcb
	 */
 	if (n == 0) {
-		lck_rw_done(ripcbinfo.mtx);
-		return 0;
+		lck_rw_done(ripcbinfo.ipi_lock);
+		return 0;
 	}

 	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
 	if (inp_list == 0) {
-		lck_rw_done(ripcbinfo.mtx);
+		lck_rw_done(ripcbinfo.ipi_lock);
 		return ENOMEM;
 	}
-
-	for (inp = ripcbinfo.listhead->lh_first, i = 0; inp && i < n;
+
+	for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n;
 	     inp = inp->inp_list.le_next) {
 		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
 			inp_list[i++] = inp;
@@ -860,17 +1161,147 @@ rip_pcblist SYSCTL_HANDLER_ARGS
 		error = SYSCTL_OUT(req, &xig, sizeof xig);
 	}
 	FREE(inp_list, M_TEMP);
-	lck_rw_done(ripcbinfo.mtx);
+	lck_rw_done(ripcbinfo.ipi_lock);
 	return error;
 }

-SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD, 0, 0,
+SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
+	    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
 	    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");

+
+static int
+rip_pcblist64 SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+	int error, i, n;
+	struct inpcb *inp, **inp_list;
+	inp_gen_t gencnt;
+	struct xinpgen xig;
+
+	/*
+	 * The process of preparing the TCB list is too time-consuming and
+	 * resource-intensive to repeat twice on every request.
+	 */
+	lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
+	if (req->oldptr == USER_ADDR_NULL) {
+		n = ripcbinfo.ipi_count;
+		req->oldidx = 2 * (sizeof xig)
+			+ (n + n/8) * sizeof(struct xinpcb64);
+		lck_rw_done(ripcbinfo.ipi_lock);
+		return 0;
+	}
+
+	if (req->newptr != USER_ADDR_NULL) {
+		lck_rw_done(ripcbinfo.ipi_lock);
+		return EPERM;
+	}
+
+	/*
+	 * OK, now we're committed to doing something.
+	 */
+	gencnt = ripcbinfo.ipi_gencnt;
+	n = ripcbinfo.ipi_count;
+
+	bzero(&xig, sizeof(xig));
+	xig.xig_len = sizeof xig;
+	xig.xig_count = n;
+	xig.xig_gen = gencnt;
+	xig.xig_sogen = so_gencnt;
+	error = SYSCTL_OUT(req, &xig, sizeof xig);
+	if (error) {
+		lck_rw_done(ripcbinfo.ipi_lock);
+		return error;
+	}
+	/*
+	 * We are done if there is no pcb
+	 */
+	if (n == 0) {
+		lck_rw_done(ripcbinfo.ipi_lock);
+		return 0;
+	}
+
+	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
+	if (inp_list == 0) {
+		lck_rw_done(ripcbinfo.ipi_lock);
+		return ENOMEM;
+	}
+
+	for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n;
+	     inp = inp->inp_list.le_next) {
+		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
+			inp_list[i++] = inp;
+	}
+	n = i;
+
+	error = 0;
+	for (i = 0; i < n; i++) {
+		inp = inp_list[i];
+		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
+			struct xinpcb64 xi;
+
+			bzero(&xi, sizeof(xi));
+			xi.xi_len = sizeof xi;
+			inpcb_to_xinpcb64(inp, &xi);
+			if (inp->inp_socket)
+				sotoxsocket64(inp->inp_socket, &xi.xi_socket);
+			error = SYSCTL_OUT(req, &xi, sizeof xi);
+		}
+	}
+	if (!error) {
+		/*
+		 * Give the user an updated idea of our state.
+		 * If the generation differs from what we told
+		 * her before, she knows that something happened
+		 * while we were processing this request, and it
+		 * might be necessary to retry.
+		 */
+		bzero(&xig, sizeof(xig));
+		xig.xig_len = sizeof xig;
+		xig.xig_gen = ripcbinfo.ipi_gencnt;
+		xig.xig_sogen = so_gencnt;
+		xig.xig_count = ripcbinfo.ipi_count;
+		error = SYSCTL_OUT(req, &xig, sizeof xig);
+	}
+	FREE(inp_list, M_TEMP);
+	lck_rw_done(ripcbinfo.ipi_lock);
+	return error;
+}
+
+SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist64,
+	    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
+	    rip_pcblist64, "S,xinpcb64", "List of active raw IP sockets");
+
+
+
+static int
+rip_pcblist_n SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+	int error = 0;
+
+	error = get_pcblist_n(IPPROTO_IP, req, &ripcbinfo);
+
+	return error;
+}
+
+SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist_n,
+	    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
+	    rip_pcblist_n, "S,xinpcb_n", "List of active raw IP sockets");
+
 struct pr_usrreqs rip_usrreqs = {
-	rip_abort, pru_accept_notsupp, rip_attach, rip_bind, rip_connect,
-	pru_connect2_notsupp, in_control, rip_detach, rip_disconnect,
-	pru_listen_notsupp, in_setpeeraddr, pru_rcvd_notsupp,
-	pru_rcvoob_notsupp, rip_send, pru_sense_null, rip_shutdown,
-	in_setsockaddr, sosend, soreceive, pru_sopoll_notsupp
+	.pru_abort =		rip_abort,
+	.pru_attach =		rip_attach,
+	.pru_bind =		rip_bind,
+	.pru_connect =		rip_connect,
+	.pru_control =		in_control,
+	.pru_detach =		rip_detach,
+	.pru_disconnect =	rip_disconnect,
+	.pru_peeraddr =		in_getpeeraddr,
+	.pru_send =		rip_send,
+	.pru_shutdown =		rip_shutdown,
+	.pru_sockaddr =		in_getsockaddr,
+	.pru_sosend =		sosend,
+	.pru_soreceive =	soreceive,
 };
+/* DSEP Review Done pl-20051213-v02 @3253 */
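
Editor's note: the diff above changes the user-visible behavior of AF_INET/SOCK_RAW sockets on Darwin — rip_attach() still requires a privileged socket, rip_output() prepends the IP header unless IP_HDRINCL is set, rip_ctloutput() keeps the Darwin-specific IP_STRIPHDR toggle, and rip_input() delivers SO_TIMESTAMP control data via ip_savecontrol(). The fragment below is a minimal user-space sketch (not part of the diff) showing how those options are exercised from a client; protocol number 253 is an arbitrary choice from the range reserved for experimentation, and error handling is abbreviated.

/*
 * Sketch: exercising the raw-IP socket paths handled in raw_ip.c.
 * Must run as root, since rip_attach() checks SS_PRIV on the socket.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int
main(void)
{
	int s = socket(AF_INET, SOCK_RAW, 253);	/* 253: experimental protocol */
	if (s < 0) {
		perror("socket(AF_INET, SOCK_RAW)");	/* fails unless privileged */
		return 1;
	}

	int on = 1;
	/*
	 * IP_STRIPHDR (Darwin-specific): ask rip_input() to strip the IP
	 * header before the payload is appended to the receive buffer.
	 */
	if (setsockopt(s, IPPROTO_IP, IP_STRIPHDR, &on, sizeof (on)) < 0)
		perror("setsockopt(IP_STRIPHDR)");
	/*
	 * SO_TIMESTAMP: request per-packet timestamps as control data,
	 * produced on the input path through ip_savecontrol().
	 */
	if (setsockopt(s, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof (on)) < 0)
		perror("setsockopt(SO_TIMESTAMP)");

	/*
	 * Without IP_HDRINCL, rip_output() builds and prepends the IP header,
	 * so the buffer handed to sendto() is raw protocol payload only.
	 */
	struct sockaddr_in dst;
	memset(&dst, 0, sizeof (dst));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof (dst);
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

	const char payload[] = "raw ip test";
	if (sendto(s, payload, sizeof (payload), 0,
	    (struct sockaddr *)&dst, sizeof (dst)) < 0)
		perror("sendto");

	close(s);
	return 0;
}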