/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
*
* @(#)raw_ip.c 8.7 (Berkeley) 5/15/95
*/
+/*
+ * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
+ * support for mandatory and extensible security protections. This notice
+ * is included in support of clause 2.2 (b) of the Apple Public License,
+ * Version 2.0.
+ */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
+#include <libkern/OSAtomic.h>
+#include <kern/zalloc.h>
-#if __FreeBSD__
-#include <vm/vm_zone.h>
-#endif
+#include <pexpert/pexpert.h>
#include <net/if.h>
#include <net/route.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>
+#if INET6
+#include <netinet6/in6_pcb.h>
+#endif /* INET6 */
+
#include <netinet/ip_fw.h>
#if IPSEC
#include <netinet/ip_dummynet.h>
#endif
+#if CONFIG_MACF_NET
+#include <security/mac_framework.h>
+#endif /* CONFIG_MACF_NET */
+
+int load_ipfw(void);
+int rip_detach(struct socket *);
+int rip_abort(struct socket *);
+int rip_disconnect(struct socket *);
+int rip_bind(struct socket *, struct sockaddr *, struct proc *);
+int rip_connect(struct socket *, struct sockaddr *, struct proc *);
+int rip_shutdown(struct socket *);
+
#if IPSEC
extern int ipsec_bypass;
-extern lck_mtx_t *sadb_mutex;
#endif
-extern u_long route_generation;
struct inpcbhead ripcb;
struct inpcbinfo ripcbinfo;
/* control hooks for ipfw and dummynet */
+#if IPFIREWALL
ip_fw_ctl_t *ip_fw_ctl_ptr;
#if DUMMYNET
ip_dn_ctl_t *ip_dn_ctl_ptr;
#endif /* DUMMYNET */
+#endif /* IPFIREWALL */
/*
* Nominal space allocated to a raw ip socket.
* allocate lock group attribute and group for udp pcb mutexes
*/
pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
- lck_grp_attr_setdefault(pcbinfo->mtx_grp_attr);
pcbinfo->mtx_grp = lck_grp_alloc_init("ripcb", pcbinfo->mtx_grp_attr);
* allocate the lock attribute for udp pcb mutexes
*/
pcbinfo->mtx_attr = lck_attr_alloc_init();
- lck_attr_setdefault(pcbinfo->mtx_attr);
if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL)
return; /* pretty much dead if this fails... */
}
-static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET };
+static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET, 0, { 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 } };
/*
* Setup generic address and protocol structures
* for raw_input routine, then pass them along with
register struct inpcb *inp;
struct inpcb *last = 0;
struct mbuf *opts = 0;
- int skipit;
+ int skipit = 0, ret = 0;
ripsrc.sin_addr = ip->ip_src;
lck_rw_lock_shared(ripcbinfo.mtx);
if (last) {
struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);
+ skipit = 0;
#if IPSEC
/* check AH/ESP integrity. */
- skipit = 0;
if (ipsec_bypass == 0 && n) {
- lck_mtx_lock(sadb_mutex);
if (ipsec4_in_reject_so(n, last->inp_socket)) {
m_freem(n);
- ipsecstat.in_polvio++;
+ IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
/* do not inject data to pcb */
skipit = 1;
}
- lck_mtx_unlock(sadb_mutex);
}
#endif /*IPSEC*/
+#if CONFIG_MACF_NET
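+ /* let the MAC framework veto delivery of this copy to the matching PCB */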
+ if (n && skipit == 0) {
+ if (mac_inpcb_check_deliver(last, n, AF_INET,
+ SOCK_RAW) != 0) {
+ m_freem(n);
+ skipit = 1;
+ }
+ }
+#endif
if (n && skipit == 0) {
int error = 0;
- if (last->inp_flags & INP_CONTROLOPTS ||
- last->inp_socket->so_options & SO_TIMESTAMP)
- ip_savecontrol(last, &opts, ip, n);
+ if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
+ (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
+ (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0) {
+ ret = ip_savecontrol(last, &opts, ip, n);
+ if (ret != 0) {
+ m_freem(n);
+ m_freem(opts);
+ last = inp;
+ continue;
+ }
+ }
if (last->inp_flags & INP_STRIPHDR) {
n->m_len -= iphlen;
n->m_pkthdr.len -= iphlen;
n->m_data += iphlen;
}
-// ###LOCK need to lock that socket?
+ so_recv_data_stat(last->inp_socket, m, 0);
if (sbappendaddr(&last->inp_socket->so_rcv,
(struct sockaddr *)&ripsrc, n,
opts, &error) != 0) {
sorwakeup(last->inp_socket);
- }
- else {
+ } else {
if (error) {
/* should notify about lost packet */
kprintf("rip_input can't append to socket\n");
}
last = inp;
}
- lck_rw_done(ripcbinfo.mtx);
+
+ skipit = 0;
#if IPSEC
/* check AH/ESP integrity. */
- skipit = 0;
if (ipsec_bypass == 0 && last) {
- lck_mtx_lock(sadb_mutex);
if (ipsec4_in_reject_so(m, last->inp_socket)) {
m_freem(m);
- ipsecstat.in_polvio++;
- ipstat.ips_delivered--;
+ IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
+ OSAddAtomic(-1, &ipstat.ips_delivered);
/* do not inject data to pcb */
skipit = 1;
}
- lck_mtx_unlock(sadb_mutex);
}
#endif /*IPSEC*/
+#if CONFIG_MACF_NET
+ if (last && skipit == 0) {
+ if (mac_inpcb_check_deliver(last, m, AF_INET, SOCK_RAW) != 0) {
+ skipit = 1;
+ m_freem(m);
+ }
+ }
+#endif
if (skipit == 0) {
if (last) {
- if (last->inp_flags & INP_CONTROLOPTS ||
- last->inp_socket->so_options & SO_TIMESTAMP)
- ip_savecontrol(last, &opts, ip, m);
+ if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
+ (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
+ (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0) {
+ ret = ip_savecontrol(last, &opts, ip, m);
+ if (ret != 0) {
+ m_freem(m);
+ m_freem(opts);
+ goto unlock;
+ }
+ }
if (last->inp_flags & INP_STRIPHDR) {
m->m_len -= iphlen;
m->m_pkthdr.len -= iphlen;
m->m_data += iphlen;
}
+ so_recv_data_stat(last->inp_socket, m, 0);
if (sbappendaddr(&last->inp_socket->so_rcv,
(struct sockaddr *)&ripsrc, m, opts, NULL) != 0) {
sorwakeup(last->inp_socket);
}
} else {
m_freem(m);
- ipstat.ips_noproto++;
- ipstat.ips_delivered--;
+ OSAddAtomic(1, &ipstat.ips_noproto);
+ OSAddAtomic(-1, &ipstat.ips_delivered);
}
}
+unlock:
+ /*
+ * Keep the list locked because socket filter may force the socket lock
+ * to be released when calling sbappendaddr() -- see rdar://7627704
+ */
+ lck_rw_done(ripcbinfo.mtx);
}
/*
* Tack on options user may have setup with control call.
*/
int
-rip_output(m, so, dst)
- register struct mbuf *m;
- struct socket *so;
- u_long dst;
+rip_output(
+ struct mbuf *m,
+ struct socket *so,
+ u_int32_t dst,
+ struct mbuf *control)
{
register struct ip *ip;
register struct inpcb *inp = sotoinpcb(so);
int flags = (so->so_options & SO_DONTROUTE) | IP_ALLOWBROADCAST;
+ struct ip_out_args ipoa;
+ struct ip_moptions *imo;
+ int error = 0;
+ mbuf_traffic_class_t mtc = MBUF_TC_UNSPEC;
+
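+ /* a traffic class may be passed in via the control mbuf; it is applied to the packet with set_packet_tclass() below */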
+ if (control != NULL) {
+ mtc = mbuf_traffic_class_from_control(control);
+
+ m_freem(control);
+ }
+ /* If socket was bound to an ifindex, tell ip_output about it */
+ ipoa.ipoa_boundif = (inp->inp_flags & INP_BOUND_IF) ?
+ inp->inp_boundif : IFSCOPE_NONE;
+ ipoa.ipoa_nocell = (inp->inp_flags & INP_NO_IFT_CELLULAR) ? 1 : 0;
+ flags |= IP_OUTARGS;
/*
* If the user handed us a complete IP packet, use it.
return(EMSGSIZE);
}
M_PREPEND(m, sizeof(struct ip), M_WAIT);
+ if (m == NULL)
+ return ENOBUFS;
ip = mtod(m, struct ip *);
ip->ip_tos = inp->inp_ip_tos;
ip->ip_off = 0;
#endif
/* XXX prevent ip_output from overwriting header fields */
flags |= IP_RAWOUTPUT;
- ipstat.ips_rawout++;
+ OSAddAtomic(1, &ipstat.ips_rawout);
}
#if IPSEC
}
#endif /*IPSEC*/
- if (inp->inp_route.ro_rt && inp->inp_route.ro_rt->generation_id != route_generation) {
+ if (inp->inp_route.ro_rt != NULL &&
+ inp->inp_route.ro_rt->generation_id != route_generation) {
rtfree(inp->inp_route.ro_rt);
- inp->inp_route.ro_rt = (struct rtentry *)0;
+ inp->inp_route.ro_rt = NULL;
}
- return (ip_output_list(m, 0, inp->inp_options, &inp->inp_route, flags,
- inp->inp_moptions));
+ set_packet_tclass(m, so, mtc, 0);
+
+#if CONFIG_MACF_NET
+ mac_mbuf_label_associate_inpcb(inp, m);
+#endif
+
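+ /* hold a reference on the multicast options so they remain valid while ip_output() uses them */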
+ imo = inp->inp_moptions;
+ if (imo != NULL)
+ IMO_ADDREF(imo);
+ /*
+ * The domain lock is held across ip_output, so it is okay
+ * to pass the PCB cached route pointer directly to IP and
+ * the modules beneath it.
+ */
+ error = ip_output(m, inp->inp_options, &inp->inp_route, flags,
+ imo, &ipoa);
+
+ if (imo != NULL)
+ IMO_REMREF(imo);
+
+ if (inp->inp_route.ro_rt != NULL) {
+ struct rtentry *rt = inp->inp_route.ro_rt;
+ unsigned int outif;
+
+ if ((rt->rt_flags & (RTF_MULTICAST|RTF_BROADCAST)) ||
+ inp->inp_socket == NULL ||
+ !(inp->inp_socket->so_state & SS_ISCONNECTED)) {
+ rt = NULL; /* unusable */
+ }
+ /*
+ * Always discard the cached route for unconnected
+ * socket or if it is a multicast route.
+ */
+ if (rt == NULL) {
+ rtfree(inp->inp_route.ro_rt);
+ inp->inp_route.ro_rt = NULL;
+ }
+ /*
+ * If this is a connected socket and the destination
+ * route is unicast, update outif with that of the route
+ * interface index used by IP.
+ */
+ if (rt != NULL &&
+ (outif = rt->rt_ifp->if_index) != inp->inp_last_outif)
+ inp->inp_last_outif = outif;
+ }
+
+ return (error);
}
-extern int
-load_ipfw()
+#if IPFIREWALL
+int
+load_ipfw(void)
{
kern_return_t err;
return err == 0 && ip_fw_ctl_ptr == NULL ? -1 : err;
}
+#endif /* IPFIREWALL */
/*
* Raw IP socket option processing.
error = sooptcopyout(sopt, &optval, sizeof optval);
break;
+#if IPFIREWALL
case IP_FW_ADD:
case IP_FW_GET:
case IP_OLD_FW_ADD:
else
error = ENOPROTOOPT;
break;
+#endif /* IPFIREWALL */
#if DUMMYNET
case IP_DUMMYNET_GET:
break ;
#endif /* DUMMYNET */
+#if MROUTING
case MRT_INIT:
case MRT_DONE:
case MRT_ADD_VIF:
case MRT_ASSERT:
error = ip_mrouter_get(so, sopt);
break;
+#endif /* MROUTING */
default:
error = ip_ctloutput(so, sopt);
break;
+#if IPFIREWALL
case IP_FW_ADD:
case IP_FW_DEL:
case IP_FW_FLUSH:
else
error = ENOPROTOOPT;
break;
+#endif /* IPFIREWALL */
#if DUMMYNET
case IP_DUMMYNET_CONFIGURE:
break ;
#endif
+#if MROUTING
case IP_RSVP_ON:
error = ip_rsvp_init(so);
break;
case IP_RSVP_VIF_OFF:
error = ip_rsvp_vif_done(so, sopt);
break;
-
+
case MRT_INIT:
case MRT_DONE:
case MRT_ADD_VIF:
case MRT_ASSERT:
error = ip_mrouter_set(so, sopt);
break;
+#endif /* MROUTING */
default:
error = ip_ctloutput(so, sopt);
* interface routes.
*/
void
-rip_ctlinput(cmd, sa, vip)
- int cmd;
- struct sockaddr *sa;
- void *vip;
+rip_ctlinput(
+ int cmd,
+ struct sockaddr *sa,
+ __unused void *vip)
{
struct in_ifaddr *ia;
struct ifnet *ifp;
int err;
- int flags;
+ int flags, done = 0;
switch (cmd) {
case PRC_IFDOWN:
- lck_mtx_lock(rt_mtx);
+ lck_rw_lock_shared(in_ifaddr_rwlock);
for (ia = in_ifaddrhead.tqh_first; ia;
ia = ia->ia_link.tqe_next) {
- if (ia->ia_ifa.ifa_addr == sa
- && (ia->ia_flags & IFA_ROUTE)) {
+ IFA_LOCK(&ia->ia_ifa);
+ if (ia->ia_ifa.ifa_addr == sa &&
+ (ia->ia_flags & IFA_ROUTE)) {
+ done = 1;
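+ /* keep the address referenced, then drop the address-list lock before taking rnh_lock */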
+ IFA_ADDREF_LOCKED(&ia->ia_ifa);
+ IFA_UNLOCK(&ia->ia_ifa);
+ lck_rw_done(in_ifaddr_rwlock);
+ lck_mtx_lock(rnh_lock);
/*
* in_ifscrub kills the interface route.
*/
* a routing process they will come back.
*/
in_ifadown(&ia->ia_ifa, 1);
+ lck_mtx_unlock(rnh_lock);
+ IFA_REMREF(&ia->ia_ifa);
break;
}
+ IFA_UNLOCK(&ia->ia_ifa);
}
- lck_mtx_unlock(rt_mtx);
+ if (!done)
+ lck_rw_done(in_ifaddr_rwlock);
break;
case PRC_IFUP:
- lck_mtx_lock(rt_mtx);
+ lck_rw_lock_shared(in_ifaddr_rwlock);
for (ia = in_ifaddrhead.tqh_first; ia;
ia = ia->ia_link.tqe_next) {
- if (ia->ia_ifa.ifa_addr == sa)
+ IFA_LOCK(&ia->ia_ifa);
+ if (ia->ia_ifa.ifa_addr == sa) {
+ /* keep it locked */
break;
+ }
+ IFA_UNLOCK(&ia->ia_ifa);
}
- if (ia == 0 || (ia->ia_flags & IFA_ROUTE)) {
- lck_mtx_unlock(rt_mtx);
+ if (ia == NULL || (ia->ia_flags & IFA_ROUTE) ||
+ (ia->ia_ifa.ifa_debug & IFD_NOTREADY)) {
+ if (ia != NULL)
+ IFA_UNLOCK(&ia->ia_ifa);
+ lck_rw_done(in_ifaddr_rwlock);
return;
}
+ IFA_ADDREF_LOCKED(&ia->ia_ifa);
+ IFA_UNLOCK(&ia->ia_ifa);
+ lck_rw_done(in_ifaddr_rwlock);
+
flags = RTF_UP;
ifp = ia->ia_ifa.ifa_ifp;
|| (ifp->if_flags & IFF_POINTOPOINT))
flags |= RTF_HOST;
- err = rtinit_locked(&ia->ia_ifa, RTM_ADD, flags);
- lck_mtx_unlock(rt_mtx);
- if (err == 0)
+ err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
+ if (err == 0) {
+ IFA_LOCK_SPIN(&ia->ia_ifa);
ia->ia_flags |= IFA_ROUTE;
+ IFA_UNLOCK(&ia->ia_ifa);
+ }
+ IFA_REMREF(&ia->ia_ifa);
break;
}
}
-u_long rip_sendspace = RIPSNDQ;
-u_long rip_recvspace = RIPRCVQ;
+u_int32_t rip_sendspace = RIPSNDQ;
+u_int32_t rip_recvspace = RIPRCVQ;
-SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW | CTLFLAG_LOCKED,
&rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
-SYSCTL_INT(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
+SYSCTL_INT(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
&rip_recvspace, 0, "Maximum incoming raw IP datagram size");
static int
rip_attach(struct socket *so, int proto, struct proc *p)
{
struct inpcb *inp;
- int error, s;
+ int error;
inp = sotoinpcb(so);
if (inp)
panic("rip_attach");
-#if __APPLE__
if ((so->so_state & SS_PRIV) == 0)
return (EPERM);
-#else
- if (p && (error = suser(p)) != 0)
- return error;
-#endif
error = soreserve(so, rip_sendspace, rip_recvspace);
if (error)
return error;
- s = splnet();
error = in_pcballoc(so, &ripcbinfo, p);
- splx(s);
if (error)
return error;
inp = (struct inpcb *)so->so_pcb;
inp = sotoinpcb(so);
if (inp == 0)
panic("rip_detach");
+#if MROUTING
if (so == ip_mrouter)
ip_mrouter_done();
ip_rsvp_force_done(so);
if (so == ip_rsvpd)
ip_rsvp_done();
+#endif /* MROUTING */
in_pcbdetach(inp);
return 0;
}
}
__private_extern__ int
-rip_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
+rip_bind(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
{
struct inpcb *inp = sotoinpcb(so);
struct sockaddr_in *addr = (struct sockaddr_in *)nam;
struct ifaddr *ifa = NULL;
+ unsigned int outif = 0;
if (nam->sa_len != sizeof(*addr))
return EINVAL;
return EADDRNOTAVAIL;
}
else if (ifa) {
- ifafree(ifa);
- ifa = NULL;
+ IFA_LOCK(ifa);
+ outif = ifa->ifa_ifp->if_index;
+ IFA_UNLOCK(ifa);
+ IFA_REMREF(ifa);
}
inp->inp_laddr = addr->sin_addr;
+ inp->inp_last_outif = outif;
return 0;
}
__private_extern__ int
-rip_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
+rip_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
{
struct inpcb *inp = sotoinpcb(so);
struct sockaddr_in *addr = (struct sockaddr_in *)nam;
}
__private_extern__ int
-rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
- struct mbuf *control, struct proc *p)
+rip_send(struct socket *so, __unused int flags, struct mbuf *m, struct sockaddr *nam,
+ struct mbuf *control, __unused struct proc *p)
{
struct inpcb *inp = sotoinpcb(so);
- register u_long dst;
+ register u_int32_t dst;
if (so->so_state & SS_ISCONNECTED) {
if (nam) {
}
dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
}
- return rip_output(m, so, dst);
+ return rip_output(m, so, dst, control);
}
+/*
+ * Note: rip_unlock is called from different protos instead of the generic
+ * socket_unlock; it will handle the socket dealloc on last reference.
+ */
int
-rip_unlock(struct socket *so, int refcount, int debug)
+rip_unlock(struct socket *so, int refcount, void *debug)
{
- int lr_saved;
+ void *lr_saved;
struct inpcb *inp = sotoinpcb(so);
-#ifdef __ppc__
- if (debug == 0) {
- __asm__ volatile("mflr %0" : "=r" (lr_saved));
- }
- else lr_saved = debug;
-#endif
+
+ if (debug == NULL)
+ lr_saved = __builtin_return_address(0);
+ else
+ lr_saved = debug;
+
if (refcount) {
- if (so->so_usecount <= 0)
- panic("rip_unlock: bad refoucnt so=%x val=%x\n", so, so->so_usecount);
+ if (so->so_usecount <= 0) {
+ panic("rip_unlock: bad refoucnt so=%p val=%x lrh= %s\n",
+ so, so->so_usecount, solockhistory_nr(so));
+ /* NOTREACHED */
+ }
so->so_usecount--;
if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) {
+ /* cleanup after last reference */
lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
lck_rw_lock_exclusive(ripcbinfo.mtx);
+ if (inp->inp_state != INPCB_STATE_DEAD) {
+#if INET6
+ if (INP_CHECK_SOCKAF(so, AF_INET6))
+ in6_pcbdetach(inp);
+ else
+#endif /* INET6 */
+ in_pcbdetach(inp);
+ }
in_pcbdispose(inp);
lck_rw_done(ripcbinfo.mtx);
return(0);
}
}
+ so->unlock_lr[so->next_unlock_lr] = lr_saved;
+ so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
return(0);
}
static int
rip_pcblist SYSCTL_HANDLER_ARGS
{
- int error, i, n, s;
+#pragma unused(oidp, arg1, arg2)
+ int error, i, n;
struct inpcb *inp, **inp_list;
inp_gen_t gencnt;
struct xinpgen xig;
return error;
}
-SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD, 0, 0,
+SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
+#if !CONFIG_EMBEDDED
+
+static int
+rip_pcblist64 SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int error, i, n;
+ struct inpcb *inp, **inp_list;
+ inp_gen_t gencnt;
+ struct xinpgen xig;
+
+ /*
+ * The process of preparing the PCB list is too time-consuming and
+ * resource-intensive to repeat twice on every request.
+ */
+ lck_rw_lock_exclusive(ripcbinfo.mtx);
+ if (req->oldptr == USER_ADDR_NULL) {
+ n = ripcbinfo.ipi_count;
+ req->oldidx = 2 * (sizeof xig)
+ + (n + n/8) * sizeof(struct xinpcb64);
+ lck_rw_done(ripcbinfo.mtx);
+ return 0;
+ }
+
+ if (req->newptr != USER_ADDR_NULL) {
+ lck_rw_done(ripcbinfo.mtx);
+ return EPERM;
+ }
+
+ /*
+ * OK, now we're committed to doing something.
+ */
+ gencnt = ripcbinfo.ipi_gencnt;
+ n = ripcbinfo.ipi_count;
+
+ bzero(&xig, sizeof(xig));
+ xig.xig_len = sizeof xig;
+ xig.xig_count = n;
+ xig.xig_gen = gencnt;
+ xig.xig_sogen = so_gencnt;
+ error = SYSCTL_OUT(req, &xig, sizeof xig);
+ if (error) {
+ lck_rw_done(ripcbinfo.mtx);
+ return error;
+ }
+ /*
+ * We are done if there is no pcb
+ */
+ if (n == 0) {
+ lck_rw_done(ripcbinfo.mtx);
+ return 0;
+ }
+
+ inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
+ if (inp_list == 0) {
+ lck_rw_done(ripcbinfo.mtx);
+ return ENOMEM;
+ }
+
+ for (inp = ripcbinfo.listhead->lh_first, i = 0; inp && i < n;
+ inp = inp->inp_list.le_next) {
+ if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
+ inp_list[i++] = inp;
+ }
+ n = i;
+
+ error = 0;
+ for (i = 0; i < n; i++) {
+ inp = inp_list[i];
+ if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
+ struct xinpcb64 xi;
+
+ bzero(&xi, sizeof(xi));
+ xi.xi_len = sizeof xi;
+ inpcb_to_xinpcb64(inp, &xi);
+ if (inp->inp_socket)
+ sotoxsocket64(inp->inp_socket, &xi.xi_socket);
+ error = SYSCTL_OUT(req, &xi, sizeof xi);
+ }
+ }
+ if (!error) {
+ /*
+ * Give the user an updated idea of our state.
+ * If the generation differs from what we told
+ * her before, she knows that something happened
+ * while we were processing this request, and it
+ * might be necessary to retry.
+ */
+ bzero(&xig, sizeof(xig));
+ xig.xig_len = sizeof xig;
+ xig.xig_gen = ripcbinfo.ipi_gencnt;
+ xig.xig_sogen = so_gencnt;
+ xig.xig_count = ripcbinfo.ipi_count;
+ error = SYSCTL_OUT(req, &xig, sizeof xig);
+ }
+ FREE(inp_list, M_TEMP);
+ lck_rw_done(ripcbinfo.mtx);
+ return error;
+}
+
+SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist64, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
+ rip_pcblist64, "S,xinpcb64", "List of active raw IP sockets");
+
+#endif /* !CONFIG_EMBEDDED */
+
+
+static int
+rip_pcblist_n SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int error = 0;
+
+ error = get_pcblist_n(IPPROTO_IP, req, &ripcbinfo);
+
+ return error;
+}
+
+SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist_n, CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
+ rip_pcblist_n, "S,xinpcb_n", "List of active raw IP sockets");
+
struct pr_usrreqs rip_usrreqs = {
rip_abort, pru_accept_notsupp, rip_attach, rip_bind, rip_connect,
pru_connect2_notsupp, in_control, rip_detach, rip_disconnect,
pru_rcvoob_notsupp, rip_send, pru_sense_null, rip_shutdown,
in_setsockaddr, sosend, soreceive, pru_sopoll_notsupp
};
+/* DSEP Review Done pl-20051213-v02 @3253 */