/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <vm/vm_zone.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>

#include <netinet/ip_fw.h>

#include <netinet6/ipsec.h>

#include <netinet/ip_dummynet.h>
extern int ipsec_bypass;
extern lck_mtx_t *sadb_mutex;

extern u_long route_generation;
struct inpcbhead ripcb;
struct inpcbinfo ripcbinfo;

/* control hooks for ipfw and dummynet */
ip_fw_ctl_t *ip_fw_ctl_ptr;
#if DUMMYNET
ip_dn_ctl_t *ip_dn_ctl_ptr;
#endif /* DUMMYNET */
/*
 * Nominal space allocated to a raw ip socket.
 */

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize raw connection block q.
 */
void
rip_init()
{
	struct inpcbinfo *pcbinfo;

	ripcbinfo.listhead = &ripcb;
	/*
	 * XXX We don't use the hash list for raw IP, but it's easier
	 * to allocate a one entry hash list than it is to check all
	 * over the place for hashbase == NULL.
	 */
	ripcbinfo.hashbase = hashinit(1, M_PCB, &ripcbinfo.hashmask);
	ripcbinfo.porthashbase = hashinit(1, M_PCB, &ripcbinfo.porthashmask);

	ripcbinfo.ipi_zone = (void *) zinit(sizeof(struct inpcb),
			(4096 * sizeof(struct inpcb)), 4096, "ripzone");

	pcbinfo = &ripcbinfo;
	/*
	 * allocate lock group attribute and group for raw IP pcb mutexes
	 */
	pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setdefault(pcbinfo->mtx_grp_attr);

	pcbinfo->mtx_grp = lck_grp_alloc_init("ripcb", pcbinfo->mtx_grp_attr);

	/*
	 * allocate the lock attribute for raw IP pcb mutexes
	 */
	pcbinfo->mtx_attr = lck_attr_alloc_init();
	lck_attr_setdefault(pcbinfo->mtx_attr);

	if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL)
		return;	/* pretty much dead if this fails... */
}

static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET };
/*
 * Set up generic address and protocol structures
 * for raw_input routine, then pass them along with
 * mbuf chain.
 */
void
rip_input(m, iphlen)
	struct mbuf *m;
	int iphlen;
{
	register struct ip *ip = mtod(m, struct ip *);
	register struct inpcb *inp;
	struct inpcb *last = 0;
	struct mbuf *opts = 0;
	int skipit;

	ripsrc.sin_addr = ip->ip_src;
	lck_rw_lock_shared(ripcbinfo.mtx);
	LIST_FOREACH(inp, &ripcb, inp_list) {
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
		if (inp->inp_ip_p && (inp->inp_ip_p != ip->ip_p))
			continue;
		if (inp->inp_laddr.s_addr &&
		    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr &&
		    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (last) {
			struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);
			int error = 0;

			skipit = 0;
			/* check AH/ESP integrity. */
			if (ipsec_bypass == 0 && n) {
				lck_mtx_lock(sadb_mutex);
				if (ipsec4_in_reject_so(n, last->inp_socket)) {
					m_freem(n);
					ipsecstat.in_polvio++;
					/* do not inject data to pcb */
					skipit = 1;
				}
				lck_mtx_unlock(sadb_mutex);
			}
			if (n && skipit == 0) {
				if (last->inp_flags & INP_CONTROLOPTS ||
				    last->inp_socket->so_options & SO_TIMESTAMP)
					ip_savecontrol(last, &opts, ip, n);
				if (last->inp_flags & INP_STRIPHDR) {
					n->m_data += iphlen;
					n->m_len -= iphlen;
					n->m_pkthdr.len -= iphlen;
				}
				// ###LOCK need to lock that socket?
				if (sbappendaddr(&last->inp_socket->so_rcv,
				    (struct sockaddr *)&ripsrc, n,
				    opts, &error) != 0) {
					sorwakeup(last->inp_socket);
				}
				else {
					if (error) {
						/* should notify about lost packet */
						kprintf("rip_input can't append to socket\n");
					}
				}
				opts = 0;
			}
		}
		last = inp;
	}

	lck_rw_done(ripcbinfo.mtx);
	/* check AH/ESP integrity. */
	skipit = 0;
	if (ipsec_bypass == 0 && last) {
		lck_mtx_lock(sadb_mutex);
		if (ipsec4_in_reject_so(m, last->inp_socket)) {
			m_freem(m);
			ipsecstat.in_polvio++;
			ipstat.ips_delivered--;
			/* do not inject data to pcb */
			skipit = 1;
		}
		lck_mtx_unlock(sadb_mutex);
	}
	if (skipit == 0) {
		if (last) {
			if (last->inp_flags & INP_CONTROLOPTS ||
			    last->inp_socket->so_options & SO_TIMESTAMP)
				ip_savecontrol(last, &opts, ip, m);
			if (last->inp_flags & INP_STRIPHDR) {
				m->m_data += iphlen;
				m->m_len -= iphlen;
				m->m_pkthdr.len -= iphlen;
			}
			if (sbappendaddr(&last->inp_socket->so_rcv,
			    (struct sockaddr *)&ripsrc, m, opts, NULL) != 0) {
				sorwakeup(last->inp_socket);
			} else {
				kprintf("rip_input(2) can't append to socket\n");
			}
		} else {
			m_freem(m);
			ipstat.ips_noproto++;
			ipstat.ips_delivered--;
		}
	}
}
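
/*
 * Illustrative userspace sketch (an assumption for clarity, not part of
 * this file): a privileged process receives datagrams through the
 * rip_input() path above by opening a raw socket for one IP protocol,
 * e.g. ICMP.  Each matching packet is appended to so_rcv with the full
 * IP header still in place unless IP_STRIPHDR is set on the socket.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <unistd.h>
 *
 *	int
 *	recv_one_icmp(void)
 *	{
 *		char buf[2048];
 *		int s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 *		if (s < 0)
 *			return -1;
 *		ssize_t n = recv(s, buf, sizeof(buf), 0);
 *		close(s);
 *		return (n < 0) ? -1 : 0;
 *	}
 */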
/*
 * Generate IP header and pass packet to ip_output.
 * Tack on options user may have set up with control call.
 */
int
rip_output(m, so, dst)
	register struct mbuf *m;
	struct socket *so;
	u_long dst;
{
	register struct ip *ip;
	register struct inpcb *inp = sotoinpcb(so);
	int flags = (so->so_options & SO_DONTROUTE) | IP_ALLOWBROADCAST;

	/*
	 * If the user handed us a complete IP packet, use it.
	 * Otherwise, allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_WAIT);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_off = 0;
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = m->m_pkthdr.len;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst.s_addr = dst;
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		ip = mtod(m, struct ip *);
		/* don't allow both user specified and setsockopt options,
		   and don't allow packet length sizes that will crash */
		if (((IP_VHL_HL(ip->ip_vhl) != (sizeof (*ip) >> 2))
		     && inp->inp_options)
		    || (ip->ip_len > m->m_pkthdr.len)
		    || (ip->ip_len < (IP_VHL_HL(ip->ip_vhl) << 2))) {
			m_freem(m);
			return EINVAL;
		}
		if (ip->ip_id == 0) {
#if RANDOM_IP_ID
			ip->ip_id = ip_randomid();
#else
			ip->ip_id = htons(ip_id++);
#endif
		}
		/* XXX prevent ip_output from overwriting header fields */
		flags |= IP_RAWOUTPUT;
	}
	if (ipsec_bypass == 0 && ipsec_setsocket(m, so) != 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (inp->inp_route.ro_rt && inp->inp_route.ro_rt->generation_id != route_generation) {
		rtfree(inp->inp_route.ro_rt);
		inp->inp_route.ro_rt = (struct rtentry *)0;
	}

	return (ip_output_list(m, 0, inp->inp_options, &inp->inp_route, flags,
			inp->inp_moptions));
}
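
/*
 * Illustrative userspace sketch (an assumption for clarity, not part of
 * this file): the INP_HDRINCL branch of rip_output() above is reached
 * when a sender enables IP_HDRINCL and supplies its own IP header; the
 * kernel then only validates the header and fills ip_id if it is zero.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *
 *	static int
 *	enable_hdrincl(int raw_sock)
 *	{
 *		int on = 1;
 *		return setsockopt(raw_sock, IPPROTO_IP, IP_HDRINCL,
 *		    &on, sizeof(on));
 *	}
 */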
#if DUMMYNET
	if (!DUMMYNET_LOADED)
		ip_dn_init();
#endif /* DUMMYNET */

	return err == 0 && ip_fw_ctl_ptr == NULL ? -1 : err;
}
/*
 * Raw IP socket option processing.
 */
int
rip_ctloutput(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	struct	inpcb *inp = sotoinpcb(so);
	int	error, optval;

	if (sopt->sopt_level != IPPROTO_IP)
		return (EINVAL);

	error = 0;

	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_STRIPHDR:
			optval = inp->inp_flags & INP_STRIPHDR;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

			if (ip_fw_ctl_ptr == 0)
				error = load_ipfw();
			if (ip_fw_ctl_ptr && error == 0)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

#if DUMMYNET
		case IP_DUMMYNET_GET:
			if (DUMMYNET_LOADED)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;
#endif /* DUMMYNET */

			error = ip_mrouter_get(so, sopt);
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_STRIPHDR:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_STRIPHDR;
			else
				inp->inp_flags &= ~INP_STRIPHDR;
			break;

		case IP_OLD_FW_FLUSH:
		case IP_OLD_FW_RESETLOG:
			if (ip_fw_ctl_ptr == 0)
				error = load_ipfw();
			if (ip_fw_ctl_ptr && error == 0)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (DUMMYNET_LOADED)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_RSVP_ON:
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = ip_rsvp_done();
			break;

			/* XXX - should be combined */
		case IP_RSVP_VIF_ON:
			error = ip_rsvp_vif_init(so, sopt);
			break;

		case IP_RSVP_VIF_OFF:
			error = ip_rsvp_vif_done(so, sopt);
			break;

			error = ip_mrouter_set(so, sopt);
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}
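
/*
 * Illustrative userspace sketch (an assumption for clarity, not part of
 * this file): the IP_HDRINCL and IP_STRIPHDR cases handled by
 * rip_ctloutput() above are driven by ordinary setsockopt() calls at the
 * IPPROTO_IP level on a raw socket.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *
 *	static int
 *	strip_headers_on_receive(int raw_sock)
 *	{
 *		int on = 1;
 *		return setsockopt(raw_sock, IPPROTO_IP, IP_STRIPHDR,
 *		    &on, sizeof(on));
 *	}
 */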
/*
 * This function exists solely to receive the PRC_IFDOWN messages which
 * are sent by if_down().  It looks for an ifaddr whose ifa_addr is sa,
 * and calls in_ifadown() to remove all routes corresponding to that address.
 * It also receives the PRC_IFUP messages from if_up() and reinstalls the
 * interface routes.
 */
void
rip_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		lck_mtx_lock(rt_mtx);
		for (ia = in_ifaddrhead.tqh_first; ia;
		     ia = ia->ia_link.tqe_next) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				/*
				 * in_ifscrub kills the interface route.
				 */
				in_ifscrub(ia->ia_ifp, ia, 1);
				/*
				 * in_ifadown gets rid of all the rest of
				 * the routes.  This is not quite the right
				 * thing to do, but at least if we are running
				 * a routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 1);
				break;
			}
		}
		lck_mtx_unlock(rt_mtx);
		break;

	case PRC_IFUP:
		lck_mtx_lock(rt_mtx);
		for (ia = in_ifaddrhead.tqh_first; ia;
		     ia = ia->ia_link.tqe_next) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		if (ia == 0 || (ia->ia_flags & IFA_ROUTE)) {
			lck_mtx_unlock(rt_mtx);
			return;
		}
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = rtinit_locked(&ia->ia_ifa, RTM_ADD, flags);
		lck_mtx_unlock(rt_mtx);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;
		break;
	}
}
u_long	rip_sendspace = RIPSNDQ;
u_long	rip_recvspace = RIPRCVQ;

SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_INT(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum incoming raw IP datagram size");
__private_extern__ int
rip_attach(struct socket *so, int proto, struct proc *p)
{
	struct inpcb *inp;
	int error;

	if ((so->so_state & SS_PRIV) == 0)
		return (EPERM);
	if (p && (error = suser(p)) != 0)
		return error;

	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return error;
	error = in_pcballoc(so, &ripcbinfo, p);
	if (error)
		return error;
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = ip_defttl;
	return 0;
}
__private_extern__ int
rip_detach(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);

	if (so == ip_mrouter)
		ip_mrouter_done();
	ip_rsvp_force_done(so);
	if (so == ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	return 0;
}

__private_extern__ int
rip_abort(struct socket *so)
{
	soisdisconnected(so);
	return rip_detach(so);
}

__private_extern__ int
rip_disconnect(struct socket *so)
{
	if ((so->so_state & SS_ISCONNECTED) == 0)
		return ENOTCONN;
	return rip_abort(so);
}
__private_extern__ int
rip_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	struct inpcb *inp = sotoinpcb(so);
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct ifaddr *ifa = NULL;

	if (nam->sa_len != sizeof(*addr))
		return EINVAL;

	if (TAILQ_EMPTY(&ifnet_head) || ((addr->sin_family != AF_INET) &&
	    (addr->sin_family != AF_IMPLINK)) ||
	    (addr->sin_addr.s_addr &&
	     (ifa = ifa_ifwithaddr((struct sockaddr *)addr)) == 0)) {
		return EADDRNOTAVAIL;
	}
	else if (ifa) {
		ifafree(ifa);
		ifa = NULL;
	}
	inp->inp_laddr = addr->sin_addr;
	return 0;
}

__private_extern__ int
rip_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	struct inpcb *inp = sotoinpcb(so);
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;

	if (nam->sa_len != sizeof(*addr))
		return EINVAL;
	if (TAILQ_EMPTY(&ifnet_head))
		return EADDRNOTAVAIL;
	if ((addr->sin_family != AF_INET) &&
	    (addr->sin_family != AF_IMPLINK))
		return EAFNOSUPPORT;
	inp->inp_faddr = addr->sin_addr;
	soisconnected(so);

	return 0;
}

__private_extern__ int
rip_shutdown(struct socket *so)
{
	socantsendmore(so);
	return 0;
}
__private_extern__ int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
	 struct mbuf *control, struct proc *p)
{
	struct inpcb *inp = sotoinpcb(so);
	register u_long dst;

	if (so->so_state & SS_ISCONNECTED) {
		if (nam) {
			m_freem(m);
			return EISCONN;
		}
		dst = inp->inp_faddr.s_addr;
	} else {
		if (nam == NULL) {
			m_freem(m);
			return ENOTCONN;
		}
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	return rip_output(m, so, dst);
}
int
rip_unlock(struct socket *so, int refcount, int debug)
{
	int lr_saved;
	struct inpcb *inp = sotoinpcb(so);

	if (debug == 0) {
		__asm__ volatile("mflr %0" : "=r" (lr_saved));
	}
	else lr_saved = debug;

	if (refcount) {
		if (so->so_usecount <= 0)
			panic("rip_unlock: bad refcount so=%x val=%x\n", so, so->so_usecount);
		so->so_usecount--;
		if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) {
			lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
			lck_rw_lock_exclusive(ripcbinfo.mtx);
			in_pcbdispose(inp);
			lck_rw_done(ripcbinfo.mtx);
			return (0);
		}
	}
	lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
	return (0);
}
static int
rip_pcblist SYSCTL_HANDLER_ARGS
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_exclusive(ripcbinfo.mtx);
	if (req->oldptr == USER_ADDR_NULL) {
		n = ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
			+ (n + n/8) * sizeof(struct xinpcb);
		lck_rw_done(ripcbinfo.mtx);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		lck_rw_done(ripcbinfo.mtx);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = ripcbinfo.ipi_gencnt;
	n = ripcbinfo.ipi_count;

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error) {
		lck_rw_done(ripcbinfo.mtx);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(ripcbinfo.mtx);
		return 0;
	}

	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0) {
		lck_rw_done(ripcbinfo.mtx);
		return ENOMEM;
	}

	for (inp = ripcbinfo.listhead->lh_first, i = 0; inp && i < n;
	     inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
			inp_list[i++] = inp;
	}
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			struct xinpcb xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			inpcb_to_compat(inp, &xi.xi_inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof xig;
		xig.xig_gen = ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = ripcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	FREE(inp_list, M_TEMP);
	lck_rw_done(ripcbinfo.mtx);
	return error;
}
SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD, 0, 0,
	    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
struct pr_usrreqs rip_usrreqs = {
	rip_abort, pru_accept_notsupp, rip_attach, rip_bind, rip_connect,
	pru_connect2_notsupp, in_control, rip_detach, rip_disconnect,
	pru_listen_notsupp, in_setpeeraddr, pru_rcvd_notsupp,
	pru_rcvoob_notsupp, rip_send, pru_sense_null, rip_shutdown,
	in_setsockaddr, sosend, soreceive, pru_sopoll_notsupp
};