2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
23 * Copyright (c) 1982, 1986, 1988, 1993
24 * The Regents of the University of California. All rights reserved.
26 * Redistribution and use in source and binary forms, with or without
27 * modification, are permitted provided that the following conditions
29 * 1. Redistributions of source code must retain the above copyright
30 * notice, this list of conditions and the following disclaimer.
31 * 2. Redistributions in binary form must reproduce the above copyright
32 * notice, this list of conditions and the following disclaimer in the
33 * documentation and/or other materials provided with the distribution.
34 * 3. All advertising materials mentioning features or use of this software
35 * must display the following acknowledgement:
36 * This product includes software developed by the University of
37 * California, Berkeley and its contributors.
38 * 4. Neither the name of the University nor the names of its contributors
39 * may be used to endorse or promote products derived from this software
40 * without specific prior written permission.
42 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
54 * @(#)raw_ip.c 8.7 (Berkeley) 5/15/95
57 #include <sys/param.h>
58 #include <sys/systm.h>
59 #include <sys/kernel.h>
60 #include <sys/malloc.h>
63 #include <sys/domain.h>
64 #include <sys/protosw.h>
65 #include <sys/socket.h>
66 #include <sys/socketvar.h>
67 #include <sys/sysctl.h>
70 #include <vm/vm_zone.h>
74 #include <net/route.h>
77 #include <netinet/in.h>
78 #include <netinet/in_systm.h>
79 #include <netinet/ip.h>
80 #include <netinet/in_pcb.h>
81 #include <netinet/in_var.h>
82 #include <netinet/ip_var.h>
83 #include <netinet/ip_mroute.h>
85 #include <netinet/ip_fw.h>
88 #include <netinet6/ipsec.h>
92 #include <netinet/ip_dummynet.h>
/*
 * File-scope state for the raw IP protocol (PF_INET, SOCK_RAW).
 * NOTE(review): this text appears to be a line-shattered partial extraction
 * of xnu bsd/netinet/raw_ip.c -- statements are split across lines, the
 * leading integers are leftover original line numbers, and many original
 * lines are missing (the `#endif` below has no visible matching `#if`).
 * Confirm against the pristine source before building.
 */
/* Nonzero when IPsec processing is bypassed; checked on input and output paths below. */
96 extern int ipsec_bypass
;
/* Mutex guarding the IPsec security-association database (held around ipsec4_in_reject_so). */
97 extern lck_mtx_t
*sadb_mutex
;
/* Routing-table generation counter; used to invalidate a cached route in rip_output. */
100 extern u_long route_generation
;
/* List head of all raw-IP protocol control blocks (walked by rip_input). */
101 struct inpcbhead ripcb
;
/* PCB bookkeeping (hash bases, zone, rw lock) for raw IP sockets; set up in rip_init. */
102 struct inpcbinfo ripcbinfo
;
104 /* control hooks for ipfw and dummynet */
105 ip_fw_ctl_t
*ip_fw_ctl_ptr
;
107 ip_dn_ctl_t
*ip_dn_ctl_ptr
;
108 #endif /* DUMMYNET */
111 * Nominal space allocated to a raw ip socket.
117 * Raw interface to IP protocol.
/*
 * rip_init -- one-time initialization of the raw-IP PCB machinery.
 * NOTE(review): the function signature line is missing from this extraction;
 * only the body fragments survive. Sets up the PCB list head, dummy hash
 * tables, the inpcb zone, and the lock group/attr/rw-lock for ripcbinfo.
 */
121 * Initialize raw connection block q.
126 struct inpcbinfo
*pcbinfo
;
/* All raw-IP PCBs are kept on the single ripcb list. */
129 ripcbinfo
.listhead
= &ripcb
;
131 * XXX We don't use the hash list for raw IP, but it's easier
132 * to allocate a one entry hash list than it is to check all
133 * over the place for hashbase == NULL.
135 ripcbinfo
.hashbase
= hashinit(1, M_PCB
, &ripcbinfo
.hashmask
);
136 ripcbinfo
.porthashbase
= hashinit(1, M_PCB
, &ripcbinfo
.porthashmask
);
/* Zone allocator for struct inpcb; NOTE(review): zinit's trailing
 * arguments are missing from this extraction. */
138 ripcbinfo
.ipi_zone
= (void *) zinit(sizeof(struct inpcb
),
139 (4096 * sizeof(struct inpcb
)),
142 pcbinfo
= &ripcbinfo
;
144 * allocate lock group attribute and group for udp pcb mutexes
146 pcbinfo
->mtx_grp_attr
= lck_grp_attr_alloc_init();
148 pcbinfo
->mtx_grp
= lck_grp_alloc_init("ripcb", pcbinfo
->mtx_grp_attr
);
151 * allocate the lock attribute for udp pcb mutexes
153 pcbinfo
->mtx_attr
= lck_attr_alloc_init();
/* The rw lock protects the PCB list; without it the protocol cannot operate. */
155 if ((pcbinfo
->mtx
= lck_rw_alloc_init(pcbinfo
->mtx_grp
, pcbinfo
->mtx_attr
)) == NULL
)
156 return; /* pretty much dead if this fails... */
/* Scratch sockaddr_in reused by rip_input to report the packet's source
 * address to sbappendaddr; only sin_addr is updated per packet. */
160 static struct sockaddr_in ripsrc
= { sizeof(ripsrc
), AF_INET
};
/*
 * rip_input -- deliver an incoming IP packet to every matching raw socket.
 * NOTE(review): the function signature and several body lines are missing
 * from this extraction (parameters are presumably `struct mbuf *m` and
 * `int iphlen` -- confirm against the pristine source).
 * Walks the ripcb list under a shared rw lock; each earlier match gets a
 * copy of the mbuf, the final match consumes the original.
 */
162 * Setup generic address and protocol structures
163 * for raw_input routine, then pass them along with
171 register struct ip
*ip
= mtod(m
, struct ip
*);
172 register struct inpcb
*inp
;
173 struct inpcb
*last
= 0;
174 struct mbuf
*opts
= 0;
/* Record the packet's source for delivery to recvfrom() callers. */
177 ripsrc
.sin_addr
= ip
->ip_src
;
178 lck_rw_lock_shared(ripcbinfo
.mtx
);
179 LIST_FOREACH(inp
, &ripcb
, inp_list
) {
/* Skip non-IPv4 PCBs, then filter by protocol and by bound local/foreign address. */
181 if ((inp
->inp_vflag
& INP_IPV4
) == 0)
184 if (inp
->inp_ip_p
&& (inp
->inp_ip_p
!= ip
->ip_p
))
186 if (inp
->inp_laddr
.s_addr
&&
187 inp
->inp_laddr
.s_addr
!= ip
->ip_dst
.s_addr
)
189 if (inp
->inp_faddr
.s_addr
&&
190 inp
->inp_faddr
.s_addr
!= ip
->ip_src
.s_addr
)
/* A previous match exists ('last'); give it a full copy of the packet. */
193 struct mbuf
*n
= m_copy(m
, 0, (int)M_COPYALL
);
196 /* check AH/ESP integrity. */
198 if (ipsec_bypass
== 0 && n
) {
199 lck_mtx_lock(sadb_mutex
);
200 if (ipsec4_in_reject_so(n
, last
->inp_socket
)) {
202 ipsecstat
.in_polvio
++;
203 /* do not inject data to pcb */
206 lck_mtx_unlock(sadb_mutex
);
/* NOTE(review): 'skipit' is set in lines missing from this extraction. */
209 if (n
&& skipit
== 0) {
211 if (last
->inp_flags
& INP_CONTROLOPTS
||
212 last
->inp_socket
->so_options
& SO_TIMESTAMP
)
213 ip_savecontrol(last
, &opts
, ip
, n
);
/* Socket asked for payload only: drop the IP header from the copy. */
214 if (last
->inp_flags
& INP_STRIPHDR
) {
216 n
->m_pkthdr
.len
-= iphlen
;
219 // ###LOCK need to lock that socket?
220 if (sbappendaddr(&last
->inp_socket
->so_rcv
,
221 (struct sockaddr
*)&ripsrc
, n
,
222 opts
, &error
) != 0) {
223 sorwakeup(last
->inp_socket
);
227 /* should notify about lost packet */
228 kprintf("rip_input can't append to socket\n");
236 lck_rw_done(ripcbinfo
.mtx
);
/* Final matching socket receives the original mbuf (no copy). */
238 /* check AH/ESP integrity. */
240 if (ipsec_bypass
== 0 && last
) {
241 lck_mtx_lock(sadb_mutex
);
242 if (ipsec4_in_reject_so(m
, last
->inp_socket
)) {
244 ipsecstat
.in_polvio
++;
245 ipstat
.ips_delivered
--;
246 /* do not inject data to pcb */
249 lck_mtx_unlock(sadb_mutex
);
254 if (last
->inp_flags
& INP_CONTROLOPTS
||
255 last
->inp_socket
->so_options
& SO_TIMESTAMP
)
256 ip_savecontrol(last
, &opts
, ip
, m
);
257 if (last
->inp_flags
& INP_STRIPHDR
) {
259 m
->m_pkthdr
.len
-= iphlen
;
262 if (sbappendaddr(&last
->inp_socket
->so_rcv
,
263 (struct sockaddr
*)&ripsrc
, m
, opts
, NULL
) != 0) {
264 sorwakeup(last
->inp_socket
);
266 kprintf("rip_input(2) can't append to socket\n");
/* No socket matched: count the undeliverable packet. */
270 ipstat
.ips_noproto
++;
271 ipstat
.ips_delivered
--;
/*
 * rip_output -- send a packet on a raw socket.
 * If INP_HDRINCL is clear, prepend and fill an IP header from the PCB;
 * otherwise validate the caller-supplied header and pass it through
 * with IP_RAWOUTPUT so ip_output leaves the header fields alone.
 * NOTE(review): several lines (full K&R parameter list, error returns,
 * closing braces) are missing from this extraction.
 */
277 * Generate IP header and pass packet to ip_output.
278 * Tack on options user may have setup with control call.
281 rip_output(m
, so
, dst
)
282 register struct mbuf
*m
;
286 register struct ip
*ip
;
287 register struct inpcb
*inp
= sotoinpcb(so
);
/* Honor SO_DONTROUTE and always allow broadcast on raw sockets. */
288 int flags
= (so
->so_options
& SO_DONTROUTE
) | IP_ALLOWBROADCAST
;
291 * If the user handed us a complete IP packet, use it.
292 * Otherwise, allocate an mbuf for a header and fill it in.
294 if ((inp
->inp_flags
& INP_HDRINCL
) == 0) {
/* Reject payloads that would exceed the maximum IP datagram once a header is added. */
295 if (m
->m_pkthdr
.len
+ sizeof(struct ip
) > IP_MAXPACKET
) {
299 M_PREPEND(m
, sizeof(struct ip
), M_WAIT
);
300 ip
= mtod(m
, struct ip
*);
301 ip
->ip_tos
= inp
->inp_ip_tos
;
303 ip
->ip_p
= inp
->inp_ip_p
;
304 ip
->ip_len
= m
->m_pkthdr
.len
;
305 ip
->ip_src
= inp
->inp_laddr
;
306 ip
->ip_dst
.s_addr
= dst
;
307 ip
->ip_ttl
= inp
->inp_ip_ttl
;
/* INP_HDRINCL path: user supplied the full IP header. */
309 if (m
->m_pkthdr
.len
> IP_MAXPACKET
) {
313 ip
= mtod(m
, struct ip
*);
314 /* don't allow both user specified and setsockopt options,
315 and don't allow packet length sizes that will crash */
316 if (((IP_VHL_HL(ip
->ip_vhl
) != (sizeof (*ip
) >> 2))
318 || (ip
->ip_len
> m
->m_pkthdr
.len
)
319 || (ip
->ip_len
< (IP_VHL_HL(ip
->ip_vhl
) << 2))) {
/* NOTE(review): the conditional selecting randomized vs sequential IP ids
 * is missing from this extraction -- only both assignments survive. */
325 ip
->ip_id
= ip_randomid();
327 ip
->ip_id
= htons(ip_id
++);
329 /* XXX prevent ip_output from overwriting header fields */
330 flags
|= IP_RAWOUTPUT
;
/* Tag the mbuf with the socket for IPsec policy lookup. */
335 if (ipsec_bypass
== 0 && ipsec_setsocket(m
, so
) != 0) {
/* Drop a cached route that predates the last routing-table change. */
341 if (inp
->inp_route
.ro_rt
&& inp
->inp_route
.ro_rt
->generation_id
!= route_generation
) {
342 rtfree(inp
->inp_route
.ro_rt
);
343 inp
->inp_route
.ro_rt
= (struct rtentry
*)0;
346 return (ip_output_list(m
, 0, inp
->inp_options
, &inp
->inp_route
, flags
,
/*
 * NOTE(review): fragment of an ipfw/dummynet control helper whose header is
 * missing from this extraction (original lines 350-363 region). Returns -1
 * when the call succeeded trivially but the ipfw KEXT hook is not installed,
 * otherwise the underlying error code.
 */
358 if (!DUMMYNET_LOADED
)
360 #endif /* DUMMYNET */
363 return err
== 0 && ip_fw_ctl_ptr
== NULL
? -1 : err
;
/*
 * rip_ctloutput -- get/set socket options on a raw IP socket.
 * Handles raw-specific options (IP_HDRINCL, IP_STRIPHDR, ipfw, dummynet,
 * multicast routing, RSVP) and defers everything else to ip_ctloutput.
 * NOTE(review): case labels, returns, and braces are partially missing
 * from this extraction.
 */
367 * Raw IP socket option processing.
370 rip_ctloutput(so
, sopt
)
372 struct sockopt
*sopt
;
374 struct inpcb
*inp
= sotoinpcb(so
);
/* Only IPPROTO_IP-level options are handled here. */
377 if (sopt
->sopt_level
!= IPPROTO_IP
)
382 switch (sopt
->sopt_dir
) {
/* --- SOPT_GET direction --- */
384 switch (sopt
->sopt_name
) {
386 optval
= inp
->inp_flags
& INP_HDRINCL
;
387 error
= sooptcopyout(sopt
, &optval
, sizeof optval
);
391 optval
= inp
->inp_flags
& INP_STRIPHDR
;
392 error
= sooptcopyout(sopt
, &optval
, sizeof optval
);
/* ipfw get: fail if the firewall KEXT hook is not installed. */
399 if (ip_fw_ctl_ptr
== 0)
401 if (ip_fw_ctl_ptr
&& error
== 0)
402 error
= ip_fw_ctl_ptr(sopt
);
408 case IP_DUMMYNET_GET
:
410 error
= ip_dn_ctl_ptr(sopt
);
414 #endif /* DUMMYNET */
424 error
= ip_mrouter_get(so
, sopt
);
/* Anything else falls through to the generic IP option handler. */
428 error
= ip_ctloutput(so
, sopt
);
/* --- SOPT_SET direction --- */
434 switch (sopt
->sopt_name
) {
436 error
= sooptcopyin(sopt
, &optval
, sizeof optval
,
441 inp
->inp_flags
|= INP_HDRINCL
;
443 inp
->inp_flags
&= ~INP_HDRINCL
;
447 error
= sooptcopyin(sopt
, &optval
, sizeof optval
,
452 inp
->inp_flags
|= INP_STRIPHDR
;
454 inp
->inp_flags
&= ~INP_STRIPHDR
;
465 case IP_OLD_FW_FLUSH
:
467 case IP_OLD_FW_RESETLOG
:
468 if (ip_fw_ctl_ptr
== 0)
470 if (ip_fw_ctl_ptr
&& error
== 0)
471 error
= ip_fw_ctl_ptr(sopt
);
477 case IP_DUMMYNET_CONFIGURE
:
478 case IP_DUMMYNET_DEL
:
479 case IP_DUMMYNET_FLUSH
:
481 error
= ip_dn_ctl_ptr(sopt
);
483 error
= ENOPROTOOPT
;
/* RSVP enable/disable and per-vif setup. */
488 error
= ip_rsvp_init(so
);
492 error
= ip_rsvp_done();
495 /* XXX - should be combined */
497 error
= ip_rsvp_vif_init(so
, sopt
);
500 case IP_RSVP_VIF_OFF
:
501 error
= ip_rsvp_vif_done(so
, sopt
);
512 error
= ip_mrouter_set(so
, sopt
);
516 error
= ip_ctloutput(so
, sopt
);
/*
 * rip_ctlinput -- react to interface up/down control messages.
 * PRC_IFDOWN: scrub the interface route and tear down dependent routes.
 * PRC_IFUP: reinstall the interface route via rtinit_locked.
 * NOTE(review): the PRC_IFDOWN/PRC_IFUP case labels themselves are missing
 * from this extraction; the two loops below correspond to those two cases.
 */
526 * This function exists solely to receive the PRC_IFDOWN messages which
527 * are sent by if_down(). It looks for an ifaddr whose ifa_addr is sa,
528 * and calls in_ifadown() to remove all routes corresponding to that address.
529 * It also receives the PRC_IFUP messages from if_up() and reinstalls the
533 rip_ctlinput(cmd
, sa
, vip
)
538 struct in_ifaddr
*ia
;
/* --- interface-down handling: find the ifaddr matching 'sa' --- */
545 lck_mtx_lock(rt_mtx
);
546 for (ia
= in_ifaddrhead
.tqh_first
; ia
;
547 ia
= ia
->ia_link
.tqe_next
) {
548 if (ia
->ia_ifa
.ifa_addr
== sa
549 && (ia
->ia_flags
& IFA_ROUTE
)) {
551 * in_ifscrub kills the interface route.
553 in_ifscrub(ia
->ia_ifp
, ia
, 1);
555 * in_ifadown gets rid of all the rest of
556 * the routes. This is not quite the right
557 * thing to do, but at least if we are running
558 * a routing process they will come back.
560 in_ifadown(&ia
->ia_ifa
, 1);
564 lck_mtx_unlock(rt_mtx
);
/* --- interface-up handling: reinstall the interface route --- */
568 lck_mtx_lock(rt_mtx
);
569 for (ia
= in_ifaddrhead
.tqh_first
; ia
;
570 ia
= ia
->ia_link
.tqe_next
) {
571 if (ia
->ia_ifa
.ifa_addr
== sa
)
/* No matching ifaddr, or route already installed: nothing to do. */
574 if (ia
== 0 || (ia
->ia_flags
& IFA_ROUTE
)) {
575 lck_mtx_unlock(rt_mtx
);
579 ifp
= ia
->ia_ifa
.ifa_ifp
;
/* Loopback and point-to-point interfaces are handled elsewhere
 * (NOTE(review): the flag setup between these lines is missing). */
581 if ((ifp
->if_flags
& IFF_LOOPBACK
)
582 || (ifp
->if_flags
& IFF_POINTOPOINT
))
585 err
= rtinit_locked(&ia
->ia_ifa
, RTM_ADD
, flags
);
586 lck_mtx_unlock(rt_mtx
);
588 ia
->ia_flags
|= IFA_ROUTE
;
/* Default send/receive buffer reservations for raw IP sockets, tunable at
 * runtime through the net.inet.raw.maxdgram / net.inet.raw.recvspace sysctls. */
593 u_long rip_sendspace
= RIPSNDQ
;
594 u_long rip_recvspace
= RIPRCVQ
;
596 SYSCTL_INT(_net_inet_raw
, OID_AUTO
, maxdgram
, CTLFLAG_RW
,
597 &rip_sendspace
, 0, "Maximum outgoing raw IP datagram size");
598 SYSCTL_INT(_net_inet_raw
, OID_AUTO
, recvspace
, CTLFLAG_RW
,
599 &rip_recvspace
, 0, "Maximum incoming raw IP datagram size");
/*
 * rip_attach -- create the PCB for a new raw IP socket.
 * Requires privilege (SS_PRIV and, when a proc is supplied, suser()),
 * reserves buffer space, allocates an inpcb, and records the protocol
 * number and default TTL. NOTE(review): error-return lines between the
 * checks are missing from this extraction.
 */
602 rip_attach(struct socket
*so
, int proto
, struct proc
*p
)
/* Raw sockets are restricted to privileged callers. */
611 if ((so
->so_state
& SS_PRIV
) == 0)
614 if (p
&& (error
= suser(p
)) != 0)
618 error
= soreserve(so
, rip_sendspace
, rip_recvspace
);
622 error
= in_pcballoc(so
, &ripcbinfo
, p
);
626 inp
= (struct inpcb
*)so
->so_pcb
;
627 inp
->inp_vflag
|= INP_IPV4
;
/* The third socket() argument selects which IP protocol this PCB receives. */
628 inp
->inp_ip_p
= proto
;
629 inp
->inp_ip_ttl
= ip_defttl
;
/*
 * rip_detach -- tear down a raw IP socket's PCB.
 * Releases multicast-router and RSVP state tied to this socket before
 * detaching (NOTE(review): the detach call itself is missing from this
 * extraction).
 */
633 __private_extern__
int
634 rip_detach(struct socket
*so
)
641 if (so
== ip_mrouter
)
643 ip_rsvp_force_done(so
);
/*
 * rip_abort -- abruptly terminate a raw IP socket: mark it disconnected,
 * then detach and free its PCB via rip_detach.
 */
650 __private_extern__
int
651 rip_abort(struct socket
*so
)
653 soisdisconnected(so
);
654 return rip_detach(so
);
/*
 * rip_disconnect -- disconnect a connected raw socket.
 * Fails for sockets that are not connected (NOTE(review): the error return
 * is on a line missing from this extraction); otherwise equivalent to abort.
 */
657 __private_extern__
int
658 rip_disconnect(struct socket
*so
)
660 if ((so
->so_state
& SS_ISCONNECTED
) == 0)
662 return rip_abort(so
);
/*
 * rip_bind -- bind a raw IP socket to a local address.
 * Validates the sockaddr length and family (AF_INET or historical
 * AF_IMPLINK) and, for a non-wildcard address, requires that some
 * interface actually owns it. On success stores the local address
 * in the PCB.
 */
665 __private_extern__
int
666 rip_bind(struct socket
*so
, struct sockaddr
*nam
, struct proc
*p
)
668 struct inpcb
*inp
= sotoinpcb(so
);
669 struct sockaddr_in
*addr
= (struct sockaddr_in
*)nam
;
670 struct ifaddr
*ifa
= NULL
;
672 if (nam
->sa_len
!= sizeof(*addr
))
/* Reject if no interfaces exist, the family is wrong, or a specific
 * address was requested that no local interface carries. */
675 if (TAILQ_EMPTY(&ifnet_head
) || ((addr
->sin_family
!= AF_INET
) &&
676 (addr
->sin_family
!= AF_IMPLINK
)) ||
677 (addr
->sin_addr
.s_addr
&&
678 (ifa
= ifa_ifwithaddr((struct sockaddr
*)addr
)) == 0)) {
679 return EADDRNOTAVAIL
;
685 inp
->inp_laddr
= addr
->sin_addr
;
/*
 * rip_connect -- fix the foreign address of a raw IP socket so that
 * rip_input filters on it and rip_send can omit a destination.
 * NOTE(review): the soisconnected() call and error returns are on lines
 * missing from this extraction.
 */
689 __private_extern__
int
690 rip_connect(struct socket
*so
, struct sockaddr
*nam
, struct proc
*p
)
692 struct inpcb
*inp
= sotoinpcb(so
);
693 struct sockaddr_in
*addr
= (struct sockaddr_in
*)nam
;
695 if (nam
->sa_len
!= sizeof(*addr
))
697 if (TAILQ_EMPTY(&ifnet_head
))
698 return EADDRNOTAVAIL
;
699 if ((addr
->sin_family
!= AF_INET
) &&
700 (addr
->sin_family
!= AF_IMPLINK
))
702 inp
->inp_faddr
= addr
->sin_addr
;
/*
 * rip_shutdown -- shut down the send side of a raw socket.
 * NOTE(review): the body (presumably socantsendmore + return) is missing
 * from this extraction.
 */
707 __private_extern__
int
708 rip_shutdown(struct socket
*so
)
/*
 * rip_send -- transmit a datagram on a raw socket.
 * Connected sockets use the PCB's foreign address as the destination;
 * unconnected sends take it from 'nam'. Delegates the actual transmit
 * to rip_output. NOTE(review): the error paths (already-connected with
 * nam, unconnected without nam) are on lines missing from this extraction.
 */
714 __private_extern__
int
715 rip_send(struct socket
*so
, int flags
, struct mbuf
*m
, struct sockaddr
*nam
,
716 struct mbuf
*control
, struct proc
*p
)
718 struct inpcb
*inp
= sotoinpcb(so
);
721 if (so
->so_state
& SS_ISCONNECTED
) {
726 dst
= inp
->inp_faddr
.s_addr
;
732 dst
= ((struct sockaddr_in
*)nam
)->sin_addr
.s_addr
;
734 return rip_output(m
, so
, dst
);
/*
 * rip_unlock -- drop a reference on a raw socket and, on the last
 * reference of a PCB marked WNT_STOPUSING, perform final disposal under
 * the exclusive ripcbinfo lock. Also records the caller's return address
 * in the socket's unlock-history ring for lock debugging.
 * NOTE(review): the panic message below misspells "refcount" as
 * "refoucnt" in the original; left byte-identical here.
 */
737 /* note: rip_unlock is called from different protos instead of the generic socket_unlock,
738 * it will handle the socket dealloc on last reference
741 rip_unlock(struct socket
*so
, int refcount
, int debug
)
744 struct inpcb
*inp
= sotoinpcb(so
);
/* debug == 0 means "record my caller"; otherwise the caller passed its own PC. */
747 lr_saved
= (unsigned int) __builtin_return_address(0);
748 else lr_saved
= debug
;
751 if (so
->so_usecount
<= 0)
752 panic("rip_unlock: bad refoucnt so=%x val=%x\n", so
, so
->so_usecount
);
754 if (so
->so_usecount
== 0 && (inp
->inp_wantcnt
== WNT_STOPUSING
)) {
755 /* cleanup after last reference */
/* Must drop the domain mutex before taking the PCB-list rw lock
 * (lock ordering); the dispose call sits on a missing line between
 * the lock/unlock pair. */
756 lck_mtx_unlock(so
->so_proto
->pr_domain
->dom_mtx
);
757 lck_rw_lock_exclusive(ripcbinfo
.mtx
);
759 lck_rw_done(ripcbinfo
.mtx
);
/* Normal path: log the unlock site and release the domain mutex. */
763 so
->unlock_lr
[so
->next_unlock_lr
] = (u_int
*)lr_saved
;
764 so
->next_unlock_lr
= (so
->next_unlock_lr
+1) % SO_LCKDBG_MAX
;
765 lck_mtx_unlock(so
->so_proto
->pr_domain
->dom_mtx
);
/*
 * rip_pcblist -- sysctl handler exporting the list of raw IP PCBs to
 * userland as: leading xinpgen header, one xinpcb per live PCB, trailing
 * xinpgen whose generation count lets the reader detect concurrent change.
 * Holds ripcbinfo.mtx exclusively for the whole export.
 * NOTE(review): returns, brace lines, and some locals are missing from
 * this extraction.
 */
770 rip_pcblist SYSCTL_HANDLER_ARGS
773 struct inpcb
*inp
, **inp_list
;
778 * The process of preparing the TCB list is too time-consuming and
779 * resource-intensive to repeat twice on every request.
781 lck_rw_lock_exclusive(ripcbinfo
.mtx
);
/* Size-probe request: report a generous estimate and bail out. */
782 if (req
->oldptr
== USER_ADDR_NULL
) {
783 n
= ripcbinfo
.ipi_count
;
784 req
->oldidx
= 2 * (sizeof xig
)
785 + (n
+ n
/8) * sizeof(struct xinpcb
);
786 lck_rw_done(ripcbinfo
.mtx
);
/* This sysctl is read-only; writes are rejected. */
790 if (req
->newptr
!= USER_ADDR_NULL
) {
791 lck_rw_done(ripcbinfo
.mtx
);
796 * OK, now we're committed to doing something.
798 gencnt
= ripcbinfo
.ipi_gencnt
;
799 n
= ripcbinfo
.ipi_count
;
/* Emit the leading generation header. */
801 bzero(&xig
, sizeof(xig
));
802 xig
.xig_len
= sizeof xig
;
804 xig
.xig_gen
= gencnt
;
805 xig
.xig_sogen
= so_gencnt
;
806 error
= SYSCTL_OUT(req
, &xig
, sizeof xig
);
808 lck_rw_done(ripcbinfo
.mtx
);
812 * We are done if there is no pcb
815 lck_rw_done(ripcbinfo
.mtx
);
819 inp_list
= _MALLOC(n
* sizeof *inp_list
, M_TEMP
, M_WAITOK
);
821 lck_rw_done(ripcbinfo
.mtx
);
/* Snapshot the live PCBs (skip ones created after gencnt or dead). */
825 for (inp
= ripcbinfo
.listhead
->lh_first
, i
= 0; inp
&& i
< n
;
826 inp
= inp
->inp_list
.le_next
) {
827 if (inp
->inp_gencnt
<= gencnt
&& inp
->inp_state
!= INPCB_STATE_DEAD
)
/* Convert each snapshotted PCB to its exported xinpcb form. */
833 for (i
= 0; i
< n
; i
++) {
835 if (inp
->inp_gencnt
<= gencnt
&& inp
->inp_state
!= INPCB_STATE_DEAD
) {
838 bzero(&xi
, sizeof(xi
));
839 xi
.xi_len
= sizeof xi
;
840 /* XXX should avoid extra copy */
841 inpcb_to_compat(inp
, &xi
.xi_inp
);
843 sotoxsocket(inp
->inp_socket
, &xi
.xi_socket
);
844 error
= SYSCTL_OUT(req
, &xi
, sizeof xi
);
849 * Give the user an updated idea of our state.
850 * If the generation differs from what we told
851 * her before, she knows that something happened
852 * while we were processing this request, and it
853 * might be necessary to retry.
855 bzero(&xig
, sizeof(xig
));
856 xig
.xig_len
= sizeof xig
;
857 xig
.xig_gen
= ripcbinfo
.ipi_gencnt
;
858 xig
.xig_sogen
= so_gencnt
;
859 xig
.xig_count
= ripcbinfo
.ipi_count
;
860 error
= SYSCTL_OUT(req
, &xig
, sizeof xig
);
862 FREE(inp_list
, M_TEMP
);
863 lck_rw_done(ripcbinfo
.mtx
);
/* Register rip_pcblist as the read-only net.inet.raw.pcblist sysctl. */
867 SYSCTL_PROC(_net_inet_raw
, OID_AUTO
/*XXX*/, pcblist
, CTLFLAG_RD
, 0, 0,
868 rip_pcblist
, "S,xinpcb", "List of active raw IP sockets");
870 struct pr_usrreqs rip_usrreqs
= {
871 rip_abort
, pru_accept_notsupp
, rip_attach
, rip_bind
, rip_connect
,
872 pru_connect2_notsupp
, in_control
, rip_detach
, rip_disconnect
,
873 pru_listen_notsupp
, in_setpeeraddr
, pru_rcvd_notsupp
,
874 pru_rcvoob_notsupp
, rip_send
, pru_sense_null
, rip_shutdown
,
875 in_setsockaddr
, sosend
, soreceive
, pru_sopoll_notsupp