/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <libkern/OSAtomic.h>
#include <kern/zalloc.h>

#include <pexpert/pexpert.h>

#include <net/if.h>
#include <net/net_api_stats.h>
#include <net/route.h>

#define _IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_tclass.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

#if INET6
#include <netinet6/in6_pcb.h>
#endif /* INET6 */

#include <netinet/ip_fw.h>

#if IPSEC
#include <netinet6/ipsec.h>
#endif /* IPSEC */

#if DUMMYNET
#include <netinet/ip_dummynet.h>
#endif /* DUMMYNET */

#if CONFIG_MACF_NET
#include <security/mac_framework.h>
#endif /* CONFIG_MACF_NET */

int load_ipfw(void);
int rip_detach(struct socket *);
int rip_abort(struct socket *);
int rip_disconnect(struct socket *);
int rip_bind(struct socket *, struct sockaddr *, struct proc *);
int rip_connect(struct socket *, struct sockaddr *, struct proc *);
int rip_shutdown(struct socket *);

struct inpcbhead ripcb;
struct inpcbinfo ripcbinfo;

/* control hooks for ipfw and dummynet */
#if IPFIREWALL
ip_fw_ctl_t *ip_fw_ctl_ptr;
#endif /* IPFIREWALL */
#if DUMMYNET
ip_dn_ctl_t *ip_dn_ctl_ptr;
#endif /* DUMMYNET */

/*
 * Nominal space allocated to a raw ip socket.
 */
#define	RIPSNDQ		8192
#define	RIPRCVQ		8192
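/*
 * These are only defaults; both limits are exported read/write further
 * below as the net.inet.raw.maxdgram and net.inet.raw.recvspace sysctls.
 */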

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize raw connection block q.
 */
void
rip_init(struct protosw *pp, struct domain *dp)
{
#pragma unused(dp)
	static int rip_initialized = 0;
	struct inpcbinfo *pcbinfo;

	VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED);

	if (rip_initialized)
		return;
	rip_initialized = 1;

	LIST_INIT(&ripcb);
	ripcbinfo.ipi_listhead = &ripcb;
	/*
	 * XXX We don't use the hash list for raw IP, but it's easier
	 * to allocate a one entry hash list than it is to check all
	 * over the place for ipi_hashbase == NULL.
	 */
	ripcbinfo.ipi_hashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_hashmask);
	ripcbinfo.ipi_porthashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_porthashmask);

	ripcbinfo.ipi_zone = zinit(sizeof(struct inpcb),
	    (4096 * sizeof(struct inpcb)), 4096, "ripzone");

	pcbinfo = &ripcbinfo;
	/*
	 * allocate lock group attribute and group for raw IP pcb mutexes
	 */
	pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init();
	pcbinfo->ipi_lock_grp = lck_grp_alloc_init("ripcb", pcbinfo->ipi_lock_grp_attr);

	/*
	 * allocate the lock attribute for raw IP pcb mutexes
	 */
	pcbinfo->ipi_lock_attr = lck_attr_alloc_init();
	if ((pcbinfo->ipi_lock = lck_rw_alloc_init(pcbinfo->ipi_lock_grp,
	    pcbinfo->ipi_lock_attr)) == NULL) {
		panic("%s: unable to allocate PCB lock\n", __func__);
		/* NOTREACHED */
	}

	in_pcbinfo_attach(&ripcbinfo);
}

static struct sockaddr_in ripsrc = {
	sizeof (ripsrc), AF_INET, 0, { 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 }
};

/*
 * Input routine for raw IP: fill in the generic source address structure
 * above, then deliver a copy of the packet to every raw PCB whose protocol
 * and local/foreign addresses match.
 */
void
rip_input(struct mbuf *m, int iphlen)
{
	struct ip *ip = mtod(m, struct ip *);
	struct inpcb *inp;
	struct inpcb *last = 0;
	struct mbuf *opts = 0;
	int skipit = 0, ret = 0;
	struct ifnet *ifp = m->m_pkthdr.rcvif;

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	ripsrc.sin_addr = ip->ip_src;
	lck_rw_lock_shared(ripcbinfo.ipi_lock);
	LIST_FOREACH(inp, &ripcb, inp_list) {
#if INET6
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif /* INET6 */
		if (inp->inp_ip_p && (inp->inp_ip_p != ip->ip_p))
			continue;
		if (inp->inp_laddr.s_addr &&
		    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr &&
		    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (inp_restricted_recv(inp, ifp))
			continue;
		if (last) {
			struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);

			skipit = 0;

#if NECP
			if (n && !necp_socket_is_allowed_to_send_recv_v4(last,
			    0, 0, &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL)) {
				m_freem(n);
				/* do not inject data to pcb */
				skipit = 1;
			}
#endif /* NECP */
#if CONFIG_MACF_NET
			if (n && skipit == 0) {
				if (mac_inpcb_check_deliver(last, n, AF_INET,
				    SOCK_RAW) != 0) {
					m_freem(n);
					skipit = 1;
				}
			}
#endif /* CONFIG_MACF_NET */
			if (n && skipit == 0) {
				int error = 0;
				if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
				    (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
				    (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0) {
					ret = ip_savecontrol(last, &opts, ip, n);
					if (ret != 0) {
						m_freem(n);
						m_freem(opts);
						last = inp;
						continue;
					}
				}
				if (last->inp_flags & INP_STRIPHDR) {
					n->m_len -= iphlen;
					n->m_pkthdr.len -= iphlen;
					n->m_data += iphlen;
				}
				so_recv_data_stat(last->inp_socket, m, 0);
				if (sbappendaddr(&last->inp_socket->so_rcv,
				    (struct sockaddr *)&ripsrc, n,
				    opts, &error) != 0) {
					sorwakeup(last->inp_socket);
				} else {
					if (error) {
						/* should notify about lost packet */
						ipstat.ips_raw_sappend_fail++;
					}
				}
				opts = 0;
			}
		}
		last = inp;
	}

	skipit = 0;
#if NECP
	if (last && !necp_socket_is_allowed_to_send_recv_v4(last, 0, 0,
	    &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL)) {
		m_freem(m);
		OSAddAtomic(1, &ipstat.ips_delivered);
		/* do not inject data to pcb */
		skipit = 1;
	}
#endif /* NECP */
#if CONFIG_MACF_NET
	if (last && skipit == 0) {
		if (mac_inpcb_check_deliver(last, m, AF_INET, SOCK_RAW) != 0) {
			skipit = 1;
			m_freem(m);
		}
	}
#endif /* CONFIG_MACF_NET */
	if (skipit == 0) {
		if (last) {
			if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
			    (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
			    (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0) {
				ret = ip_savecontrol(last, &opts, ip, m);
				if (ret != 0) {
					m_freem(m);
					m_freem(opts);
					goto unlock;
				}
			}
			if (last->inp_flags & INP_STRIPHDR) {
				m->m_len -= iphlen;
				m->m_pkthdr.len -= iphlen;
				m->m_data += iphlen;
			}
			so_recv_data_stat(last->inp_socket, m, 0);
			if (sbappendaddr(&last->inp_socket->so_rcv,
			    (struct sockaddr *)&ripsrc, m, opts, NULL) != 0) {
				sorwakeup(last->inp_socket);
			} else {
				ipstat.ips_raw_sappend_fail++;
			}
		} else {
			m_freem(m);
			OSAddAtomic(1, &ipstat.ips_noproto);
			OSAddAtomic(-1, &ipstat.ips_delivered);
		}
	}
unlock:
	/*
	 * Keep the list locked because socket filter may force the socket lock
	 * to be released when calling sbappendaddr() -- see rdar://7627704
	 */
	lck_rw_done(ripcbinfo.ipi_lock);
}

/*
 * Generate the IP header and pass the packet to ip_output. Tack on any
 * options the user may have set up with a control call.
 */
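/*
 * Illustrative userspace view (not part of the original file): with
 * IP_HDRINCL clear on the socket, a datagram sent as, e.g.,
 *
 *	sendto(s, payload, len, 0, (struct sockaddr *)&sin, sizeof (sin));
 *
 * arrives here without an IP header, and the M_PREPEND path below builds
 * one from the PCB defaults (TTL, TOS, protocol, any bound source address).
 * With IP_HDRINCL set, the packet must already begin with a struct ip.
 */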
int
rip_output(
	struct mbuf *m,
	struct socket *so,
	u_int32_t dst,
	struct mbuf *control)
{
	struct ip *ip;
	struct inpcb *inp = sotoinpcb(so);
	int flags = (so->so_options & SO_DONTROUTE) | IP_ALLOWBROADCAST;
	struct ip_out_args ipoa;
	struct ip_moptions *imo;
	int error = 0;

	bzero(&ipoa, sizeof(ipoa));
	ipoa.ipoa_boundif = IFSCOPE_NONE;
	ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;

	int sotc = SO_TC_UNSPEC;
	int netsvctype = _NET_SERVICE_TYPE_UNSPEC;

	if (control != NULL) {
		sotc = so_tc_from_control(control, &netsvctype);

		m_freem(control);
		control = NULL;
	}
	if (sotc == SO_TC_UNSPEC) {
		sotc = so->so_traffic_class;
		netsvctype = so->so_netsvctype;
	}

	if (inp == NULL
#if NECP
	    || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
	    ) {
		if (m != NULL)
			m_freem(m);
		VERIFY(control == NULL);
		return (inp == NULL ? EINVAL : EPROTOTYPE);
	}

	flags |= IP_OUTARGS;
	/* If socket was bound to an ifindex, tell ip_output about it */
	if (inp->inp_flags & INP_BOUND_IF) {
		ipoa.ipoa_boundif = inp->inp_boundifp->if_index;
		ipoa.ipoa_flags |= IPOAF_BOUND_IF;
	}
	if (INP_NO_CELLULAR(inp))
		ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
	if (INP_NO_EXPENSIVE(inp))
		ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
	if (INP_AWDL_UNRESTRICTED(inp))
		ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
	ipoa.ipoa_sotc = sotc;
	ipoa.ipoa_netsvctype = netsvctype;

	if (inp->inp_flowhash == 0)
		inp->inp_flowhash = inp_calc_flowhash(inp);

	/*
	 * If the user handed us a complete IP packet (INP_HDRINCL), use it
	 * as given; otherwise, prepend space for an IP header and fill it
	 * in ourselves.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_WAIT, 1);
		if (m == NULL)
			return ENOBUFS;
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_off = 0;
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = m->m_pkthdr.len;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst.s_addr = dst;
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		ip = mtod(m, struct ip *);
		/*
		 * Don't allow both user-specified and setsockopt options,
		 * and don't allow packet length sizes that will crash.
		 */
		if (((IP_VHL_HL(ip->ip_vhl) != (sizeof (*ip) >> 2))
		    && inp->inp_options)
		    || (ip->ip_len > m->m_pkthdr.len)
		    || (ip->ip_len < (IP_VHL_HL(ip->ip_vhl) << 2))) {
			m_freem(m);
			return EINVAL;
		}
		if (ip->ip_id == 0 &&
		    !(rfc6864 && IP_OFF_IS_ATOMIC(ntohs(ip->ip_off))))
			ip->ip_id = ip_randomid();
		/* XXX prevent ip_output from overwriting header fields */
		flags |= IP_RAWOUTPUT;
		OSAddAtomic(1, &ipstat.ips_rawout);
	}

	if (inp->inp_laddr.s_addr != INADDR_ANY)
		ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;

#if NECP
	{
		necp_kernel_policy_id policy_id;
		u_int32_t route_rule_id;

		/*
		 * We need a route to perform NECP route rule checks
		 */
		if (net_qos_policy_restricted != 0 &&
		    ROUTE_UNUSABLE(&inp->inp_route)) {
			struct sockaddr_in to;
			struct sockaddr_in from;
			struct in_addr laddr = ip->ip_src;

			ROUTE_RELEASE(&inp->inp_route);

			bzero(&from, sizeof(struct sockaddr_in));
			from.sin_family = AF_INET;
			from.sin_len = sizeof(struct sockaddr_in);
			from.sin_addr = laddr;

			bzero(&to, sizeof(struct sockaddr_in));
			to.sin_family = AF_INET;
			to.sin_len = sizeof(struct sockaddr_in);
			to.sin_addr.s_addr = ip->ip_dst.s_addr;

			if ((error = in_pcbladdr(inp, (struct sockaddr *)&to,
			    &laddr, ipoa.ipoa_boundif, NULL, 1)) != 0) {
				printf("%s in_pcbladdr(%p) error %d\n",
				    __func__, inp, error);
				m_freem(m);
				return (error);
			}

			inp_update_necp_policy(inp, (struct sockaddr *)&from,
			    (struct sockaddr *)&to, ipoa.ipoa_boundif);
			inp->inp_policyresult.results.qos_marking_gencount = 0;
		}

		if (!necp_socket_is_allowed_to_send_recv_v4(inp, 0, 0,
		    &ip->ip_src, &ip->ip_dst, NULL, &policy_id,
		    &route_rule_id)) {
			m_freem(m);
			return (EHOSTUNREACH);
		}

		necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id);

		if (net_qos_policy_restricted != 0) {
			struct ifnet *rt_ifp = NULL;

			if (inp->inp_route.ro_rt != NULL)
				rt_ifp = inp->inp_route.ro_rt->rt_ifp;

			necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt,
			    NULL, route_rule_id);
		}
	}
#endif /* NECP */
	if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED))
		ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;

#if IPSEC
	if (inp->inp_sp != NULL && ipsec_setsocket(m, so) != 0) {
		m_freem(m);
		return ENOBUFS;
	}
#endif /* IPSEC */

	if (ROUTE_UNUSABLE(&inp->inp_route))
		ROUTE_RELEASE(&inp->inp_route);

	set_packet_service_class(m, so, sotc, 0);
	m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
	m->m_pkthdr.pkt_flowid = inp->inp_flowhash;
	m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC |
	    PKTF_FLOW_RAWSOCK);
	m->m_pkthdr.pkt_proto = inp->inp_ip_p;

#if CONFIG_MACF_NET
	mac_mbuf_label_associate_inpcb(inp, m);
#endif /* CONFIG_MACF_NET */

	imo = inp->inp_moptions;
	if (imo != NULL)
		IMO_ADDREF(imo);
	/*
	 * The domain lock is held across ip_output, so it is okay
	 * to pass the PCB cached route pointer directly to IP and
	 * the modules beneath it.
	 */
	// TODO: PASS DOWN ROUTE RULE ID
	error = ip_output(m, inp->inp_options, &inp->inp_route, flags,
	    imo, &ipoa);

	if (imo != NULL)
		IMO_REMREF(imo);

	if (inp->inp_route.ro_rt != NULL) {
		struct rtentry *rt = inp->inp_route.ro_rt;
		struct ifnet *outif;

		if ((rt->rt_flags & (RTF_MULTICAST|RTF_BROADCAST)) ||
		    inp->inp_socket == NULL ||
		    !(inp->inp_socket->so_state & SS_ISCONNECTED)) {
			rt = NULL;	/* unusable */
		}
		/*
		 * Always discard the cached route for unconnected
		 * socket or if it is a multicast route.
		 */
		if (rt == NULL)
			ROUTE_RELEASE(&inp->inp_route);

		/*
		 * If this is a connected socket and the destination
		 * route is unicast, update outif with that of the
		 * route interface used by IP.
		 */
		if (rt != NULL &&
		    (outif = rt->rt_ifp) != inp->inp_last_outifp) {
			inp->inp_last_outifp = outif;
		}
	} else {
		ROUTE_RELEASE(&inp->inp_route);
	}

	/*
	 * If output interface was cellular/expensive, and this socket is
	 * denied access to it, generate an event.
	 */
	if (error != 0 && (ipoa.ipoa_retflags & IPOARF_IFDENIED) &&
	    (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp)))
		soevent(so, (SO_FILT_HINT_LOCKED|SO_FILT_HINT_IFDENIED));

	return (error);
}

#if IPFIREWALL
int
load_ipfw(void)
{
	kern_return_t err;

	ipfw_init();

#if DUMMYNET
	if (!DUMMYNET_LOADED)
		ip_dn_init();
#endif /* DUMMYNET */
	err = 0;

	return (err == 0 && ip_fw_ctl_ptr == NULL ? -1 : err);
}
#endif /* IPFIREWALL */

/*
 * Raw IP socket option processing.
 */
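/*
 * Example (userspace, illustrative only): the IP_HDRINCL handling below
 * is what a privileged process exercises with
 *
 *	int on = 1;
 *	setsockopt(s, IPPROTO_IP, IP_HDRINCL, &on, sizeof (on));
 *
 * after which every packet written to the socket must start with a
 * complete IP header.
 */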
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct inpcb *inp = sotoinpcb(so);
	int error, optval;

	/* Allow <SOL_SOCKET,SO_FLUSH> at this level */
	if (sopt->sopt_level != IPPROTO_IP &&
	    !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH))
		return (EINVAL);

	error = 0;

	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_STRIPHDR:
			optval = inp->inp_flags & INP_STRIPHDR;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

#if IPFIREWALL
		case IP_FW_ADD:
		case IP_FW_GET:
		case IP_OLD_FW_ADD:
		case IP_OLD_FW_GET:
			if (ip_fw_ctl_ptr == 0)
				error = load_ipfw();
			if (ip_fw_ctl_ptr && error == 0)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;
#endif /* IPFIREWALL */

#if DUMMYNET
		case IP_DUMMYNET_GET:
			if (!DUMMYNET_LOADED)
				ip_dn_init();
			if (DUMMYNET_LOADED)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;
#endif /* DUMMYNET */

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_STRIPHDR:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_STRIPHDR;
			else
				inp->inp_flags &= ~INP_STRIPHDR;
			break;

#if IPFIREWALL
		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_OLD_FW_ADD:
		case IP_OLD_FW_DEL:
		case IP_OLD_FW_FLUSH:
		case IP_OLD_FW_ZERO:
		case IP_OLD_FW_RESETLOG:
			if (ip_fw_ctl_ptr == 0)
				error = load_ipfw();
			if (ip_fw_ctl_ptr && error == 0)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;
#endif /* IPFIREWALL */

#if DUMMYNET
		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (!DUMMYNET_LOADED)
				ip_dn_init();
			if (DUMMYNET_LOADED)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;
#endif /* DUMMYNET */

		case SO_FLUSH:
			if ((error = sooptcopyin(sopt, &optval, sizeof (optval),
			    sizeof (optval))) != 0)
				break;

			error = inp_flush(inp, optval);
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}

/*
 * This function exists solely to receive the PRC_IFDOWN messages which
 * are sent by if_down(). It looks for an ifaddr whose ifa_addr is sa,
 * and calls in_ifadown() to remove all routes corresponding to that address.
 * It also receives the PRC_IFUP messages from if_up() and reinstalls the
 * interface routes.
 */
void
rip_ctlinput(
	int cmd,
	struct sockaddr *sa,
	__unused void *vip,
	__unused struct ifnet *ifp)
{
	struct in_ifaddr *ia = NULL;
	struct ifnet *iaifp = NULL;
	int err = 0;
	int flags, done = 0;

	switch (cmd) {
	case PRC_IFDOWN:
		lck_rw_lock_shared(in_ifaddr_rwlock);
		for (ia = in_ifaddrhead.tqh_first; ia;
		    ia = ia->ia_link.tqe_next) {
			IFA_LOCK(&ia->ia_ifa);
			if (ia->ia_ifa.ifa_addr == sa &&
			    (ia->ia_flags & IFA_ROUTE)) {
				done = 1;
				IFA_ADDREF_LOCKED(&ia->ia_ifa);
				IFA_UNLOCK(&ia->ia_ifa);
				lck_rw_done(in_ifaddr_rwlock);
				lck_mtx_lock(rnh_lock);
				/*
				 * in_ifscrub kills the interface route.
				 */
				in_ifscrub(ia->ia_ifp, ia, 1);
				/*
				 * in_ifadown gets rid of all the rest of
				 * the routes. This is not quite the right
				 * thing to do, but at least if we are running
				 * a routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 1);
				lck_mtx_unlock(rnh_lock);
				IFA_REMREF(&ia->ia_ifa);
				break;
			}
			IFA_UNLOCK(&ia->ia_ifa);
		}
		if (!done)
			lck_rw_done(in_ifaddr_rwlock);
		break;

	case PRC_IFUP:
		lck_rw_lock_shared(in_ifaddr_rwlock);
		for (ia = in_ifaddrhead.tqh_first; ia;
		    ia = ia->ia_link.tqe_next) {
			IFA_LOCK(&ia->ia_ifa);
			if (ia->ia_ifa.ifa_addr == sa) {
				/* keep it locked */
				break;
			}
			IFA_UNLOCK(&ia->ia_ifa);
		}
		if (ia == NULL || (ia->ia_flags & IFA_ROUTE) ||
		    (ia->ia_ifa.ifa_debug & IFD_NOTREADY)) {
			if (ia != NULL)
				IFA_UNLOCK(&ia->ia_ifa);
			lck_rw_done(in_ifaddr_rwlock);
			return;
		}
		IFA_ADDREF_LOCKED(&ia->ia_ifa);
		IFA_UNLOCK(&ia->ia_ifa);
		lck_rw_done(in_ifaddr_rwlock);

		flags = RTF_UP;
		iaifp = ia->ia_ifa.ifa_ifp;

		if ((iaifp->if_flags & IFF_LOOPBACK)
		    || (iaifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0) {
			IFA_LOCK_SPIN(&ia->ia_ifa);
			ia->ia_flags |= IFA_ROUTE;
			IFA_UNLOCK(&ia->ia_ifa);
		}
		IFA_REMREF(&ia->ia_ifa);
		break;
	}
}

u_int32_t rip_sendspace = RIPSNDQ;
u_int32_t rip_recvspace = RIPRCVQ;

SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW | CTLFLAG_LOCKED,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_INT(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &rip_recvspace, 0, "Maximum incoming raw IP datagram size");
SYSCTL_UINT(_net_inet_raw, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
    &ripcbinfo.ipi_count, 0, "Number of active PCBs");

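/*
 * Attach a new raw IP PCB to a socket. Raw sockets are restricted to
 * privileged processes (SS_PRIV); this is the path taken by, e.g., a
 * userspace socket(AF_INET, SOCK_RAW, proto) call.
 */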
static int
rip_attach(struct socket *so, int proto, struct proc *p)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	if (inp)
		panic("rip_attach");
	if ((so->so_state & SS_PRIV) == 0)
		return (EPERM);

	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return error;
	error = in_pcballoc(so, &ripcbinfo, p);
	if (error)
		return error;
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = ip_defttl;
	return 0;
}

__private_extern__ int
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	if (inp == 0)
		panic("rip_detach");
	in_pcbdetach(inp);
	return 0;
}

__private_extern__ int
rip_abort(struct socket *so)
{
	soisdisconnected(so);
	return rip_detach(so);
}

__private_extern__ int
rip_disconnect(struct socket *so)
{
	if ((so->so_state & SS_ISCONNECTED) == 0)
		return ENOTCONN;
	return rip_abort(so);
}

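/*
 * Bind a raw socket to a local address. The supplied address is copied
 * into a sanitized AF_INET sockaddr; a non-wildcard address must belong
 * to one of our interfaces, or EADDRNOTAVAIL is returned.
 */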
__private_extern__ int
rip_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
#pragma unused(p)
	struct inpcb *inp = sotoinpcb(so);
	struct sockaddr_in sin;
	struct ifaddr *ifa = NULL;
	struct ifnet *outif = NULL;

	if (inp == NULL
#if NECP
	    || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
	    )
		return (inp == NULL ? EINVAL : EPROTOTYPE);

	if (nam->sa_len != sizeof (struct sockaddr_in))
		return (EINVAL);

	/* Sanitized local copy for interface address searches */
	bzero(&sin, sizeof (sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof (struct sockaddr_in);
	sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;

	if (TAILQ_EMPTY(&ifnet_head) ||
	    (sin.sin_family != AF_INET && sin.sin_family != AF_IMPLINK) ||
	    (sin.sin_addr.s_addr && (ifa = ifa_ifwithaddr(SA(&sin))) == 0)) {
		return (EADDRNOTAVAIL);
	} else if (ifa) {
		/*
		 * Opportunistically determine the outbound
		 * interface that may be used; this may not
		 * hold true if we end up using a route
		 * going over a different interface, e.g.
		 * when sending to a local address. This
		 * will get updated again after sending.
		 */
		IFA_LOCK(ifa);
		outif = ifa->ifa_ifp;
		IFA_UNLOCK(ifa);
		IFA_REMREF(ifa);
	}
	inp->inp_laddr = sin.sin_addr;
	inp->inp_last_outifp = outif;

	return (0);
}

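/*
 * Connect a raw socket to a foreign address: record the peer address in
 * the PCB so that unaddressed sends go there and only matching packets
 * are delivered to the socket.
 */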
__private_extern__ int
rip_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
{
	struct inpcb *inp = sotoinpcb(so);
	struct sockaddr_in *addr = (struct sockaddr_in *)(void *)nam;

	if (inp == NULL
#if NECP
	    || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
	    )
		return (inp == NULL ? EINVAL : EPROTOTYPE);
	if (nam->sa_len != sizeof(*addr))
		return EINVAL;
	if (TAILQ_EMPTY(&ifnet_head))
		return EADDRNOTAVAIL;
	if ((addr->sin_family != AF_INET) &&
	    (addr->sin_family != AF_IMPLINK))
		return EAFNOSUPPORT;

	if (!(so->so_flags1 & SOF1_CONNECT_COUNTED)) {
		so->so_flags1 |= SOF1_CONNECT_COUNTED;
		INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_connected);
	}

	inp->inp_faddr = addr->sin_addr;
	soisconnected(so);

	return 0;
}

__private_extern__ int
rip_shutdown(struct socket *so)
{
	socantsendmore(so);
	return 0;
}

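/*
 * Send a datagram on a raw socket. A destination may be supplied only on
 * an unconnected socket; connected sockets use the address recorded by
 * rip_connect(). The actual work is done in rip_output() above.
 */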
__private_extern__ int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct proc *p)
{
#pragma unused(flags, p)
	struct inpcb *inp = sotoinpcb(so);
	u_int32_t dst;
	int error = 0;

	if (inp == NULL
#if NECP
	    || (necp_socket_should_use_flow_divert(inp) && (error = EPROTOTYPE))
#endif /* NECP */
	    ) {
		if (inp == NULL)
			error = EINVAL;
		else
			error = EPROTOTYPE;
		goto bad;
	}

	if (so->so_state & SS_ISCONNECTED) {
		if (nam != NULL) {
			error = EISCONN;
			goto bad;
		}
		dst = inp->inp_faddr.s_addr;
	} else {
		if (nam == NULL) {
			error = ENOTCONN;
			goto bad;
		}
		dst = ((struct sockaddr_in *)(void *)nam)->sin_addr.s_addr;
	}
	return (rip_output(m, so, dst, control));

bad:
	VERIFY(error != 0);

	if (m != NULL)
		m_freem(m);
	if (control != NULL)
		m_freem(control);

	return (error);
}

/*
 * Note: rip_unlock is called from various protocols instead of the generic
 * socket_unlock; it handles the socket dealloc on last reference.
 */
int
rip_unlock(struct socket *so, int refcount, void *debug)
{
	void *lr_saved;
	struct inpcb *inp = sotoinpcb(so);

	if (debug == NULL)
		lr_saved = __builtin_return_address(0);
	else
		lr_saved = debug;

	if (refcount) {
		if (so->so_usecount <= 0) {
			panic("rip_unlock: bad refcount so=%p val=%x lrh= %s\n",
			    so, so->so_usecount, solockhistory_nr(so));
			/* NOTREACHED */
		}
		so->so_usecount--;
		if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) {
			/* cleanup after last reference */
			lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
			lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
			if (inp->inp_state != INPCB_STATE_DEAD) {
#if INET6
				if (SOCK_CHECK_DOM(so, PF_INET6))
					in6_pcbdetach(inp);
				else
#endif /* INET6 */
					in_pcbdetach(inp);
			}
			in_pcbdispose(inp);
			lck_rw_done(ripcbinfo.ipi_lock);
			return (0);
		}
	}
	so->unlock_lr[so->next_unlock_lr] = lr_saved;
	so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
	lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
	return (0);
}

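/*
 * Export the list of raw IP PCBs via sysctl. Illustrative userspace
 * sketch (not part of this file): the buffer sizing follows the usual
 * two-call convention, e.g.
 *
 *	size_t len = 0;
 *	sysctlbyname("net.inet.raw.pcblist", NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	sysctlbyname("net.inet.raw.pcblist", buf, &len, NULL, 0);
 *
 * where the result is struct xinpcb entries bracketed by struct xinpgen
 * headers whose generation counts let the caller detect a race and retry.
 */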
static int
rip_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
	if (req->oldptr == USER_ADDR_NULL) {
		n = ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n/8) * sizeof(struct xinpcb);
		lck_rw_done(ripcbinfo.ipi_lock);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = ripcbinfo.ipi_gencnt;
	n = ripcbinfo.ipi_count;

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return 0;
	}

	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return ENOMEM;
	}

	for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n;
	    inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt &&
		    inp->inp_state != INPCB_STATE_DEAD)
			inp_list[i++] = inp;
	}
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt &&
		    inp->inp_state != INPCB_STATE_DEAD) {
			struct xinpcb xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			inpcb_to_compat(inp, &xi.xi_inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof xig;
		xig.xig_gen = ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = ripcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	FREE(inp_list, M_TEMP);
	lck_rw_done(ripcbinfo.ipi_lock);
	return error;
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");

#if !CONFIG_EMBEDDED

static int
rip_pcblist64 SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
	if (req->oldptr == USER_ADDR_NULL) {
		n = ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n/8) * sizeof(struct xinpcb64);
		lck_rw_done(ripcbinfo.ipi_lock);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = ripcbinfo.ipi_gencnt;
	n = ripcbinfo.ipi_count;

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return 0;
	}

	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return ENOMEM;
	}

	for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n;
	    inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt &&
		    inp->inp_state != INPCB_STATE_DEAD)
			inp_list[i++] = inp;
	}
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt &&
		    inp->inp_state != INPCB_STATE_DEAD) {
			struct xinpcb64 xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			inpcb_to_xinpcb64(inp, &xi);
			if (inp->inp_socket)
				sotoxsocket64(inp->inp_socket, &xi.xi_socket);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof xig;
		xig.xig_gen = ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = ripcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	FREE(inp_list, M_TEMP);
	lck_rw_done(ripcbinfo.ipi_lock);
	return error;
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist64,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist64, "S,xinpcb64", "List of active raw IP sockets");

#endif /* !CONFIG_EMBEDDED */


static int
rip_pcblist_n SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;

	error = get_pcblist_n(IPPROTO_IP, req, &ripcbinfo);

	return error;
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist_n,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist_n, "S,xinpcb_n", "List of active raw IP sockets");

struct pr_usrreqs rip_usrreqs = {
	.pru_abort = rip_abort,
	.pru_attach = rip_attach,
	.pru_bind = rip_bind,
	.pru_connect = rip_connect,
	.pru_control = in_control,
	.pru_detach = rip_detach,
	.pru_disconnect = rip_disconnect,
	.pru_peeraddr = in_getpeeraddr,
	.pru_send = rip_send,
	.pru_shutdown = rip_shutdown,
	.pru_sockaddr = in_getsockaddr,
	.pru_sosend = sosend,
	.pru_soreceive = soreceive,
};
/* DSEP Review Done pl-20051213-v02 @3253 */