/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)raw_ip.c    8.7 (Berkeley) 5/15/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <libkern/OSAtomic.h>
#include <kern/zalloc.h>

#include <pexpert/pexpert.h>

#include <net/if.h>
#include <net/net_api_stats.h>
#include <net/route.h>

#define _IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_tclass.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

#if INET6
#include <netinet6/in6_pcb.h>
#endif /* INET6 */

#include <netinet/ip_fw.h>

#if IPSEC
#include <netinet6/ipsec.h>
#endif /* IPSEC */

#if DUMMYNET
#include <netinet/ip_dummynet.h>
#endif /* DUMMYNET */

#if CONFIG_MACF_NET
#include <security/mac_framework.h>
#endif /* CONFIG_MACF_NET */

int load_ipfw(void);
int rip_detach(struct socket *);
int rip_abort(struct socket *);
int rip_disconnect(struct socket *);
int rip_bind(struct socket *, struct sockaddr *, struct proc *);
int rip_connect(struct socket *, struct sockaddr *, struct proc *);
int rip_shutdown(struct socket *);

struct inpcbhead ripcb;
struct inpcbinfo ripcbinfo;

/* control hooks for ipfw and dummynet */
#if IPFIREWALL
ip_fw_ctl_t *ip_fw_ctl_ptr;
#endif /* IPFIREWALL */
#if DUMMYNET
ip_dn_ctl_t *ip_dn_ctl_ptr;
#endif /* DUMMYNET */

/*
 * Nominal space allocated to a raw ip socket.
 */
#define RIPSNDQ         8192
#define RIPRCVQ         8192

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize raw connection block q.
 */
void
rip_init(struct protosw *pp, struct domain *dp)
{
#pragma unused(dp)
        static int rip_initialized = 0;
        struct inpcbinfo *pcbinfo;

        VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED);

        if (rip_initialized)
                return;
        rip_initialized = 1;

        LIST_INIT(&ripcb);
        ripcbinfo.ipi_listhead = &ripcb;
        /*
         * XXX We don't use the hash list for raw IP, but it's easier
         * to allocate a one entry hash list than it is to check all
         * over the place for ipi_hashbase == NULL.
         */
        ripcbinfo.ipi_hashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_hashmask);
        ripcbinfo.ipi_porthashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_porthashmask);

        ripcbinfo.ipi_zone = zinit(sizeof(struct inpcb),
            (4096 * sizeof(struct inpcb)), 4096, "ripzone");

        pcbinfo = &ripcbinfo;
        /*
         * Allocate lock group attribute and group for raw IP PCB mutexes.
         */
        pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init();
        pcbinfo->ipi_lock_grp = lck_grp_alloc_init("ripcb", pcbinfo->ipi_lock_grp_attr);

        /*
         * Allocate the lock attribute for raw IP PCB mutexes.
         */
        pcbinfo->ipi_lock_attr = lck_attr_alloc_init();
        if ((pcbinfo->ipi_lock = lck_rw_alloc_init(pcbinfo->ipi_lock_grp,
            pcbinfo->ipi_lock_attr)) == NULL) {
                panic("%s: unable to allocate PCB lock\n", __func__);
                /* NOTREACHED */
        }

        in_pcbinfo_attach(&ripcbinfo);
}

static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET, 0, {0}, {0,0,0,0,0,0,0,0,} };
/*
 * Setup generic address and protocol structures
 * for raw_input routine, then pass them along with
 * mbuf chain.
 */
void
rip_input(struct mbuf *m, int iphlen)
{
        struct ip *ip = mtod(m, struct ip *);
        struct inpcb *inp;
        struct inpcb *last = 0;
        struct mbuf *opts = 0;
        int skipit = 0, ret = 0;
        struct ifnet *ifp = m->m_pkthdr.rcvif;

        /* Expect 32-bit aligned data pointer on strict-align platforms */
        MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

        ripsrc.sin_addr = ip->ip_src;
        lck_rw_lock_shared(ripcbinfo.ipi_lock);
        LIST_FOREACH(inp, &ripcb, inp_list) {
#if INET6
                if ((inp->inp_vflag & INP_IPV4) == 0)
                        continue;
#endif /* INET6 */
                if (inp->inp_ip_p && (inp->inp_ip_p != ip->ip_p))
                        continue;
                if (inp->inp_laddr.s_addr &&
                    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
                        continue;
                if (inp->inp_faddr.s_addr &&
                    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
                        continue;
                if (inp_restricted_recv(inp, ifp))
                        continue;
                if (last) {
                        struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);

                        skipit = 0;

#if NECP
                        if (n && !necp_socket_is_allowed_to_send_recv_v4(last, 0, 0,
                            &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL)) {
                                m_freem(n);
                                /* do not inject data to pcb */
                                skipit = 1;
                        }
#endif /* NECP */
#if CONFIG_MACF_NET
                        if (n && skipit == 0) {
                                if (mac_inpcb_check_deliver(last, n, AF_INET,
                                    SOCK_RAW) != 0) {
                                        m_freem(n);
                                        skipit = 1;
                                }
                        }
#endif /* CONFIG_MACF_NET */
                        if (n && skipit == 0) {
                                int error = 0;
                                if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
                                    (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
                                    (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0) {
                                        ret = ip_savecontrol(last, &opts, ip, n);
                                        if (ret != 0) {
                                                m_freem(n);
                                                m_freem(opts);
                                                last = inp;
                                                continue;
                                        }
                                }
                                if (last->inp_flags & INP_STRIPHDR) {
                                        n->m_len -= iphlen;
                                        n->m_pkthdr.len -= iphlen;
                                        n->m_data += iphlen;
                                }
                                so_recv_data_stat(last->inp_socket, m, 0);
                                if (sbappendaddr(&last->inp_socket->so_rcv,
                                    (struct sockaddr *)&ripsrc, n,
                                    opts, &error) != 0) {
                                        sorwakeup(last->inp_socket);
                                } else {
                                        if (error) {
                                                /* should notify about lost packet */
                                                ipstat.ips_raw_sappend_fail++;
                                        }
                                }
                                opts = 0;
                        }
                }
                last = inp;
        }

        skipit = 0;
#if NECP
        if (last && !necp_socket_is_allowed_to_send_recv_v4(last, 0, 0,
            &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL)) {
                m_freem(m);
                OSAddAtomic(1, &ipstat.ips_delivered);
                /* do not inject data to pcb */
                skipit = 1;
        }
#endif /* NECP */
#if CONFIG_MACF_NET
        if (last && skipit == 0) {
                if (mac_inpcb_check_deliver(last, m, AF_INET, SOCK_RAW) != 0) {
                        skipit = 1;
                        m_freem(m);
                }
        }
#endif /* CONFIG_MACF_NET */
        if (skipit == 0) {
                if (last) {
                        if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
                            (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
                            (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0) {
                                ret = ip_savecontrol(last, &opts, ip, m);
                                if (ret != 0) {
                                        m_freem(m);
                                        m_freem(opts);
                                        goto unlock;
                                }
                        }
                        if (last->inp_flags & INP_STRIPHDR) {
                                m->m_len -= iphlen;
                                m->m_pkthdr.len -= iphlen;
                                m->m_data += iphlen;
                        }
                        so_recv_data_stat(last->inp_socket, m, 0);
                        if (sbappendaddr(&last->inp_socket->so_rcv,
                            (struct sockaddr *)&ripsrc, m, opts, NULL) != 0) {
                                sorwakeup(last->inp_socket);
                        } else {
                                ipstat.ips_raw_sappend_fail++;
                        }
                } else {
                        m_freem(m);
                        OSAddAtomic(1, &ipstat.ips_noproto);
                        OSAddAtomic(-1, &ipstat.ips_delivered);
                }
        }
unlock:
        /*
         * Keep the list locked because socket filter may force the socket lock
         * to be released when calling sbappendaddr() -- see rdar://7627704
         */
        lck_rw_done(ripcbinfo.ipi_lock);
}
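
/*
 * Illustrative sketch (not part of the original file): rip_input() above
 * hands a copy of each matching packet to every raw PCB whose protocol,
 * local-address, and foreign-address filters pass.  A hypothetical
 * privileged userspace consumer of that delivery path might look like:
 *
 *      int s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 *      char buf[2048];
 *      struct sockaddr_in from;
 *      socklen_t fromlen = sizeof (from);
 *      // Datagrams arrive with the IP header attached unless IP_STRIPHDR
 *      // is set on the socket; "from" is the address stashed in ripsrc.
 *      ssize_t n = recvfrom(s, buf, sizeof (buf), 0,
 *          (struct sockaddr *)&from, &fromlen);
 */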

/*
 * Generate IP header and pass packet to ip_output.
 * Tack on options user may have setup with control call.
 */
int
rip_output(
        struct mbuf *m,
        struct socket *so,
        u_int32_t dst,
        struct mbuf *control)
{
        struct ip *ip;
        struct inpcb *inp = sotoinpcb(so);
        int flags = (so->so_options & SO_DONTROUTE) | IP_ALLOWBROADCAST;
        struct ip_out_args ipoa =
            { IFSCOPE_NONE, { 0 }, IPOAF_SELECT_SRCIF, 0, 0, 0 };
        struct ip_moptions *imo;
        int error = 0;
        int sotc = SO_TC_UNSPEC;
        int netsvctype = _NET_SERVICE_TYPE_UNSPEC;

        if (control != NULL) {
                sotc = so_tc_from_control(control, &netsvctype);

                m_freem(control);
                control = NULL;
        }
        if (sotc == SO_TC_UNSPEC) {
                sotc = so->so_traffic_class;
                netsvctype = so->so_netsvctype;
        }

        if (inp == NULL
#if NECP
            || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
            ) {
                if (m != NULL)
                        m_freem(m);
                VERIFY(control == NULL);
                return (inp == NULL ? EINVAL : EPROTOTYPE);
        }

        flags |= IP_OUTARGS;
        /* If socket was bound to an ifindex, tell ip_output about it */
        if (inp->inp_flags & INP_BOUND_IF) {
                ipoa.ipoa_boundif = inp->inp_boundifp->if_index;
                ipoa.ipoa_flags |= IPOAF_BOUND_IF;
        }
        if (INP_NO_CELLULAR(inp))
                ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
        if (INP_NO_EXPENSIVE(inp))
                ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
        if (INP_AWDL_UNRESTRICTED(inp))
                ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
        ipoa.ipoa_sotc = sotc;
        ipoa.ipoa_netsvctype = netsvctype;

        if (inp->inp_flowhash == 0)
                inp->inp_flowhash = inp_calc_flowhash(inp);

        /*
         * If the user handed us a complete IP packet, use it.
         * Otherwise, allocate an mbuf for a header and fill it in.
         */
        if ((inp->inp_flags & INP_HDRINCL) == 0) {
                if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
                        m_freem(m);
                        return (EMSGSIZE);
                }
                M_PREPEND(m, sizeof(struct ip), M_WAIT, 1);
                if (m == NULL)
                        return ENOBUFS;
                ip = mtod(m, struct ip *);
                ip->ip_tos = inp->inp_ip_tos;
                ip->ip_off = 0;
                ip->ip_p = inp->inp_ip_p;
                ip->ip_len = m->m_pkthdr.len;
                ip->ip_src = inp->inp_laddr;
                ip->ip_dst.s_addr = dst;
                ip->ip_ttl = inp->inp_ip_ttl;
        } else {
                if (m->m_pkthdr.len > IP_MAXPACKET) {
                        m_freem(m);
                        return (EMSGSIZE);
                }
                ip = mtod(m, struct ip *);
                /*
                 * Don't allow both user specified and setsockopt options,
                 * and don't allow packet length sizes that will crash.
                 */
                if (((IP_VHL_HL(ip->ip_vhl) != (sizeof (*ip) >> 2))
                    && inp->inp_options)
                    || (ip->ip_len > m->m_pkthdr.len)
                    || (ip->ip_len < (IP_VHL_HL(ip->ip_vhl) << 2))) {
                        m_freem(m);
                        return EINVAL;
                }
                if (ip->ip_id == 0 && !(rfc6864 && IP_OFF_IS_ATOMIC(ntohs(ip->ip_off))))
                        ip->ip_id = ip_randomid();
                /* XXX prevent ip_output from overwriting header fields */
                flags |= IP_RAWOUTPUT;
                OSAddAtomic(1, &ipstat.ips_rawout);
        }

        if (inp->inp_laddr.s_addr != INADDR_ANY)
                ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;

#if NECP
        {
                necp_kernel_policy_id policy_id;
                u_int32_t route_rule_id;

                /*
                 * We need a route to perform NECP route rule checks
                 */
                if (net_qos_policy_restricted != 0 &&
                    ROUTE_UNUSABLE(&inp->inp_route)) {
                        struct sockaddr_in to;
                        struct sockaddr_in from;
                        struct in_addr laddr = ip->ip_src;

                        ROUTE_RELEASE(&inp->inp_route);

                        bzero(&from, sizeof(struct sockaddr_in));
                        from.sin_family = AF_INET;
                        from.sin_len = sizeof(struct sockaddr_in);
                        from.sin_addr = laddr;

                        bzero(&to, sizeof(struct sockaddr_in));
                        to.sin_family = AF_INET;
                        to.sin_len = sizeof(struct sockaddr_in);
                        to.sin_addr.s_addr = ip->ip_dst.s_addr;

                        if ((error = in_pcbladdr(inp, (struct sockaddr *)&to,
                            &laddr, ipoa.ipoa_boundif, NULL, 1)) != 0) {
                                printf("%s in_pcbladdr(%p) error %d\n",
                                    __func__, inp, error);
                                m_freem(m);
                                return (error);
                        }

                        inp_update_necp_policy(inp, (struct sockaddr *)&from,
                            (struct sockaddr *)&to, ipoa.ipoa_boundif);
                        inp->inp_policyresult.results.qos_marking_gencount = 0;
                }

                if (!necp_socket_is_allowed_to_send_recv_v4(inp, 0, 0,
                    &ip->ip_src, &ip->ip_dst, NULL, &policy_id, &route_rule_id)) {
                        m_freem(m);
                        return (EHOSTUNREACH);
                }

                necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id);

                if (net_qos_policy_restricted != 0) {
                        struct ifnet *rt_ifp = NULL;

                        if (inp->inp_route.ro_rt != NULL)
                                rt_ifp = inp->inp_route.ro_rt->rt_ifp;

                        necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt,
                            NULL, route_rule_id);
                }
        }
#endif /* NECP */
        if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED))
                ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;

#if IPSEC
        if (inp->inp_sp != NULL && ipsec_setsocket(m, so) != 0) {
                m_freem(m);
                return ENOBUFS;
        }
#endif /* IPSEC */

        if (ROUTE_UNUSABLE(&inp->inp_route))
                ROUTE_RELEASE(&inp->inp_route);

        set_packet_service_class(m, so, sotc, 0);
        m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
        m->m_pkthdr.pkt_flowid = inp->inp_flowhash;
        m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC |
            PKTF_FLOW_RAWSOCK);
        m->m_pkthdr.pkt_proto = inp->inp_ip_p;

#if CONFIG_MACF_NET
        mac_mbuf_label_associate_inpcb(inp, m);
#endif /* CONFIG_MACF_NET */

        imo = inp->inp_moptions;
        if (imo != NULL)
                IMO_ADDREF(imo);
        /*
         * The domain lock is held across ip_output, so it is okay
         * to pass the PCB cached route pointer directly to IP and
         * the modules beneath it.
         */
        // TODO: PASS DOWN ROUTE RULE ID
        error = ip_output(m, inp->inp_options, &inp->inp_route, flags,
            imo, &ipoa);

        if (imo != NULL)
                IMO_REMREF(imo);

        if (inp->inp_route.ro_rt != NULL) {
                struct rtentry *rt = inp->inp_route.ro_rt;
                struct ifnet *outif;

                if ((rt->rt_flags & (RTF_MULTICAST|RTF_BROADCAST)) ||
                    inp->inp_socket == NULL ||
                    !(inp->inp_socket->so_state & SS_ISCONNECTED)) {
                        rt = NULL;      /* unusable */
                }
                /*
                 * Always discard the cached route for unconnected
                 * socket or if it is a multicast route.
                 */
                if (rt == NULL)
                        ROUTE_RELEASE(&inp->inp_route);

                /*
                 * If this is a connected socket and the destination
                 * route is unicast, update outif with that of the
                 * route interface used by IP.
                 */
                if (rt != NULL &&
                    (outif = rt->rt_ifp) != inp->inp_last_outifp) {
                        inp->inp_last_outifp = outif;
                }
        } else {
                ROUTE_RELEASE(&inp->inp_route);
        }

        /*
         * If output interface was cellular/expensive, and this socket is
         * denied access to it, generate an event.
         */
        if (error != 0 && (ipoa.ipoa_retflags & IPOARF_IFDENIED) &&
            (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp)))
                soevent(so, (SO_FILT_HINT_LOCKED|SO_FILT_HINT_IFDENIED));

        return (error);
}
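
/*
 * Illustrative sketch (not part of the original file) of the two send modes
 * rip_output() implements.  Without INP_HDRINCL the kernel prepends and
 * fills in the IP header; with it, the caller supplies the complete header
 * and only ip_id may still be filled in.  A hypothetical caller:
 *
 *      int s = socket(AF_INET, SOCK_RAW, IPPROTO_UDP);
 *      int one = 1;
 *      // Take the INP_HDRINCL branch above: we build the header ourselves.
 *      setsockopt(s, IPPROTO_IP, IP_HDRINCL, &one, sizeof (one));
 *      struct ip hdr;
 *      memset(&hdr, 0, sizeof (hdr));
 *      hdr.ip_v = IPVERSION;
 *      hdr.ip_hl = sizeof (hdr) >> 2;
 *      hdr.ip_ttl = 64;
 *      hdr.ip_p = IPPROTO_UDP;
 *      // ...then fill ip_src/ip_dst/ip_len, append the payload, sendto().
 *
 * A header-included packet whose ip_len disagrees with the mbuf length is
 * rejected by the "sizes that will crash" check above.
 */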

#if IPFIREWALL
int
load_ipfw(void)
{
        kern_return_t err;

        ipfw_init();

#if DUMMYNET
        if (!DUMMYNET_LOADED)
                ip_dn_init();
#endif /* DUMMYNET */
        err = 0;

        return err == 0 && ip_fw_ctl_ptr == NULL ? -1 : err;
}
#endif /* IPFIREWALL */

/*
 * Raw IP socket option processing.
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
        struct inpcb *inp = sotoinpcb(so);
        int error, optval;

        /* Allow <SOL_SOCKET,SO_FLUSH> at this level */
        if (sopt->sopt_level != IPPROTO_IP &&
            !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH))
                return (EINVAL);

        error = 0;

        switch (sopt->sopt_dir) {
        case SOPT_GET:
                switch (sopt->sopt_name) {
                case IP_HDRINCL:
                        optval = inp->inp_flags & INP_HDRINCL;
                        error = sooptcopyout(sopt, &optval, sizeof optval);
                        break;

                case IP_STRIPHDR:
                        optval = inp->inp_flags & INP_STRIPHDR;
                        error = sooptcopyout(sopt, &optval, sizeof optval);
                        break;

#if IPFIREWALL
                case IP_FW_ADD:
                case IP_FW_GET:
                case IP_OLD_FW_ADD:
                case IP_OLD_FW_GET:
                        if (ip_fw_ctl_ptr == 0)
                                error = load_ipfw();
                        if (ip_fw_ctl_ptr && error == 0)
                                error = ip_fw_ctl_ptr(sopt);
                        else
                                error = ENOPROTOOPT;
                        break;
#endif /* IPFIREWALL */

#if DUMMYNET
                case IP_DUMMYNET_GET:
                        if (!DUMMYNET_LOADED)
                                ip_dn_init();
                        if (DUMMYNET_LOADED)
                                error = ip_dn_ctl_ptr(sopt);
                        else
                                error = ENOPROTOOPT;
                        break;
#endif /* DUMMYNET */

                default:
                        error = ip_ctloutput(so, sopt);
                        break;
                }
                break;

        case SOPT_SET:
                switch (sopt->sopt_name) {
                case IP_HDRINCL:
                        error = sooptcopyin(sopt, &optval, sizeof optval,
                            sizeof optval);
                        if (error)
                                break;
                        if (optval)
                                inp->inp_flags |= INP_HDRINCL;
                        else
                                inp->inp_flags &= ~INP_HDRINCL;
                        break;

                case IP_STRIPHDR:
                        error = sooptcopyin(sopt, &optval, sizeof optval,
                            sizeof optval);
                        if (error)
                                break;
                        if (optval)
                                inp->inp_flags |= INP_STRIPHDR;
                        else
                                inp->inp_flags &= ~INP_STRIPHDR;
                        break;

#if IPFIREWALL
                case IP_FW_ADD:
                case IP_FW_DEL:
                case IP_FW_FLUSH:
                case IP_FW_ZERO:
                case IP_FW_RESETLOG:
                case IP_OLD_FW_ADD:
                case IP_OLD_FW_DEL:
                case IP_OLD_FW_FLUSH:
                case IP_OLD_FW_ZERO:
                case IP_OLD_FW_RESETLOG:
                        if (ip_fw_ctl_ptr == 0)
                                error = load_ipfw();
                        if (ip_fw_ctl_ptr && error == 0)
                                error = ip_fw_ctl_ptr(sopt);
                        else
                                error = ENOPROTOOPT;
                        break;
#endif /* IPFIREWALL */

#if DUMMYNET
                case IP_DUMMYNET_CONFIGURE:
                case IP_DUMMYNET_DEL:
                case IP_DUMMYNET_FLUSH:
                        if (!DUMMYNET_LOADED)
                                ip_dn_init();
                        if (DUMMYNET_LOADED)
                                error = ip_dn_ctl_ptr(sopt);
                        else
                                error = ENOPROTOOPT;
                        break;
#endif /* DUMMYNET */

                case SO_FLUSH:
                        if ((error = sooptcopyin(sopt, &optval, sizeof (optval),
                            sizeof (optval))) != 0)
                                break;

                        error = inp_flush(inp, optval);
                        break;

                default:
                        error = ip_ctloutput(so, sopt);
                        break;
                }
                break;
        }

        return (error);
}
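
/*
 * Illustrative sketch (not part of the original file): IP_HDRINCL and
 * IP_STRIPHDR above are plain on/off flags on the PCB, toggled with
 * ordinary {set,get}sockopt() calls; anything unrecognized falls through
 * to ip_ctloutput() for the generic IP-level options.
 *
 *      int on = 1, cur;
 *      socklen_t len = sizeof (cur);
 *      // Ask rip_input() to strip the IP header before delivery.
 *      setsockopt(s, IPPROTO_IP, IP_STRIPHDR, &on, sizeof (on));
 *      getsockopt(s, IPPROTO_IP, IP_STRIPHDR, &cur, &len);
 */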

/*
 * This function exists solely to receive the PRC_IFDOWN messages which
 * are sent by if_down().  It looks for an ifaddr whose ifa_addr is sa,
 * and calls in_ifadown() to remove all routes corresponding to that address.
 * It also receives the PRC_IFUP messages from if_up() and reinstalls the
 * interface routes.
 */
void
rip_ctlinput(
        int cmd,
        struct sockaddr *sa,
        __unused void *vip,
        __unused struct ifnet *ifp)
{
        struct in_ifaddr *ia = NULL;
        struct ifnet *iaifp = NULL;
        int err = 0;
        int flags, done = 0;

        switch (cmd) {
        case PRC_IFDOWN:
                lck_rw_lock_shared(in_ifaddr_rwlock);
                for (ia = in_ifaddrhead.tqh_first; ia;
                    ia = ia->ia_link.tqe_next) {
                        IFA_LOCK(&ia->ia_ifa);
                        if (ia->ia_ifa.ifa_addr == sa &&
                            (ia->ia_flags & IFA_ROUTE)) {
                                done = 1;
                                IFA_ADDREF_LOCKED(&ia->ia_ifa);
                                IFA_UNLOCK(&ia->ia_ifa);
                                lck_rw_done(in_ifaddr_rwlock);
                                lck_mtx_lock(rnh_lock);
                                /*
                                 * in_ifscrub kills the interface route.
                                 */
                                in_ifscrub(ia->ia_ifp, ia, 1);
                                /*
                                 * in_ifadown gets rid of all the rest of
                                 * the routes.  This is not quite the right
                                 * thing to do, but at least if we are running
                                 * a routing process they will come back.
                                 */
                                in_ifadown(&ia->ia_ifa, 1);
                                lck_mtx_unlock(rnh_lock);
                                IFA_REMREF(&ia->ia_ifa);
                                break;
                        }
                        IFA_UNLOCK(&ia->ia_ifa);
                }
                if (!done)
                        lck_rw_done(in_ifaddr_rwlock);
                break;

        case PRC_IFUP:
                lck_rw_lock_shared(in_ifaddr_rwlock);
                for (ia = in_ifaddrhead.tqh_first; ia;
                    ia = ia->ia_link.tqe_next) {
                        IFA_LOCK(&ia->ia_ifa);
                        if (ia->ia_ifa.ifa_addr == sa) {
                                /* keep it locked */
                                break;
                        }
                        IFA_UNLOCK(&ia->ia_ifa);
                }
                if (ia == NULL || (ia->ia_flags & IFA_ROUTE) ||
                    (ia->ia_ifa.ifa_debug & IFD_NOTREADY)) {
                        if (ia != NULL)
                                IFA_UNLOCK(&ia->ia_ifa);
                        lck_rw_done(in_ifaddr_rwlock);
                        return;
                }
                IFA_ADDREF_LOCKED(&ia->ia_ifa);
                IFA_UNLOCK(&ia->ia_ifa);
                lck_rw_done(in_ifaddr_rwlock);

                flags = RTF_UP;
                iaifp = ia->ia_ifa.ifa_ifp;

                if ((iaifp->if_flags & IFF_LOOPBACK)
                    || (iaifp->if_flags & IFF_POINTOPOINT))
                        flags |= RTF_HOST;

                err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
                if (err == 0) {
                        IFA_LOCK_SPIN(&ia->ia_ifa);
                        ia->ia_flags |= IFA_ROUTE;
                        IFA_UNLOCK(&ia->ia_ifa);
                }
                IFA_REMREF(&ia->ia_ifa);
                break;
        }
}

u_int32_t rip_sendspace = RIPSNDQ;
u_int32_t rip_recvspace = RIPRCVQ;

SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW | CTLFLAG_LOCKED,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_INT(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &rip_recvspace, 0, "Maximum incoming raw IP datagram size");
SYSCTL_UINT(_net_inet_raw, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
    &ripcbinfo.ipi_count, 0, "Number of active PCBs");
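
/*
 * Illustrative sketch (not part of the original file): the sysctls above
 * surface rip_sendspace, rip_recvspace, and the PCB count under
 * net.inet.raw; rip_attach() below passes the first two to soreserve().
 * A hypothetical reader:
 *
 *      uint32_t maxdgram;
 *      size_t len = sizeof (maxdgram);
 *      // Defaults to RIPSNDQ (8192) unless an admin has tuned it.
 *      sysctlbyname("net.inet.raw.maxdgram", &maxdgram, &len, NULL, 0);
 */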

static int
rip_attach(struct socket *so, int proto, struct proc *p)
{
        struct inpcb *inp;
        int error;

        inp = sotoinpcb(so);
        if (inp)
                panic("rip_attach");
        if ((so->so_state & SS_PRIV) == 0)
                return (EPERM);

        error = soreserve(so, rip_sendspace, rip_recvspace);
        if (error)
                return error;
        error = in_pcballoc(so, &ripcbinfo, p);
        if (error)
                return error;
        inp = (struct inpcb *)so->so_pcb;
        inp->inp_vflag |= INP_IPV4;
        inp->inp_ip_p = proto;
        inp->inp_ip_ttl = ip_defttl;
        return 0;
}

__private_extern__ int
rip_detach(struct socket *so)
{
        struct inpcb *inp;

        inp = sotoinpcb(so);
        if (inp == 0)
                panic("rip_detach");
        in_pcbdetach(inp);
        return 0;
}

__private_extern__ int
rip_abort(struct socket *so)
{
        soisdisconnected(so);
        return rip_detach(so);
}

__private_extern__ int
rip_disconnect(struct socket *so)
{
        if ((so->so_state & SS_ISCONNECTED) == 0)
                return ENOTCONN;
        return rip_abort(so);
}

__private_extern__ int
rip_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
#pragma unused(p)
        struct inpcb *inp = sotoinpcb(so);
        struct sockaddr_in sin;
        struct ifaddr *ifa = NULL;
        struct ifnet *outif = NULL;

        if (inp == NULL
#if NECP
            || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
            )
                return (inp == NULL ? EINVAL : EPROTOTYPE);

        if (nam->sa_len != sizeof (struct sockaddr_in))
                return (EINVAL);

        /* Sanitized local copy for interface address searches */
        bzero(&sin, sizeof (sin));
        sin.sin_family = AF_INET;
        sin.sin_len = sizeof (struct sockaddr_in);
        sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;

        if (TAILQ_EMPTY(&ifnet_head) ||
            (sin.sin_family != AF_INET && sin.sin_family != AF_IMPLINK) ||
            (sin.sin_addr.s_addr && (ifa = ifa_ifwithaddr(SA(&sin))) == 0)) {
                return (EADDRNOTAVAIL);
        } else if (ifa) {
                /*
                 * Opportunistically determine the outbound
                 * interface that may be used; this may not
                 * hold true if we end up using a route
                 * going over a different interface, e.g.
                 * when sending to a local address.  This
                 * will get updated again after sending.
                 */
                IFA_LOCK(ifa);
                outif = ifa->ifa_ifp;
                IFA_UNLOCK(ifa);
                IFA_REMREF(ifa);
        }
        inp->inp_laddr = sin.sin_addr;
        inp->inp_last_outifp = outif;

        return (0);
}

__private_extern__ int
rip_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
{
        struct inpcb *inp = sotoinpcb(so);
        struct sockaddr_in *addr = (struct sockaddr_in *)(void *)nam;

        if (inp == NULL
#if NECP
            || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
            )
                return (inp == NULL ? EINVAL : EPROTOTYPE);
        if (nam->sa_len != sizeof(*addr))
                return EINVAL;
        if (TAILQ_EMPTY(&ifnet_head))
                return EADDRNOTAVAIL;
        if ((addr->sin_family != AF_INET) &&
            (addr->sin_family != AF_IMPLINK))
                return EAFNOSUPPORT;

        if (!(so->so_flags1 & SOF1_CONNECT_COUNTED)) {
                so->so_flags1 |= SOF1_CONNECT_COUNTED;
                INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_connected);
        }

        inp->inp_faddr = addr->sin_addr;
        soisconnected(so);

        return 0;
}
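
/*
 * Illustrative sketch (not part of the original file): rip_bind() pins
 * inp_laddr and rip_connect() pins inp_faddr; those become the filters
 * rip_input() matches on and the default addresses rip_output() uses.
 *
 *      struct sockaddr_in sin;
 *      memset(&sin, 0, sizeof (sin));
 *      sin.sin_family = AF_INET;
 *      sin.sin_len = sizeof (sin);
 *      inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);  // example peer
 *      connect(s, (struct sockaddr *)&sin, sizeof (sin));
 *      // After connect(), plain send() suffices; rip_send() supplies
 *      // inp_faddr as the destination.
 */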

__private_extern__ int
rip_shutdown(struct socket *so)
{
        socantsendmore(so);
        return 0;
}

__private_extern__ int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct proc *p)
{
#pragma unused(flags, p)
        struct inpcb *inp = sotoinpcb(so);
        u_int32_t dst;
        int error = 0;

        if (inp == NULL
#if NECP
            || (necp_socket_should_use_flow_divert(inp) && (error = EPROTOTYPE))
#endif /* NECP */
            ) {
                if (inp == NULL)
                        error = EINVAL;
                else
                        error = EPROTOTYPE;
                goto bad;
        }

        if (so->so_state & SS_ISCONNECTED) {
                if (nam != NULL) {
                        error = EISCONN;
                        goto bad;
                }
                dst = inp->inp_faddr.s_addr;
        } else {
                if (nam == NULL) {
                        error = ENOTCONN;
                        goto bad;
                }
                dst = ((struct sockaddr_in *)(void *)nam)->sin_addr.s_addr;
        }
        return (rip_output(m, so, dst, control));

bad:
        VERIFY(error != 0);

        if (m != NULL)
                m_freem(m);
        if (control != NULL)
                m_freem(control);

        return (error);
}

/*
 * Note: rip_unlock is called by various protocols in place of the generic
 * socket_unlock; it handles the socket dealloc on the last reference.
 */
int
rip_unlock(struct socket *so, int refcount, void *debug)
{
        void *lr_saved;
        struct inpcb *inp = sotoinpcb(so);

        if (debug == NULL)
                lr_saved = __builtin_return_address(0);
        else
                lr_saved = debug;

        if (refcount) {
                if (so->so_usecount <= 0) {
                        panic("rip_unlock: bad refcount so=%p val=%x lrh= %s\n",
                            so, so->so_usecount, solockhistory_nr(so));
                        /* NOTREACHED */
                }
                so->so_usecount--;
                if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) {
                        /* cleanup after last reference */
                        lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
                        lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
                        if (inp->inp_state != INPCB_STATE_DEAD) {
#if INET6
                                if (SOCK_CHECK_DOM(so, PF_INET6))
                                        in6_pcbdetach(inp);
                                else
#endif /* INET6 */
                                        in_pcbdetach(inp);
                        }
                        in_pcbdispose(inp);
                        lck_rw_done(ripcbinfo.ipi_lock);
                        return (0);
                }
        }
        so->unlock_lr[so->next_unlock_lr] = lr_saved;
        so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
        lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
        return (0);
}

static int
rip_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
        int error, i, n;
        struct inpcb *inp, **inp_list;
        inp_gen_t gencnt;
        struct xinpgen xig;

        /*
         * The process of preparing the TCB list is too time-consuming and
         * resource-intensive to repeat twice on every request.
         */
        lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
        if (req->oldptr == USER_ADDR_NULL) {
                n = ripcbinfo.ipi_count;
                req->oldidx = 2 * (sizeof xig)
                    + (n + n/8) * sizeof(struct xinpcb);
                lck_rw_done(ripcbinfo.ipi_lock);
                return 0;
        }

        if (req->newptr != USER_ADDR_NULL) {
                lck_rw_done(ripcbinfo.ipi_lock);
                return EPERM;
        }

        /*
         * OK, now we're committed to doing something.
         */
        gencnt = ripcbinfo.ipi_gencnt;
        n = ripcbinfo.ipi_count;

        bzero(&xig, sizeof(xig));
        xig.xig_len = sizeof xig;
        xig.xig_count = n;
        xig.xig_gen = gencnt;
        xig.xig_sogen = so_gencnt;
        error = SYSCTL_OUT(req, &xig, sizeof xig);
        if (error) {
                lck_rw_done(ripcbinfo.ipi_lock);
                return error;
        }
        /*
         * We are done if there is no pcb
         */
        if (n == 0) {
                lck_rw_done(ripcbinfo.ipi_lock);
                return 0;
        }

        inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
        if (inp_list == 0) {
                lck_rw_done(ripcbinfo.ipi_lock);
                return ENOMEM;
        }

        for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n;
            inp = inp->inp_list.le_next) {
                if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
                        inp_list[i++] = inp;
        }
        n = i;

        error = 0;
        for (i = 0; i < n; i++) {
                inp = inp_list[i];
                if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
                        struct xinpcb xi;

                        bzero(&xi, sizeof(xi));
                        xi.xi_len = sizeof xi;
                        /* XXX should avoid extra copy */
                        inpcb_to_compat(inp, &xi.xi_inp);
                        if (inp->inp_socket)
                                sotoxsocket(inp->inp_socket, &xi.xi_socket);
                        error = SYSCTL_OUT(req, &xi, sizeof xi);
                }
        }
        if (!error) {
                /*
                 * Give the user an updated idea of our state.
                 * If the generation differs from what we told
                 * her before, she knows that something happened
                 * while we were processing this request, and it
                 * might be necessary to retry.
                 */
                bzero(&xig, sizeof(xig));
                xig.xig_len = sizeof xig;
                xig.xig_gen = ripcbinfo.ipi_gencnt;
                xig.xig_sogen = so_gencnt;
                xig.xig_count = ripcbinfo.ipi_count;
                error = SYSCTL_OUT(req, &xig, sizeof xig);
        }
        FREE(inp_list, M_TEMP);
        lck_rw_done(ripcbinfo.ipi_lock);
        return error;
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
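
/*
 * Illustrative sketch (not part of the original file): the handler above
 * emits an xinpgen header, one xinpcb per PCB, and a trailing xinpgen
 * whose xig_gen lets the caller detect concurrent changes.  The usual
 * size-then-fetch idiom:
 *
 *      size_t len = 0;
 *      // First call sizes the buffer; second call fills it.
 *      sysctlbyname("net.inet.raw.pcblist", NULL, &len, NULL, 0);
 *      char *buf = malloc(len);
 *      sysctlbyname("net.inet.raw.pcblist", buf, &len, NULL, 0);
 */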

#if !CONFIG_EMBEDDED

static int
rip_pcblist64 SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
        int error, i, n;
        struct inpcb *inp, **inp_list;
        inp_gen_t gencnt;
        struct xinpgen xig;

        /*
         * The process of preparing the TCB list is too time-consuming and
         * resource-intensive to repeat twice on every request.
         */
        lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
        if (req->oldptr == USER_ADDR_NULL) {
                n = ripcbinfo.ipi_count;
                req->oldidx = 2 * (sizeof xig)
                    + (n + n/8) * sizeof(struct xinpcb64);
                lck_rw_done(ripcbinfo.ipi_lock);
                return 0;
        }

        if (req->newptr != USER_ADDR_NULL) {
                lck_rw_done(ripcbinfo.ipi_lock);
                return EPERM;
        }

        /*
         * OK, now we're committed to doing something.
         */
        gencnt = ripcbinfo.ipi_gencnt;
        n = ripcbinfo.ipi_count;

        bzero(&xig, sizeof(xig));
        xig.xig_len = sizeof xig;
        xig.xig_count = n;
        xig.xig_gen = gencnt;
        xig.xig_sogen = so_gencnt;
        error = SYSCTL_OUT(req, &xig, sizeof xig);
        if (error) {
                lck_rw_done(ripcbinfo.ipi_lock);
                return error;
        }
        /*
         * We are done if there is no pcb
         */
        if (n == 0) {
                lck_rw_done(ripcbinfo.ipi_lock);
                return 0;
        }

        inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
        if (inp_list == 0) {
                lck_rw_done(ripcbinfo.ipi_lock);
                return ENOMEM;
        }

        for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n;
            inp = inp->inp_list.le_next) {
                if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
                        inp_list[i++] = inp;
        }
        n = i;

        error = 0;
        for (i = 0; i < n; i++) {
                inp = inp_list[i];
                if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
                        struct xinpcb64 xi;

                        bzero(&xi, sizeof(xi));
                        xi.xi_len = sizeof xi;
                        inpcb_to_xinpcb64(inp, &xi);
                        if (inp->inp_socket)
                                sotoxsocket64(inp->inp_socket, &xi.xi_socket);
                        error = SYSCTL_OUT(req, &xi, sizeof xi);
                }
        }
        if (!error) {
                /*
                 * Give the user an updated idea of our state.
                 * If the generation differs from what we told
                 * her before, she knows that something happened
                 * while we were processing this request, and it
                 * might be necessary to retry.
                 */
                bzero(&xig, sizeof(xig));
                xig.xig_len = sizeof xig;
                xig.xig_gen = ripcbinfo.ipi_gencnt;
                xig.xig_sogen = so_gencnt;
                xig.xig_count = ripcbinfo.ipi_count;
                error = SYSCTL_OUT(req, &xig, sizeof xig);
        }
        FREE(inp_list, M_TEMP);
        lck_rw_done(ripcbinfo.ipi_lock);
        return error;
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist64,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist64, "S,xinpcb64", "List of active raw IP sockets");

#endif /* !CONFIG_EMBEDDED */


static int
rip_pcblist_n SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
        int error = 0;

        error = get_pcblist_n(IPPROTO_IP, req, &ripcbinfo);

        return error;
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist_n,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist_n, "S,xinpcb_n", "List of active raw IP sockets");

struct pr_usrreqs rip_usrreqs = {
        .pru_abort =            rip_abort,
        .pru_attach =           rip_attach,
        .pru_bind =             rip_bind,
        .pru_connect =          rip_connect,
        .pru_control =          in_control,
        .pru_detach =           rip_detach,
        .pru_disconnect =       rip_disconnect,
        .pru_peeraddr =         in_getpeeraddr,
        .pru_send =             rip_send,
        .pru_shutdown =         rip_shutdown,
        .pru_sockaddr =         in_getsockaddr,
        .pru_sosend =           sosend,
        .pru_soreceive =        soreceive,
};
/* DSEP Review Done pl-20051213-v02 @3253 */