/*
 * bsd/netinet/raw_ip.c -- raw IP (SOCK_RAW, PF_INET) protocol support.
 * Source: Apple xnu-6153.11.26.
 */
1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)raw_ip.c 8.7 (Berkeley) 5/15/95
61 */
62 /*
63 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
64 * support for mandatory and extensible security protections. This notice
65 * is included in support of clause 2.2 (b) of the Apple Public License,
66 * Version 2.0.
67 */
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kernel.h>
72 #include <sys/malloc.h>
73 #include <sys/mbuf.h>
74 #include <sys/mcache.h>
75 #include <sys/proc.h>
76 #include <sys/domain.h>
77 #include <sys/protosw.h>
78 #include <sys/socket.h>
79 #include <sys/socketvar.h>
80 #include <sys/sysctl.h>
81 #include <libkern/OSAtomic.h>
82 #include <kern/zalloc.h>
83
84 #include <pexpert/pexpert.h>
85
86 #include <net/if.h>
87 #include <net/net_api_stats.h>
88 #include <net/route.h>
89
90 #define _IP_VHL
91 #include <netinet/in.h>
92 #include <netinet/in_systm.h>
93 #include <netinet/in_tclass.h>
94 #include <netinet/ip.h>
95 #include <netinet/in_pcb.h>
96 #include <netinet/in_var.h>
97 #include <netinet/ip_var.h>
98
99 #if INET6
100 #include <netinet6/in6_pcb.h>
101 #endif /* INET6 */
102
103 #include <netinet/ip_fw.h>
104
105 #if IPSEC
106 #include <netinet6/ipsec.h>
107 #endif /*IPSEC*/
108
109 #if DUMMYNET
110 #include <netinet/ip_dummynet.h>
111 #endif
112
113 #if CONFIG_MACF_NET
114 #include <security/mac_framework.h>
#endif /* CONFIG_MACF_NET */
116
117 int load_ipfw(void);
118 int rip_detach(struct socket *);
119 int rip_abort(struct socket *);
120 int rip_disconnect(struct socket *);
121 int rip_bind(struct socket *, struct sockaddr *, struct proc *);
122 int rip_connect(struct socket *, struct sockaddr *, struct proc *);
123 int rip_shutdown(struct socket *);
124
125 struct inpcbhead ripcb;
126 struct inpcbinfo ripcbinfo;
127
128 /* control hooks for ipfw and dummynet */
129 #if IPFIREWALL
130 ip_fw_ctl_t *ip_fw_ctl_ptr;
131 #endif /* IPFIREWALL */
132 #if DUMMYNET
133 ip_dn_ctl_t *ip_dn_ctl_ptr;
134 #endif /* DUMMYNET */
135
136 /*
137 * Nominal space allocated to a raw ip socket.
138 */
139 #define RIPSNDQ 8192
140 #define RIPRCVQ 8192
141
142 /*
143 * Raw interface to IP protocol.
144 */
145
/*
 * Initialize raw connection block q.
 *
 * Protocol init hook, invoked once via the protosw at domain attach time.
 * Sets up the global raw-IP PCB list (ripcb), its inpcbinfo, the PCB
 * zone, and the reader/writer lock protecting the list, then registers
 * the pcbinfo with the inpcb layer.  Subsequent calls are no-ops.
 */
void
rip_init(struct protosw *pp, struct domain *dp)
{
#pragma unused(dp)
	static int rip_initialized = 0;
	struct inpcbinfo *pcbinfo;

	/* must be attached exactly once, and not yet initialized */
	VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);

	if (rip_initialized) {
		return;
	}
	rip_initialized = 1;

	LIST_INIT(&ripcb);
	ripcbinfo.ipi_listhead = &ripcb;
	/*
	 * XXX We don't use the hash list for raw IP, but it's easier
	 * to allocate a one entry hash list than it is to check all
	 * over the place for ipi_hashbase == NULL.
	 */
	ripcbinfo.ipi_hashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_hashmask);
	ripcbinfo.ipi_porthashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_porthashmask);

	/* zone sized for up to 4096 raw-IP PCBs, grown 4096 bytes at a time */
	ripcbinfo.ipi_zone = zinit(sizeof(struct inpcb),
	    (4096 * sizeof(struct inpcb)), 4096, "ripzone");

	pcbinfo = &ripcbinfo;
	/*
	 * allocate lock group attribute and group for raw IP pcb mutexes
	 */
	pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init();
	pcbinfo->ipi_lock_grp = lck_grp_alloc_init("ripcb", pcbinfo->ipi_lock_grp_attr);

	/*
	 * allocate the lock attribute for raw IP pcb mutexes
	 */
	pcbinfo->ipi_lock_attr = lck_attr_alloc_init();
	if ((pcbinfo->ipi_lock = lck_rw_alloc_init(pcbinfo->ipi_lock_grp,
	    pcbinfo->ipi_lock_attr)) == NULL) {
		panic("%s: unable to allocate PCB lock\n", __func__);
		/* NOTREACHED */
	}

	in_pcbinfo_attach(&ripcbinfo);
}
195
/*
 * Shared source-address template passed to sbappendaddr() by rip_input().
 * Only sin_addr is rewritten per packet (before the PCB list walk), so
 * the structure always reflects the most recently received packet.
 */
static struct sockaddr_in ripsrc = {
	.sin_len = sizeof(ripsrc),
	.sin_family = AF_INET,
	.sin_port = 0,
	.sin_addr = { .s_addr = 0 },
	.sin_zero = {0, 0, 0, 0, 0, 0, 0, 0, }
};
203
/*
 * Setup generic address and protocol structures
 * for raw_input routine, then pass them along with
 * mbuf chain.
 *
 * Deliver an inbound IP packet to every matching raw socket.  For each
 * match except the last, a copy of "m" is appended to the socket's
 * receive buffer; the original "m" is handed to the final match (or
 * freed if no socket takes it).  "iphlen" is the IP header length so
 * INP_STRIPHDR sockets can have the header trimmed.  Runs with the
 * ripcb list lock held shared for the whole delivery.
 */
void
rip_input(struct mbuf *m, int iphlen)
{
	struct ip *ip = mtod(m, struct ip *);
	struct inpcb *inp;
	struct inpcb *last = 0;     /* previous matching PCB, delivered lazily */
	struct mbuf *opts = 0;      /* control-data chain from ip_savecontrol */
	int skipit = 0, ret = 0;
	struct ifnet *ifp = m->m_pkthdr.rcvif;

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	ripsrc.sin_addr = ip->ip_src;
	lck_rw_lock_shared(ripcbinfo.ipi_lock);
	LIST_FOREACH(inp, &ripcb, inp_list) {
#if INET6
		if ((inp->inp_vflag & INP_IPV4) == 0) {
			continue;
		}
#endif
		/* protocol filter: 0 means "any protocol" */
		if (inp->inp_ip_p && (inp->inp_ip_p != ip->ip_p)) {
			continue;
		}
		/* bound local address must match the packet destination */
		if (inp->inp_laddr.s_addr &&
		    inp->inp_laddr.s_addr != ip->ip_dst.s_addr) {
			continue;
		}
		/* connected foreign address must match the packet source */
		if (inp->inp_faddr.s_addr &&
		    inp->inp_faddr.s_addr != ip->ip_src.s_addr) {
			continue;
		}
		/* interface-level restrictions (cellular/expensive/etc.) */
		if (inp_restricted_recv(inp, ifp)) {
			continue;
		}
		if (last) {
			/*
			 * A previous match is pending: give it a copy and
			 * keep the original for the last matching socket.
			 */
			struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);

			skipit = 0;

#if NECP
			if (n && !necp_socket_is_allowed_to_send_recv_v4(last, 0, 0,
			    &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) {
				m_freem(n);
				/* do not inject data to pcb */
				skipit = 1;
			}
#endif /* NECP */
#if CONFIG_MACF_NET
			if (n && skipit == 0) {
				if (mac_inpcb_check_deliver(last, n, AF_INET,
				    SOCK_RAW) != 0) {
					m_freem(n);
					skipit = 1;
				}
			}
#endif
			if (n && skipit == 0) {
				int error = 0;
				/* build control data if the socket asked for it */
				if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
				    (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
				    (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 ||
				    (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
					ret = ip_savecontrol(last, &opts, ip, n);
					if (ret != 0) {
						m_freem(n);
						m_freem(opts);
						last = inp;
						continue;
					}
				}
				if (last->inp_flags & INP_STRIPHDR) {
					/* socket wants payload only: drop the IP header */
					n->m_len -= iphlen;
					n->m_pkthdr.len -= iphlen;
					n->m_data += iphlen;
				}
				so_recv_data_stat(last->inp_socket, m, 0);
				/* sbappendaddr consumes n and opts on both paths */
				if (sbappendaddr(&last->inp_socket->so_rcv,
				    (struct sockaddr *)&ripsrc, n,
				    opts, &error) != 0) {
					sorwakeup(last->inp_socket);
				} else {
					if (error) {
						/* should notify about lost packet */
						ipstat.ips_raw_sappend_fail++;
					}
				}
				opts = 0;
			}
		}
		last = inp;
	}

	/*
	 * Deliver the original mbuf to the last matching socket (if any),
	 * repeating the NECP/MAC/control-data gauntlet for it.
	 */
	skipit = 0;
#if NECP
	if (last && !necp_socket_is_allowed_to_send_recv_v4(last, 0, 0,
	    &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) {
		m_freem(m);
		OSAddAtomic(1, &ipstat.ips_delivered);
		/* do not inject data to pcb */
		skipit = 1;
	}
#endif /* NECP */
#if CONFIG_MACF_NET
	if (last && skipit == 0) {
		if (mac_inpcb_check_deliver(last, m, AF_INET, SOCK_RAW) != 0) {
			skipit = 1;
			m_freem(m);
		}
	}
#endif
	if (skipit == 0) {
		if (last) {
			if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
			    (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
			    (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 ||
			    (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
				ret = ip_savecontrol(last, &opts, ip, m);
				if (ret != 0) {
					m_freem(m);
					m_freem(opts);
					goto unlock;
				}
			}
			if (last->inp_flags & INP_STRIPHDR) {
				m->m_len -= iphlen;
				m->m_pkthdr.len -= iphlen;
				m->m_data += iphlen;
			}
			so_recv_data_stat(last->inp_socket, m, 0);
			if (sbappendaddr(&last->inp_socket->so_rcv,
			    (struct sockaddr *)&ripsrc, m, opts, NULL) != 0) {
				sorwakeup(last->inp_socket);
			} else {
				ipstat.ips_raw_sappend_fail++;
			}
		} else {
			/* nobody wanted it: count as undelivered */
			m_freem(m);
			OSAddAtomic(1, &ipstat.ips_noproto);
			OSAddAtomic(-1, &ipstat.ips_delivered);
		}
	}
unlock:
	/*
	 * Keep the list locked because socket filter may force the socket lock
	 * to be released when calling sbappendaddr() -- see rdar://7627704
	 */
	lck_rw_done(ripcbinfo.ipi_lock);
}
358
359 /*
360 * Generate IP header and pass packet to ip_output.
361 * Tack on options user may have setup with control call.
362 */
363 int
364 rip_output(
365 struct mbuf *m,
366 struct socket *so,
367 u_int32_t dst,
368 struct mbuf *control)
369 {
370 struct ip *ip;
371 struct inpcb *inp = sotoinpcb(so);
372 int flags = (so->so_options & SO_DONTROUTE) | IP_ALLOWBROADCAST;
373 struct ip_out_args ipoa;
374 struct ip_moptions *imo;
375 int error = 0;
376
377 bzero(&ipoa, sizeof(ipoa));
378 ipoa.ipoa_boundif = IFSCOPE_NONE;
379 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;
380
381 int sotc = SO_TC_UNSPEC;
382 int netsvctype = _NET_SERVICE_TYPE_UNSPEC;
383
384
385 if (control != NULL) {
386 sotc = so_tc_from_control(control, &netsvctype);
387
388 m_freem(control);
389 control = NULL;
390 }
391 if (sotc == SO_TC_UNSPEC) {
392 sotc = so->so_traffic_class;
393 netsvctype = so->so_netsvctype;
394 }
395
396 if (inp == NULL
397 #if NECP
398 || (necp_socket_should_use_flow_divert(inp))
399 #endif /* NECP */
400 ) {
401 if (m != NULL) {
402 m_freem(m);
403 }
404 VERIFY(control == NULL);
405 return inp == NULL ? EINVAL : EPROTOTYPE;
406 }
407
408 flags |= IP_OUTARGS;
409 /* If socket was bound to an ifindex, tell ip_output about it */
410 if (inp->inp_flags & INP_BOUND_IF) {
411 ipoa.ipoa_boundif = inp->inp_boundifp->if_index;
412 ipoa.ipoa_flags |= IPOAF_BOUND_IF;
413 }
414 if (INP_NO_CELLULAR(inp)) {
415 ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
416 }
417 if (INP_NO_EXPENSIVE(inp)) {
418 ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
419 }
420 if (INP_NO_CONSTRAINED(inp)) {
421 ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED;
422 }
423 if (INP_AWDL_UNRESTRICTED(inp)) {
424 ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
425 }
426 ipoa.ipoa_sotc = sotc;
427 ipoa.ipoa_netsvctype = netsvctype;
428
429 if (inp->inp_flowhash == 0) {
430 inp->inp_flowhash = inp_calc_flowhash(inp);
431 }
432
433 /*
434 * If the user handed us a complete IP packet, use it.
435 * Otherwise, allocate an mbuf for a header and fill it in.
436 */
437 if ((inp->inp_flags & INP_HDRINCL) == 0) {
438 if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
439 m_freem(m);
440 return EMSGSIZE;
441 }
442 M_PREPEND(m, sizeof(struct ip), M_WAIT, 1);
443 if (m == NULL) {
444 return ENOBUFS;
445 }
446 ip = mtod(m, struct ip *);
447 ip->ip_tos = inp->inp_ip_tos;
448 ip->ip_off = 0;
449 ip->ip_p = inp->inp_ip_p;
450 ip->ip_len = m->m_pkthdr.len;
451 ip->ip_src = inp->inp_laddr;
452 ip->ip_dst.s_addr = dst;
453 ip->ip_ttl = inp->inp_ip_ttl;
454 } else {
455 if (m->m_pkthdr.len > IP_MAXPACKET) {
456 m_freem(m);
457 return EMSGSIZE;
458 }
459 ip = mtod(m, struct ip *);
460 /* don't allow both user specified and setsockopt options,
461 * and don't allow packet length sizes that will crash */
462 if (((IP_VHL_HL(ip->ip_vhl) != (sizeof(*ip) >> 2))
463 && inp->inp_options)
464 || (ip->ip_len > m->m_pkthdr.len)
465 || (ip->ip_len < (IP_VHL_HL(ip->ip_vhl) << 2))) {
466 m_freem(m);
467 return EINVAL;
468 }
469 if (ip->ip_id == 0 && !(rfc6864 && IP_OFF_IS_ATOMIC(ntohs(ip->ip_off)))) {
470 ip->ip_id = ip_randomid();
471 }
472 /* XXX prevent ip_output from overwriting header fields */
473 flags |= IP_RAWOUTPUT;
474 OSAddAtomic(1, &ipstat.ips_rawout);
475 }
476
477 if (inp->inp_laddr.s_addr != INADDR_ANY) {
478 ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;
479 }
480
481 #if NECP
482 {
483 necp_kernel_policy_id policy_id;
484 necp_kernel_policy_id skip_policy_id;
485 u_int32_t route_rule_id;
486
487 /*
488 * We need a route to perform NECP route rule checks
489 */
490 if (net_qos_policy_restricted != 0 &&
491 ROUTE_UNUSABLE(&inp->inp_route)) {
492 struct sockaddr_in to;
493 struct sockaddr_in from;
494 struct in_addr laddr = ip->ip_src;
495
496 ROUTE_RELEASE(&inp->inp_route);
497
498 bzero(&from, sizeof(struct sockaddr_in));
499 from.sin_family = AF_INET;
500 from.sin_len = sizeof(struct sockaddr_in);
501 from.sin_addr = laddr;
502
503 bzero(&to, sizeof(struct sockaddr_in));
504 to.sin_family = AF_INET;
505 to.sin_len = sizeof(struct sockaddr_in);
506 to.sin_addr.s_addr = ip->ip_dst.s_addr;
507
508 if ((error = in_pcbladdr(inp, (struct sockaddr *)&to,
509 &laddr, ipoa.ipoa_boundif, NULL, 1)) != 0) {
510 printf("%s in_pcbladdr(%p) error %d\n",
511 __func__, inp, error);
512 m_freem(m);
513 return error;
514 }
515
516 inp_update_necp_policy(inp, (struct sockaddr *)&from,
517 (struct sockaddr *)&to, ipoa.ipoa_boundif);
518 inp->inp_policyresult.results.qos_marking_gencount = 0;
519 }
520
521 if (!necp_socket_is_allowed_to_send_recv_v4(inp, 0, 0,
522 &ip->ip_src, &ip->ip_dst, NULL, &policy_id, &route_rule_id, &skip_policy_id)) {
523 m_freem(m);
524 return EHOSTUNREACH;
525 }
526
527 necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id);
528
529 if (net_qos_policy_restricted != 0) {
530 struct ifnet *rt_ifp = NULL;
531
532 if (inp->inp_route.ro_rt != NULL) {
533 rt_ifp = inp->inp_route.ro_rt->rt_ifp;
534 }
535
536 necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt,
537 NULL, route_rule_id);
538 }
539 }
540 #endif /* NECP */
541 if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
542 ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
543 }
544
545 #if IPSEC
546 if (inp->inp_sp != NULL && ipsec_setsocket(m, so) != 0) {
547 m_freem(m);
548 return ENOBUFS;
549 }
550 #endif /*IPSEC*/
551
552 if (ROUTE_UNUSABLE(&inp->inp_route)) {
553 ROUTE_RELEASE(&inp->inp_route);
554 }
555
556 set_packet_service_class(m, so, sotc, 0);
557 m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
558 m->m_pkthdr.pkt_flowid = inp->inp_flowhash;
559 m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC |
560 PKTF_FLOW_RAWSOCK);
561 m->m_pkthdr.pkt_proto = inp->inp_ip_p;
562 m->m_pkthdr.tx_rawip_pid = so->last_pid;
563 m->m_pkthdr.tx_rawip_e_pid = so->e_pid;
564 if (so->so_flags & SOF_DELEGATED) {
565 m->m_pkthdr.tx_rawip_e_pid = so->e_pid;
566 } else {
567 m->m_pkthdr.tx_rawip_e_pid = 0;
568 }
569
570 #if CONFIG_MACF_NET
571 mac_mbuf_label_associate_inpcb(inp, m);
572 #endif
573
574 imo = inp->inp_moptions;
575 if (imo != NULL) {
576 IMO_ADDREF(imo);
577 }
578 /*
579 * The domain lock is held across ip_output, so it is okay
580 * to pass the PCB cached route pointer directly to IP and
581 * the modules beneath it.
582 */
583 // TODO: PASS DOWN ROUTE RULE ID
584 error = ip_output(m, inp->inp_options, &inp->inp_route, flags,
585 imo, &ipoa);
586
587 if (imo != NULL) {
588 IMO_REMREF(imo);
589 }
590
591 if (inp->inp_route.ro_rt != NULL) {
592 struct rtentry *rt = inp->inp_route.ro_rt;
593 struct ifnet *outif;
594
595 if ((rt->rt_flags & (RTF_MULTICAST | RTF_BROADCAST)) ||
596 inp->inp_socket == NULL ||
597 !(inp->inp_socket->so_state & SS_ISCONNECTED)) {
598 rt = NULL; /* unusable */
599 }
600 /*
601 * Always discard the cached route for unconnected
602 * socket or if it is a multicast route.
603 */
604 if (rt == NULL) {
605 ROUTE_RELEASE(&inp->inp_route);
606 }
607
608 /*
609 * If this is a connected socket and the destination
610 * route is unicast, update outif with that of the
611 * route interface used by IP.
612 */
613 if (rt != NULL &&
614 (outif = rt->rt_ifp) != inp->inp_last_outifp) {
615 inp->inp_last_outifp = outif;
616 }
617 } else {
618 ROUTE_RELEASE(&inp->inp_route);
619 }
620
621 /*
622 * If output interface was cellular/expensive/constrained, and this socket is
623 * denied access to it, generate an event.
624 */
625 if (error != 0 && (ipoa.ipoa_retflags & IPOARF_IFDENIED) &&
626 (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp) || INP_NO_CONSTRAINED(inp))) {
627 soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
628 }
629
630 return error;
631 }
632
#if IPFIREWALL
/*
 * Lazily initialize the ipfw subsystem (and dummynet, if compiled in).
 * Returns 0 when the ipfw control hook is installed, -1 otherwise.
 */
int
load_ipfw(void)
{
	kern_return_t err = 0;

	ipfw_init();

#if DUMMYNET
	if (!DUMMYNET_LOADED) {
		ip_dn_init();
	}
#endif /* DUMMYNET */

	/* initialization must leave the control hook in place */
	if (err == 0 && ip_fw_ctl_ptr == NULL) {
		return -1;
	}
	return err;
}
#endif /* IPFIREWALL */
651
/*
 * Raw IP socket option processing.
 *
 * Handles IPPROTO_IP-level options specific to raw sockets
 * (IP_HDRINCL, IP_STRIPHDR), the ipfw/dummynet control hooks when
 * those subsystems are built in, and the special <SOL_SOCKET,SO_FLUSH>
 * pair; everything else falls through to ip_ctloutput().
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct inpcb *inp = sotoinpcb(so);
	int error, optval;

	/* Allow <SOL_SOCKET,SO_FLUSH> at this level */
	if (sopt->sopt_level != IPPROTO_IP &&
	    !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH)) {
		return EINVAL;
	}

	error = 0;

	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			/* reported value is the raw flag bit, not 0/1 */
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_STRIPHDR:
			optval = inp->inp_flags & INP_STRIPHDR;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

#if IPFIREWALL
		case IP_FW_ADD:
		case IP_FW_GET:
		case IP_OLD_FW_ADD:
		case IP_OLD_FW_GET:
			/* load ipfw on first use, then dispatch to its hook */
			if (ip_fw_ctl_ptr == 0) {
				error = load_ipfw();
			}
			if (ip_fw_ctl_ptr && error == 0) {
				error = ip_fw_ctl_ptr(sopt);
			} else {
				error = ENOPROTOOPT;
			}
			break;
#endif /* IPFIREWALL */

#if DUMMYNET
		case IP_DUMMYNET_GET:
			if (!DUMMYNET_LOADED) {
				ip_dn_init();
			}
			if (DUMMYNET_LOADED) {
				error = ip_dn_ctl_ptr(sopt);
			} else {
				error = ENOPROTOOPT;
			}
			break;
#endif /* DUMMYNET */

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			/* caller supplies a complete IP header on send */
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error) {
				break;
			}
			if (optval) {
				inp->inp_flags |= INP_HDRINCL;
			} else {
				inp->inp_flags &= ~INP_HDRINCL;
			}
			break;

		case IP_STRIPHDR:
			/* strip the IP header before queueing on receive */
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error) {
				break;
			}
			if (optval) {
				inp->inp_flags |= INP_STRIPHDR;
			} else {
				inp->inp_flags &= ~INP_STRIPHDR;
			}
			break;

#if IPFIREWALL
		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_OLD_FW_ADD:
		case IP_OLD_FW_DEL:
		case IP_OLD_FW_FLUSH:
		case IP_OLD_FW_ZERO:
		case IP_OLD_FW_RESETLOG:
			if (ip_fw_ctl_ptr == 0) {
				error = load_ipfw();
			}
			if (ip_fw_ctl_ptr && error == 0) {
				error = ip_fw_ctl_ptr(sopt);
			} else {
				error = ENOPROTOOPT;
			}
			break;
#endif /* IPFIREWALL */

#if DUMMYNET
		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (!DUMMYNET_LOADED) {
				ip_dn_init();
			}
			if (DUMMYNET_LOADED) {
				error = ip_dn_ctl_ptr(sopt);
			} else {
				error = ENOPROTOOPT;
			}
			break;
#endif

		case SO_FLUSH:
			/* flush pending output for the given flow class */
			if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval))) != 0) {
				break;
			}

			error = inp_flush(inp, optval);
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return error;
}
800
/*
 * This function exists solely to receive the PRC_IFDOWN messages which
 * are sent by if_down().  It looks for an ifaddr whose ifa_addr is sa,
 * and calls in_ifadown() to remove all routes corresponding to that address.
 * It also receives the PRC_IFUP messages from if_up() and reinstalls the
 * interface routes.
 */
void
rip_ctlinput(
	int cmd,
	struct sockaddr *sa,
	__unused void *vip,
	__unused struct ifnet *ifp)
{
	struct in_ifaddr *ia = NULL;
	struct ifnet *iaifp = NULL;
	int err = 0;
	int flags, done = 0;

	switch (cmd) {
	case PRC_IFDOWN:
		lck_rw_lock_shared(in_ifaddr_rwlock);
		for (ia = in_ifaddrhead.tqh_first; ia;
		    ia = ia->ia_link.tqe_next) {
			IFA_LOCK(&ia->ia_ifa);
			if (ia->ia_ifa.ifa_addr == sa &&
			    (ia->ia_flags & IFA_ROUTE)) {
				done = 1;
				/* hold a ref across the unlocked route work */
				IFA_ADDREF_LOCKED(&ia->ia_ifa);
				IFA_UNLOCK(&ia->ia_ifa);
				/* drop the list lock before taking rnh_lock */
				lck_rw_done(in_ifaddr_rwlock);
				lck_mtx_lock(rnh_lock);
				/*
				 * in_ifscrub kills the interface route.
				 */
				in_ifscrub(ia->ia_ifp, ia, 1);
				/*
				 * in_ifadown gets rid of all the rest of
				 * the routes.  This is not quite the right
				 * thing to do, but at least if we are running
				 * a routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 1);
				lck_mtx_unlock(rnh_lock);
				IFA_REMREF(&ia->ia_ifa);
				break;
			}
			IFA_UNLOCK(&ia->ia_ifa);
		}
		/* list lock was already dropped on the "done" path above */
		if (!done) {
			lck_rw_done(in_ifaddr_rwlock);
		}
		break;

	case PRC_IFUP:
		lck_rw_lock_shared(in_ifaddr_rwlock);
		for (ia = in_ifaddrhead.tqh_first; ia;
		    ia = ia->ia_link.tqe_next) {
			IFA_LOCK(&ia->ia_ifa);
			if (ia->ia_ifa.ifa_addr == sa) {
				/* keep it locked */
				break;
			}
			IFA_UNLOCK(&ia->ia_ifa);
		}
		/* nothing to do if not found, already routed, or not ready */
		if (ia == NULL || (ia->ia_flags & IFA_ROUTE) ||
		    (ia->ia_ifa.ifa_debug & IFD_NOTREADY)) {
			if (ia != NULL) {
				IFA_UNLOCK(&ia->ia_ifa);
			}
			lck_rw_done(in_ifaddr_rwlock);
			return;
		}
		/* hold a ref so "ia" stays valid after the list lock drops */
		IFA_ADDREF_LOCKED(&ia->ia_ifa);
		IFA_UNLOCK(&ia->ia_ifa);
		lck_rw_done(in_ifaddr_rwlock);

		flags = RTF_UP;
		iaifp = ia->ia_ifa.ifa_ifp;

		if ((iaifp->if_flags & IFF_LOOPBACK)
		    || (iaifp->if_flags & IFF_POINTOPOINT)) {
			flags |= RTF_HOST;
		}

		/* reinstall the interface route and mark it present */
		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0) {
			IFA_LOCK_SPIN(&ia->ia_ifa);
			ia->ia_flags |= IFA_ROUTE;
			IFA_UNLOCK(&ia->ia_ifa);
		}
		IFA_REMREF(&ia->ia_ifa);
		break;
	}
}
896
/* Default send/receive buffer reservations for raw IP sockets,
 * tunable via net.inet.raw.maxdgram / net.inet.raw.recvspace. */
u_int32_t rip_sendspace = RIPSNDQ;
u_int32_t rip_recvspace = RIPRCVQ;

SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW | CTLFLAG_LOCKED,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_INT(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &rip_recvspace, 0, "Maximum incoming raw IP datagram size");
SYSCTL_UINT(_net_inet_raw, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
    &ripcbinfo.ipi_count, 0, "Number of active PCBs");
906
907 static int
908 rip_attach(struct socket *so, int proto, struct proc *p)
909 {
910 struct inpcb *inp;
911 int error;
912
913 inp = sotoinpcb(so);
914 if (inp) {
915 panic("rip_attach");
916 }
917 if ((so->so_state & SS_PRIV) == 0) {
918 return EPERM;
919 }
920
921 error = soreserve(so, rip_sendspace, rip_recvspace);
922 if (error) {
923 return error;
924 }
925 error = in_pcballoc(so, &ripcbinfo, p);
926 if (error) {
927 return error;
928 }
929 inp = (struct inpcb *)so->so_pcb;
930 inp->inp_vflag |= INP_IPV4;
931 inp->inp_ip_p = proto;
932 inp->inp_ip_ttl = ip_defttl;
933 return 0;
934 }
935
/*
 * Detach and release the PCB when a raw IP socket is closed.
 * Panics if the socket somehow has no PCB attached.
 */
__private_extern__ int
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	if (inp == 0) {
		panic("rip_detach");
	}
	in_pcbdetach(inp);
	return 0;
}
948
949 __private_extern__ int
950 rip_abort(struct socket *so)
951 {
952 soisdisconnected(so);
953 return rip_detach(so);
954 }
955
956 __private_extern__ int
957 rip_disconnect(struct socket *so)
958 {
959 if ((so->so_state & SS_ISCONNECTED) == 0) {
960 return ENOTCONN;
961 }
962 return rip_abort(so);
963 }
964
965 __private_extern__ int
966 rip_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
967 {
968 #pragma unused(p)
969 struct inpcb *inp = sotoinpcb(so);
970 struct sockaddr_in sin;
971 struct ifaddr *ifa = NULL;
972 struct ifnet *outif = NULL;
973
974 if (inp == NULL
975 #if NECP
976 || (necp_socket_should_use_flow_divert(inp))
977 #endif /* NECP */
978 ) {
979 return inp == NULL ? EINVAL : EPROTOTYPE;
980 }
981
982 if (nam->sa_len != sizeof(struct sockaddr_in)) {
983 return EINVAL;
984 }
985
986 /* Sanitized local copy for interface address searches */
987 bzero(&sin, sizeof(sin));
988 sin.sin_family = AF_INET;
989 sin.sin_len = sizeof(struct sockaddr_in);
990 sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;
991
992 if (TAILQ_EMPTY(&ifnet_head) ||
993 (sin.sin_family != AF_INET && sin.sin_family != AF_IMPLINK) ||
994 (sin.sin_addr.s_addr && (ifa = ifa_ifwithaddr(SA(&sin))) == 0)) {
995 return EADDRNOTAVAIL;
996 } else if (ifa) {
997 /*
998 * Opportunistically determine the outbound
999 * interface that may be used; this may not
1000 * hold true if we end up using a route
1001 * going over a different interface, e.g.
1002 * when sending to a local address. This
1003 * will get updated again after sending.
1004 */
1005 IFA_LOCK(ifa);
1006 outif = ifa->ifa_ifp;
1007 IFA_UNLOCK(ifa);
1008 IFA_REMREF(ifa);
1009 }
1010 inp->inp_laddr = sin.sin_addr;
1011 inp->inp_last_outifp = outif;
1012
1013 return 0;
1014 }
1015
1016 __private_extern__ int
1017 rip_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
1018 {
1019 struct inpcb *inp = sotoinpcb(so);
1020 struct sockaddr_in *addr = (struct sockaddr_in *)(void *)nam;
1021
1022 if (inp == NULL
1023 #if NECP
1024 || (necp_socket_should_use_flow_divert(inp))
1025 #endif /* NECP */
1026 ) {
1027 return inp == NULL ? EINVAL : EPROTOTYPE;
1028 }
1029 if (nam->sa_len != sizeof(*addr)) {
1030 return EINVAL;
1031 }
1032 if (TAILQ_EMPTY(&ifnet_head)) {
1033 return EADDRNOTAVAIL;
1034 }
1035 if ((addr->sin_family != AF_INET) &&
1036 (addr->sin_family != AF_IMPLINK)) {
1037 return EAFNOSUPPORT;
1038 }
1039
1040 if (!(so->so_flags1 & SOF1_CONNECT_COUNTED)) {
1041 so->so_flags1 |= SOF1_CONNECT_COUNTED;
1042 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_connected);
1043 }
1044
1045 inp->inp_faddr = addr->sin_addr;
1046 soisconnected(so);
1047
1048 return 0;
1049 }
1050
1051 __private_extern__ int
1052 rip_shutdown(struct socket *so)
1053 {
1054 socantsendmore(so);
1055 return 0;
1056 }
1057
1058 __private_extern__ int
1059 rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
1060 struct mbuf *control, struct proc *p)
1061 {
1062 #pragma unused(flags, p)
1063 struct inpcb *inp = sotoinpcb(so);
1064 u_int32_t dst;
1065 int error = 0;
1066
1067 if (inp == NULL
1068 #if NECP
1069 || (necp_socket_should_use_flow_divert(inp) && (error = EPROTOTYPE))
1070 #endif /* NECP */
1071 ) {
1072 if (inp == NULL) {
1073 error = EINVAL;
1074 } else {
1075 error = EPROTOTYPE;
1076 }
1077 goto bad;
1078 }
1079
1080 if (so->so_state & SS_ISCONNECTED) {
1081 if (nam != NULL) {
1082 error = EISCONN;
1083 goto bad;
1084 }
1085 dst = inp->inp_faddr.s_addr;
1086 } else {
1087 if (nam == NULL) {
1088 error = ENOTCONN;
1089 goto bad;
1090 }
1091 dst = ((struct sockaddr_in *)(void *)nam)->sin_addr.s_addr;
1092 }
1093 return rip_output(m, so, dst, control);
1094
1095 bad:
1096 VERIFY(error != 0);
1097
1098 if (m != NULL) {
1099 m_freem(m);
1100 }
1101 if (control != NULL) {
1102 m_freem(control);
1103 }
1104
1105 return error;
1106 }
1107
/* note: rip_unlock is called from different protos instead of the generic socket_unlock,
 * it will handle the socket dealloc on last reference
 *
 * Drops one use-count when "refcount" is set; if that was the last
 * reference and the PCB is marked WNT_STOPUSING, the PCB is detached
 * and disposed of under the exclusive ripcb list lock.  Otherwise the
 * unlock call site is recorded for lock debugging and the domain mutex
 * is released.
 * */
int
rip_unlock(struct socket *so, int refcount, void *debug)
{
	void *lr_saved;
	struct inpcb *inp = sotoinpcb(so);

	/* record the caller (or an explicit debug cookie) for lock history */
	if (debug == NULL) {
		lr_saved = __builtin_return_address(0);
	} else {
		lr_saved = debug;
	}

	if (refcount) {
		if (so->so_usecount <= 0) {
			panic("rip_unlock: bad refoucnt so=%p val=%x lrh= %s\n",
			    so, so->so_usecount, solockhistory_nr(so));
			/* NOTREACHED */
		}
		so->so_usecount--;
		if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) {
			/* cleanup after last reference */
			lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
			/* exclusive list lock: we are about to unlink the PCB */
			lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
			if (inp->inp_state != INPCB_STATE_DEAD) {
#if INET6
				if (SOCK_CHECK_DOM(so, PF_INET6)) {
					in6_pcbdetach(inp);
				} else
#endif /* INET6 */
				in_pcbdetach(inp);
			}
			in_pcbdispose(inp);
			lck_rw_done(ripcbinfo.ipi_lock);
			return 0;
		}
	}
	so->unlock_lr[so->next_unlock_lr] = lr_saved;
	so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
	lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
	return 0;
}
1152
/*
 * Sysctl handler for net.inet.raw.pcblist: copy out a snapshot of all
 * active raw IP protocol control blocks as "struct xinpcb" records,
 * bracketed by two "struct xinpgen" headers so userland can compare
 * generation counts and detect concurrent modification.
 * Returns 0 on success, EPERM for attempted writes, ENOMEM/copyout errno
 * on failure.  ripcbinfo.ipi_lock is held exclusively for the duration.
 */
static int
rip_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
	if (req->oldptr == USER_ADDR_NULL) {
		/* Size probe only: report an estimate with ~12% slack for
		 * pcbs created between this probe and the real read. */
		n = ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n / 8) * sizeof(struct xinpcb);
		lck_rw_done(ripcbinfo.ipi_lock);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		/* Read-only sysctl: reject writes */
		lck_rw_done(ripcbinfo.ipi_lock);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = ripcbinfo.ipi_gencnt;
	n = ripcbinfo.ipi_count;

	/* Leading xinpgen: count and generation at the start of the scan */
	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return 0;
	}

	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return ENOMEM;
	}

	/* Collect at most n pcbs that existed at snapshot time and are
	 * not already dead; newer pcbs (gencnt > snapshot) are skipped. */
	for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n;
	    inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			inp_list[i++] = inp;
		}
	}
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		/* Re-check liveness (defensive; ipi_lock is still held) */
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			struct xinpcb xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			inpcb_to_compat(inp, &xi.xi_inp);
			if (inp->inp_socket) {
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			}
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof xig;
		xig.xig_gen = ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = ripcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	FREE(inp_list, M_TEMP);
	lck_rw_done(ripcbinfo.ipi_lock);
	return error;
}
1253
/* Read-only sysctl node net.inet.raw.pcblist, served by rip_pcblist() */
SYSCTL_PROC(_net_inet_raw, OID_AUTO /*XXX*/, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
1257
#if !CONFIG_EMBEDDED

/*
 * Sysctl handler for net.inet.raw.pcblist64: identical in structure to
 * rip_pcblist() but exports "struct xinpcb64" records (64-bit layout)
 * instead of the compat "struct xinpcb".  Two "struct xinpgen" headers
 * bracket the output so userland can detect concurrent modification.
 * Returns 0 on success, EPERM for attempted writes, ENOMEM/copyout errno
 * on failure.  ripcbinfo.ipi_lock is held exclusively for the duration.
 */
static int
rip_pcblist64 SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
	if (req->oldptr == USER_ADDR_NULL) {
		/* Size probe only: report an estimate with ~12% slack for
		 * pcbs created between this probe and the real read. */
		n = ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n / 8) * sizeof(struct xinpcb64);
		lck_rw_done(ripcbinfo.ipi_lock);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		/* Read-only sysctl: reject writes */
		lck_rw_done(ripcbinfo.ipi_lock);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = ripcbinfo.ipi_gencnt;
	n = ripcbinfo.ipi_count;

	/* Leading xinpgen: count and generation at the start of the scan */
	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return 0;
	}

	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return ENOMEM;
	}

	/* Collect at most n pcbs that existed at snapshot time and are
	 * not already dead; newer pcbs (gencnt > snapshot) are skipped. */
	for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n;
	    inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			inp_list[i++] = inp;
		}
	}
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		/* Re-check liveness (defensive; ipi_lock is still held) */
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			struct xinpcb64 xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			inpcb_to_xinpcb64(inp, &xi);
			if (inp->inp_socket) {
				sotoxsocket64(inp->inp_socket, &xi.xi_socket);
			}
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof xig;
		xig.xig_gen = ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = ripcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	FREE(inp_list, M_TEMP);
	lck_rw_done(ripcbinfo.ipi_lock);
	return error;
}

/* Read-only sysctl node net.inet.raw.pcblist64, served by rip_pcblist64() */
SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist64,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist64, "S,xinpcb64", "List of active raw IP sockets");

#endif /* !CONFIG_EMBEDDED */
1365
1366
1367 static int
1368 rip_pcblist_n SYSCTL_HANDLER_ARGS
1369 {
1370 #pragma unused(oidp, arg1, arg2)
1371 int error = 0;
1372
1373 error = get_pcblist_n(IPPROTO_IP, req, &ripcbinfo);
1374
1375 return error;
1376 }
1377
/* Read-only sysctl node net.inet.raw.pcblist_n, served by rip_pcblist_n() */
SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist_n,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist_n, "S,xinpcb_n", "List of active raw IP sockets");
1381
/*
 * User-request dispatch table for raw IP sockets: wires each socket-layer
 * operation to its rip_* implementation, falling back to the generic
 * in_* / so* routines where raw IP needs no special handling.
 */
struct pr_usrreqs rip_usrreqs = {
	.pru_abort = rip_abort,
	.pru_attach = rip_attach,
	.pru_bind = rip_bind,
	.pru_connect = rip_connect,
	.pru_control = in_control,
	.pru_detach = rip_detach,
	.pru_disconnect = rip_disconnect,
	.pru_peeraddr = in_getpeeraddr,
	.pru_send = rip_send,
	.pru_shutdown = rip_shutdown,
	.pru_sockaddr = in_getsockaddr,
	.pru_sosend = sosend,
	.pru_soreceive = soreceive,
};
1397 /* DSEP Review Done pl-20051213-v02 @3253 */