/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <libkern/OSAtomic.h>
#include <kern/zalloc.h>

#include <pexpert/pexpert.h>

#include <net/if.h>
#include <net/net_api_stats.h>
#include <net/route.h>

#define _IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_tclass.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

#if INET6
#include <netinet6/in6_pcb.h>
#endif /* INET6 */

#include <netinet/ip_fw.h>

#if IPSEC
#include <netinet6/ipsec.h>
#endif /* IPSEC */

#if DUMMYNET
#include <netinet/ip_dummynet.h>
#endif

#if CONFIG_MACF_NET
#include <security/mac_framework.h>
#endif /* CONFIG_MACF_NET */

int load_ipfw(void);
int rip_detach(struct socket *);
int rip_abort(struct socket *);
int rip_disconnect(struct socket *);
int rip_bind(struct socket *, struct sockaddr *, struct proc *);
int rip_connect(struct socket *, struct sockaddr *, struct proc *);
int rip_shutdown(struct socket *);

struct inpcbhead ripcb;
struct inpcbinfo ripcbinfo;

/* control hooks for ipfw and dummynet */
#if IPFIREWALL
ip_fw_ctl_t *ip_fw_ctl_ptr;
#endif /* IPFIREWALL */
#if DUMMYNET
ip_dn_ctl_t *ip_dn_ctl_ptr;
#endif /* DUMMYNET */

/*
 * Nominal space allocated to a raw ip socket.
 */
#define RIPSNDQ 8192
#define RIPRCVQ 8192
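
/*
 * These are only the initial defaults: the effective limits live in
 * rip_sendspace/rip_recvspace below and are tunable at run time through
 * the net.inet.raw.maxdgram and net.inet.raw.recvspace sysctls declared
 * later in this file.
 */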

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize raw connection block q.
 */
void
rip_init(struct protosw *pp, struct domain *dp)
{
#pragma unused(dp)
    static int rip_initialized = 0;
    struct inpcbinfo *pcbinfo;

    VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);

    if (rip_initialized) {
        return;
    }
    rip_initialized = 1;

    LIST_INIT(&ripcb);
    ripcbinfo.ipi_listhead = &ripcb;
    /*
     * XXX We don't use the hash list for raw IP, but it's easier
     * to allocate a one entry hash list than it is to check all
     * over the place for ipi_hashbase == NULL.
     */
    ripcbinfo.ipi_hashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_hashmask);
    ripcbinfo.ipi_porthashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_porthashmask);

    ripcbinfo.ipi_zone = zinit(sizeof(struct inpcb),
        (4096 * sizeof(struct inpcb)), 4096, "ripzone");

    pcbinfo = &ripcbinfo;
    /*
     * Allocate the lock group attribute and group for the raw IP PCB mutexes.
     */
    pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init();
    pcbinfo->ipi_lock_grp = lck_grp_alloc_init("ripcb", pcbinfo->ipi_lock_grp_attr);

    /*
     * Allocate the lock attribute for the raw IP PCB mutexes.
     */
    pcbinfo->ipi_lock_attr = lck_attr_alloc_init();
    if ((pcbinfo->ipi_lock = lck_rw_alloc_init(pcbinfo->ipi_lock_grp,
        pcbinfo->ipi_lock_attr)) == NULL) {
        panic("%s: unable to allocate PCB lock\n", __func__);
        /* NOTREACHED */
    }

    in_pcbinfo_attach(&ripcbinfo);
}

static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET, 0, {0}, {0, 0, 0, 0, 0, 0, 0, 0, } };
/*
 * Setup generic address and protocol structures
 * for raw_input routine, then pass them along with
 * mbuf chain.
 */
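/*
 * Delivery model (summarizing the loop below): every PCB on ripcb whose
 * protocol, local address, and foreign address are compatible with the
 * packet receives it.  Each match except the last is handed an m_copy()
 * of the mbuf chain; the final match consumes the original.  NECP and
 * MACF may each veto delivery to an individual socket, and a packet that
 * matches no socket at all is counted in ips_noproto.
 */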
void
rip_input(struct mbuf *m, int iphlen)
{
    struct ip *ip = mtod(m, struct ip *);
    struct inpcb *inp;
    struct inpcb *last = 0;
    struct mbuf *opts = 0;
    int skipit = 0, ret = 0;
    struct ifnet *ifp = m->m_pkthdr.rcvif;

    /* Expect 32-bit aligned data pointer on strict-align platforms */
    MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

    ripsrc.sin_addr = ip->ip_src;
    lck_rw_lock_shared(ripcbinfo.ipi_lock);
    LIST_FOREACH(inp, &ripcb, inp_list) {
#if INET6
        if ((inp->inp_vflag & INP_IPV4) == 0) {
            continue;
        }
#endif
        if (inp->inp_ip_p && (inp->inp_ip_p != ip->ip_p)) {
            continue;
        }
        if (inp->inp_laddr.s_addr &&
            inp->inp_laddr.s_addr != ip->ip_dst.s_addr) {
            continue;
        }
        if (inp->inp_faddr.s_addr &&
            inp->inp_faddr.s_addr != ip->ip_src.s_addr) {
            continue;
        }
        if (inp_restricted_recv(inp, ifp)) {
            continue;
        }
        if (last) {
            struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);

            skipit = 0;

#if NECP
            if (n && !necp_socket_is_allowed_to_send_recv_v4(last, 0, 0,
                &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) {
                m_freem(n);
                /* do not inject data to pcb */
                skipit = 1;
            }
#endif /* NECP */
#if CONFIG_MACF_NET
            if (n && skipit == 0) {
                if (mac_inpcb_check_deliver(last, n, AF_INET,
                    SOCK_RAW) != 0) {
                    m_freem(n);
                    skipit = 1;
                }
            }
#endif
            if (n && skipit == 0) {
                int error = 0;
                if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
                    (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
                    (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 ||
                    (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
                    ret = ip_savecontrol(last, &opts, ip, n);
                    if (ret != 0) {
                        m_freem(n);
                        m_freem(opts);
                        last = inp;
                        continue;
                    }
                }
                if (last->inp_flags & INP_STRIPHDR) {
                    n->m_len -= iphlen;
                    n->m_pkthdr.len -= iphlen;
                    n->m_data += iphlen;
                }
                so_recv_data_stat(last->inp_socket, m, 0);
                if (sbappendaddr(&last->inp_socket->so_rcv,
                    (struct sockaddr *)&ripsrc, n,
                    opts, &error) != 0) {
                    sorwakeup(last->inp_socket);
                } else {
                    if (error) {
                        /* should notify about lost packet */
                        ipstat.ips_raw_sappend_fail++;
                    }
                }
                opts = 0;
            }
        }
        last = inp;
    }

    skipit = 0;
#if NECP
    if (last && !necp_socket_is_allowed_to_send_recv_v4(last, 0, 0,
        &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) {
        m_freem(m);
        OSAddAtomic(1, &ipstat.ips_delivered);
        /* do not inject data to pcb */
        skipit = 1;
    }
#endif /* NECP */
#if CONFIG_MACF_NET
    if (last && skipit == 0) {
        if (mac_inpcb_check_deliver(last, m, AF_INET, SOCK_RAW) != 0) {
            skipit = 1;
            m_freem(m);
        }
    }
#endif
    if (skipit == 0) {
        if (last) {
            if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
                (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
                (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 ||
                (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
                ret = ip_savecontrol(last, &opts, ip, m);
                if (ret != 0) {
                    m_freem(m);
                    m_freem(opts);
                    goto unlock;
                }
            }
            if (last->inp_flags & INP_STRIPHDR) {
                m->m_len -= iphlen;
                m->m_pkthdr.len -= iphlen;
                m->m_data += iphlen;
            }
            so_recv_data_stat(last->inp_socket, m, 0);
            if (sbappendaddr(&last->inp_socket->so_rcv,
                (struct sockaddr *)&ripsrc, m, opts, NULL) != 0) {
                sorwakeup(last->inp_socket);
            } else {
                ipstat.ips_raw_sappend_fail++;
            }
        } else {
            m_freem(m);
            OSAddAtomic(1, &ipstat.ips_noproto);
            OSAddAtomic(-1, &ipstat.ips_delivered);
        }
    }
unlock:
    /*
     * Keep the list locked because socket filter may force the socket lock
     * to be released when calling sbappendaddr() -- see rdar://7627704
     */
    lck_rw_done(ripcbinfo.ipi_lock);
}

/*
 * Generate IP header and pass packet to ip_output.
 * Tack on options user may have setup with control call.
 */
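/*
 * Two cases below: if IP_HDRINCL is not set on the socket, the kernel
 * prepends and fills in the IP header itself; if it is set, the caller
 * supplies the complete header and it is only sanity-checked (header
 * length vs. options, ip_len vs. the actual chain length).  A minimal
 * userland sketch of the header-included mode (illustrative only, not
 * part of this file):
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *	int one = 1;
 *	setsockopt(s, IPPROTO_IP, IP_HDRINCL, &one, sizeof(one));
 *	// buf must now begin with a fully formed struct ip
 *	sendto(s, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */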
int
rip_output(
    struct mbuf *m,
    struct socket *so,
    u_int32_t dst,
    struct mbuf *control)
{
    struct ip *ip;
    struct inpcb *inp = sotoinpcb(so);
    int flags = (so->so_options & SO_DONTROUTE) | IP_ALLOWBROADCAST;
    struct ip_out_args ipoa;
    struct ip_moptions *imo;
    int error = 0;

    bzero(&ipoa, sizeof(ipoa));
    ipoa.ipoa_boundif = IFSCOPE_NONE;
    ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;

    int sotc = SO_TC_UNSPEC;
    int netsvctype = _NET_SERVICE_TYPE_UNSPEC;

    if (control != NULL) {
        sotc = so_tc_from_control(control, &netsvctype);

        m_freem(control);
        control = NULL;
    }
    if (sotc == SO_TC_UNSPEC) {
        sotc = so->so_traffic_class;
        netsvctype = so->so_netsvctype;
    }

    if (inp == NULL
#if NECP
        || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
        ) {
        if (m != NULL) {
            m_freem(m);
        }
        VERIFY(control == NULL);
        return inp == NULL ? EINVAL : EPROTOTYPE;
    }

    flags |= IP_OUTARGS;
    /* If socket was bound to an ifindex, tell ip_output about it */
    if (inp->inp_flags & INP_BOUND_IF) {
        ipoa.ipoa_boundif = inp->inp_boundifp->if_index;
        ipoa.ipoa_flags |= IPOAF_BOUND_IF;
    }
    if (INP_NO_CELLULAR(inp)) {
        ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
    }
    if (INP_NO_EXPENSIVE(inp)) {
        ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
    }
    if (INP_AWDL_UNRESTRICTED(inp)) {
        ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
    }
    ipoa.ipoa_sotc = sotc;
    ipoa.ipoa_netsvctype = netsvctype;

    if (inp->inp_flowhash == 0) {
        inp->inp_flowhash = inp_calc_flowhash(inp);
    }

    /*
     * If the user handed us a complete IP packet, use it.
     * Otherwise, allocate an mbuf for a header and fill it in.
     */
    if ((inp->inp_flags & INP_HDRINCL) == 0) {
        if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
            m_freem(m);
            return EMSGSIZE;
        }
        M_PREPEND(m, sizeof(struct ip), M_WAIT, 1);
        if (m == NULL) {
            return ENOBUFS;
        }
        ip = mtod(m, struct ip *);
        ip->ip_tos = inp->inp_ip_tos;
        ip->ip_off = 0;
        ip->ip_p = inp->inp_ip_p;
        ip->ip_len = m->m_pkthdr.len;
        ip->ip_src = inp->inp_laddr;
        ip->ip_dst.s_addr = dst;
        ip->ip_ttl = inp->inp_ip_ttl;
    } else {
        if (m->m_pkthdr.len > IP_MAXPACKET) {
            m_freem(m);
            return EMSGSIZE;
        }
        ip = mtod(m, struct ip *);
        /*
         * Don't allow both user-specified and setsockopt options,
         * and don't allow packet length sizes that will crash.
         */
        if (((IP_VHL_HL(ip->ip_vhl) != (sizeof(*ip) >> 2))
            && inp->inp_options)
            || (ip->ip_len > m->m_pkthdr.len)
            || (ip->ip_len < (IP_VHL_HL(ip->ip_vhl) << 2))) {
            m_freem(m);
            return EINVAL;
        }
        if (ip->ip_id == 0 && !(rfc6864 && IP_OFF_IS_ATOMIC(ntohs(ip->ip_off)))) {
            ip->ip_id = ip_randomid();
        }
        /* XXX prevent ip_output from overwriting header fields */
        flags |= IP_RAWOUTPUT;
        OSAddAtomic(1, &ipstat.ips_rawout);
    }

    if (inp->inp_laddr.s_addr != INADDR_ANY) {
        ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;
    }

#if NECP
    {
        necp_kernel_policy_id policy_id;
        necp_kernel_policy_id skip_policy_id;
        u_int32_t route_rule_id;

        /*
         * We need a route to perform NECP route rule checks
         */
        if (net_qos_policy_restricted != 0 &&
            ROUTE_UNUSABLE(&inp->inp_route)) {
            struct sockaddr_in to;
            struct sockaddr_in from;
            struct in_addr laddr = ip->ip_src;

            ROUTE_RELEASE(&inp->inp_route);

            bzero(&from, sizeof(struct sockaddr_in));
            from.sin_family = AF_INET;
            from.sin_len = sizeof(struct sockaddr_in);
            from.sin_addr = laddr;

            bzero(&to, sizeof(struct sockaddr_in));
            to.sin_family = AF_INET;
            to.sin_len = sizeof(struct sockaddr_in);
            to.sin_addr.s_addr = ip->ip_dst.s_addr;

            if ((error = in_pcbladdr(inp, (struct sockaddr *)&to,
                &laddr, ipoa.ipoa_boundif, NULL, 1)) != 0) {
                printf("%s in_pcbladdr(%p) error %d\n",
                    __func__, inp, error);
                m_freem(m);
                return error;
            }

            inp_update_necp_policy(inp, (struct sockaddr *)&from,
                (struct sockaddr *)&to, ipoa.ipoa_boundif);
            inp->inp_policyresult.results.qos_marking_gencount = 0;
        }

        if (!necp_socket_is_allowed_to_send_recv_v4(inp, 0, 0,
            &ip->ip_src, &ip->ip_dst, NULL, &policy_id, &route_rule_id, &skip_policy_id)) {
            m_freem(m);
            return EHOSTUNREACH;
        }

        necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id);

        if (net_qos_policy_restricted != 0) {
            struct ifnet *rt_ifp = NULL;

            if (inp->inp_route.ro_rt != NULL) {
                rt_ifp = inp->inp_route.ro_rt->rt_ifp;
            }

            necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt,
                NULL, route_rule_id);
        }
    }
#endif /* NECP */
    if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) {
        ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;
    }

#if IPSEC
    if (inp->inp_sp != NULL && ipsec_setsocket(m, so) != 0) {
        m_freem(m);
        return ENOBUFS;
    }
#endif /* IPSEC */

    if (ROUTE_UNUSABLE(&inp->inp_route)) {
        ROUTE_RELEASE(&inp->inp_route);
    }

    set_packet_service_class(m, so, sotc, 0);
    m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
    m->m_pkthdr.pkt_flowid = inp->inp_flowhash;
    m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC |
        PKTF_FLOW_RAWSOCK);
    m->m_pkthdr.pkt_proto = inp->inp_ip_p;
    m->m_pkthdr.tx_rawip_pid = so->last_pid;
    /* Record the effective pid: the delegate's if the socket is delegated */
    if (so->so_flags & SOF_DELEGATED) {
        m->m_pkthdr.tx_rawip_e_pid = so->e_pid;
    } else {
        m->m_pkthdr.tx_rawip_e_pid = 0;
    }

#if CONFIG_MACF_NET
    mac_mbuf_label_associate_inpcb(inp, m);
#endif

    imo = inp->inp_moptions;
    if (imo != NULL) {
        IMO_ADDREF(imo);
    }
    /*
     * The domain lock is held across ip_output, so it is okay
     * to pass the PCB cached route pointer directly to IP and
     * the modules beneath it.
     */
    // TODO: PASS DOWN ROUTE RULE ID
    error = ip_output(m, inp->inp_options, &inp->inp_route, flags,
        imo, &ipoa);

    if (imo != NULL) {
        IMO_REMREF(imo);
    }

    if (inp->inp_route.ro_rt != NULL) {
        struct rtentry *rt = inp->inp_route.ro_rt;
        struct ifnet *outif;

        if ((rt->rt_flags & (RTF_MULTICAST | RTF_BROADCAST)) ||
            inp->inp_socket == NULL ||
            !(inp->inp_socket->so_state & SS_ISCONNECTED)) {
            rt = NULL; /* unusable */
        }
        /*
         * Always discard the cached route for unconnected
         * socket or if it is a multicast route.
         */
        if (rt == NULL) {
            ROUTE_RELEASE(&inp->inp_route);
        }

        /*
         * If this is a connected socket and the destination
         * route is unicast, update outif with that of the
         * route interface used by IP.
         */
        if (rt != NULL &&
            (outif = rt->rt_ifp) != inp->inp_last_outifp) {
            inp->inp_last_outifp = outif;
        }
    } else {
        ROUTE_RELEASE(&inp->inp_route);
    }

    /*
     * If output interface was cellular/expensive, and this socket is
     * denied access to it, generate an event.
     */
    if (error != 0 && (ipoa.ipoa_retflags & IPOARF_IFDENIED) &&
        (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp))) {
        soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED));
    }

    return error;
}

#if IPFIREWALL
int
load_ipfw(void)
{
    kern_return_t err;

    ipfw_init();

#if DUMMYNET
    if (!DUMMYNET_LOADED) {
        ip_dn_init();
    }
#endif /* DUMMYNET */
    err = 0;

    return err == 0 && ip_fw_ctl_ptr == NULL ? -1 : err;
}
#endif /* IPFIREWALL */

/*
 * Raw IP socket option processing.
 */
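/*
 * Options handled here directly: IP_HDRINCL and IP_STRIPHDR (raw-IP
 * specific flags on the PCB), the ipfw/dummynet control points when those
 * are compiled in, and SO_FLUSH; everything else falls through to
 * ip_ctloutput().  An illustrative userland use of IP_STRIPHDR (a sketch,
 * not part of this file): with the option set, reads on the raw socket
 * return the payload with the IP header already removed:
 *
 *	int one = 1;
 *	setsockopt(s, IPPROTO_IP, IP_STRIPHDR, &one, sizeof(one));
 */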
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
    struct inpcb *inp = sotoinpcb(so);
    int error, optval;

    /* Allow <SOL_SOCKET,SO_FLUSH> at this level */
    if (sopt->sopt_level != IPPROTO_IP &&
        !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH)) {
        return EINVAL;
    }

    error = 0;

    switch (sopt->sopt_dir) {
    case SOPT_GET:
        switch (sopt->sopt_name) {
        case IP_HDRINCL:
            optval = inp->inp_flags & INP_HDRINCL;
            error = sooptcopyout(sopt, &optval, sizeof optval);
            break;

        case IP_STRIPHDR:
            optval = inp->inp_flags & INP_STRIPHDR;
            error = sooptcopyout(sopt, &optval, sizeof optval);
            break;

#if IPFIREWALL
        case IP_FW_ADD:
        case IP_FW_GET:
        case IP_OLD_FW_ADD:
        case IP_OLD_FW_GET:
            if (ip_fw_ctl_ptr == 0) {
                error = load_ipfw();
            }
            if (ip_fw_ctl_ptr && error == 0) {
                error = ip_fw_ctl_ptr(sopt);
            } else {
                error = ENOPROTOOPT;
            }
            break;
#endif /* IPFIREWALL */

#if DUMMYNET
        case IP_DUMMYNET_GET:
            if (!DUMMYNET_LOADED) {
                ip_dn_init();
            }
            if (DUMMYNET_LOADED) {
                error = ip_dn_ctl_ptr(sopt);
            } else {
                error = ENOPROTOOPT;
            }
            break;
#endif /* DUMMYNET */

        default:
            error = ip_ctloutput(so, sopt);
            break;
        }
        break;

    case SOPT_SET:
        switch (sopt->sopt_name) {
        case IP_HDRINCL:
            error = sooptcopyin(sopt, &optval, sizeof optval,
                sizeof optval);
            if (error) {
                break;
            }
            if (optval) {
                inp->inp_flags |= INP_HDRINCL;
            } else {
                inp->inp_flags &= ~INP_HDRINCL;
            }
            break;

        case IP_STRIPHDR:
            error = sooptcopyin(sopt, &optval, sizeof optval,
                sizeof optval);
            if (error) {
                break;
            }
            if (optval) {
                inp->inp_flags |= INP_STRIPHDR;
            } else {
                inp->inp_flags &= ~INP_STRIPHDR;
            }
            break;

#if IPFIREWALL
        case IP_FW_ADD:
        case IP_FW_DEL:
        case IP_FW_FLUSH:
        case IP_FW_ZERO:
        case IP_FW_RESETLOG:
        case IP_OLD_FW_ADD:
        case IP_OLD_FW_DEL:
        case IP_OLD_FW_FLUSH:
        case IP_OLD_FW_ZERO:
        case IP_OLD_FW_RESETLOG:
            if (ip_fw_ctl_ptr == 0) {
                error = load_ipfw();
            }
            if (ip_fw_ctl_ptr && error == 0) {
                error = ip_fw_ctl_ptr(sopt);
            } else {
                error = ENOPROTOOPT;
            }
            break;
#endif /* IPFIREWALL */

#if DUMMYNET
        case IP_DUMMYNET_CONFIGURE:
        case IP_DUMMYNET_DEL:
        case IP_DUMMYNET_FLUSH:
            if (!DUMMYNET_LOADED) {
                ip_dn_init();
            }
            if (DUMMYNET_LOADED) {
                error = ip_dn_ctl_ptr(sopt);
            } else {
                error = ENOPROTOOPT;
            }
            break;
#endif

        case SO_FLUSH:
            if ((error = sooptcopyin(sopt, &optval, sizeof(optval),
                sizeof(optval))) != 0) {
                break;
            }

            error = inp_flush(inp, optval);
            break;

        default:
            error = ip_ctloutput(so, sopt);
            break;
        }
        break;
    }

    return error;
}

/*
 * This function exists solely to receive the PRC_IFDOWN messages which
 * are sent by if_down().  It looks for an ifaddr whose ifa_addr is sa,
 * and calls in_ifadown() to remove all routes corresponding to that address.
 * It also receives the PRC_IFUP messages from if_up() and reinstalls the
 * interface routes.
 */
void
rip_ctlinput(
    int cmd,
    struct sockaddr *sa,
    __unused void *vip,
    __unused struct ifnet *ifp)
{
    struct in_ifaddr *ia = NULL;
    struct ifnet *iaifp = NULL;
    int err = 0;
    int flags, done = 0;

    switch (cmd) {
    case PRC_IFDOWN:
        lck_rw_lock_shared(in_ifaddr_rwlock);
        for (ia = in_ifaddrhead.tqh_first; ia;
            ia = ia->ia_link.tqe_next) {
            IFA_LOCK(&ia->ia_ifa);
            if (ia->ia_ifa.ifa_addr == sa &&
                (ia->ia_flags & IFA_ROUTE)) {
                done = 1;
                IFA_ADDREF_LOCKED(&ia->ia_ifa);
                IFA_UNLOCK(&ia->ia_ifa);
                lck_rw_done(in_ifaddr_rwlock);
                lck_mtx_lock(rnh_lock);
                /*
                 * in_ifscrub kills the interface route.
                 */
                in_ifscrub(ia->ia_ifp, ia, 1);
                /*
                 * in_ifadown gets rid of all the rest of
                 * the routes.  This is not quite the right
                 * thing to do, but at least if we are running
                 * a routing process they will come back.
                 */
                in_ifadown(&ia->ia_ifa, 1);
                lck_mtx_unlock(rnh_lock);
                IFA_REMREF(&ia->ia_ifa);
                break;
            }
            IFA_UNLOCK(&ia->ia_ifa);
        }
        if (!done) {
            lck_rw_done(in_ifaddr_rwlock);
        }
        break;

    case PRC_IFUP:
        lck_rw_lock_shared(in_ifaddr_rwlock);
        for (ia = in_ifaddrhead.tqh_first; ia;
            ia = ia->ia_link.tqe_next) {
            IFA_LOCK(&ia->ia_ifa);
            if (ia->ia_ifa.ifa_addr == sa) {
                /* keep it locked */
                break;
            }
            IFA_UNLOCK(&ia->ia_ifa);
        }
        if (ia == NULL || (ia->ia_flags & IFA_ROUTE) ||
            (ia->ia_ifa.ifa_debug & IFD_NOTREADY)) {
            if (ia != NULL) {
                IFA_UNLOCK(&ia->ia_ifa);
            }
            lck_rw_done(in_ifaddr_rwlock);
            return;
        }
        IFA_ADDREF_LOCKED(&ia->ia_ifa);
        IFA_UNLOCK(&ia->ia_ifa);
        lck_rw_done(in_ifaddr_rwlock);

        flags = RTF_UP;
        iaifp = ia->ia_ifa.ifa_ifp;

        if ((iaifp->if_flags & IFF_LOOPBACK)
            || (iaifp->if_flags & IFF_POINTOPOINT)) {
            flags |= RTF_HOST;
        }

        err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
        if (err == 0) {
            IFA_LOCK_SPIN(&ia->ia_ifa);
            ia->ia_flags |= IFA_ROUTE;
            IFA_UNLOCK(&ia->ia_ifa);
        }
        IFA_REMREF(&ia->ia_ifa);
        break;
    }
}

u_int32_t rip_sendspace = RIPSNDQ;
u_int32_t rip_recvspace = RIPRCVQ;

SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW | CTLFLAG_LOCKED,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_INT(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &rip_recvspace, 0, "Maximum incoming raw IP datagram size");
SYSCTL_UINT(_net_inet_raw, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
    &ripcbinfo.ipi_count, 0, "Number of active PCBs");

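/*
 * Attach succeeds only for privileged sockets (SS_PRIV, set when the
 * socket was created with superuser credentials): raw IP gives the caller
 * direct access to the wire, so unprivileged processes are refused with
 * EPERM before any buffer space or PCB is reserved.
 */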
static int
rip_attach(struct socket *so, int proto, struct proc *p)
{
    struct inpcb *inp;
    int error;

    inp = sotoinpcb(so);
    if (inp) {
        panic("rip_attach");
    }
    if ((so->so_state & SS_PRIV) == 0) {
        return EPERM;
    }

    error = soreserve(so, rip_sendspace, rip_recvspace);
    if (error) {
        return error;
    }
    error = in_pcballoc(so, &ripcbinfo, p);
    if (error) {
        return error;
    }
    inp = (struct inpcb *)so->so_pcb;
    inp->inp_vflag |= INP_IPV4;
    inp->inp_ip_p = proto;
    inp->inp_ip_ttl = ip_defttl;
    return 0;
}

__private_extern__ int
rip_detach(struct socket *so)
{
    struct inpcb *inp;

    inp = sotoinpcb(so);
    if (inp == 0) {
        panic("rip_detach");
    }
    in_pcbdetach(inp);
    return 0;
}

__private_extern__ int
rip_abort(struct socket *so)
{
    soisdisconnected(so);
    return rip_detach(so);
}

__private_extern__ int
rip_disconnect(struct socket *so)
{
    if ((so->so_state & SS_ISCONNECTED) == 0) {
        return ENOTCONN;
    }
    return rip_abort(so);
}

__private_extern__ int
rip_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
#pragma unused(p)
    struct inpcb *inp = sotoinpcb(so);
    struct sockaddr_in sin;
    struct ifaddr *ifa = NULL;
    struct ifnet *outif = NULL;

    if (inp == NULL
#if NECP
        || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
        ) {
        return inp == NULL ? EINVAL : EPROTOTYPE;
    }

    if (nam->sa_len != sizeof(struct sockaddr_in)) {
        return EINVAL;
    }

    /* Sanitized local copy for interface address searches */
    bzero(&sin, sizeof(sin));
    sin.sin_family = AF_INET;
    sin.sin_len = sizeof(struct sockaddr_in);
    sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;

    if (TAILQ_EMPTY(&ifnet_head) ||
        (sin.sin_family != AF_INET && sin.sin_family != AF_IMPLINK) ||
        (sin.sin_addr.s_addr && (ifa = ifa_ifwithaddr(SA(&sin))) == 0)) {
        return EADDRNOTAVAIL;
    } else if (ifa) {
        /*
         * Opportunistically determine the outbound
         * interface that may be used; this may not
         * hold true if we end up using a route
         * going over a different interface, e.g.
         * when sending to a local address.  This
         * will get updated again after sending.
         */
        IFA_LOCK(ifa);
        outif = ifa->ifa_ifp;
        IFA_UNLOCK(ifa);
        IFA_REMREF(ifa);
    }
    inp->inp_laddr = sin.sin_addr;
    inp->inp_last_outifp = outif;

    return 0;
}

__private_extern__ int
rip_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
{
    struct inpcb *inp = sotoinpcb(so);
    struct sockaddr_in *addr = (struct sockaddr_in *)(void *)nam;

    if (inp == NULL
#if NECP
        || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
        ) {
        return inp == NULL ? EINVAL : EPROTOTYPE;
    }
    if (nam->sa_len != sizeof(*addr)) {
        return EINVAL;
    }
    if (TAILQ_EMPTY(&ifnet_head)) {
        return EADDRNOTAVAIL;
    }
    if ((addr->sin_family != AF_INET) &&
        (addr->sin_family != AF_IMPLINK)) {
        return EAFNOSUPPORT;
    }

    if (!(so->so_flags1 & SOF1_CONNECT_COUNTED)) {
        so->so_flags1 |= SOF1_CONNECT_COUNTED;
        INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_connected);
    }

    inp->inp_faddr = addr->sin_addr;
    soisconnected(so);

    return 0;
}

__private_extern__ int
rip_shutdown(struct socket *so)
{
    socantsendmore(so);
    return 0;
}

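/*
 * Send semantics (see the checks below): on a connected socket the
 * destination is the cached inp_faddr and supplying an address yields
 * EISCONN; on an unconnected socket an address is mandatory and its
 * absence yields ENOTCONN.  All the real work happens in rip_output().
 */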
__private_extern__ int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct proc *p)
{
#pragma unused(flags, p)
    struct inpcb *inp = sotoinpcb(so);
    u_int32_t dst;
    int error = 0;

    if (inp == NULL
#if NECP
        || (necp_socket_should_use_flow_divert(inp) && (error = EPROTOTYPE))
#endif /* NECP */
        ) {
        if (inp == NULL) {
            error = EINVAL;
        } else {
            error = EPROTOTYPE;
        }
        goto bad;
    }

    if (so->so_state & SS_ISCONNECTED) {
        if (nam != NULL) {
            error = EISCONN;
            goto bad;
        }
        dst = inp->inp_faddr.s_addr;
    } else {
        if (nam == NULL) {
            error = ENOTCONN;
            goto bad;
        }
        dst = ((struct sockaddr_in *)(void *)nam)->sin_addr.s_addr;
    }
    return rip_output(m, so, dst, control);

bad:
    VERIFY(error != 0);

    if (m != NULL) {
        m_freem(m);
    }
    if (control != NULL) {
        m_freem(control);
    }

    return error;
}

/*
 * Note: rip_unlock is called by various protocols in place of the generic
 * socket_unlock; it handles the socket deallocation on the last reference.
 */
int
rip_unlock(struct socket *so, int refcount, void *debug)
{
    void *lr_saved;
    struct inpcb *inp = sotoinpcb(so);

    if (debug == NULL) {
        lr_saved = __builtin_return_address(0);
    } else {
        lr_saved = debug;
    }

    if (refcount) {
        if (so->so_usecount <= 0) {
            panic("rip_unlock: bad refcount so=%p val=%x lrh= %s\n",
                so, so->so_usecount, solockhistory_nr(so));
            /* NOTREACHED */
        }
        so->so_usecount--;
        if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) {
            /* cleanup after last reference */
            lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
            lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
            if (inp->inp_state != INPCB_STATE_DEAD) {
#if INET6
                if (SOCK_CHECK_DOM(so, PF_INET6)) {
                    in6_pcbdetach(inp);
                } else
#endif /* INET6 */
                in_pcbdetach(inp);
            }
            in_pcbdispose(inp);
            lck_rw_done(ripcbinfo.ipi_lock);
            return 0;
        }
    }
    so->unlock_lr[so->next_unlock_lr] = lr_saved;
    so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
    lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
    return 0;
}

static int
rip_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error, i, n;
    struct inpcb *inp, **inp_list;
    inp_gen_t gencnt;
    struct xinpgen xig;

    /*
     * The process of preparing the PCB list is too time-consuming and
     * resource-intensive to repeat twice on every request.
     */
    lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
    if (req->oldptr == USER_ADDR_NULL) {
        n = ripcbinfo.ipi_count;
        req->oldidx = 2 * (sizeof xig)
            + (n + n / 8) * sizeof(struct xinpcb);
        lck_rw_done(ripcbinfo.ipi_lock);
        return 0;
    }

    if (req->newptr != USER_ADDR_NULL) {
        lck_rw_done(ripcbinfo.ipi_lock);
        return EPERM;
    }

    /*
     * OK, now we're committed to doing something.
     */
    gencnt = ripcbinfo.ipi_gencnt;
    n = ripcbinfo.ipi_count;

    bzero(&xig, sizeof(xig));
    xig.xig_len = sizeof xig;
    xig.xig_count = n;
    xig.xig_gen = gencnt;
    xig.xig_sogen = so_gencnt;
    error = SYSCTL_OUT(req, &xig, sizeof xig);
    if (error) {
        lck_rw_done(ripcbinfo.ipi_lock);
        return error;
    }
    /*
     * We are done if there is no pcb
     */
    if (n == 0) {
        lck_rw_done(ripcbinfo.ipi_lock);
        return 0;
    }

    inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
    if (inp_list == 0) {
        lck_rw_done(ripcbinfo.ipi_lock);
        return ENOMEM;
    }

    for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n;
        inp = inp->inp_list.le_next) {
        if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
            inp_list[i++] = inp;
        }
    }
    n = i;

    error = 0;
    for (i = 0; i < n; i++) {
        inp = inp_list[i];
        if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
            struct xinpcb xi;

            bzero(&xi, sizeof(xi));
            xi.xi_len = sizeof xi;
            /* XXX should avoid extra copy */
            inpcb_to_compat(inp, &xi.xi_inp);
            if (inp->inp_socket) {
                sotoxsocket(inp->inp_socket, &xi.xi_socket);
            }
            error = SYSCTL_OUT(req, &xi, sizeof xi);
        }
    }
    if (!error) {
        /*
         * Give the user an updated idea of our state.
         * If the generation differs from what we told
         * her before, she knows that something happened
         * while we were processing this request, and it
         * might be necessary to retry.
         */
        bzero(&xig, sizeof(xig));
        xig.xig_len = sizeof xig;
        xig.xig_gen = ripcbinfo.ipi_gencnt;
        xig.xig_sogen = so_gencnt;
        xig.xig_count = ripcbinfo.ipi_count;
        error = SYSCTL_OUT(req, &xig, sizeof xig);
    }
    FREE(inp_list, M_TEMP);
    lck_rw_done(ripcbinfo.ipi_lock);
    return error;
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO /*XXX*/, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
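/*
 * From userland this surfaces as the net.inet.raw.pcblist sysctl.  A
 * minimal reader (illustrative sketch, not part of this file) queries the
 * required size first, then fetches the xinpgen/xinpcb records:
 *
 *	size_t len = 0;
 *	sysctlbyname("net.inet.raw.pcblist", NULL, &len, NULL, 0);
 *	void *buf = malloc(len);
 *	sysctlbyname("net.inet.raw.pcblist", buf, &len, NULL, 0);
 */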

#if !CONFIG_EMBEDDED

static int
rip_pcblist64 SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error, i, n;
    struct inpcb *inp, **inp_list;
    inp_gen_t gencnt;
    struct xinpgen xig;

    /*
     * The process of preparing the PCB list is too time-consuming and
     * resource-intensive to repeat twice on every request.
     */
    lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
    if (req->oldptr == USER_ADDR_NULL) {
        n = ripcbinfo.ipi_count;
        req->oldidx = 2 * (sizeof xig)
            + (n + n / 8) * sizeof(struct xinpcb64);
        lck_rw_done(ripcbinfo.ipi_lock);
        return 0;
    }

    if (req->newptr != USER_ADDR_NULL) {
        lck_rw_done(ripcbinfo.ipi_lock);
        return EPERM;
    }

    /*
     * OK, now we're committed to doing something.
     */
    gencnt = ripcbinfo.ipi_gencnt;
    n = ripcbinfo.ipi_count;

    bzero(&xig, sizeof(xig));
    xig.xig_len = sizeof xig;
    xig.xig_count = n;
    xig.xig_gen = gencnt;
    xig.xig_sogen = so_gencnt;
    error = SYSCTL_OUT(req, &xig, sizeof xig);
    if (error) {
        lck_rw_done(ripcbinfo.ipi_lock);
        return error;
    }
    /*
     * We are done if there is no pcb
     */
    if (n == 0) {
        lck_rw_done(ripcbinfo.ipi_lock);
        return 0;
    }

    inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
    if (inp_list == 0) {
        lck_rw_done(ripcbinfo.ipi_lock);
        return ENOMEM;
    }

    for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n;
        inp = inp->inp_list.le_next) {
        if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
            inp_list[i++] = inp;
        }
    }
    n = i;

    error = 0;
    for (i = 0; i < n; i++) {
        inp = inp_list[i];
        if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
            struct xinpcb64 xi;

            bzero(&xi, sizeof(xi));
            xi.xi_len = sizeof xi;
            inpcb_to_xinpcb64(inp, &xi);
            if (inp->inp_socket) {
                sotoxsocket64(inp->inp_socket, &xi.xi_socket);
            }
            error = SYSCTL_OUT(req, &xi, sizeof xi);
        }
    }
    if (!error) {
        /*
         * Give the user an updated idea of our state.
         * If the generation differs from what we told
         * her before, she knows that something happened
         * while we were processing this request, and it
         * might be necessary to retry.
         */
        bzero(&xig, sizeof(xig));
        xig.xig_len = sizeof xig;
        xig.xig_gen = ripcbinfo.ipi_gencnt;
        xig.xig_sogen = so_gencnt;
        xig.xig_count = ripcbinfo.ipi_count;
        error = SYSCTL_OUT(req, &xig, sizeof xig);
    }
    FREE(inp_list, M_TEMP);
    lck_rw_done(ripcbinfo.ipi_lock);
    return error;
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist64,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist64, "S,xinpcb64", "List of active raw IP sockets");

#endif /* !CONFIG_EMBEDDED */

static int
rip_pcblist_n SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error = 0;

    error = get_pcblist_n(IPPROTO_IP, req, &ripcbinfo);

    return error;
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist_n,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist_n, "S,xinpcb_n", "List of active raw IP sockets");

struct pr_usrreqs rip_usrreqs = {
    .pru_abort = rip_abort,
    .pru_attach = rip_attach,
    .pru_bind = rip_bind,
    .pru_connect = rip_connect,
    .pru_control = in_control,
    .pru_detach = rip_detach,
    .pru_disconnect = rip_disconnect,
    .pru_peeraddr = in_getpeeraddr,
    .pru_send = rip_send,
    .pru_shutdown = rip_shutdown,
    .pru_sockaddr = in_getsockaddr,
    .pru_sosend = sosend,
    .pru_soreceive = soreceive,
};
/* DSEP Review Done pl-20051213-v02 @3253 */