apple/xnu (xnu-1504.9.37): bsd/netinet/raw_ip.c
1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)raw_ip.c 8.7 (Berkeley) 5/15/95
61 */
62 /*
63 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
64 * support for mandatory and extensible security protections. This notice
65 * is included in support of clause 2.2 (b) of the Apple Public License,
66 * Version 2.0.
67 */
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kernel.h>
72 #include <sys/malloc.h>
73 #include <sys/mbuf.h>
74 #include <sys/proc.h>
75 #include <sys/domain.h>
76 #include <sys/protosw.h>
77 #include <sys/socket.h>
78 #include <sys/socketvar.h>
79 #include <sys/sysctl.h>
80 #include <libkern/OSAtomic.h>
81 #include <kern/zalloc.h>
82
83 #include <pexpert/pexpert.h>
84
85 #include <net/if.h>
86 #include <net/route.h>
87
88 #define _IP_VHL
89 #include <netinet/in.h>
90 #include <netinet/in_systm.h>
91 #include <netinet/ip.h>
92 #include <netinet/in_pcb.h>
93 #include <netinet/in_var.h>
94 #include <netinet/ip_var.h>
95 #include <netinet/ip_mroute.h>
96
97 #if INET6
98 #include <netinet6/in6_pcb.h>
99 #endif /* INET6 */
100
101 #include <netinet/ip_fw.h>
102
103 #if IPSEC
104 #include <netinet6/ipsec.h>
105 #endif /*IPSEC*/
106
107 #if DUMMYNET
108 #include <netinet/ip_dummynet.h>
109 #endif
110
111 #if CONFIG_MACF_NET
112 #include <security/mac_framework.h>
113 #endif /* CONFIG_MACF_NET */
114
115 int load_ipfw(void);
116 int rip_detach(struct socket *);
117 int rip_abort(struct socket *);
118 int rip_disconnect(struct socket *);
119 int rip_bind(struct socket *, struct sockaddr *, struct proc *);
120 int rip_connect(struct socket *, struct sockaddr *, struct proc *);
121 int rip_shutdown(struct socket *);
122
123 #if IPSEC
124 extern int ipsec_bypass;
125 #endif
126
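/*
 * Global list head and bookkeeping state (hashes, locks, PCB zone) for all
 * raw IP protocol control blocks; set up in rip_init() below.
 */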
127 struct inpcbhead ripcb;
128 struct inpcbinfo ripcbinfo;
129
130 /* control hooks for ipfw and dummynet */
131 #if IPFIREWALL
132 ip_fw_ctl_t *ip_fw_ctl_ptr;
133 #if DUMMYNET
134 ip_dn_ctl_t *ip_dn_ctl_ptr;
135 #endif /* DUMMYNET */
136 #endif /* IPFIREWALL */
137
138 /*
139 * Nominal space allocated to a raw ip socket.
140 */
141 #define RIPSNDQ 8192
142 #define RIPRCVQ 8192
143
144 /*
145 * Raw interface to IP protocol.
146 */
147
148 /*
149 * Initialize the raw connection block queue.
150 */
151 void
152 rip_init()
153 {
154 struct inpcbinfo *pcbinfo;
155
156 LIST_INIT(&ripcb);
157 ripcbinfo.listhead = &ripcb;
158 /*
159 * XXX We don't use the hash list for raw IP, but it's easier
160 * to allocate a one entry hash list than it is to check all
161 * over the place for hashbase == NULL.
162 */
163 ripcbinfo.hashbase = hashinit(1, M_PCB, &ripcbinfo.hashmask);
164 ripcbinfo.porthashbase = hashinit(1, M_PCB, &ripcbinfo.porthashmask);
165
166 ripcbinfo.ipi_zone = (void *) zinit(sizeof(struct inpcb),
167 (4096 * sizeof(struct inpcb)),
168 4096, "ripzone");
169
170 pcbinfo = &ripcbinfo;
171 /*
172 * allocate lock group attribute and group for raw IP pcb mutexes
173 */
174 pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
175
176 pcbinfo->mtx_grp = lck_grp_alloc_init("ripcb", pcbinfo->mtx_grp_attr);
177
178 /*
179 * allocate the lock attribute for raw IP pcb mutexes
180 */
181 pcbinfo->mtx_attr = lck_attr_alloc_init();
182
183 if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL)
184 return; /* pretty much dead if this fails... */
185
186 }
187
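/*
 * Template source address; rip_input() overwrites sin_addr with the
 * packet's source address before each delivery.
 */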
188 static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET, 0, { 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 } };
189 /*
190 * Set up the generic source address and protocol structures
191 * for the raw IP input routine, then pass them along with the
192 * mbuf chain.
193 */
194 void
195 rip_input(m, iphlen)
196 struct mbuf *m;
197 int iphlen;
198 {
199 register struct ip *ip = mtod(m, struct ip *);
200 register struct inpcb *inp;
201 struct inpcb *last = 0;
202 struct mbuf *opts = 0;
203 int skipit = 0; /* stays zero if neither IPSEC nor MACF checks run */
204
205 ripsrc.sin_addr = ip->ip_src;
206 lck_rw_lock_shared(ripcbinfo.mtx);
207 LIST_FOREACH(inp, &ripcb, inp_list) {
208 #if INET6
209 if ((inp->inp_vflag & INP_IPV4) == 0)
210 continue;
211 #endif
212 if (inp->inp_ip_p && (inp->inp_ip_p != ip->ip_p))
213 continue;
214 if (inp->inp_laddr.s_addr &&
215 inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
216 continue;
217 if (inp->inp_faddr.s_addr &&
218 inp->inp_faddr.s_addr != ip->ip_src.s_addr)
219 continue;
220 if (last) {
221 struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);
222
223 #if IPSEC
224 /* check AH/ESP integrity. */
225 skipit = 0;
226 if (ipsec_bypass == 0 && n) {
227 if (ipsec4_in_reject_so(n, last->inp_socket)) {
228 m_freem(n);
229 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
230 /* do not inject data to pcb */
231 skipit = 1;
232 }
233 }
234 #endif /*IPSEC*/
235 #if CONFIG_MACF_NET
236 if (n && skipit == 0) {
237 if (mac_inpcb_check_deliver(last, n, AF_INET,
238 SOCK_RAW) != 0)
239 skipit = 1;
240 }
241 #endif
242 if (n && skipit == 0) {
243 int error = 0;
244 if (last->inp_flags & INP_CONTROLOPTS ||
245 last->inp_socket->so_options & SO_TIMESTAMP)
246 ip_savecontrol(last, &opts, ip, n);
247 if (last->inp_flags & INP_STRIPHDR) {
248 n->m_len -= iphlen;
249 n->m_pkthdr.len -= iphlen;
250 n->m_data += iphlen;
251 }
252 // ###LOCK need to lock that socket?
253 if (sbappendaddr(&last->inp_socket->so_rcv,
254 (struct sockaddr *)&ripsrc, n,
255 opts, &error) != 0) {
256 sorwakeup(last->inp_socket);
257 }
258 else {
259 if (error) {
260 /* should notify about lost packet */
261 kprintf("rip_input can't append to socket\n");
262 }
263 }
264 opts = 0;
265 }
266 }
267 last = inp;
268 }
269 lck_rw_done(ripcbinfo.mtx);
270 #if IPSEC
271 /* check AH/ESP integrity. */
272 skipit = 0;
273 if (ipsec_bypass == 0 && last) {
274 if (ipsec4_in_reject_so(m, last->inp_socket)) {
275 m_freem(m);
276 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
277 OSAddAtomic(1, &ipstat.ips_delivered);
278 /* do not inject data to pcb */
279 skipit = 1;
280 }
281 }
282 #endif /*IPSEC*/
283 #if CONFIG_MACF_NET
284 if (last && skipit == 0) {
285 if (mac_inpcb_check_deliver(last, m, AF_INET, SOCK_RAW) != 0)
286 skipit = 1;
287 }
288 #endif
289 if (skipit == 0) {
290 if (last) {
291 if (last->inp_flags & INP_CONTROLOPTS ||
292 last->inp_socket->so_options & SO_TIMESTAMP)
293 ip_savecontrol(last, &opts, ip, m);
294 if (last->inp_flags & INP_STRIPHDR) {
295 m->m_len -= iphlen;
296 m->m_pkthdr.len -= iphlen;
297 m->m_data += iphlen;
298 }
299 if (sbappendaddr(&last->inp_socket->so_rcv,
300 (struct sockaddr *)&ripsrc, m, opts, NULL) != 0) {
301 sorwakeup(last->inp_socket);
302 } else {
303 kprintf("rip_input(2) can't append to socket\n");
304 }
305 } else {
306 m_freem(m);
307 OSAddAtomic(1, &ipstat.ips_noproto);
308 OSAddAtomic(-1, &ipstat.ips_delivered);
309 }
310 }
311 }
312
313 /*
314 * Generate IP header and pass packet to ip_output.
315 * Tack on options user may have setup with control call.
316 */
317 int
318 rip_output(
319 struct mbuf *m,
320 struct socket *so,
321 u_int32_t dst,
322 struct mbuf *control)
323 {
324 register struct ip *ip;
325 register struct inpcb *inp = sotoinpcb(so);
326 int flags = (so->so_options & SO_DONTROUTE) | IP_ALLOWBROADCAST;
327 struct ip_out_args ipoa;
328 int error = 0;
329 #if PKT_PRIORITY
330 mbuf_traffic_class_t mtc = MBUF_TC_NONE;
331 #endif /* PKT_PRIORITY */
332
333 if (control != NULL) {
334 #if PKT_PRIORITY
335 mtc = mbuf_traffic_class_from_control(control);
336 #endif /* PKT_PRIORITY */
337
338 m_freem(control);
339 }
340 /* If socket was bound to an ifindex, tell ip_output about it */
341 ipoa.ipoa_ifscope = (inp->inp_flags & INP_BOUND_IF) ?
342 inp->inp_boundif : IFSCOPE_NONE;
343 flags |= IP_OUTARGS;
344
345 /*
346 * If the user handed us a complete IP packet, use it.
347 * Otherwise, allocate an mbuf for a header and fill it in.
348 */
349 if ((inp->inp_flags & INP_HDRINCL) == 0) {
350 if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
351 m_freem(m);
352 return(EMSGSIZE);
353 }
354 M_PREPEND(m, sizeof(struct ip), M_WAIT);
355 if (m == NULL)
356 return ENOBUFS;
357 ip = mtod(m, struct ip *);
358 ip->ip_tos = inp->inp_ip_tos;
359 ip->ip_off = 0;
360 ip->ip_p = inp->inp_ip_p;
361 ip->ip_len = m->m_pkthdr.len;
362 ip->ip_src = inp->inp_laddr;
363 ip->ip_dst.s_addr = dst;
364 ip->ip_ttl = inp->inp_ip_ttl;
365 } else {
366 if (m->m_pkthdr.len > IP_MAXPACKET) {
367 m_freem(m);
368 return(EMSGSIZE);
369 }
370 ip = mtod(m, struct ip *);
371 /* don't allow both user-specified and setsockopt options,
372 and don't allow packet lengths that would crash */
373 if (((IP_VHL_HL(ip->ip_vhl) != (sizeof (*ip) >> 2))
374 && inp->inp_options)
375 || (ip->ip_len > m->m_pkthdr.len)
376 || (ip->ip_len < (IP_VHL_HL(ip->ip_vhl) << 2))) {
377 m_freem(m);
378 return EINVAL;
379 }
380 if (ip->ip_id == 0)
381 #if RANDOM_IP_ID
382 ip->ip_id = ip_randomid();
383 #else
384 ip->ip_id = htons(ip_id++);
385 #endif
386 /* XXX prevent ip_output from overwriting header fields */
387 flags |= IP_RAWOUTPUT;
388 OSAddAtomic(1, &ipstat.ips_rawout);
389 }
390
391 #if IPSEC
392 if (ipsec_bypass == 0 && ipsec_setsocket(m, so) != 0) {
393 m_freem(m);
394 return ENOBUFS;
395 }
396 #endif /*IPSEC*/
397
398 if (inp->inp_route.ro_rt != NULL &&
399 inp->inp_route.ro_rt->generation_id != route_generation) {
400 rtfree(inp->inp_route.ro_rt);
401 inp->inp_route.ro_rt = NULL;
402 }
403
404 #if PKT_PRIORITY
405 set_traffic_class(m, so, mtc);
406 #endif /* PKT_PRIORITY */
407
408 #if CONFIG_MACF_NET
409 mac_mbuf_label_associate_inpcb(inp, m);
410 #endif
411
412 /*
413 * The domain lock is held across ip_output, so it is okay
414 * to pass the PCB cached route pointer directly to IP and
415 * the modules beneath it.
416 */
417 error = ip_output(m, inp->inp_options, &inp->inp_route, flags,
418 inp->inp_moptions, &ipoa);
419
420 #if IFNET_ROUTE_REFCNT
421 /*
422 * Always discard the cached route for unconnected socket
423 * or if it is a non-unicast route.
424 */
425 if (inp->inp_route.ro_rt != NULL &&
426 ((inp->inp_route.ro_rt->rt_flags & (RTF_MULTICAST|RTF_BROADCAST)) ||
427 inp->inp_socket == NULL ||
428 !(inp->inp_socket->so_state & SS_ISCONNECTED))) {
429 rtfree(inp->inp_route.ro_rt);
430 inp->inp_route.ro_rt = NULL;
431 }
432 #endif /* IFNET_ROUTE_REFCNT */
433
434 return (error);
435 }
436
437 #if IPFIREWALL
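/*
 * Initialize the ipfw subsystem (and dummynet, if configured) on first use;
 * returns -1 if the ipfw control hook is still unset afterwards, 0 otherwise.
 */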
438 int
439 load_ipfw(void)
440 {
441 kern_return_t err;
442
443 ipfw_init();
444
445 #if DUMMYNET
446 if (!DUMMYNET_LOADED)
447 ip_dn_init();
448 #endif /* DUMMYNET */
449 err = 0;
450
451 return err == 0 && ip_fw_ctl_ptr == NULL ? -1 : err;
452 }
453 #endif /* IPFIREWALL */
454
455 /*
456 * Raw IP socket option processing.
457 */
458 int
459 rip_ctloutput(so, sopt)
460 struct socket *so;
461 struct sockopt *sopt;
462 {
463 struct inpcb *inp = sotoinpcb(so);
464 int error, optval;
465
466 if (sopt->sopt_level != IPPROTO_IP)
467 return (EINVAL);
468
469 error = 0;
470
471 switch (sopt->sopt_dir) {
472 case SOPT_GET:
473 switch (sopt->sopt_name) {
474 case IP_HDRINCL:
475 optval = inp->inp_flags & INP_HDRINCL;
476 error = sooptcopyout(sopt, &optval, sizeof optval);
477 break;
478
479 case IP_STRIPHDR:
480 optval = inp->inp_flags & INP_STRIPHDR;
481 error = sooptcopyout(sopt, &optval, sizeof optval);
482 break;
483
484 #if IPFIREWALL
485 case IP_FW_ADD:
486 case IP_FW_GET:
487 case IP_OLD_FW_ADD:
488 case IP_OLD_FW_GET:
489 if (ip_fw_ctl_ptr == 0)
490 error = load_ipfw();
491 if (ip_fw_ctl_ptr && error == 0)
492 error = ip_fw_ctl_ptr(sopt);
493 else
494 error = ENOPROTOOPT;
495 break;
496 #endif /* IPFIREWALL */
497
498 #if DUMMYNET
499 case IP_DUMMYNET_GET:
500 if (DUMMYNET_LOADED)
501 error = ip_dn_ctl_ptr(sopt);
502 else
503 error = ENOPROTOOPT;
504 break;
505 #endif /* DUMMYNET */
506
507 #if MROUTING
508 case MRT_INIT:
509 case MRT_DONE:
510 case MRT_ADD_VIF:
511 case MRT_DEL_VIF:
512 case MRT_ADD_MFC:
513 case MRT_DEL_MFC:
514 case MRT_VERSION:
515 case MRT_ASSERT:
516 error = ip_mrouter_get(so, sopt);
517 break;
518 #endif /* MROUTING */
519
520 default:
521 error = ip_ctloutput(so, sopt);
522 break;
523 }
524 break;
525
526 case SOPT_SET:
527 switch (sopt->sopt_name) {
528 case IP_HDRINCL:
529 error = sooptcopyin(sopt, &optval, sizeof optval,
530 sizeof optval);
531 if (error)
532 break;
533 if (optval)
534 inp->inp_flags |= INP_HDRINCL;
535 else
536 inp->inp_flags &= ~INP_HDRINCL;
537 break;
538
539 case IP_STRIPHDR:
540 error = sooptcopyin(sopt, &optval, sizeof optval,
541 sizeof optval);
542 if (error)
543 break;
544 if (optval)
545 inp->inp_flags |= INP_STRIPHDR;
546 else
547 inp->inp_flags &= ~INP_STRIPHDR;
548 break;
549
550
551 #if IPFIREWALL
552 case IP_FW_ADD:
553 case IP_FW_DEL:
554 case IP_FW_FLUSH:
555 case IP_FW_ZERO:
556 case IP_FW_RESETLOG:
557 case IP_OLD_FW_ADD:
558 case IP_OLD_FW_DEL:
559 case IP_OLD_FW_FLUSH:
560 case IP_OLD_FW_ZERO:
561 case IP_OLD_FW_RESETLOG:
562 if (ip_fw_ctl_ptr == 0)
563 error = load_ipfw();
564 if (ip_fw_ctl_ptr && error == 0)
565 error = ip_fw_ctl_ptr(sopt);
566 else
567 error = ENOPROTOOPT;
568 break;
569 #endif /* IPFIREWALL */
570
571 #if DUMMYNET
572 case IP_DUMMYNET_CONFIGURE:
573 case IP_DUMMYNET_DEL:
574 case IP_DUMMYNET_FLUSH:
575 if (DUMMYNET_LOADED)
576 error = ip_dn_ctl_ptr(sopt);
577 else
578 error = ENOPROTOOPT;
579 break;
580 #endif
581
582 #if MROUTING
583 case IP_RSVP_ON:
584 error = ip_rsvp_init(so);
585 break;
586
587 case IP_RSVP_OFF:
588 error = ip_rsvp_done();
589 break;
590
591 /* XXX - should be combined */
592 case IP_RSVP_VIF_ON:
593 error = ip_rsvp_vif_init(so, sopt);
594 break;
595
596 case IP_RSVP_VIF_OFF:
597 error = ip_rsvp_vif_done(so, sopt);
598 break;
599
600 case MRT_INIT:
601 case MRT_DONE:
602 case MRT_ADD_VIF:
603 case MRT_DEL_VIF:
604 case MRT_ADD_MFC:
605 case MRT_DEL_MFC:
606 case MRT_VERSION:
607 case MRT_ASSERT:
608 error = ip_mrouter_set(so, sopt);
609 break;
610 #endif /* MROUTING */
611
612 default:
613 error = ip_ctloutput(so, sopt);
614 break;
615 }
616 break;
617 }
618
619 return (error);
620 }
621
622 /*
623 * This function exists solely to receive the PRC_IFDOWN messages which
624 * are sent by if_down(). It looks for an ifaddr whose ifa_addr is sa,
625 * and calls in_ifadown() to remove all routes corresponding to that address.
626 * It also receives the PRC_IFUP messages from if_up() and reinstalls the
627 * interface routes.
628 */
629 void
630 rip_ctlinput(
631 int cmd,
632 struct sockaddr *sa,
633 __unused void *vip)
634 {
635 struct in_ifaddr *ia;
636 struct ifnet *ifp;
637 int err;
638 int flags, done = 0;
639
640 switch (cmd) {
641 case PRC_IFDOWN:
642 lck_rw_lock_shared(in_ifaddr_rwlock);
643 for (ia = in_ifaddrhead.tqh_first; ia;
644 ia = ia->ia_link.tqe_next) {
645 if (ia->ia_ifa.ifa_addr == sa
646 && (ia->ia_flags & IFA_ROUTE)) {
647 done = 1;
648 ifaref(&ia->ia_ifa);
649 lck_rw_done(in_ifaddr_rwlock);
650 lck_mtx_lock(rnh_lock);
651 /*
652 * in_ifscrub kills the interface route.
653 */
654 in_ifscrub(ia->ia_ifp, ia, 1);
655 /*
656 * in_ifadown gets rid of all the rest of
657 * the routes. This is not quite the right
658 * thing to do, but at least if we are running
659 * a routing process they will come back.
660 */
661 in_ifadown(&ia->ia_ifa, 1);
662 lck_mtx_unlock(rnh_lock);
663 ifafree(&ia->ia_ifa);
664 break;
665 }
666 }
667 if (!done)
668 lck_rw_done(in_ifaddr_rwlock);
669 break;
670
671 case PRC_IFUP:
672 lck_rw_lock_shared(in_ifaddr_rwlock);
673 for (ia = in_ifaddrhead.tqh_first; ia;
674 ia = ia->ia_link.tqe_next) {
675 if (ia->ia_ifa.ifa_addr == sa)
676 break;
677 }
678 if (ia == 0 || (ia->ia_flags & IFA_ROUTE)) {
679 lck_rw_done(in_ifaddr_rwlock);
680 return;
681 }
682 ifaref(&ia->ia_ifa);
683 lck_rw_done(in_ifaddr_rwlock);
684
685 flags = RTF_UP;
686 ifp = ia->ia_ifa.ifa_ifp;
687
688 if ((ifp->if_flags & IFF_LOOPBACK)
689 || (ifp->if_flags & IFF_POINTOPOINT))
690 flags |= RTF_HOST;
691
692 err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
693 if (err == 0)
694 ia->ia_flags |= IFA_ROUTE;
695 ifafree(&ia->ia_ifa);
696 break;
697 }
698 }
699
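/*
 * Default socket buffer reservations for raw IP sockets, tunable through
 * the sysctls declared below.
 */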
700 u_int32_t rip_sendspace = RIPSNDQ;
701 u_int32_t rip_recvspace = RIPRCVQ;
702
703 SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
704 &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
705 SYSCTL_INT(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
706 &rip_recvspace, 0, "Maximum incoming raw IP datagram size");
707
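/*
 * Attach a raw IP PCB to a newly created socket.  Raw sockets are restricted
 * to privileged sockets; the PCB records the requested protocol number and
 * the default IP TTL.
 */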
708 static int
709 rip_attach(struct socket *so, int proto, struct proc *p)
710 {
711 struct inpcb *inp;
712 int error;
713
714 inp = sotoinpcb(so);
715 if (inp)
716 panic("rip_attach");
717 if ((so->so_state & SS_PRIV) == 0)
718 return (EPERM);
719
720 error = soreserve(so, rip_sendspace, rip_recvspace);
721 if (error)
722 return error;
723 error = in_pcballoc(so, &ripcbinfo, p);
724 if (error)
725 return error;
726 inp = (struct inpcb *)so->so_pcb;
727 inp->inp_vflag |= INP_IPV4;
728 inp->inp_ip_p = proto;
729 inp->inp_ip_ttl = ip_defttl;
730 return 0;
731 }
732
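/*
 * Detach the PCB from the socket, first tearing down any multicast routing
 * or RSVP state owned by this socket (when MROUTING is configured).
 */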
733 __private_extern__ int
734 rip_detach(struct socket *so)
735 {
736 struct inpcb *inp;
737
738 inp = sotoinpcb(so);
739 if (inp == 0)
740 panic("rip_detach");
741 #if MROUTING
742 if (so == ip_mrouter)
743 ip_mrouter_done();
744 ip_rsvp_force_done(so);
745 if (so == ip_rsvpd)
746 ip_rsvp_done();
747 #endif /* MROUTING */
748 in_pcbdetach(inp);
749 return 0;
750 }
751
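/* Abort: mark the socket disconnected and detach its PCB. */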
752 __private_extern__ int
753 rip_abort(struct socket *so)
754 {
755 soisdisconnected(so);
756 return rip_detach(so);
757 }
758
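/* Disconnect is only valid on a connected raw socket; it then aborts it. */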
759 __private_extern__ int
760 rip_disconnect(struct socket *so)
761 {
762 if ((so->so_state & SS_ISCONNECTED) == 0)
763 return ENOTCONN;
764 return rip_abort(so);
765 }
766
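/*
 * Bind the raw socket to a local address.  A non-wildcard address must be
 * AF_INET (or AF_IMPLINK) and correspond to a configured interface address.
 */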
767 __private_extern__ int
768 rip_bind(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
769 {
770 struct inpcb *inp = sotoinpcb(so);
771 struct sockaddr_in *addr = (struct sockaddr_in *)nam;
772 struct ifaddr *ifa = NULL;
773
774 if (nam->sa_len != sizeof(*addr))
775 return EINVAL;
776
777 if (TAILQ_EMPTY(&ifnet_head) || ((addr->sin_family != AF_INET) &&
778 (addr->sin_family != AF_IMPLINK)) ||
779 (addr->sin_addr.s_addr &&
780 (ifa = ifa_ifwithaddr((struct sockaddr *)addr)) == 0)) {
781 return EADDRNOTAVAIL;
782 }
783 else if (ifa) {
784 ifafree(ifa);
785 ifa = NULL;
786 }
787 inp->inp_laddr = addr->sin_addr;
788 return 0;
789 }
790
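/*
 * Record the foreign address for a connected raw socket; it is used to
 * filter inbound packets and as the default destination for sends.
 */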
791 __private_extern__ int
792 rip_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
793 {
794 struct inpcb *inp = sotoinpcb(so);
795 struct sockaddr_in *addr = (struct sockaddr_in *)nam;
796
797 if (nam->sa_len != sizeof(*addr))
798 return EINVAL;
799 if (TAILQ_EMPTY(&ifnet_head))
800 return EADDRNOTAVAIL;
801 if ((addr->sin_family != AF_INET) &&
802 (addr->sin_family != AF_IMPLINK))
803 return EAFNOSUPPORT;
804 inp->inp_faddr = addr->sin_addr;
805 soisconnected(so);
806 return 0;
807 }
808
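/* Shutdown simply disallows further sends on the socket. */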
809 __private_extern__ int
810 rip_shutdown(struct socket *so)
811 {
812 socantsendmore(so);
813 return 0;
814 }
815
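/*
 * Per-packet send entry point: resolve the destination (from the PCB for a
 * connected socket, otherwise from the supplied address) and hand the mbuf
 * to rip_output().
 */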
816 __private_extern__ int
817 rip_send(struct socket *so, __unused int flags, struct mbuf *m, struct sockaddr *nam,
818 __unused struct mbuf *control, __unused struct proc *p)
819 {
820 struct inpcb *inp = sotoinpcb(so);
821 register u_int32_t dst;
822
823 if (so->so_state & SS_ISCONNECTED) {
824 if (nam) {
825 m_freem(m);
826 return EISCONN;
827 }
828 dst = inp->inp_faddr.s_addr;
829 } else {
830 if (nam == NULL) {
831 m_freem(m);
832 return ENOTCONN;
833 }
834 dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
835 }
836 return rip_output(m, so, dst, control);
837 }
838
839 /* note: rip_unlock is called by various protocols in place of the generic socket_unlock;
840 * it handles the socket deallocation on the last reference.
841 */
842 int
843 rip_unlock(struct socket *so, int refcount, void *debug)
844 {
845 void *lr_saved;
846 struct inpcb *inp = sotoinpcb(so);
847
848 if (debug == NULL)
849 lr_saved = __builtin_return_address(0);
850 else
851 lr_saved = debug;
852
853 if (refcount) {
854 if (so->so_usecount <= 0) {
855 panic("rip_unlock: bad refoucnt so=%p val=%x lrh= %s\n",
856 so, so->so_usecount, solockhistory_nr(so));
857 /* NOTREACHED */
858 }
859 so->so_usecount--;
860 if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) {
861 /* cleanup after last reference */
862 lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
863 lck_rw_lock_exclusive(ripcbinfo.mtx);
864 if (inp->inp_state != INPCB_STATE_DEAD) {
865 #if INET6
866 if (INP_CHECK_SOCKAF(so, AF_INET6))
867 in6_pcbdetach(inp);
868 else
869 #endif /* INET6 */
870 in_pcbdetach(inp);
871 }
872 in_pcbdispose(inp);
873 lck_rw_done(ripcbinfo.mtx);
874 return(0);
875 }
876 }
877 so->unlock_lr[so->next_unlock_lr] = lr_saved;
878 so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
879 lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
880 return(0);
881 }
882
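/*
 * sysctl handler behind net.inet.raw.pcblist: snapshots the raw IP PCB list
 * as an xinpgen header followed by one xinpcb record per live PCB, and a
 * trailing xinpgen so the caller can detect changes made during the copy.
 */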
883 static int
884 rip_pcblist SYSCTL_HANDLER_ARGS
885 {
886 #pragma unused(oidp, arg1, arg2)
887 int error, i, n;
888 struct inpcb *inp, **inp_list;
889 inp_gen_t gencnt;
890 struct xinpgen xig;
891
892 /*
893 * The process of preparing the PCB list is too time-consuming and
894 * resource-intensive to repeat twice on every request.
895 */
896 lck_rw_lock_exclusive(ripcbinfo.mtx);
897 if (req->oldptr == USER_ADDR_NULL) {
898 n = ripcbinfo.ipi_count;
899 req->oldidx = 2 * (sizeof xig)
900 + (n + n/8) * sizeof(struct xinpcb);
901 lck_rw_done(ripcbinfo.mtx);
902 return 0;
903 }
904
905 if (req->newptr != USER_ADDR_NULL) {
906 lck_rw_done(ripcbinfo.mtx);
907 return EPERM;
908 }
909
910 /*
911 * OK, now we're committed to doing something.
912 */
913 gencnt = ripcbinfo.ipi_gencnt;
914 n = ripcbinfo.ipi_count;
915
916 bzero(&xig, sizeof(xig));
917 xig.xig_len = sizeof xig;
918 xig.xig_count = n;
919 xig.xig_gen = gencnt;
920 xig.xig_sogen = so_gencnt;
921 error = SYSCTL_OUT(req, &xig, sizeof xig);
922 if (error) {
923 lck_rw_done(ripcbinfo.mtx);
924 return error;
925 }
926 /*
927 * We are done if there is no pcb
928 */
929 if (n == 0) {
930 lck_rw_done(ripcbinfo.mtx);
931 return 0;
932 }
933
934 inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
935 if (inp_list == 0) {
936 lck_rw_done(ripcbinfo.mtx);
937 return ENOMEM;
938 }
939
940 for (inp = ripcbinfo.listhead->lh_first, i = 0; inp && i < n;
941 inp = inp->inp_list.le_next) {
942 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
943 inp_list[i++] = inp;
944 }
945 n = i;
946
947 error = 0;
948 for (i = 0; i < n; i++) {
949 inp = inp_list[i];
950 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
951 struct xinpcb xi;
952
953 bzero(&xi, sizeof(xi));
954 xi.xi_len = sizeof xi;
955 /* XXX should avoid extra copy */
956 inpcb_to_compat(inp, &xi.xi_inp);
957 if (inp->inp_socket)
958 sotoxsocket(inp->inp_socket, &xi.xi_socket);
959 error = SYSCTL_OUT(req, &xi, sizeof xi);
960 }
961 }
962 if (!error) {
963 /*
964 * Give the user an updated idea of our state.
965 * If the generation differs from what we told
966 * her before, she knows that something happened
967 * while we were processing this request, and it
968 * might be necessary to retry.
969 */
970 bzero(&xig, sizeof(xig));
971 xig.xig_len = sizeof xig;
972 xig.xig_gen = ripcbinfo.ipi_gencnt;
973 xig.xig_sogen = so_gencnt;
974 xig.xig_count = ripcbinfo.ipi_count;
975 error = SYSCTL_OUT(req, &xig, sizeof xig);
976 }
977 FREE(inp_list, M_TEMP);
978 lck_rw_done(ripcbinfo.mtx);
979 return error;
980 }
981
982 SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD, 0, 0,
983 rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
984
985 #if !CONFIG_EMBEDDED
986
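/*
 * 64-bit layout variant of rip_pcblist (net.inet.raw.pcblist64), exporting
 * xinpcb64 records.
 */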
987 static int
988 rip_pcblist64 SYSCTL_HANDLER_ARGS
989 {
990 #pragma unused(oidp, arg1, arg2)
991 int error, i, n;
992 struct inpcb *inp, **inp_list;
993 inp_gen_t gencnt;
994 struct xinpgen xig;
995
996 /*
997 * The process of preparing the PCB list is too time-consuming and
998 * resource-intensive to repeat twice on every request.
999 */
1000 lck_rw_lock_exclusive(ripcbinfo.mtx);
1001 if (req->oldptr == USER_ADDR_NULL) {
1002 n = ripcbinfo.ipi_count;
1003 req->oldidx = 2 * (sizeof xig)
1004 + (n + n/8) * sizeof(struct xinpcb64);
1005 lck_rw_done(ripcbinfo.mtx);
1006 return 0;
1007 }
1008
1009 if (req->newptr != USER_ADDR_NULL) {
1010 lck_rw_done(ripcbinfo.mtx);
1011 return EPERM;
1012 }
1013
1014 /*
1015 * OK, now we're committed to doing something.
1016 */
1017 gencnt = ripcbinfo.ipi_gencnt;
1018 n = ripcbinfo.ipi_count;
1019
1020 bzero(&xig, sizeof(xig));
1021 xig.xig_len = sizeof xig;
1022 xig.xig_count = n;
1023 xig.xig_gen = gencnt;
1024 xig.xig_sogen = so_gencnt;
1025 error = SYSCTL_OUT(req, &xig, sizeof xig);
1026 if (error) {
1027 lck_rw_done(ripcbinfo.mtx);
1028 return error;
1029 }
1030 /*
1031 * We are done if there is no pcb
1032 */
1033 if (n == 0) {
1034 lck_rw_done(ripcbinfo.mtx);
1035 return 0;
1036 }
1037
1038 inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
1039 if (inp_list == 0) {
1040 lck_rw_done(ripcbinfo.mtx);
1041 return ENOMEM;
1042 }
1043
1044 for (inp = ripcbinfo.listhead->lh_first, i = 0; inp && i < n;
1045 inp = inp->inp_list.le_next) {
1046 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
1047 inp_list[i++] = inp;
1048 }
1049 n = i;
1050
1051 error = 0;
1052 for (i = 0; i < n; i++) {
1053 inp = inp_list[i];
1054 if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
1055 struct xinpcb64 xi;
1056
1057 bzero(&xi, sizeof(xi));
1058 xi.xi_len = sizeof xi;
1059 inpcb_to_xinpcb64(inp, &xi);
1060 if (inp->inp_socket)
1061 sotoxsocket64(inp->inp_socket, &xi.xi_socket);
1062 error = SYSCTL_OUT(req, &xi, sizeof xi);
1063 }
1064 }
1065 if (!error) {
1066 /*
1067 * Give the user an updated idea of our state.
1068 * If the generation differs from what we told
1069 * her before, she knows that something happened
1070 * while we were processing this request, and it
1071 * might be necessary to retry.
1072 */
1073 bzero(&xig, sizeof(xig));
1074 xig.xig_len = sizeof xig;
1075 xig.xig_gen = ripcbinfo.ipi_gencnt;
1076 xig.xig_sogen = so_gencnt;
1077 xig.xig_count = ripcbinfo.ipi_count;
1078 error = SYSCTL_OUT(req, &xig, sizeof xig);
1079 }
1080 FREE(inp_list, M_TEMP);
1081 lck_rw_done(ripcbinfo.mtx);
1082 return error;
1083 }
1084
1085 SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist64, CTLFLAG_RD, 0, 0,
1086 rip_pcblist64, "S,xinpcb64", "List of active raw IP sockets");
1087
1088 #endif /* !CONFIG_EMBEDDED */
1089
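/*
 * Protocol user-request dispatch table for raw IP sockets; operations that
 * do not apply to raw IP point at the pru_*_notsupp stubs.
 */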
1090 struct pr_usrreqs rip_usrreqs = {
1091 rip_abort, pru_accept_notsupp, rip_attach, rip_bind, rip_connect,
1092 pru_connect2_notsupp, in_control, rip_detach, rip_disconnect,
1093 pru_listen_notsupp, in_setpeeraddr, pru_rcvd_notsupp,
1094 pru_rcvoob_notsupp, rip_send, pru_sense_null, rip_shutdown,
1095 in_setsockaddr, sosend, soreceive, pru_sopoll_notsupp
1096 };
1097 /* DSEP Review Done pl-20051213-v02 @3253 */