/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#if __FreeBSD__
#include <vm/vm_zone.h>
#endif

#include <net/if.h>
#include <net/route.h>

#define _IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>

#include <netinet/ip_fw.h>

#if IPSEC
#include <netinet6/ipsec.h>
#endif /*IPSEC*/

#if DUMMYNET
#include <netinet/ip_dummynet.h>
#endif

#if IPSEC
extern int ipsec_bypass;
extern lck_mtx_t *sadb_mutex;
#endif

extern u_long route_generation;
struct inpcbhead ripcb;
struct inpcbinfo ripcbinfo;

/* control hooks for ipfw and dummynet */
ip_fw_ctl_t *ip_fw_ctl_ptr;
#if DUMMYNET
ip_dn_ctl_t *ip_dn_ctl_ptr;
#endif /* DUMMYNET */

/*
 * Nominal space allocated to a raw ip socket.
 */
#define	RIPSNDQ		8192
#define	RIPRCVQ		8192

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize raw connection block q.
 */
void
rip_init()
{
	struct inpcbinfo *pcbinfo;

	LIST_INIT(&ripcb);
	ripcbinfo.listhead = &ripcb;
	/*
	 * XXX We don't use the hash list for raw IP, but it's easier
	 * to allocate a one entry hash list than it is to check all
	 * over the place for hashbase == NULL.
	 */
	ripcbinfo.hashbase = hashinit(1, M_PCB, &ripcbinfo.hashmask);
	ripcbinfo.porthashbase = hashinit(1, M_PCB, &ripcbinfo.porthashmask);

	ripcbinfo.ipi_zone = (void *) zinit(sizeof(struct inpcb),
					    (4096 * sizeof(struct inpcb)),
					    4096, "ripzone");

	pcbinfo = &ripcbinfo;
	/*
	 * allocate lock group attribute and group for the raw IP pcb mutexes
	 */
	pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();

	pcbinfo->mtx_grp = lck_grp_alloc_init("ripcb", pcbinfo->mtx_grp_attr);

	/*
	 * allocate the lock attribute for the raw IP pcb mutexes
	 */
	pcbinfo->mtx_attr = lck_attr_alloc_init();

	if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL)
		return;	/* pretty much dead if this fails... */

}

static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET };
/*
 * Set up generic address and protocol structures
 * for the raw_input routine, then pass them along with
 * the mbuf chain.
 */
void
rip_input(m, iphlen)
	struct mbuf *m;
	int iphlen;
{
	register struct ip *ip = mtod(m, struct ip *);
	register struct inpcb *inp;
	struct inpcb *last = 0;
	struct mbuf *opts = 0;
	int skipit = 0;		/* initialized here so non-IPSEC builds never test it unset */

	ripsrc.sin_addr = ip->ip_src;
	lck_rw_lock_shared(ripcbinfo.mtx);
	LIST_FOREACH(inp, &ripcb, inp_list) {
#if INET6
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (inp->inp_ip_p && (inp->inp_ip_p != ip->ip_p))
			continue;
		if (inp->inp_laddr.s_addr &&
		    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr &&
		    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (last) {
			struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);

#if IPSEC
			/* check AH/ESP integrity. */
			skipit = 0;
			if (ipsec_bypass == 0 && n) {
				lck_mtx_lock(sadb_mutex);
				if (ipsec4_in_reject_so(n, last->inp_socket)) {
					m_freem(n);
					ipsecstat.in_polvio++;
					/* do not inject data to pcb */
					skipit = 1;
				}
				lck_mtx_unlock(sadb_mutex);
			}
#endif /*IPSEC*/
			if (n && skipit == 0) {
				int error = 0;
				if (last->inp_flags & INP_CONTROLOPTS ||
				    last->inp_socket->so_options & SO_TIMESTAMP)
					ip_savecontrol(last, &opts, ip, n);
				if (last->inp_flags & INP_STRIPHDR) {
					n->m_len -= iphlen;
					n->m_pkthdr.len -= iphlen;
					n->m_data += iphlen;
				}
				// ###LOCK need to lock that socket?
				if (sbappendaddr(&last->inp_socket->so_rcv,
						 (struct sockaddr *)&ripsrc, n,
						 opts, &error) != 0) {
					sorwakeup(last->inp_socket);
				}
				else {
					if (error) {
						/* should notify about lost packet */
						kprintf("rip_input can't append to socket\n");
					}
				}
				opts = 0;
			}
		}
		last = inp;
	}
	lck_rw_done(ripcbinfo.mtx);
#if IPSEC
	/* check AH/ESP integrity. */
	skipit = 0;
	if (ipsec_bypass == 0 && last) {
		lck_mtx_lock(sadb_mutex);
		if (ipsec4_in_reject_so(m, last->inp_socket)) {
			m_freem(m);
			ipsecstat.in_polvio++;
			ipstat.ips_delivered--;
			/* do not inject data to pcb */
			skipit = 1;
		}
		lck_mtx_unlock(sadb_mutex);
	}
#endif /*IPSEC*/
	if (skipit == 0) {
		if (last) {
			if (last->inp_flags & INP_CONTROLOPTS ||
			    last->inp_socket->so_options & SO_TIMESTAMP)
				ip_savecontrol(last, &opts, ip, m);
			if (last->inp_flags & INP_STRIPHDR) {
				m->m_len -= iphlen;
				m->m_pkthdr.len -= iphlen;
				m->m_data += iphlen;
			}
			if (sbappendaddr(&last->inp_socket->so_rcv,
					 (struct sockaddr *)&ripsrc, m, opts, NULL) != 0) {
				sorwakeup(last->inp_socket);
			} else {
				kprintf("rip_input(2) can't append to socket\n");
			}
		} else {
			m_freem(m);
			ipstat.ips_noproto++;
			ipstat.ips_delivered--;
		}
	}
}
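
/*
 * Delivery sketch (hypothetical user-space code, not part of this file):
 * every raw socket whose protocol and address filters match gets its own
 * copy of the datagram, with the IP header still attached unless
 * IP_STRIPHDR was set on that socket.  Assuming a privileged process:
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 *	char buf[2048];
 *	struct sockaddr_in from;
 *	socklen_t fromlen = sizeof (from);
 *	ssize_t n = recvfrom(s, buf, sizeof (buf), 0,
 *	    (struct sockaddr *)&from, &fromlen);
 *
 * Here buf begins with the IP header and "from" carries the source address
 * filled in from ripsrc above.
 */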

/*
 * Generate IP header and pass packet to ip_output.
 * Tack on options user may have setup with control call.
 */
int
rip_output(m, so, dst)
	register struct mbuf *m;
	struct socket *so;
	u_long dst;
{
	register struct ip *ip;
	register struct inpcb *inp = sotoinpcb(so);
	int flags = (so->so_options & SO_DONTROUTE) | IP_ALLOWBROADCAST;

	/*
	 * If the user handed us a complete IP packet, use it.
	 * Otherwise, allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_WAIT);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_off = 0;
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = m->m_pkthdr.len;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst.s_addr = dst;
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		ip = mtod(m, struct ip *);
		/* don't allow both user specified and setsockopt options,
		   and don't allow packet length sizes that will crash */
		if (((IP_VHL_HL(ip->ip_vhl) != (sizeof (*ip) >> 2))
		     && inp->inp_options)
		    || (ip->ip_len > m->m_pkthdr.len)
		    || (ip->ip_len < (IP_VHL_HL(ip->ip_vhl) << 2))) {
			m_freem(m);
			return EINVAL;
		}
		if (ip->ip_id == 0)
#if RANDOM_IP_ID
			ip->ip_id = ip_randomid();
#else
			ip->ip_id = htons(ip_id++);
#endif
		/* XXX prevent ip_output from overwriting header fields */
		flags |= IP_RAWOUTPUT;
		ipstat.ips_rawout++;
	}

#if IPSEC
	if (ipsec_bypass == 0 && ipsec_setsocket(m, so) != 0) {
		m_freem(m);
		return ENOBUFS;
	}
#endif /*IPSEC*/

	if (inp->inp_route.ro_rt && inp->inp_route.ro_rt->generation_id != route_generation) {
		rtfree(inp->inp_route.ro_rt);
		inp->inp_route.ro_rt = (struct rtentry *)0;
	}

	return (ip_output_list(m, 0, inp->inp_options, &inp->inp_route, flags,
			       inp->inp_moptions));
}
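
/*
 * Send-side sketch (hypothetical user-space code): with IP_HDRINCL clear,
 * the caller supplies only the payload and the header is built above; with
 * IP_HDRINCL set, the caller supplies the whole packet and the ip_vhl and
 * length checks above apply.  "pkt" and "pktlen" are placeholders.
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *	int on = 1;
 *	setsockopt(s, IPPROTO_IP, IP_HDRINCL, &on, sizeof (on));
 *	struct sockaddr_in to = { sizeof (to), AF_INET };
 *	to.sin_addr.s_addr = inet_addr("192.0.2.1");
 *	sendto(s, pkt, pktlen, 0, (struct sockaddr *)&to, sizeof (to));
 *
 * The buffer must start with a valid struct ip whose ip_len does not
 * exceed pktlen.
 */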

extern int
load_ipfw()
{
	kern_return_t err;

	ipfw_init();

#if DUMMYNET
	if (!DUMMYNET_LOADED)
		ip_dn_init();
#endif /* DUMMYNET */
	err = 0;

	return err == 0 && ip_fw_ctl_ptr == NULL ? -1 : err;
}

/*
 * Raw IP socket option processing.
 */
int
rip_ctloutput(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	struct inpcb *inp = sotoinpcb(so);
	int error, optval;

	if (sopt->sopt_level != IPPROTO_IP)
		return (EINVAL);

	error = 0;

	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_STRIPHDR:
			optval = inp->inp_flags & INP_STRIPHDR;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_FW_ADD:
		case IP_FW_GET:
		case IP_OLD_FW_ADD:
		case IP_OLD_FW_GET:
			if (ip_fw_ctl_ptr == 0)
				error = load_ipfw();
			if (ip_fw_ctl_ptr && error == 0)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

#if DUMMYNET
		case IP_DUMMYNET_GET:
			if (DUMMYNET_LOADED)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;
#endif /* DUMMYNET */

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
			error = ip_mrouter_get(so, sopt);
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_STRIPHDR:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_STRIPHDR;
			else
				inp->inp_flags &= ~INP_STRIPHDR;
			break;


		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_OLD_FW_ADD:
		case IP_OLD_FW_DEL:
		case IP_OLD_FW_FLUSH:
		case IP_OLD_FW_ZERO:
		case IP_OLD_FW_RESETLOG:
			if (ip_fw_ctl_ptr == 0)
				error = load_ipfw();
			if (ip_fw_ctl_ptr && error == 0)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

#if DUMMYNET
		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (DUMMYNET_LOADED)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;
#endif

		case IP_RSVP_ON:
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = ip_rsvp_done();
			break;

		/* XXX - should be combined */
		case IP_RSVP_VIF_ON:
			error = ip_rsvp_vif_init(so, sopt);
			break;

		case IP_RSVP_VIF_OFF:
			error = ip_rsvp_vif_done(so, sopt);
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
			error = ip_mrouter_set(so, sopt);
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}
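
/*
 * Option sketch (hypothetical user-space code): IP_STRIPHDR asks rip_input()
 * above to trim the IP header before data is queued on the socket, and
 * reading the option back reports the current setting.  "s" is assumed to
 * be an already-created raw socket.
 *
 *	int on = 1, val = 0;
 *	socklen_t len = sizeof (val);
 *	setsockopt(s, IPPROTO_IP, IP_STRIPHDR, &on, sizeof (on));
 *	getsockopt(s, IPPROTO_IP, IP_STRIPHDR, &val, &len);
 *
 * val is nonzero while INP_STRIPHDR is set on the pcb.
 */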

/*
 * This function exists solely to receive the PRC_IFDOWN messages which
 * are sent by if_down().  It looks for an ifaddr whose ifa_addr is sa,
 * and calls in_ifadown() to remove all routes corresponding to that address.
 * It also receives the PRC_IFUP messages from if_up() and reinstalls the
 * interface routes.
 */
void
rip_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		lck_mtx_lock(rt_mtx);
		for (ia = in_ifaddrhead.tqh_first; ia;
		     ia = ia->ia_link.tqe_next) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				/*
				 * in_ifscrub kills the interface route.
				 */
				in_ifscrub(ia->ia_ifp, ia, 1);
				/*
				 * in_ifadown gets rid of all the rest of
				 * the routes.  This is not quite the right
				 * thing to do, but at least if we are running
				 * a routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 1);
				break;
			}
		}
		lck_mtx_unlock(rt_mtx);
		break;

	case PRC_IFUP:
		lck_mtx_lock(rt_mtx);
		for (ia = in_ifaddrhead.tqh_first; ia;
		     ia = ia->ia_link.tqe_next) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		if (ia == 0 || (ia->ia_flags & IFA_ROUTE)) {
			lck_mtx_unlock(rt_mtx);
			return;
		}
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = rtinit_locked(&ia->ia_ifa, RTM_ADD, flags);
		lck_mtx_unlock(rt_mtx);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;
		break;
	}
}

u_long	rip_sendspace = RIPSNDQ;
u_long	rip_recvspace = RIPRCVQ;

SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_INT(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum incoming raw IP datagram size");
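
/*
 * Tuning sketch (MIB names assumed from the declarations above): the
 * reservations passed to soreserve() in rip_attach() below can be
 * inspected or changed from user space, e.g.
 *
 *	sysctl net.inet.raw.maxdgram
 *	sysctl -w net.inet.raw.recvspace=16384
 *
 * or programmatically via sysctlbyname("net.inet.raw.maxdgram", ...).
 */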

static int
rip_attach(struct socket *so, int proto, struct proc *p)
{
	struct inpcb *inp;
	int error, s;

	inp = sotoinpcb(so);
	if (inp)
		panic("rip_attach");
#if __APPLE__
	if ((so->so_state & SS_PRIV) == 0)
		return (EPERM);
#else
	if (p && (error = suser(p)) != 0)
		return error;
#endif

	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return error;
	s = splnet();
	error = in_pcballoc(so, &ripcbinfo, p);
	splx(s);
	if (error)
		return error;
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = ip_defttl;
	return 0;
}

__private_extern__ int
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	if (inp == 0)
		panic("rip_detach");
	if (so == ip_mrouter)
		ip_mrouter_done();
	ip_rsvp_force_done(so);
	if (so == ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	return 0;
}

__private_extern__ int
rip_abort(struct socket *so)
{
	soisdisconnected(so);
	return rip_detach(so);
}

__private_extern__ int
rip_disconnect(struct socket *so)
{
	if ((so->so_state & SS_ISCONNECTED) == 0)
		return ENOTCONN;
	return rip_abort(so);
}

__private_extern__ int
rip_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	struct inpcb *inp = sotoinpcb(so);
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct ifaddr *ifa = NULL;

	if (nam->sa_len != sizeof(*addr))
		return EINVAL;

	if (TAILQ_EMPTY(&ifnet_head) || ((addr->sin_family != AF_INET) &&
	    (addr->sin_family != AF_IMPLINK)) ||
	    (addr->sin_addr.s_addr &&
	     (ifa = ifa_ifwithaddr((struct sockaddr *)addr)) == 0)) {
		return EADDRNOTAVAIL;
	}
	else if (ifa) {
		ifafree(ifa);
		ifa = NULL;
	}
	inp->inp_laddr = addr->sin_addr;
	return 0;
}
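
/*
 * Bind sketch (hypothetical user-space code): binding simply pins inp_laddr,
 * so rip_input() only matches datagrams sent to that local address; a
 * non-local address fails with EADDRNOTAVAIL.  "s" is an existing raw
 * socket and 192.0.2.10 stands in for a configured interface address.
 *
 *	struct sockaddr_in sin = { sizeof (sin), AF_INET };
 *	sin.sin_addr.s_addr = inet_addr("192.0.2.10");
 *	if (bind(s, (struct sockaddr *)&sin, sizeof (sin)) < 0)
 *		err(1, "bind");
 */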

__private_extern__ int
rip_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	struct inpcb *inp = sotoinpcb(so);
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;

	if (nam->sa_len != sizeof(*addr))
		return EINVAL;
	if (TAILQ_EMPTY(&ifnet_head))
		return EADDRNOTAVAIL;
	if ((addr->sin_family != AF_INET) &&
	    (addr->sin_family != AF_IMPLINK))
		return EAFNOSUPPORT;
	inp->inp_faddr = addr->sin_addr;
	soisconnected(so);
	return 0;
}

__private_extern__ int
rip_shutdown(struct socket *so)
{
	socantsendmore(so);
	return 0;
}

__private_extern__ int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
	 struct mbuf *control, struct proc *p)
{
	struct inpcb *inp = sotoinpcb(so);
	register u_long dst;

	if (so->so_state & SS_ISCONNECTED) {
		if (nam) {
			m_freem(m);
			return EISCONN;
		}
		dst = inp->inp_faddr.s_addr;
	} else {
		if (nam == NULL) {
			m_freem(m);
			return ENOTCONN;
		}
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	return rip_output(m, so, dst);
}
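
/*
 * Destination sketch (hypothetical user-space code): on a connected socket
 * the stored foreign address supplies "dst" and an explicit address is
 * rejected with EISCONN; an unconnected socket must name a destination or
 * the send fails with ENOTCONN.  "to", "payload" and "payloadlen" are
 * placeholders.
 *
 *	connect(s, (struct sockaddr *)&to, sizeof (to));
 *	send(s, payload, payloadlen, 0);
 *
 * or, without connect():
 *
 *	sendto(s, payload, payloadlen, 0, (struct sockaddr *)&to, sizeof (to));
 */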

/*
 * Note: rip_unlock is installed for several protocols in place of the
 * generic socket_unlock; it also disposes of the socket's pcb when the
 * last reference is dropped.
 */
int
rip_unlock(struct socket *so, int refcount, int debug)
{
	int lr_saved;
	struct inpcb *inp = sotoinpcb(so);

	if (debug == 0)
		lr_saved = (unsigned int) __builtin_return_address(0);
	else
		lr_saved = debug;

	if (refcount) {
		if (so->so_usecount <= 0)
			panic("rip_unlock: bad refcount so=%x val=%x\n", so, so->so_usecount);
		so->so_usecount--;
		if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) {
			/* cleanup after last reference */
			lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
			lck_rw_lock_exclusive(ripcbinfo.mtx);
			in_pcbdispose(inp);
			lck_rw_done(ripcbinfo.mtx);
			return(0);
		}
	}
	so->unlock_lr[so->next_unlock_lr] = (u_int *)lr_saved;
	so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
	lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
	return(0);
}

static int
rip_pcblist SYSCTL_HANDLER_ARGS
{
	int error, i, n, s;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_exclusive(ripcbinfo.mtx);
	if (req->oldptr == USER_ADDR_NULL) {
		n = ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
			+ (n + n/8) * sizeof(struct xinpcb);
		lck_rw_done(ripcbinfo.mtx);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		lck_rw_done(ripcbinfo.mtx);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = ripcbinfo.ipi_gencnt;
	n = ripcbinfo.ipi_count;

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error) {
		lck_rw_done(ripcbinfo.mtx);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(ripcbinfo.mtx);
		return 0;
	}

	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0) {
		lck_rw_done(ripcbinfo.mtx);
		return ENOMEM;
	}

	for (inp = ripcbinfo.listhead->lh_first, i = 0; inp && i < n;
	     inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
			inp_list[i++] = inp;
	}
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			struct xinpcb xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			inpcb_to_compat(inp, &xi.xi_inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof xig;
		xig.xig_gen = ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = ripcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	FREE(inp_list, M_TEMP);
	lck_rw_done(ripcbinfo.mtx);
	return error;
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD, 0, 0,
	    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
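
/*
 * Enumeration sketch (hypothetical user-space code): netstat-style readers
 * fetch the whole list in one call and compare the leading and trailing
 * xinpgen generation counts to detect concurrent changes.
 *
 *	size_t len = 0;
 *	sysctlbyname("net.inet.raw.pcblist", NULL, &len, NULL, 0);
 *	char *buf = malloc(len);
 *	sysctlbyname("net.inet.raw.pcblist", buf, &len, NULL, 0);
 *
 * buf then holds a struct xinpgen, one struct xinpcb per raw socket, and a
 * trailing struct xinpgen carrying the final generation count.
 */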

struct pr_usrreqs rip_usrreqs = {
	rip_abort, pru_accept_notsupp, rip_attach, rip_bind, rip_connect,
	pru_connect2_notsupp, in_control, rip_detach, rip_disconnect,
	pru_listen_notsupp, in_setpeeraddr, pru_rcvd_notsupp,
	pru_rcvoob_notsupp, rip_send, pru_sense_null, rip_shutdown,
	in_setsockaddr, sosend, soreceive, pru_sopoll_notsupp
};