/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <vm/vm_zone.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>

#include <netinet/ip_fw.h>

#include <netinet6/ipsec.h>

#include <netinet/ip_dummynet.h>
extern int ipsec_bypass;
extern lck_mtx_t *sadb_mutex;

extern u_long route_generation;
struct inpcbhead ripcb;
struct inpcbinfo ripcbinfo;

/* control hooks for ipfw and dummynet */
ip_fw_ctl_t *ip_fw_ctl_ptr;
#if DUMMYNET
ip_dn_ctl_t *ip_dn_ctl_ptr;
#endif /* DUMMYNET */
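
/*
 * Both hook pointers start out NULL; the firewall and dummynet code fill
 * them in when they initialize, and rip_ctloutput() below only calls
 * through them after checking that they are set (ip_fw_ctl_ptr != 0,
 * DUMMYNET_LOADED).
 */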
/*
 * Nominal space allocated to a raw ip socket.
 */

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize raw connection block q.
 */
void
rip_init()
{
	struct inpcbinfo *pcbinfo;

	ripcbinfo.listhead = &ripcb;
	/*
	 * XXX We don't use the hash list for raw IP, but it's easier
	 * to allocate a one entry hash list than it is to check all
	 * over the place for hashbase == NULL.
	 */
	ripcbinfo.hashbase = hashinit(1, M_PCB, &ripcbinfo.hashmask);
	ripcbinfo.porthashbase = hashinit(1, M_PCB, &ripcbinfo.porthashmask);

	ripcbinfo.ipi_zone = (void *) zinit(sizeof(struct inpcb),
					    (4096 * sizeof(struct inpcb)),
					    4096, "ripzone");

	pcbinfo = &ripcbinfo;
	/*
	 * allocate lock group attribute and group for raw IP pcb mutexes
	 */
	pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setdefault(pcbinfo->mtx_grp_attr);

	pcbinfo->mtx_grp = lck_grp_alloc_init("ripcb", pcbinfo->mtx_grp_attr);

	/*
	 * allocate the lock attribute for raw IP pcb mutexes
	 */
	pcbinfo->mtx_attr = lck_attr_alloc_init();
	lck_attr_setdefault(pcbinfo->mtx_attr);

	if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL)
		return;	/* pretty much dead if this fails... */
}
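
/*
 * Because the hash tables above are sized to a single bucket, raw IP never
 * does hashed PCB lookups; rip_input() below always walks the whole ripcb
 * list and delivers to every matching socket.
 */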

static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET };
/*
 * Setup generic address and protocol structures
 * for raw_input routine, then pass them along with
 * mbuf chain.
 */
void
rip_input(m, iphlen)
	struct mbuf *m;
	int iphlen;
{
	register struct ip *ip = mtod(m, struct ip *);
	register struct inpcb *inp;
	struct inpcb *last = 0;
	struct mbuf *opts = 0;
	int skipit;

	ripsrc.sin_addr = ip->ip_src;
	lck_rw_lock_shared(ripcbinfo.mtx);
	LIST_FOREACH(inp, &ripcb, inp_list) {
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
		if (inp->inp_ip_p && (inp->inp_ip_p != ip->ip_p))
			continue;
		if (inp->inp_laddr.s_addr &&
		    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr &&
		    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (last) {
			struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);

			skipit = 0;
			/* check AH/ESP integrity. */
			if (ipsec_bypass == 0 && n) {
				lck_mtx_lock(sadb_mutex);
				if (ipsec4_in_reject_so(n, last->inp_socket)) {
					m_freem(n);
					ipsecstat.in_polvio++;
					/* do not inject data to pcb */
					skipit = 1;
				}
				lck_mtx_unlock(sadb_mutex);
			}
			if (n && skipit == 0) {
				int error = 0;

				if (last->inp_flags & INP_CONTROLOPTS ||
				    last->inp_socket->so_options & SO_TIMESTAMP)
					ip_savecontrol(last, &opts, ip, n);
				if (last->inp_flags & INP_STRIPHDR) {
					n->m_data += iphlen;
					n->m_len -= iphlen;
					n->m_pkthdr.len -= iphlen;
				}
// ###LOCK need to lock that socket?
				if (sbappendaddr(&last->inp_socket->so_rcv,
						(struct sockaddr *)&ripsrc, n,
						opts, &error) != 0) {
					sorwakeup(last->inp_socket);
				}
				else {
					/* should notify about lost packet */
					kprintf("rip_input can't append to socket\n");
				}
				opts = 0;
			}
		}
		last = inp;
	}
	lck_rw_done(ripcbinfo.mtx);
	skipit = 0;
	/* check AH/ESP integrity. */
	if (ipsec_bypass == 0 && last) {
		lck_mtx_lock(sadb_mutex);
		if (ipsec4_in_reject_so(m, last->inp_socket)) {
			m_freem(m);
			ipsecstat.in_polvio++;
			ipstat.ips_delivered--;
			/* do not inject data to pcb */
			skipit = 1;
		}
		lck_mtx_unlock(sadb_mutex);
	}
	if (skipit == 0) {
		if (last) {
			if (last->inp_flags & INP_CONTROLOPTS ||
			    last->inp_socket->so_options & SO_TIMESTAMP)
				ip_savecontrol(last, &opts, ip, m);
			if (last->inp_flags & INP_STRIPHDR) {
				m->m_data += iphlen;
				m->m_len -= iphlen;
				m->m_pkthdr.len -= iphlen;
			}
			if (sbappendaddr(&last->inp_socket->so_rcv,
			    (struct sockaddr *)&ripsrc, m, opts, NULL) != 0) {
				sorwakeup(last->inp_socket);
			} else {
				kprintf("rip_input(2) can't append to socket\n");
			}
		} else {
			m_freem(m);
			ipstat.ips_noproto++;
			ipstat.ips_delivered--;
		}
	}
}
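
/*
 * For example, a privileged process that opens
 *	s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 * gets a pcb with inp_ip_p == IPPROTO_ICMP and wildcard addresses, so the
 * matching loop above hands it a copy of every inbound ICMP packet;
 * bind() and connect() narrow the match via inp_laddr and inp_faddr.
 */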
/*
 * Generate IP header and pass packet to ip_output.
 * Tack on options user may have setup with control call.
 */
int
rip_output(m, so, dst)
	register struct mbuf *m;
	struct socket *so;
	u_long dst;
{
	register struct ip *ip;
	register struct inpcb *inp = sotoinpcb(so);
	int flags = (so->so_options & SO_DONTROUTE) | IP_ALLOWBROADCAST;

	/*
	 * If the user handed us a complete IP packet, use it.
	 * Otherwise, allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_WAIT);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_off = 0;
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = m->m_pkthdr.len;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst.s_addr = dst;
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		ip = mtod(m, struct ip *);
		/* don't allow both user specified and setsockopt options,
		   and don't allow packet length sizes that will crash */
		if (((IP_VHL_HL(ip->ip_vhl) != (sizeof (*ip) >> 2))
		     && inp->inp_options)
		    || (ip->ip_len > m->m_pkthdr.len)
		    || (ip->ip_len < (IP_VHL_HL(ip->ip_vhl) << 2))) {
			m_freem(m);
			return EINVAL;
		}
		if (ip->ip_id == 0)
#if RANDOM_IP_ID
			ip->ip_id = ip_randomid();
#else
			ip->ip_id = htons(ip_id++);
#endif
		/* XXX prevent ip_output from overwriting header fields */
		flags |= IP_RAWOUTPUT;
	}
	if (ipsec_bypass == 0 && ipsec_setsocket(m, so) != 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (inp->inp_route.ro_rt && inp->inp_route.ro_rt->generation_id != route_generation) {
		rtfree(inp->inp_route.ro_rt);
		inp->inp_route.ro_rt = (struct rtentry *)0;
	}

	return (ip_output_list(m, 0, inp->inp_options, &inp->inp_route, flags,
			inp->inp_moptions));
}

int
load_ipfw(void)
{
	int err = 0;

	ipfw_init();

#if DUMMYNET
	if (!DUMMYNET_LOADED)
		ip_dn_init();
#endif /* DUMMYNET */

	return err == 0 && ip_fw_ctl_ptr == NULL ? -1 : err;
}

/*
 * Raw IP socket option processing.
 */
int
rip_ctloutput(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	struct	inpcb *inp = sotoinpcb(so);
	int	error, optval;

	if (sopt->sopt_level != IPPROTO_IP)
		return (EINVAL);

	error = 0;

	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_STRIPHDR:
			optval = inp->inp_flags & INP_STRIPHDR;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_FW_ADD:
		case IP_FW_GET:
		case IP_OLD_FW_ADD:
		case IP_OLD_FW_GET:
			if (ip_fw_ctl_ptr == 0)
				error = load_ipfw();
			if (ip_fw_ctl_ptr && error == 0)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

#if DUMMYNET
		case IP_DUMMYNET_GET:
			if (DUMMYNET_LOADED)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;
#endif /* DUMMYNET */

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
			error = ip_mrouter_get(so, sopt);
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_STRIPHDR:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_STRIPHDR;
			else
				inp->inp_flags &= ~INP_STRIPHDR;
			break;

		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_OLD_FW_ADD:
		case IP_OLD_FW_DEL:
		case IP_OLD_FW_FLUSH:
		case IP_OLD_FW_ZERO:
		case IP_OLD_FW_RESETLOG:
			if (ip_fw_ctl_ptr == 0)
				error = load_ipfw();
			if (ip_fw_ctl_ptr && error == 0)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

#if DUMMYNET
		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (DUMMYNET_LOADED)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;
#endif /* DUMMYNET */

		case IP_RSVP_ON:
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = ip_rsvp_done();
			break;

			/* XXX - should be combined */
		case IP_RSVP_VIF_ON:
			error = ip_rsvp_vif_init(so, sopt);
			break;

		case IP_RSVP_VIF_OFF:
			error = ip_rsvp_vif_done(so, sopt);
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
			error = ip_mrouter_set(so, sopt);
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}
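
/*
 * Typical userland use of the two raw-IP-specific options handled above:
 *
 *	int on = 1;
 *	setsockopt(s, IPPROTO_IP, IP_HDRINCL, &on, sizeof (on));
 *
 * makes the caller supply a complete IP header on each send (rip_output
 * then sets IP_RAWOUTPUT so ip_output leaves the header alone), while
 * IP_STRIPHDR makes rip_input remove the IP header before queueing
 * received packets on the socket.
 */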
/*
 * This function exists solely to receive the PRC_IFDOWN messages which
 * are sent by if_down().  It looks for an ifaddr whose ifa_addr is sa,
 * and calls in_ifadown() to remove all routes corresponding to that address.
 * It also receives the PRC_IFUP messages from if_up() and reinstalls the
 * interface routes.
 */
void
rip_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		lck_mtx_lock(rt_mtx);
		for (ia = in_ifaddrhead.tqh_first; ia;
		     ia = ia->ia_link.tqe_next) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				/*
				 * in_ifscrub kills the interface route.
				 */
				in_ifscrub(ia->ia_ifp, ia, 1);
				/*
				 * in_ifadown gets rid of all the rest of
				 * the routes.  This is not quite the right
				 * thing to do, but at least if we are running
				 * a routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 1);
				break;
			}
		}
		lck_mtx_unlock(rt_mtx);
		break;

	case PRC_IFUP:
		lck_mtx_lock(rt_mtx);
		for (ia = in_ifaddrhead.tqh_first; ia;
		     ia = ia->ia_link.tqe_next) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		if (ia == 0 || (ia->ia_flags & IFA_ROUTE)) {
			lck_mtx_unlock(rt_mtx);
			return;
		}

		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = rtinit_locked(&ia->ia_ifa, RTM_ADD, flags);
		lck_mtx_unlock(rt_mtx);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;
		break;
	}
}

u_long	rip_sendspace = RIPSNDQ;
u_long	rip_recvspace = RIPRCVQ;

SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_INT(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum incoming raw IP datagram size");
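
/*
 * Both limits are run-time tunable, e.g.
 *	sysctl -w net.inet.raw.maxdgram=16384
 * changes the send-buffer reservation that rip_attach() passes to
 * soreserve() for subsequently created raw sockets.
 */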
__private_extern__ int
rip_attach(struct socket *so, int proto, struct proc *p)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	if (inp)
		panic("rip_attach");
	if ((so->so_state & SS_PRIV) == 0)
		return (EPERM);
	if (p && (error = suser(p)) != 0)
		return (error);

	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return error;
	error = in_pcballoc(so, &ripcbinfo, p);
	if (error)
		return error;
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = ip_defttl;
	return 0;
}
__private_extern__ int
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	if (inp == 0)
		panic("rip_detach");
	if (so == ip_mrouter)
		ip_mrouter_done();
	ip_rsvp_force_done(so);
	in_pcbdetach(inp);
	return 0;
}
__private_extern__ int
rip_abort(struct socket *so)
{
	soisdisconnected(so);
	return rip_detach(so);
}
__private_extern__ int
rip_disconnect(struct socket *so)
{
	if ((so->so_state & SS_ISCONNECTED) == 0)
		return ENOTCONN;
	return rip_abort(so);
}
__private_extern__ int
rip_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	struct inpcb *inp = sotoinpcb(so);
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct ifaddr *ifa = NULL;

	if (nam->sa_len != sizeof(*addr))
		return EINVAL;

	if (TAILQ_EMPTY(&ifnet_head) || ((addr->sin_family != AF_INET) &&
				    (addr->sin_family != AF_IMPLINK)) ||
	    (addr->sin_addr.s_addr &&
	     (ifa = ifa_ifwithaddr((struct sockaddr *)addr)) == 0)) {
		return EADDRNOTAVAIL;
	}
	else if (ifa) {
		ifafree(ifa);
		ifa = NULL;
	}
	inp->inp_laddr = addr->sin_addr;
	return 0;
}
__private_extern__ int
rip_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	struct inpcb *inp = sotoinpcb(so);
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;

	if (nam->sa_len != sizeof(*addr))
		return EINVAL;
	if (TAILQ_EMPTY(&ifnet_head))
		return EADDRNOTAVAIL;
	if ((addr->sin_family != AF_INET) &&
	    (addr->sin_family != AF_IMPLINK))
		return EAFNOSUPPORT;
	inp->inp_faddr = addr->sin_addr;
	soisconnected(so);
	return 0;
}
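
/*
 * Note that connect() on a raw socket only records the peer address:
 * rip_input() then discards packets from other sources, and rip_send()
 * can be used without an explicit destination (send() instead of sendto()).
 */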
__private_extern__ int
rip_shutdown(struct socket *so)
{
	socantsendmore(so);
	return 0;
}
__private_extern__ int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
	 struct mbuf *control, struct proc *p)
{
	struct inpcb *inp = sotoinpcb(so);
	register u_long dst;

	if (so->so_state & SS_ISCONNECTED) {
		if (nam) {
			m_freem(m);
			return EISCONN;
		}
		dst = inp->inp_faddr.s_addr;
	} else {
		if (nam == NULL) {
			m_freem(m);
			return ENOTCONN;
		}
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	return rip_output(m, so, dst);
}
__private_extern__ int
rip_unlock(struct socket *so, int refcount, int debug)
{
	int lr_saved;
	struct inpcb *inp = sotoinpcb(so);

	if (debug == 0) {
		__asm__ volatile("mflr %0" : "=r" (lr_saved));
	}
	else lr_saved = debug;

	if (refcount) {
		if (so->so_usecount <= 0)
			panic("rip_unlock: bad refcount so=%x val=%x\n", so, so->so_usecount);
		so->so_usecount--;
		if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) {
			lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
			lck_rw_lock_exclusive(ripcbinfo.mtx);
			in_pcbdispose(inp);
			lck_rw_done(ripcbinfo.mtx);
			return(0);
		}
	}
	lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
	return(0);
}
static int
rip_pcblist SYSCTL_HANDLER_ARGS
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_exclusive(ripcbinfo.mtx);
	if (req->oldptr == USER_ADDR_NULL) {
		n = ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
			+ (n + n/8) * sizeof(struct xinpcb);
		lck_rw_done(ripcbinfo.mtx);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		lck_rw_done(ripcbinfo.mtx);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = ripcbinfo.ipi_gencnt;
	n = ripcbinfo.ipi_count;

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error) {
		lck_rw_done(ripcbinfo.mtx);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(ripcbinfo.mtx);
		return 0;
	}

	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0) {
		lck_rw_done(ripcbinfo.mtx);
		return ENOMEM;
	}

	for (inp = ripcbinfo.listhead->lh_first, i = 0; inp && i < n;
	     inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
			inp_list[i++] = inp;
	}
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			struct xinpcb xi;

			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			inpcb_to_compat(inp, &xi.xi_inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		xig.xig_gen = ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = ripcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	FREE(inp_list, M_TEMP);
	lck_rw_done(ripcbinfo.mtx);
	return error;
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD, 0, 0,
	    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
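
/*
 * Userland readers such as netstat walk this list via the
 * net.inet.raw.pcblist sysctl: first with a NULL buffer to learn the size,
 * then again to copy out one struct xinpcb per raw socket.
 */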

struct pr_usrreqs rip_usrreqs = {
	rip_abort, pru_accept_notsupp, rip_attach, rip_bind, rip_connect,
	pru_connect2_notsupp, in_control, rip_detach, rip_disconnect,
	pru_listen_notsupp, in_setpeeraddr, pru_rcvd_notsupp,
	pru_rcvoob_notsupp, rip_send, pru_sense_null, rip_shutdown,
	in_setsockaddr, sosend, soreceive, pru_sopoll_notsupp
};
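
/*
 * This table is what the socket layer dispatches through: user requests on
 * an AF_INET/SOCK_RAW socket (socket, bind, connect, send, ...) reach the
 * rip_* functions above via rip_usrreqs rather than by direct calls.
 */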