/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/param.h>		/* for definition of NULL */
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/mbuf.h>		/* mbuf, m_pullup and m_tag routines */
#include <sys/systm.h>

#include <kern/locks.h>		/* lck_mtx_t and lock group/attr routines */

#define _IP_VHL			/* expose ip_vhl and the IP_VHL_* macros in <netinet/ip.h> */
#include <net/if_var.h>
#include <net/route.h>
#include <net/kpi_protocol.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/in6_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/kpi_ipfilter_var.h>
/*
 * kipf_lock and kipf_ref protect the linkage of the list of IP filters.
 * An IP filter can be removed only when kipf_ref is zero.
 * If an IP filter cannot be removed because kipf_ref is not zero, then
 * the IP filter is marked and kipf_delayed_remove is set so that when
 * kipf_ref eventually goes down to zero, the IP filter is removed.
 */
static lck_mtx_t *kipf_lock = 0;
static unsigned long kipf_ref = 0;
static unsigned long kipf_delayed_remove = 0;
__private_extern__ struct ipfilter_list ipv4_filters = TAILQ_HEAD_INITIALIZER(ipv4_filters);
__private_extern__ struct ipfilter_list ipv6_filters = TAILQ_HEAD_INITIALIZER(ipv6_filters);
__private_extern__ struct ipfilter_list tbr_filters = TAILQ_HEAD_INITIALIZER(tbr_filters);
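
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * code that walks ipv4_filters or ipv6_filters is expected to bracket the
 * traversal with ipf_ref()/ipf_unref() so that ipf_remove() defers the
 * actual unlink while any filter may still be running.  Roughly:
 *
 *	struct ipfilter *filter;
 *
 *	ipf_ref();
 *	TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
 *		if (filter->ipf_filter.ipf_input != NULL)
 *			result = filter->ipf_filter.ipf_input(
 *			    filter->ipf_filter.cookie, &data, offset, protocol);
 *	}
 *	ipf_unref();
 *
 * result, data, offset and protocol stand in for the caller's own state;
 * the ipf_input argument list shown here is assumed from
 * <netinet/kpi_ipfilter.h>.  The real call sites live in the IP
 * input/output paths.
 */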
__private_extern__ void
ipf_ref(void)
{
	lck_mtx_lock(kipf_lock);
	kipf_ref++;
	lck_mtx_unlock(kipf_lock);
}
__private_extern__ void
ipf_unref(void)
{
	lck_mtx_lock(kipf_lock);

	if (kipf_ref == 0)
		panic("ipf_unref: kipf_ref == 0\n");

	kipf_ref--;
	if (kipf_ref == 0 && kipf_delayed_remove != 0) {
		struct ipfilter *filter;

		while ((filter = TAILQ_FIRST(&tbr_filters))) {
			ipf_detach_func ipf_detach = filter->ipf_filter.ipf_detach;
			void* cookie = filter->ipf_filter.cookie;

			TAILQ_REMOVE(filter->ipf_head, filter, ipf_link);
			TAILQ_REMOVE(&tbr_filters, filter, ipf_tbr);
			kipf_delayed_remove--;

			if (ipf_detach) {
				lck_mtx_unlock(kipf_lock);
				ipf_detach(cookie);
				lck_mtx_lock(kipf_lock);
				/* In case some filter got to run while we released the lock */
				if (kipf_ref != 0)
					break;
			}
		}
	}
	lck_mtx_unlock(kipf_lock);
}
static errno_t
ipf_add(
	const struct ipf_filter *filter,
	ipfilter_t *filter_ref,
	struct ipfilter_list *head)
{
	struct ipfilter *new_filter;

	if (filter->name == NULL || (filter->ipf_input == NULL && filter->ipf_output == NULL))
		return EINVAL;

	MALLOC(new_filter, struct ipfilter*, sizeof(*new_filter), M_IFADDR, M_WAITOK);
	if (new_filter == NULL)
		return ENOMEM;

	lck_mtx_lock(kipf_lock);
	new_filter->ipf_filter = *filter;
	new_filter->ipf_head = head;

	/*
	 * Make sure third parties have a chance to filter packets before
	 * SharedIP.  Always insert SharedIP at the end of the list.
	 */
	if (filter->name != NULL &&
		strcmp(filter->name, "com.apple.nke.SharedIP") == 0) {
		TAILQ_INSERT_TAIL(head, new_filter, ipf_link);
	}
	else {
		TAILQ_INSERT_HEAD(head, new_filter, ipf_link);
	}

	lck_mtx_unlock(kipf_lock);

	*filter_ref = (ipfilter_t)new_filter;
	return 0;
}
errno_t
ipf_addv4(
	const struct ipf_filter *filter,
	ipfilter_t *filter_ref)
{
	return ipf_add(filter, filter_ref, &ipv4_filters);
}
errno_t
ipf_addv6(
	const struct ipf_filter *filter,
	ipfilter_t *filter_ref)
{
	return ipf_add(filter, filter_ref, &ipv6_filters);
}
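
/*
 * Registration sketch (editor's illustration, not part of the original
 * file): a kernel extension would typically attach a filter with
 * ipf_addv4()/ipf_addv6() and detach it with ipf_remove(), filling in the
 * struct ipf_filter fields referenced above (cookie, name, ipf_input,
 * ipf_output, ipf_detach).  The callback names are hypothetical.
 *
 *	static ipfilter_t my_filter_ref;
 *
 *	static struct ipf_filter my_filter = {
 *		.cookie		= NULL,
 *		.name		= "com.example.myipfilter",
 *		.ipf_input	= my_ipf_input,
 *		.ipf_output	= my_ipf_output,
 *		.ipf_detach	= my_ipf_detach,
 *	};
 *
 *	errno_t err = ipf_addv4(&my_filter, &my_filter_ref);
 *	...
 *	err = ipf_remove(my_filter_ref);
 */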
errno_t
ipf_remove(
	ipfilter_t filter_ref)
{
	struct ipfilter *match = (struct ipfilter*)filter_ref;
	struct ipfilter_list *head;

	if (match == 0 || (match->ipf_head != &ipv4_filters && match->ipf_head != &ipv6_filters))
		return EINVAL;

	head = match->ipf_head;

	lck_mtx_lock(kipf_lock);
	TAILQ_FOREACH(match, head, ipf_link) {
		if (match == (struct ipfilter*)filter_ref) {
			ipf_detach_func ipf_detach = match->ipf_filter.ipf_detach;
			void* cookie = match->ipf_filter.cookie;

			/*
			 * Cannot detach while there are filters running
			 */
			if (kipf_ref) {
				kipf_delayed_remove++;
				TAILQ_INSERT_TAIL(&tbr_filters, match, ipf_tbr);
				match->ipf_filter.ipf_input = 0;
				match->ipf_filter.ipf_output = 0;
				lck_mtx_unlock(kipf_lock);
			}
			else {
				TAILQ_REMOVE(head, match, ipf_link);
				lck_mtx_unlock(kipf_lock);

				if (ipf_detach)
					ipf_detach(cookie);
				FREE(match, M_IFADDR);
			}
			return 0;
		}
	}
	lck_mtx_unlock(kipf_lock);

	return ENOENT;
}
errno_t
ipf_inject_input(
	mbuf_t data,
	ipfilter_t filter_ref)
{
	struct mbuf *m = (struct mbuf*)data;
	struct m_tag *mtag = 0;
	struct ip *ip = mtod(m, struct ip *);
	u_int8_t vers;
	int hlen;
	errno_t error = 0;
	protocol_family_t proto;

	vers = IP_VHL_V(ip->ip_vhl);

	switch (vers) {
		case 4:
			proto = PF_INET;
			break;
		case 6:
			proto = PF_INET6;
			break;
		default:
			error = ENOTSUP;
			goto done;
	}

	if (filter_ref == 0 && m->m_pkthdr.rcvif == 0) {
		m->m_pkthdr.rcvif = ifunit("lo0");
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.csum_flags = 0;
		if (vers == 4) {
			hlen = IP_VHL_HL(ip->ip_vhl) << 2;
			ip->ip_sum = 0;
			ip->ip_sum = in_cksum(m, hlen);
		}
	}
	if (filter_ref != 0) {
		mtag = m_tag_alloc(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFILT,
			sizeof (ipfilter_t), M_NOWAIT);
		if (mtag == NULL) {
			error = ENOMEM;
			goto done;
		}
		*(ipfilter_t*)(mtag+1) = filter_ref;
		m_tag_prepend(m, mtag);
	}

	error = proto_inject(proto, data);

done:
	return error;
}
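
/*
 * Re-injection sketch (editor's illustration, not part of the original
 * file): a filter that swallows a packet from its input callback can hand
 * it back to the stack later with ipf_inject_input(), passing its own
 * filter_ref.  The m_tag attached above records that reference so the
 * injecting filter can be recognized (see ipf_get_inject_filter() below)
 * and the packet is not looped back through it.  The callback shape and
 * the EJUSTRETURN "swallowed" convention are assumed from
 * <netinet/kpi_ipfilter.h>; my_ipf_input and the queueing helpers are
 * hypothetical.
 *
 *	static errno_t
 *	my_ipf_input(void *cookie, mbuf_t *data, int offset, u_int8_t protocol)
 *	{
 *		if (my_should_hold(*data)) {
 *			my_enqueue(*data);	// the filter now owns the mbuf
 *			return EJUSTRETURN;	// tell the stack it was swallowed
 *		}
 *		return 0;			// let the packet continue
 *	}
 *
 *	// later, from another context:
 *	//	ipf_inject_input(held_mbuf, my_filter_ref);
 */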
static errno_t
ipf_injectv4_out(
	mbuf_t data,
	ipfilter_t filter_ref,
	ipf_pktopts_t options)
{
	struct route ro;
	struct sockaddr_in *sin = (struct sockaddr_in*)&ro.ro_dst;
	struct ip *ip;
	struct mbuf *m = (struct mbuf*)data;
	errno_t error = 0;
	struct m_tag *mtag = 0;
	struct ip_moptions *imo = 0, ip_moptions;

	/* Make the IP header contiguous in the mbuf */
	if ((size_t)m->m_len < sizeof(struct ip)) {
		m = m_pullup(m, sizeof(struct ip));
		if (m == NULL) return ENOMEM;
	}
	ip = (struct ip*)m_mtod(m);

	if (filter_ref != 0) {
		mtag = m_tag_alloc(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFILT,
			sizeof (ipfilter_t), M_NOWAIT);
		if (mtag == NULL) {
			m_freem(m);
			return ENOMEM;
		}
		*(ipfilter_t*)(mtag+1) = filter_ref;
		m_tag_prepend(m, mtag);
	}

	if (options && (options->ippo_flags & IPPOF_MCAST_OPTS)) {
		imo = &ip_moptions;

		bzero(imo, sizeof(struct ip_moptions));
		imo->imo_multicast_ifp = options->ippo_mcast_ifnet;
		imo->imo_multicast_ttl = options->ippo_mcast_ttl;
		imo->imo_multicast_loop = options->ippo_mcast_loop;
	}

	/* Fill out a route structure and get a route */
	bzero(&ro, sizeof(struct route));
	sin->sin_len = sizeof(struct sockaddr_in);
	sin->sin_family = AF_INET;
	sin->sin_addr = ip->ip_dst;
	rtalloc(&ro);
	if (ro.ro_rt == NULL) {
		m_freem(m);
		return ENETUNREACH;
	}

	/* Put ip_len and ip_off in host byte order, ip_output expects that */
	NTOHS(ip->ip_len);
	NTOHS(ip->ip_off);

	error = ip_output(m, NULL, &ro, IP_ALLOWBROADCAST | IP_RAWOUTPUT, imo);

	/* Release the route */
	if (ro.ro_rt)
		rtfree(ro.ro_rt);

	return error;
}
static errno_t
ipf_injectv6_out(
	mbuf_t data,
	ipfilter_t filter_ref,
	ipf_pktopts_t options)
{
	struct route_in6 ro;
	struct sockaddr_in6 *sin6 = &ro.ro_dst;
	struct ip6_hdr *ip6;
	struct mbuf *m = (struct mbuf*)data;
	errno_t error = 0;
	struct m_tag *mtag = 0;
	struct ip6_moptions *im6o = 0, ip6_moptions;

	/* Make the IP header contiguous in the mbuf */
	if ((size_t)m->m_len < sizeof(struct ip6_hdr)) {
		m = m_pullup(m, sizeof(struct ip6_hdr));
		if (m == NULL) return ENOMEM;
	}
	ip6 = (struct ip6_hdr*)m_mtod(m);

	if (filter_ref != 0) {
		mtag = m_tag_alloc(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFILT,
			sizeof (ipfilter_t), M_NOWAIT);
		if (mtag == NULL) {
			m_freem(m);
			return ENOMEM;
		}
		*(ipfilter_t*)(mtag+1) = filter_ref;
		m_tag_prepend(m, mtag);
	}

	if (options && (options->ippo_flags & IPPOF_MCAST_OPTS)) {
		im6o = &ip6_moptions;

		bzero(im6o, sizeof(struct ip6_moptions));
		im6o->im6o_multicast_ifp = options->ippo_mcast_ifnet;
		im6o->im6o_multicast_hlim = options->ippo_mcast_ttl;
		im6o->im6o_multicast_loop = options->ippo_mcast_loop;
	}

	/* Fill out a route structure and get a route */
	bzero(&ro, sizeof(struct route_in6));
	sin6->sin6_len = sizeof(struct sockaddr_in6);
	sin6->sin6_family = AF_INET6;
	sin6->sin6_addr = ip6->ip6_dst;
	/* This breaks loopback multicast! */
	/* The scope ID should already be at s6_addr16[1] */
	if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
		/* Hack, pull the scope_id out of the dest addr */
		sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
		ip6->ip6_dst.s6_addr16[1] = 0;
	} else
		sin6->sin6_scope_id = 0;
	rtalloc((struct route*)&ro);
	if (ro.ro_rt == NULL) {
		m_freem(m);
		return ENETUNREACH;
	}

	error = ip6_output(m, NULL, &ro, 0, im6o, NULL, 0);

	/* Release the route */
	if (ro.ro_rt)
		rtfree(ro.ro_rt);

	return error;
}
errno_t
ipf_inject_output(
	mbuf_t data,
	ipfilter_t filter_ref,
	ipf_pktopts_t options)
{
	struct mbuf *m = (struct mbuf*)data;
	u_int8_t vers;
	errno_t error = 0;

	/* Make one byte of the header contiguous in the mbuf */
	if (m->m_len < 1) {
		m = m_pullup(m, 1);
		if (m == NULL)
			return ENOMEM;
	}

	vers = (*(u_int8_t*)m_mtod(m)) >> 4;
	switch (vers) {
		case 4:
			error = ipf_injectv4_out(data, filter_ref, options);
			break;
		case 6:
			error = ipf_injectv6_out(data, filter_ref, options);
			break;
		default:
			m_freem(m);
			error = ENOTSUP;
			break;
	}

	return error;
}
__private_extern__ ipfilter_t
ipf_get_inject_filter(struct mbuf *m)
{
	ipfilter_t filter_ref = 0;
	struct m_tag *mtag;

	mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFILT, NULL);
	if (mtag) {
		filter_ref = *(ipfilter_t *)(mtag+1);

		m_tag_delete(m, mtag);
	}
	return filter_ref;
}
__private_extern__ int
ipf_init(void)
{
	int error = 0;
	lck_grp_attr_t *grp_attributes = 0;
	lck_attr_t *lck_attributes = 0;
	lck_grp_t *lck_grp = 0;

	grp_attributes = lck_grp_attr_alloc_init();
	if (grp_attributes == 0) {
		printf("ipf_init: lck_grp_attr_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	lck_grp = lck_grp_alloc_init("IP Filter", grp_attributes);
	if (lck_grp == 0) {
		printf("ipf_init: lck_grp_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	lck_attributes = lck_attr_alloc_init();
	if (lck_attributes == 0) {
		printf("ipf_init: lck_attr_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

	kipf_lock = lck_mtx_alloc_init(lck_grp, lck_attributes);
	if (kipf_lock == 0) {
		printf("ipf_init: lck_mtx_alloc_init failed\n");
		error = ENOMEM;
		goto done;
	}

done:
	if (error != 0) {
		if (kipf_lock) {
			lck_mtx_free(kipf_lock, lck_grp);
			kipf_lock = 0;
		}
	}
	if (lck_grp) {
		lck_grp_free(lck_grp);
		lck_grp = 0;
	}
	if (grp_attributes) {
		lck_grp_attr_free(grp_attributes);
		grp_attributes = 0;
	}
	if (lck_attributes) {
		lck_attr_free(lck_attributes);
		lck_attributes = 0;
	}

	return error;
}