/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
23 #include <sys/kpi_socketfilter.h>
25 #include <sys/socket.h>
26 #include <sys/param.h>
27 #include <sys/errno.h>
28 #include <sys/malloc.h>
29 #include <sys/protosw.h>
30 #include <kern/locks.h>
31 #include <net/kext_net.h>
33 static struct socket_filter_list sock_filter_head
;
34 static lck_mtx_t
*sock_filter_lock
= 0;
36 static void sflt_detach_private(struct socket_filter_entry
*entry
, int unregistering
);
38 __private_extern__
void
41 lck_grp_attr_t
*grp_attrib
= 0;
42 lck_attr_t
*lck_attrib
= 0;
43 lck_grp_t
*lck_group
= 0;
45 TAILQ_INIT(&sock_filter_head
);
47 /* Allocate a spin lock */
48 grp_attrib
= lck_grp_attr_alloc_init();
49 lck_grp_attr_setdefault(grp_attrib
);
50 lck_group
= lck_grp_alloc_init("socket filter lock", grp_attrib
);
51 lck_grp_attr_free(grp_attrib
);
52 lck_attrib
= lck_attr_alloc_init();
53 lck_attr_setdefault(lck_attrib
);
54 lck_attr_setdebug(lck_attrib
);
55 sock_filter_lock
= lck_mtx_alloc_init(lck_group
, lck_attrib
);
56 lck_grp_free(lck_group
);
57 lck_attr_free(lck_attrib
);
60 __private_extern__
void
64 struct protosw
*proto
= so
->so_proto
;
65 struct socket_filter
*filter
;
67 if (TAILQ_FIRST(&proto
->pr_filter_head
) != NULL
) {
68 lck_mtx_lock(sock_filter_lock
);
69 TAILQ_FOREACH(filter
, &proto
->pr_filter_head
, sf_protosw_next
) {
70 sflt_attach_private(so
, filter
, 0, 0);
72 lck_mtx_unlock(sock_filter_lock
);
76 __private_extern__
void
80 struct socket_filter_entry
*filter
;
81 struct socket_filter_entry
*filter_next
;
83 for (filter
= so
->so_filt
; filter
; filter
= filter_next
) {
84 filter_next
= filter
->sfe_next_onsocket
;
85 sflt_detach_private(filter
, 0);
90 __private_extern__
void
97 __private_extern__
void
102 if (so
->so_filteruse
== 0) {
103 struct socket_filter_entry
*filter
;
104 struct socket_filter_entry
*next_filter
;
105 // search for detaching filters
106 for (filter
= so
->so_filt
; filter
; filter
= next_filter
) {
107 next_filter
= filter
->sfe_next_onsocket
;
109 if (filter
->sfe_flags
& SFEF_DETACHUSEZERO
) {
110 sflt_detach_private(filter
, 0);
116 __private_extern__
void
122 struct socket_filter_entry
*filter
;
125 for (filter
= so
->so_filt
; filter
;
126 filter
= filter
->sfe_next_onsocket
) {
127 if (filter
->sfe_filter
->sf_filter
.sf_notify
) {
131 socket_unlock(so
, 0);
133 filter
->sfe_filter
->sf_filter
.sf_notify(
134 filter
->sfe_cookie
, so
, event
, param
);
144 __private_extern__
int
147 const struct sockaddr
*from
,
150 sflt_data_flag_t flags
,
153 struct socket_filter_entry
*filter
;
155 int filtered_storage
;
157 if (filtered
== NULL
)
158 filtered
= &filtered_storage
;
161 for (filter
= so
->so_filt
; filter
&& (error
== 0);
162 filter
= filter
->sfe_next_onsocket
) {
163 if (filter
->sfe_filter
->sf_filter
.sf_data_in
) {
164 if (*filtered
== 0) {
167 socket_unlock(so
, 0);
169 error
= filter
->sfe_filter
->sf_filter
.sf_data_in(
170 filter
->sfe_cookie
, so
, from
, data
, control
, flags
);
174 if (*filtered
!= 0) {
182 /* sflt_attach_private
184 * Assumptions: If filter is not NULL, socket_filter_lock is held.
187 __private_extern__
int
190 struct socket_filter
*filter
,
194 struct socket_filter_entry
*entry
= NULL
;
198 if (filter
== NULL
) {
199 /* Find the filter by the handle */
200 lck_mtx_lock(sock_filter_lock
);
203 TAILQ_FOREACH(filter
, &sock_filter_head
, sf_global_next
) {
204 if (filter
->sf_filter
.sf_handle
== handle
)
213 /* allocate the socket filter entry */
214 MALLOC(entry
, struct socket_filter_entry
*, sizeof(*entry
), M_IFADDR
, M_WAITOK
);
221 /* Initialize the socket filter entry and call the attach function */
222 entry
->sfe_filter
= filter
;
223 entry
->sfe_socket
= so
;
224 entry
->sfe_cookie
= NULL
;
225 entry
->sfe_flags
= 0;
226 if (entry
->sfe_filter
->sf_filter
.sf_attach
) {
227 filter
->sf_usecount
++;
230 socket_unlock(so
, 0);
231 error
= entry
->sfe_filter
->sf_filter
.sf_attach(&entry
->sfe_cookie
, so
);
235 filter
->sf_usecount
--;
237 /* If the attach function returns an error, this filter is not attached */
239 FREE(entry
, M_IFADDR
);
246 /* Put the entry in the socket list */
247 entry
->sfe_next_onsocket
= so
->so_filt
;
250 /* Put the entry in the filter list */
251 entry
->sfe_next_onfilter
= filter
->sf_entry_head
;
252 filter
->sf_entry_head
= entry
;
254 /* Incremenet the parent filter's usecount */
255 filter
->sf_usecount
++;
259 lck_mtx_unlock(sock_filter_lock
);
266 /* sflt_detach_private
268 * Assumptions: if you pass 0 in for the second parameter, you are holding the
269 * socket lock for the socket the entry is attached to. If you pass 1 in for
270 * the second parameter, it is assumed that the entry is not on the filter's
271 * list and the socket lock is not held.
276 struct socket_filter_entry
*entry
,
279 struct socket
*so
= entry
->sfe_socket
;
280 struct socket_filter_entry
**next_ptr
;
285 socket_lock(entry
->sfe_socket
, 0);
289 * Attempt to find the entry on the filter's list and
290 * remove it. This prevents a filter detaching at the
291 * same time from attempting to remove the same entry.
293 lck_mtx_lock(sock_filter_lock
);
294 if (!unregistering
) {
295 if ((entry
->sfe_flags
& SFEF_UNREGISTERING
) != 0) {
297 * Another thread is unregistering the filter, we need to
298 * avoid detaching the filter here so the socket won't go
301 lck_mtx_unlock(sock_filter_lock
);
304 for (next_ptr
= &entry
->sfe_filter
->sf_entry_head
; *next_ptr
;
305 next_ptr
= &((*next_ptr
)->sfe_next_onfilter
)) {
306 if (*next_ptr
== entry
) {
308 *next_ptr
= entry
->sfe_next_onfilter
;
313 if (!found
&& (entry
->sfe_flags
& SFEF_DETACHUSEZERO
) == 0) {
314 lck_mtx_unlock(sock_filter_lock
);
320 * Clear the removing flag. We will perform the detach here or
321 * request a delayed deatch.
323 entry
->sfe_flags
&= ~SFEF_UNREGISTERING
;
326 if (entry
->sfe_socket
->so_filteruse
!= 0) {
327 entry
->sfe_flags
|= SFEF_DETACHUSEZERO
;
328 lck_mtx_unlock(sock_filter_lock
);
333 * Check if we are removing the last attached filter and
334 * the parent filter is being unregistered.
336 entry
->sfe_filter
->sf_usecount
--;
337 if ((entry
->sfe_filter
->sf_usecount
== 0) &&
338 (entry
->sfe_filter
->sf_flags
& SFF_DETACHING
) != 0)
341 lck_mtx_unlock(sock_filter_lock
);
343 /* Remove from the socket list */
344 for (next_ptr
= &entry
->sfe_socket
->so_filt
; *next_ptr
;
345 next_ptr
= &((*next_ptr
)->sfe_next_onsocket
)) {
346 if (*next_ptr
== entry
) {
347 *next_ptr
= entry
->sfe_next_onsocket
;
352 if (entry
->sfe_filter
->sf_filter
.sf_detach
)
353 entry
->sfe_filter
->sf_filter
.sf_detach(entry
->sfe_cookie
, entry
->sfe_socket
);
355 if (detached
&& entry
->sfe_filter
->sf_filter
.sf_unregistered
) {
356 entry
->sfe_filter
->sf_filter
.sf_unregistered(entry
->sfe_filter
->sf_filter
.sf_handle
);
357 FREE(entry
->sfe_filter
, M_IFADDR
);
361 socket_unlock(entry
->sfe_socket
, 1);
363 FREE(entry
, M_IFADDR
);
371 if (socket
== NULL
|| handle
== 0)
374 return sflt_attach_private(socket
, NULL
, handle
, 0);
382 struct socket_filter_entry
*filter
;
385 if (socket
== NULL
|| handle
== 0)
388 socket_lock(socket
, 1);
390 for (filter
= socket
->so_filt
; filter
;
391 filter
= filter
->sfe_next_onsocket
) {
392 if (filter
->sfe_filter
->sf_filter
.sf_handle
== handle
)
396 if (filter
!= NULL
) {
397 sflt_detach_private(filter
, 0);
400 socket
->so_filt
= NULL
;
404 socket_unlock(socket
, 1);
412 const struct sflt_filter
*filter
,
417 struct socket_filter
*sock_filt
= NULL
;
418 struct socket_filter
*match
= NULL
;
420 struct protosw
*pr
= pffindproto(domain
, protocol
, type
);
422 if (pr
== NULL
) return ENOENT
;
424 if (filter
->sf_attach
== NULL
|| filter
->sf_detach
== NULL
) return EINVAL
;
425 if (filter
->sf_handle
== 0) return EINVAL
;
426 if (filter
->sf_name
== NULL
) return EINVAL
;
428 /* Allocate the socket filter */
429 MALLOC(sock_filt
, struct socket_filter
*, sizeof(*sock_filt
), M_IFADDR
, M_WAITOK
);
430 if (sock_filt
== NULL
) {
434 bzero(sock_filt
, sizeof(*sock_filt
));
435 sock_filt
->sf_filter
= *filter
;
437 lck_mtx_lock(sock_filter_lock
);
438 /* Look for an existing entry */
439 TAILQ_FOREACH(match
, &sock_filter_head
, sf_global_next
) {
440 if (match
->sf_filter
.sf_handle
== sock_filt
->sf_filter
.sf_handle
) {
445 /* Add the entry only if there was no existing entry */
447 TAILQ_INSERT_TAIL(&sock_filter_head
, sock_filt
, sf_global_next
);
448 if ((sock_filt
->sf_filter
.sf_flags
& SFLT_GLOBAL
) != 0) {
449 TAILQ_INSERT_TAIL(&pr
->pr_filter_head
, sock_filt
, sf_protosw_next
);
450 sock_filt
->sf_proto
= pr
;
453 lck_mtx_unlock(sock_filter_lock
);
456 FREE(sock_filt
, M_IFADDR
);
467 struct socket_filter
*filter
;
468 struct socket_filter_entry
*entry_head
= NULL
;
469 struct socket_filter_entry
*next_entry
= NULL
;
471 /* Find the entry and remove it from the global and protosw lists */
472 lck_mtx_lock(sock_filter_lock
);
473 TAILQ_FOREACH(filter
, &sock_filter_head
, sf_global_next
) {
474 if (filter
->sf_filter
.sf_handle
== handle
)
479 TAILQ_REMOVE(&sock_filter_head
, filter
, sf_global_next
);
480 if ((filter
->sf_filter
.sf_flags
& SFLT_GLOBAL
) != 0) {
481 TAILQ_REMOVE(&filter
->sf_proto
->pr_filter_head
, filter
, sf_protosw_next
);
483 entry_head
= filter
->sf_entry_head
;
484 filter
->sf_entry_head
= NULL
;
485 filter
->sf_flags
|= SFF_DETACHING
;
487 for (next_entry
= entry_head
; next_entry
;
488 next_entry
= next_entry
->sfe_next_onfilter
) {
489 socket_lock(next_entry
->sfe_socket
, 1);
490 next_entry
->sfe_flags
|= SFEF_UNREGISTERING
;
491 socket_unlock(next_entry
->sfe_socket
, 0); /* Radar 4201550: prevents the socket from being deleted while being unregistered */
495 lck_mtx_unlock(sock_filter_lock
);
500 /* We need to detach the filter from any sockets it's attached to */
501 if (entry_head
== 0) {
502 if (filter
->sf_filter
.sf_unregistered
)
503 filter
->sf_filter
.sf_unregistered(filter
->sf_filter
.sf_handle
);
506 next_entry
= entry_head
->sfe_next_onfilter
;
507 sflt_detach_private(entry_head
, 1);
508 entry_head
= next_entry
;
518 const struct sockaddr
* from
,
521 sflt_data_flag_t flags
)
524 if (so
== NULL
|| data
== NULL
) return EINVAL
;
526 if (flags
& sock_data_filt_flag_oob
) {
533 if (sbappendaddr(&so
->so_rcv
, (struct sockaddr
*)from
, data
,
540 if (sbappendcontrol(&so
->so_rcv
, data
, control
, NULL
))
545 if (flags
& sock_data_filt_flag_record
) {
546 if (control
|| from
) {
550 if (sbappendrecord(&so
->so_rcv
, (struct mbuf
*)data
))
555 if (sbappend(&so
->so_rcv
, data
))
558 socket_unlock(so
, 1);
563 sock_inject_data_out(
565 const struct sockaddr
* to
,
568 sflt_data_flag_t flags
)
571 if (flags
& sock_data_filt_flag_oob
) sosendflags
= MSG_OOB
;
572 return sosend(so
, (const struct sockaddr
*)to
, NULL
,
573 data
, control
, sosendflags
);
580 return (sopt
->sopt_dir
== SOPT_GET
) ? sockopt_get
: sockopt_set
;
587 return sopt
->sopt_level
;
594 return sopt
->sopt_name
;
601 return sopt
->sopt_valsize
;
610 return sooptcopyin(sopt
, data
, len
, len
);
619 return sooptcopyout(sopt
, data
, len
);