/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <sys/kpi_socketfilter.h>
31 #include <sys/socket.h>
32 #include <sys/param.h>
33 #include <sys/errno.h>
34 #include <sys/malloc.h>
35 #include <sys/protosw.h>
36 #include <kern/locks.h>
37 #include <net/kext_net.h>
/* Global list of all registered socket filters; guarded by sock_filter_lock. */
41 static struct socket_filter_list sock_filter_head
;
/* Mutex protecting sock_filter_head and the per-filter entry lists. */
42 static lck_mtx_t
*sock_filter_lock
= 0;
/*
 * Forward declaration: detaches one filter entry from its socket.
 * See the comment block above its definition for locking assumptions.
 */
44 static void sflt_detach_private(struct socket_filter_entry
*entry
, int unregistering
);
/*
 * Socket-filter subsystem initializer.
 * NOTE(review): the extraction dropped original lines 47-48, which carried
 * this function's name -- presumably sflt_init(void); confirm against the
 * unmangled source. Initializes the global filter list and allocates the
 * mutex that guards it.
 */
46 __private_extern__
void
49 lck_grp_attr_t
*grp_attrib
= 0;
50 lck_attr_t
*lck_attrib
= 0;
51 lck_grp_t
*lck_group
= 0;
/* Start with an empty global list of registered filters. */
53 TAILQ_INIT(&sock_filter_head
);
55 /* Allocate a spin lock */
56 grp_attrib
= lck_grp_attr_alloc_init();
57 lck_group
= lck_grp_alloc_init("socket filter lock", grp_attrib
);
58 lck_grp_attr_free(grp_attrib
);
59 lck_attrib
= lck_attr_alloc_init();
60 sock_filter_lock
= lck_mtx_alloc_init(lck_group
, lck_attrib
);
/* The group and attribute objects were only needed to create the mutex;
 * release them now that sock_filter_lock exists. */
61 lck_grp_free(lck_group
);
62 lck_attr_free(lck_attrib
);
/*
 * Attach every filter registered for this socket's protocol to the socket.
 * NOTE(review): the function name line was dropped by the extraction (orig
 * lines 66-68); presumably sflt_initsock(struct socket *so) -- confirm
 * against the unmangled source.
 */
65 __private_extern__
void
69 struct protosw
*proto
= so
->so_proto
;
70 struct socket_filter
*filter
;
/* Fast path: avoid taking the lock when no filter is registered for this
 * protocol at all. */
72 if (TAILQ_FIRST(&proto
->pr_filter_head
) != NULL
) {
73 lck_mtx_lock(sock_filter_lock
);
74 TAILQ_FOREACH(filter
, &proto
->pr_filter_head
, sf_protosw_next
) {
/* Non-NULL filter argument: sflt_attach_private() requires the caller
 * to hold sock_filter_lock in that case (see its header comment). */
75 sflt_attach_private(so
, filter
, 0, 0);
77 lck_mtx_unlock(sock_filter_lock
);
/*
 * Detach every filter entry from a socket that is being torn down.
 * NOTE(review): the function name line was dropped by the extraction (orig
 * lines 82-84); presumably sflt_termsock(struct socket *so) -- confirm
 * against the unmangled source.
 */
81 __private_extern__
void
85 struct socket_filter_entry
*filter
;
86 struct socket_filter_entry
*filter_next
;
/* Save the next pointer before each detach: sflt_detach_private() frees
 * the entry it is given. */
88 for (filter
= so
->so_filt
; filter
; filter
= filter_next
) {
89 filter_next
= filter
->sfe_next_onsocket
;
90 sflt_detach_private(filter
, 0);
95 __private_extern__
void
/*
 * NOTE(review): the extraction dropped original lines 96-101 here, which
 * carried the first function's name and body -- presumably sflt_use()
 * (bumps so->so_filteruse) -- and lines 103-106 with the second function's
 * name and decrement -- presumably sflt_unuse(). Confirm against the
 * unmangled source; the code below belongs to the second function.
 */
102 __private_extern__
void
/* When the filter-use count reaches zero, perform any detaches that were
 * deferred while filter callouts were in progress on this socket. */
107 if (so
->so_filteruse
== 0) {
108 struct socket_filter_entry
*filter
;
109 struct socket_filter_entry
*next_filter
;
110 // search for detaching filters
/* Save the next pointer first: sflt_detach_private() frees the entry. */
111 for (filter
= so
->so_filt
; filter
; filter
= next_filter
) {
112 next_filter
= filter
->sfe_next_onsocket
;
/* SFEF_DETACHUSEZERO marks entries whose detach was deferred until the
 * socket's filter-use count dropped to zero (see sflt_detach_private). */
114 if (filter
->sfe_flags
& SFEF_DETACHUSEZERO
) {
115 sflt_detach_private(filter
, 0);
/*
 * Deliver an event notification to every attached filter that registered
 * an sf_notify callback. NOTE(review): the signature was dropped by the
 * extraction (orig lines 122-126); presumably sflt_notify(struct socket
 * *so, sflt_event_t event, void *param) -- confirm against the unmangled
 * source.
 */
121 __private_extern__
void
127 struct socket_filter_entry
*filter
;
130 for (filter
= so
->so_filt
; filter
;
131 filter
= filter
->sfe_next_onsocket
) {
132 if (filter
->sfe_filter
->sf_filter
.sf_notify
) {
/* Drop the socket lock around the callout so the filter may call back
 * into socket KPIs without deadlocking. NOTE(review): orig lines
 * 133-137 (use-count bookkeeping and the unlock guard) were partially
 * dropped by the extraction. */
136 socket_unlock(so
, 0);
138 filter
->sfe_filter
->sf_filter
.sf_notify(
139 filter
->sfe_cookie
, so
, event
, param
);
/*
 * Run inbound data through each attached filter's sf_data_in callback,
 * stopping at the first filter that returns a non-zero error.
 * NOTE(review): the signature and several interior lines (the so/data/
 * control/filtered parameters, the error initialization, the relock and
 * return) were dropped by the extraction -- confirm against the unmangled
 * source.
 */
149 __private_extern__
int
152 const struct sockaddr
*from
,
155 sflt_data_flag_t flags
,
158 struct socket_filter_entry
*filter
;
160 int filtered_storage
;
/* Callers may pass NULL for the "filtered" out-parameter; fall back to
 * local storage so the flag can still be tracked. */
162 if (filtered
== NULL
)
163 filtered
= &filtered_storage
;
166 for (filter
= so
->so_filt
; filter
&& (error
== 0);
167 filter
= filter
->sfe_next_onsocket
) {
168 if (filter
->sfe_filter
->sf_filter
.sf_data_in
) {
/* *filtered == 0 appears to gate a one-time socket unlock before the
 * first callout; the lines setting it were dropped by the extraction
 * -- confirm upstream. */
169 if (*filtered
== 0) {
172 socket_unlock(so
, 0);
174 error
= filter
->sfe_filter
->sf_filter
.sf_data_in(
175 filter
->sfe_cookie
, so
, from
, data
, control
, flags
);
/* Presumably re-locks the socket here when it was unlocked above; the
 * relock statement (orig ~line 180) was dropped by the extraction. */
179 if (*filtered
!= 0) {
187 /* sflt_attach_private
189 * Assumptions: If filter is not NULL, socket_filter_lock is held.
 *
 * Attaches one filter to a socket: allocates an entry, invokes the
 * filter's sf_attach callback, and links the entry onto both the
 * socket's and the filter's lists. NOTE(review): the signature and
 * several interior lines (lock release on the lookup failure path,
 * the attach error return, the socket-side list head update, and the
 * final return) were dropped by the extraction -- confirm against the
 * unmangled source.
 */
192 __private_extern__
int
195 struct socket_filter
*filter
,
199 struct socket_filter_entry
*entry
= NULL
;
/* filter == NULL means we were handed a handle instead; look the filter
 * up on the global list, taking the lock ourselves. */
203 if (filter
== NULL
) {
204 /* Find the filter by the handle */
205 lck_mtx_lock(sock_filter_lock
);
208 TAILQ_FOREACH(filter
, &sock_filter_head
, sf_global_next
) {
209 if (filter
->sf_filter
.sf_handle
== handle
)
218 /* allocate the socket filter entry */
219 MALLOC(entry
, struct socket_filter_entry
*, sizeof(*entry
), M_IFADDR
, M_WAITOK
);
226 /* Initialize the socket filter entry and call the attach function */
227 entry
->sfe_filter
= filter
;
228 entry
->sfe_socket
= so
;
229 entry
->sfe_cookie
= NULL
;
230 entry
->sfe_flags
= 0;
231 if (entry
->sfe_filter
->sf_filter
.sf_attach
) {
/* Hold a usecount across the callout so the filter cannot disappear
 * while sf_attach runs. */
232 filter
->sf_usecount
++;
/* Drop the socket lock around the callout. */
235 socket_unlock(so
, 0);
236 error
= entry
->sfe_filter
->sf_filter
.sf_attach(&entry
->sfe_cookie
, so
);
240 filter
->sf_usecount
--;
242 /* If the attach function returns an error, this filter is not attached */
244 FREE(entry
, M_IFADDR
);
251 /* Put the entry in the socket list */
252 entry
->sfe_next_onsocket
= so
->so_filt
;
255 /* Put the entry in the filter list */
256 entry
->sfe_next_onfilter
= filter
->sf_entry_head
;
257 filter
->sf_entry_head
= entry
;
259 /* Increment the parent filter's usecount */
260 filter
->sf_usecount
++;
264 lck_mtx_unlock(sock_filter_lock
);
271 /* sflt_detach_private
273 * Assumptions: if you pass 0 in for the second parameter, you are holding the
274 * socket lock for the socket the entry is attached to. If you pass 1 in for
275 * the second parameter, it is assumed that the entry is not on the filter's
276 * list and the socket lock is not held.
 *
 * NOTE(review): the signature and many interior lines ("found"/"detached"
 * local declarations and assignments, loop breaks, early returns, and
 * closing braces) were dropped by the extraction -- confirm against the
 * unmangled source.
 */
281 struct socket_filter_entry
*entry
,
284 struct socket_filter_entry
**next_ptr
;
/* Presumably executed only on the unregistering path, where the caller
 * does not hold the socket lock -- the guarding condition was dropped by
 * the extraction; confirm upstream. */
289 socket_lock(entry
->sfe_socket
, 0);
/*
293 * Attempt to find the entry on the filter's list and
294 * remove it. This prevents a filter detaching at the
295 * same time from attempting to remove the same entry.
 */
297 lck_mtx_lock(sock_filter_lock
);
298 if (!unregistering
) {
299 if ((entry
->sfe_flags
& SFEF_UNREGISTERING
) != 0) {
/*
301 * Another thread is unregistering the filter, we
302 * need to avoid detaching the filter here so the
303 * socket won't go away. Bump up the socket's
304 * usecount so that it won't be freed until after
305 * the filter unregistration has been completed;
306 * at this point the caller has already held the
307 * socket's lock, so we can directly modify the
 * usecount. (Comment tail truncated by extraction.)
 */
310 if (!(entry
->sfe_flags
& SFEF_DETACHXREF
)) {
311 entry
->sfe_socket
->so_usecount
++;
312 entry
->sfe_flags
|= SFEF_DETACHXREF
;
314 lck_mtx_unlock(sock_filter_lock
);
/* Unlink the entry from its filter's list; success is presumably
 * recorded in "found" (declaration and break dropped by extraction). */
317 for (next_ptr
= &entry
->sfe_filter
->sf_entry_head
; *next_ptr
;
318 next_ptr
= &((*next_ptr
)->sfe_next_onfilter
)) {
319 if (*next_ptr
== entry
) {
321 *next_ptr
= entry
->sfe_next_onfilter
;
/* Not on the filter's list and not marked for deferred detach: someone
 * else is handling this entry, so back out. */
326 if (!found
&& (entry
->sfe_flags
& SFEF_DETACHUSEZERO
) == 0) {
327 lck_mtx_unlock(sock_filter_lock
);
/*
333 * Clear the removing flag. We will perform the detach here or
334 * request a delayed detach. Since we do an extra ref release
335 * below, bump up the usecount if we haven't done so.
 */
337 entry
->sfe_flags
&= ~SFEF_UNREGISTERING
;
338 if (!(entry
->sfe_flags
& SFEF_DETACHXREF
)) {
339 entry
->sfe_socket
->so_usecount
++;
340 entry
->sfe_flags
|= SFEF_DETACHXREF
;
/* Socket is mid-callout: defer the detach until the filter-use count
 * drops to zero (handled by the use-count code, see so_filteruse). */
344 if (entry
->sfe_socket
->so_filteruse
!= 0) {
345 entry
->sfe_flags
|= SFEF_DETACHUSEZERO
;
346 lck_mtx_unlock(sock_filter_lock
);
/*
351 * Check if we are removing the last attached filter and
352 * the parent filter is being unregistered.
 */
354 entry
->sfe_filter
->sf_usecount
--;
355 if ((entry
->sfe_filter
->sf_usecount
== 0) &&
356 (entry
->sfe_filter
->sf_flags
& SFF_DETACHING
) != 0)
359 lck_mtx_unlock(sock_filter_lock
);
361 /* Remove from the socket list */
362 for (next_ptr
= &entry
->sfe_socket
->so_filt
; *next_ptr
;
363 next_ptr
= &((*next_ptr
)->sfe_next_onsocket
)) {
364 if (*next_ptr
== entry
) {
365 *next_ptr
= entry
->sfe_next_onsocket
;
370 if (entry
->sfe_filter
->sf_filter
.sf_detach
)
371 entry
->sfe_filter
->sf_filter
.sf_detach(entry
->sfe_cookie
, entry
->sfe_socket
);
/* "detached" is presumably set above when this was the filter's last
 * entry during unregistration (assignment dropped by extraction); in
 * that case fire sf_unregistered and free the filter itself. */
373 if (detached
&& entry
->sfe_filter
->sf_filter
.sf_unregistered
) {
374 entry
->sfe_filter
->sf_filter
.sf_unregistered(entry
->sfe_filter
->sf_filter
.sf_handle
);
375 FREE(entry
->sfe_filter
, M_IFADDR
);
/* Unlock with "1": also drops a socket reference, balancing the extra
 * reference taken via SFEF_DETACHXREF above. */
379 socket_unlock(entry
->sfe_socket
, 1);
381 FREE(entry
, M_IFADDR
);
/*
 * Public KPI: attach the registered filter identified by "handle" to a
 * socket. NOTE(review): the signature was dropped by the extraction (orig
 * lines 386-388); presumably sflt_attach(socket_t socket, sflt_handle
 * handle) -- confirm against the unmangled source.
 */
389 if (socket
== NULL
|| handle
== 0)
/* (The body of the validation check -- presumably "return EINVAL;" at
 * orig ~line 390 -- was dropped by the extraction.) */
392 return sflt_attach_private(socket
, NULL
, handle
, 0);
/*
 * Public KPI: detach the filter identified by "handle" from a socket.
 * NOTE(review): the signature was dropped by the extraction (orig lines
 * ~395-399); presumably sflt_detach(socket_t socket, sflt_handle handle)
 * -- confirm against the unmangled source.
 */
400 struct socket_filter_entry
*filter
;
403 if (socket
== NULL
|| handle
== 0)
406 socket_lock(socket
, 1);
/* Walk the socket's filter list looking for a matching handle; the loop
 * break on match (orig ~line 411) was dropped by the extraction. */
408 for (filter
= socket
->so_filt
; filter
;
409 filter
= filter
->sfe_next_onsocket
) {
410 if (filter
->sfe_filter
->sf_filter
.sf_handle
== handle
)
414 if (filter
!= NULL
) {
415 sflt_detach_private(filter
, 0);
/* NOTE(review): this branch clears so_filt; the surrounding control-flow
 * lines (orig ~416-421) were dropped, so verify the exact condition --
 * it appears to be the no-match/else path -- against the unmangled
 * source. */
418 socket
->so_filt
= NULL
;
422 socket_unlock(socket
, 1);
/*
 * Public KPI: register a socket filter for a (domain, type, protocol)
 * triple. NOTE(review): the signature was dropped by the extraction (orig
 * lines ~425-429); presumably sflt_register(const struct sflt_filter
 * *filter, int domain, int type, int protocol) -- confirm against the
 * unmangled source, along with the dropped validation returns, the MALLOC
 * flags continuation, the duplicate-handle break, and the final returns.
 */
430 const struct sflt_filter
*filter
,
435 struct socket_filter
*sock_filt
= NULL
;
436 struct socket_filter
*match
= NULL
;
438 struct protosw
*pr
= pffindproto(domain
, protocol
, type
);
/* A usable filter must supply attach and detach callbacks, a non-zero
 * handle, and a name. */
444 if (filter
->sf_attach
== NULL
|| filter
->sf_detach
== NULL
||
445 filter
->sf_handle
== 0 || filter
->sf_name
== NULL
)
448 /* Allocate the socket filter */
449 MALLOC(sock_filt
, struct socket_filter
*, sizeof (*sock_filt
),
451 if (sock_filt
== NULL
) {
455 bzero(sock_filt
, sizeof (*sock_filt
));
457 /* Legacy sflt_filter length; current structure minus extended */
458 len
= sizeof (*filter
) - sizeof (struct sflt_filter_ext
);
/*
460 * Include extended fields if filter defines SFLT_EXTENDED.
461 * We've zeroed out our internal sflt_filter placeholder,
462 * so any unused portion would have been taken care of.
 */
464 if (filter
->sf_flags
& SFLT_EXTENDED
) {
/* Clamp the caller-provided extended length to what we understand. */
465 unsigned int ext_len
= filter
->sf_len
;
467 if (ext_len
> sizeof (struct sflt_filter_ext
))
468 ext_len
= sizeof (struct sflt_filter_ext
);
472 bcopy(filter
, &sock_filt
->sf_filter
, len
);
474 lck_mtx_lock(sock_filter_lock
);
475 /* Look for an existing entry */
476 TAILQ_FOREACH(match
, &sock_filter_head
, sf_global_next
) {
477 if (match
->sf_filter
.sf_handle
==
478 sock_filt
->sf_filter
.sf_handle
) {
483 /* Add the entry only if there was no existing entry */
485 TAILQ_INSERT_TAIL(&sock_filter_head
, sock_filt
, sf_global_next
);
/* Global filters are additionally linked on the protocol's list so
 * every new socket of that protocol picks them up (see the protocol
 * initialization path above). */
486 if ((sock_filt
->sf_filter
.sf_flags
& SFLT_GLOBAL
) != 0) {
487 TAILQ_INSERT_TAIL(&pr
->pr_filter_head
, sock_filt
,
489 sock_filt
->sf_proto
= pr
;
492 lck_mtx_unlock(sock_filter_lock
);
/* Duplicate-handle path: free the unused allocation. */
495 FREE(sock_filt
, M_IFADDR
);
/*
 * Public KPI: unregister the filter identified by "handle" and detach it
 * from every socket it is attached to. NOTE(review): the signature (orig
 * lines ~502-505; presumably sflt_unregister(sflt_handle handle)) and
 * several interior lines (the not-found early return, loop breaks, the
 * detach-loop condition, and the final return) were dropped by the
 * extraction -- confirm against the unmangled source.
 */
506 struct socket_filter
*filter
;
507 struct socket_filter_entry
*entry_head
= NULL
;
508 struct socket_filter_entry
*next_entry
= NULL
;
510 /* Find the entry and remove it from the global and protosw lists */
511 lck_mtx_lock(sock_filter_lock
);
512 TAILQ_FOREACH(filter
, &sock_filter_head
, sf_global_next
) {
513 if (filter
->sf_filter
.sf_handle
== handle
)
518 TAILQ_REMOVE(&sock_filter_head
, filter
, sf_global_next
);
/* Global filters were also linked on their protocol's list; unlink. */
519 if ((filter
->sf_filter
.sf_flags
& SFLT_GLOBAL
) != 0) {
520 TAILQ_REMOVE(&filter
->sf_proto
->pr_filter_head
, filter
, sf_protosw_next
);
/* Steal the filter's entry list and mark it detaching so no new
 * entries are added while we walk it without the lock. */
522 entry_head
= filter
->sf_entry_head
;
523 filter
->sf_entry_head
= NULL
;
524 filter
->sf_flags
|= SFF_DETACHING
;
526 for (next_entry
= entry_head
; next_entry
;
527 next_entry
= next_entry
->sfe_next_onfilter
) {
/*
529 * Mark this as "unregistering"; upon dropping the
530 * lock, another thread may win the race and attempt
531 * to detach a socket from it (e.g. as part of close)
532 * before we get a chance to detach. Setting this
533 * flag practically tells the other thread to go away.
534 * If the other thread wins, this causes an extra
535 * reference hold on the socket so that it won't be
536 * deallocated until after we finish with the detach
537 * for it below. If we win the race, the extra
538 * reference hold is also taken to compensate for the
539 * extra reference release when detach is called
540 * with a "1" for its second parameter.
 */
542 next_entry
->sfe_flags
|= SFEF_UNREGISTERING
;
546 lck_mtx_unlock(sock_filter_lock
);
551 /* We need to detach the filter from any sockets it's attached to */
/* No sockets were attached: report unregistration directly. */
552 if (entry_head
== 0) {
553 if (filter
->sf_filter
.sf_unregistered
)
554 filter
->sf_filter
.sf_unregistered(filter
->sf_filter
.sf_handle
);
/* Otherwise detach each entry; save the next pointer first because
 * sflt_detach_private(entry, 1) frees the entry. */
557 next_entry
= entry_head
->sfe_next_onfilter
;
558 sflt_detach_private(entry_head
, 1);
559 entry_head
= next_entry
;
/*
 * Public KPI: inject data into a socket's receive buffer as if it had
 * arrived from the network. NOTE(review): the leading parameters and the
 * function name were dropped by the extraction (orig lines ~564-568;
 * presumably sock_inject_data_in(socket_t so, const struct sockaddr *from,
 * mbuf_t data, mbuf_t control, sflt_data_flag_t flags)), as were the
 * socket_lock call, error assignments, sorwakeup, and the final return --
 * confirm against the unmangled source.
 */
569 const struct sockaddr
* from
,
572 sflt_data_flag_t flags
)
575 if (so
== NULL
|| data
== NULL
) return EINVAL
;
/* Out-of-band data path (its handling, orig ~578-583, was dropped). */
577 if (flags
& sock_data_filt_flag_oob
) {
/* With a source address (and/or control mbufs) append as a complete
 * datagram; without an address but with control, use sbappendcontrol. */
584 if (sbappendaddr(&so
->so_rcv
, (struct sockaddr
*)from
, data
,
591 if (sbappendcontrol(&so
->so_rcv
, data
, control
, NULL
))
/* Record-oriented append; appears to be rejected when combined with
 * from/control (the branch body at orig ~598-600 was dropped). */
596 if (flags
& sock_data_filt_flag_record
) {
597 if (control
|| from
) {
601 if (sbappendrecord(&so
->so_rcv
, (struct mbuf
*)data
))
/* Default: stream-style append. */
606 if (sbappend(&so
->so_rcv
, data
))
609 socket_unlock(so
, 1);
/*
 * Public KPI: send data out through the socket as if the application had
 * written it, by delegating to sosend(). NOTE(review): the parameter list
 * before "to" and the declaration/initialization of sosendflags were
 * dropped by the extraction -- confirm against the unmangled source.
 */
614 sock_inject_data_out(
616 const struct sockaddr
* to
,
619 sflt_data_flag_t flags
)
/* Map the filter-level OOB flag onto sosend()'s MSG_OOB. */
622 if (flags
& sock_data_filt_flag_oob
) sosendflags
= MSG_OOB
;
623 return sosend(so
, (struct sockaddr
*)to
, NULL
,
624 data
, control
, sosendflags
);
/* Accessor: report whether a socket option operation is a get or a set.
 * NOTE(review): signature dropped by the extraction; presumably
 * sockopt_direction(sockopt_t sopt) -- confirm against the unmangled
 * source. */
631 return (sopt
->sopt_dir
== SOPT_GET
) ? sockopt_get
: sockopt_set
;
/* Accessor: the option's protocol level. NOTE(review): signature dropped
 * by the extraction; presumably sockopt_level(sockopt_t sopt) -- confirm
 * against the unmangled source. */
638 return sopt
->sopt_level
;
/* Accessor: the option's name. NOTE(review): signature dropped by the
 * extraction; presumably sockopt_name(sockopt_t sopt) -- confirm against
 * the unmangled source. */
645 return sopt
->sopt_name
;
/* Accessor: size in bytes of the option value. NOTE(review): signature
 * dropped by the extraction; presumably sockopt_valsize(sockopt_t sopt)
 * -- confirm against the unmangled source. */
652 return sopt
->sopt_valsize
;
/* Copy option data from the sockopt into a kernel buffer; thin wrapper
 * around sooptcopyin() requesting exactly "len" bytes. NOTE(review):
 * signature dropped by the extraction; presumably sockopt_copyin(sockopt_t
 * sopt, void *data, size_t len) -- confirm against the unmangled source. */
661 return sooptcopyin(sopt
, data
, len
, len
);
/* Copy option data from a kernel buffer back out to the sockopt; thin
 * wrapper around sooptcopyout(). NOTE(review): signature dropped by the
 * extraction; presumably sockopt_copyout(sockopt_t sopt, void *data,
 * size_t len) -- confirm against the unmangled source. */
670 return sooptcopyout(sopt
, data
, len
);