/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <sys/kpi_socketfilter.h>
31 #include <sys/socket.h>
32 #include <sys/param.h>
33 #include <sys/errno.h>
34 #include <sys/malloc.h>
35 #include <sys/protosw.h>
36 #include <kern/locks.h>
37 #include <net/kext_net.h>
39 #include <libkern/libkern.h>
43 static struct socket_filter_list sock_filter_head
;
44 static lck_mtx_t
*sock_filter_lock
= 0;
46 static void sflt_detach_private(struct socket_filter_entry
*entry
, int unregistering
);
48 __private_extern__
void
51 lck_grp_attr_t
*grp_attrib
= 0;
52 lck_attr_t
*lck_attrib
= 0;
53 lck_grp_t
*lck_group
= 0;
55 TAILQ_INIT(&sock_filter_head
);
57 /* Allocate a spin lock */
58 grp_attrib
= lck_grp_attr_alloc_init();
59 lck_group
= lck_grp_alloc_init("socket filter lock", grp_attrib
);
60 lck_grp_attr_free(grp_attrib
);
61 lck_attrib
= lck_attr_alloc_init();
62 sock_filter_lock
= lck_mtx_alloc_init(lck_group
, lck_attrib
);
63 lck_grp_free(lck_group
);
64 lck_attr_free(lck_attrib
);
67 __private_extern__
void
71 struct protosw
*proto
= so
->so_proto
;
72 struct socket_filter
*filter
;
74 if (TAILQ_FIRST(&proto
->pr_filter_head
) != NULL
) {
75 lck_mtx_lock(sock_filter_lock
);
76 TAILQ_FOREACH(filter
, &proto
->pr_filter_head
, sf_protosw_next
) {
77 sflt_attach_private(so
, filter
, 0, 0);
79 lck_mtx_unlock(sock_filter_lock
);
83 __private_extern__
void
87 struct socket_filter_entry
*filter
;
88 struct socket_filter_entry
*filter_next
;
90 for (filter
= so
->so_filt
; filter
; filter
= filter_next
) {
91 filter_next
= filter
->sfe_next_onsocket
;
92 sflt_detach_private(filter
, 0);
97 __private_extern__
void
104 __private_extern__
void
109 if (so
->so_filteruse
== 0) {
110 struct socket_filter_entry
*filter
;
111 struct socket_filter_entry
*next_filter
;
112 // search for detaching filters
113 for (filter
= so
->so_filt
; filter
; filter
= next_filter
) {
114 next_filter
= filter
->sfe_next_onsocket
;
116 if (filter
->sfe_flags
& SFEF_DETACHUSEZERO
) {
117 sflt_detach_private(filter
, 0);
123 __private_extern__
void
129 struct socket_filter_entry
*filter
;
132 for (filter
= so
->so_filt
; filter
;
133 filter
= filter
->sfe_next_onsocket
) {
134 if (filter
->sfe_filter
->sf_filter
.sf_notify
) {
138 socket_unlock(so
, 0);
140 filter
->sfe_filter
->sf_filter
.sf_notify(
141 filter
->sfe_cookie
, so
, event
, param
);
151 __private_extern__
int
154 const struct sockaddr
*from
,
157 sflt_data_flag_t flags
,
160 struct socket_filter_entry
*filter
;
162 int filtered_storage
;
164 if (filtered
== NULL
)
165 filtered
= &filtered_storage
;
168 for (filter
= so
->so_filt
; filter
&& (error
== 0);
169 filter
= filter
->sfe_next_onsocket
) {
170 if (filter
->sfe_filter
->sf_filter
.sf_data_in
) {
171 if (*filtered
== 0) {
174 socket_unlock(so
, 0);
176 error
= filter
->sfe_filter
->sf_filter
.sf_data_in(
177 filter
->sfe_cookie
, so
, from
, data
, control
, flags
);
181 if (*filtered
!= 0) {
189 /* sflt_attach_private
191 * Assumptions: If filter is not NULL, socket_filter_lock is held.
194 __private_extern__
int
197 struct socket_filter
*filter
,
201 struct socket_filter_entry
*entry
= NULL
;
205 if (filter
== NULL
) {
206 /* Find the filter by the handle */
207 lck_mtx_lock(sock_filter_lock
);
210 TAILQ_FOREACH(filter
, &sock_filter_head
, sf_global_next
) {
211 if (filter
->sf_filter
.sf_handle
== handle
)
220 /* allocate the socket filter entry */
221 MALLOC(entry
, struct socket_filter_entry
*, sizeof(*entry
), M_IFADDR
, M_WAITOK
);
228 /* Initialize the socket filter entry and call the attach function */
229 entry
->sfe_filter
= filter
;
230 entry
->sfe_socket
= so
;
231 entry
->sfe_cookie
= NULL
;
232 entry
->sfe_flags
= 0;
233 if (entry
->sfe_filter
->sf_filter
.sf_attach
) {
234 filter
->sf_usecount
++;
237 socket_unlock(so
, 0);
238 error
= entry
->sfe_filter
->sf_filter
.sf_attach(&entry
->sfe_cookie
, so
);
242 filter
->sf_usecount
--;
244 /* If the attach function returns an error, this filter is not attached */
246 FREE(entry
, M_IFADDR
);
253 /* Put the entry in the socket list */
254 entry
->sfe_next_onsocket
= so
->so_filt
;
257 /* Put the entry in the filter list */
258 entry
->sfe_next_onfilter
= filter
->sf_entry_head
;
259 filter
->sf_entry_head
= entry
;
261 /* Incremenet the parent filter's usecount */
262 filter
->sf_usecount
++;
266 lck_mtx_unlock(sock_filter_lock
);
273 /* sflt_detach_private
275 * Assumptions: if you pass 0 in for the second parameter, you are holding the
276 * socket lock for the socket the entry is attached to. If you pass 1 in for
277 * the second parameter, it is assumed that the entry is not on the filter's
278 * list and the socket lock is not held.
283 struct socket_filter_entry
*entry
,
286 struct socket_filter_entry
**next_ptr
;
291 socket_lock(entry
->sfe_socket
, 0);
295 * Attempt to find the entry on the filter's list and
296 * remove it. This prevents a filter detaching at the
297 * same time from attempting to remove the same entry.
299 lck_mtx_lock(sock_filter_lock
);
300 if (!unregistering
) {
301 if ((entry
->sfe_flags
& SFEF_UNREGISTERING
) != 0) {
303 * Another thread is unregistering the filter, we
304 * need to avoid detaching the filter here so the
305 * socket won't go away. Bump up the socket's
306 * usecount so that it won't be freed until after
307 * the filter unregistration has been completed;
308 * at this point the caller has already held the
309 * socket's lock, so we can directly modify the
312 if (!(entry
->sfe_flags
& SFEF_DETACHXREF
)) {
313 entry
->sfe_socket
->so_usecount
++;
314 entry
->sfe_flags
|= SFEF_DETACHXREF
;
316 lck_mtx_unlock(sock_filter_lock
);
319 for (next_ptr
= &entry
->sfe_filter
->sf_entry_head
; *next_ptr
;
320 next_ptr
= &((*next_ptr
)->sfe_next_onfilter
)) {
321 if (*next_ptr
== entry
) {
323 *next_ptr
= entry
->sfe_next_onfilter
;
328 if (!found
&& (entry
->sfe_flags
& SFEF_DETACHUSEZERO
) == 0) {
329 lck_mtx_unlock(sock_filter_lock
);
334 * Clear the removing flag. We will perform the detach here or
335 * request a delayed detach. Since we do an extra ref release
336 * below, bump up the usecount if we haven't done so.
338 entry
->sfe_flags
&= ~SFEF_UNREGISTERING
;
339 if (!(entry
->sfe_flags
& SFEF_DETACHXREF
)) {
340 entry
->sfe_socket
->so_usecount
++;
341 entry
->sfe_flags
|= SFEF_DETACHXREF
;
345 if (entry
->sfe_socket
->so_filteruse
!= 0) {
346 entry
->sfe_flags
|= SFEF_DETACHUSEZERO
;
347 lck_mtx_unlock(sock_filter_lock
);
351 printf("sflt_detach_private unregistering SFEF_DETACHUSEZERO "
352 "so%p so_filteruse %u so_usecount %d\n",
353 entry
->sfe_socket
, entry
->sfe_socket
->so_filteruse
,
354 entry
->sfe_socket
->so_usecount
);
356 socket_unlock(entry
->sfe_socket
, 0);
361 * Check if we are removing the last attached filter and
362 * the parent filter is being unregistered.
364 entry
->sfe_filter
->sf_usecount
--;
365 if ((entry
->sfe_filter
->sf_usecount
== 0) &&
366 (entry
->sfe_filter
->sf_flags
& SFF_DETACHING
) != 0)
369 lck_mtx_unlock(sock_filter_lock
);
371 /* Remove from the socket list */
372 for (next_ptr
= &entry
->sfe_socket
->so_filt
; *next_ptr
;
373 next_ptr
= &((*next_ptr
)->sfe_next_onsocket
)) {
374 if (*next_ptr
== entry
) {
375 *next_ptr
= entry
->sfe_next_onsocket
;
380 if (entry
->sfe_filter
->sf_filter
.sf_detach
)
381 entry
->sfe_filter
->sf_filter
.sf_detach(entry
->sfe_cookie
, entry
->sfe_socket
);
383 if (detached
&& entry
->sfe_filter
->sf_filter
.sf_unregistered
) {
384 entry
->sfe_filter
->sf_filter
.sf_unregistered(entry
->sfe_filter
->sf_filter
.sf_handle
);
385 FREE(entry
->sfe_filter
, M_IFADDR
);
389 socket_unlock(entry
->sfe_socket
, 1);
391 FREE(entry
, M_IFADDR
);
399 if (socket
== NULL
|| handle
== 0)
402 return sflt_attach_private(socket
, NULL
, handle
, 0);
410 struct socket_filter_entry
*filter
;
413 if (socket
== NULL
|| handle
== 0)
416 socket_lock(socket
, 1);
418 for (filter
= socket
->so_filt
; filter
;
419 filter
= filter
->sfe_next_onsocket
) {
420 if (filter
->sfe_filter
->sf_filter
.sf_handle
== handle
)
424 if (filter
!= NULL
) {
425 sflt_detach_private(filter
, 0);
428 socket
->so_filt
= NULL
;
432 socket_unlock(socket
, 1);
440 const struct sflt_filter
*filter
,
445 struct socket_filter
*sock_filt
= NULL
;
446 struct socket_filter
*match
= NULL
;
448 struct protosw
*pr
= pffindproto(domain
, protocol
, type
);
454 if (filter
->sf_attach
== NULL
|| filter
->sf_detach
== NULL
||
455 filter
->sf_handle
== 0 || filter
->sf_name
== NULL
)
458 /* Allocate the socket filter */
459 MALLOC(sock_filt
, struct socket_filter
*, sizeof (*sock_filt
),
461 if (sock_filt
== NULL
) {
465 bzero(sock_filt
, sizeof (*sock_filt
));
467 /* Legacy sflt_filter length; current structure minus extended */
468 len
= sizeof (*filter
) - sizeof (struct sflt_filter_ext
);
470 * Include extended fields if filter defines SFLT_EXTENDED.
471 * We've zeroed out our internal sflt_filter placeholder,
472 * so any unused portion would have been taken care of.
474 if (filter
->sf_flags
& SFLT_EXTENDED
) {
475 unsigned int ext_len
= filter
->sf_len
;
477 if (ext_len
> sizeof (struct sflt_filter_ext
))
478 ext_len
= sizeof (struct sflt_filter_ext
);
482 bcopy(filter
, &sock_filt
->sf_filter
, len
);
484 lck_mtx_lock(sock_filter_lock
);
485 /* Look for an existing entry */
486 TAILQ_FOREACH(match
, &sock_filter_head
, sf_global_next
) {
487 if (match
->sf_filter
.sf_handle
==
488 sock_filt
->sf_filter
.sf_handle
) {
493 /* Add the entry only if there was no existing entry */
495 TAILQ_INSERT_TAIL(&sock_filter_head
, sock_filt
, sf_global_next
);
496 if ((sock_filt
->sf_filter
.sf_flags
& SFLT_GLOBAL
) != 0) {
497 TAILQ_INSERT_TAIL(&pr
->pr_filter_head
, sock_filt
,
499 sock_filt
->sf_proto
= pr
;
502 lck_mtx_unlock(sock_filter_lock
);
505 FREE(sock_filt
, M_IFADDR
);
516 struct socket_filter
*filter
;
517 struct socket_filter_entry
*entry_head
= NULL
;
518 struct socket_filter_entry
*next_entry
= NULL
;
520 /* Find the entry and remove it from the global and protosw lists */
521 lck_mtx_lock(sock_filter_lock
);
522 TAILQ_FOREACH(filter
, &sock_filter_head
, sf_global_next
) {
523 if (filter
->sf_filter
.sf_handle
== handle
)
528 TAILQ_REMOVE(&sock_filter_head
, filter
, sf_global_next
);
529 if ((filter
->sf_filter
.sf_flags
& SFLT_GLOBAL
) != 0) {
530 TAILQ_REMOVE(&filter
->sf_proto
->pr_filter_head
, filter
, sf_protosw_next
);
532 entry_head
= filter
->sf_entry_head
;
533 filter
->sf_entry_head
= NULL
;
534 filter
->sf_flags
|= SFF_DETACHING
;
536 for (next_entry
= entry_head
; next_entry
;
537 next_entry
= next_entry
->sfe_next_onfilter
) {
539 * Mark this as "unregistering"; upon dropping the
540 * lock, another thread may win the race and attempt
541 * to detach a socket from it (e.g. as part of close)
542 * before we get a chance to detach. Setting this
543 * flag practically tells the other thread to go away.
544 * If the other thread wins, this causes an extra
545 * reference hold on the socket so that it won't be
546 * deallocated until after we finish with the detach
547 * for it below. If we win the race, the extra
548 * reference hold is also taken to compensate for the
549 * extra reference release when detach is called
550 * with a "1" for its second parameter.
552 next_entry
->sfe_flags
|= SFEF_UNREGISTERING
;
556 lck_mtx_unlock(sock_filter_lock
);
561 /* We need to detach the filter from any sockets it's attached to */
562 if (entry_head
== 0) {
563 if (filter
->sf_filter
.sf_unregistered
)
564 filter
->sf_filter
.sf_unregistered(filter
->sf_filter
.sf_handle
);
567 next_entry
= entry_head
->sfe_next_onfilter
;
568 sflt_detach_private(entry_head
, 1);
569 entry_head
= next_entry
;
579 const struct sockaddr
* from
,
582 sflt_data_flag_t flags
)
585 if (so
== NULL
|| data
== NULL
) return EINVAL
;
587 if (flags
& sock_data_filt_flag_oob
) {
594 if (sbappendaddr(&so
->so_rcv
, (struct sockaddr
*)(uintptr_t)from
, data
,
601 if (sbappendcontrol(&so
->so_rcv
, data
, control
, NULL
))
606 if (flags
& sock_data_filt_flag_record
) {
607 if (control
|| from
) {
611 if (sbappendrecord(&so
->so_rcv
, (struct mbuf
*)data
))
616 if (sbappend(&so
->so_rcv
, data
))
619 socket_unlock(so
, 1);
624 sock_inject_data_out(
626 const struct sockaddr
* to
,
629 sflt_data_flag_t flags
)
632 if (flags
& sock_data_filt_flag_oob
) sosendflags
= MSG_OOB
;
633 return sosend(so
, (struct sockaddr
*)(uintptr_t)to
, NULL
,
634 data
, control
, sosendflags
);
641 return (sopt
->sopt_dir
== SOPT_GET
) ? sockopt_get
: sockopt_set
;
648 return sopt
->sopt_level
;
655 return sopt
->sopt_name
;
662 return sopt
->sopt_valsize
;
671 return sooptcopyin(sopt
, data
, len
, len
);
680 return sooptcopyout(sopt
, data
, len
);