2 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include <sys/kpi_socketfilter.h>
31 #include <sys/socket.h>
32 #include <sys/param.h>
33 #include <sys/errno.h>
34 #include <sys/malloc.h>
35 #include <sys/protosw.h>
36 #include <kern/locks.h>
37 #include <net/kext_net.h>
39 #include <libkern/libkern.h>
/*
 * File-scope state for the socket filter KPI.
 * NOTE(review): this extraction is line-mangled; the leading numbers
 * (e.g. "43") are original source line numbers fused into the text.
 */
/* Global TAILQ of all registered socket filters. */
43 static struct socket_filter_list sock_filter_head
;
/* Mutex guarding sock_filter_head and per-filter entry lists. */
44 static lck_mtx_t
*sock_filter_lock
= 0;
/* Forward declaration: detach one filter entry from its socket and
 * its parent filter; see the assumptions documented at its definition. */
46 static void sflt_detach_private(struct socket_filter_entry
*entry
, int unregistering
);
/*
 * sflt_init — one-time initialization of the socket filter subsystem:
 * empties the global filter list and allocates sock_filter_lock.
 * NOTE(review): the function name line and braces are missing from this
 * extraction — presumably "sflt_init(void)"; confirm against the original.
 */
48 __private_extern__
void
/* Temporary group/attribute handles used only while creating the mutex. */
51 lck_grp_attr_t
*grp_attrib
= 0;
52 lck_attr_t
*lck_attrib
= 0;
53 lck_grp_t
*lck_group
= 0;
/* Start with an empty global list of registered filters. */
55 TAILQ_INIT(&sock_filter_head
);
57 /* Allocate a spin lock */
58 grp_attrib
= lck_grp_attr_alloc_init();
59 lck_group
= lck_grp_alloc_init("socket filter lock", grp_attrib
);
/* The group attributes are only needed to create the group; free now. */
60 lck_grp_attr_free(grp_attrib
);
61 lck_attrib
= lck_attr_alloc_init();
62 sock_filter_lock
= lck_mtx_alloc_init(lck_group
, lck_attrib
);
/* Group and attr objects are not needed once the mutex exists. */
63 lck_grp_free(lck_group
);
64 lck_attr_free(lck_attrib
);
/*
 * sflt_initsock — attach every filter registered on this socket's
 * protocol (the protosw's pr_filter_head list) to the socket, under
 * sock_filter_lock.
 * NOTE(review): the signature line is missing from this extraction —
 * presumably takes "struct socket *so"; closing braces also absent.
 */
67 __private_extern__
void
71 struct protosw
*proto
= so
->so_proto
;
72 struct socket_filter
*filter
;
/* Skip the lock entirely when no global filters exist for this proto. */
74 if (TAILQ_FIRST(&proto
->pr_filter_head
) != NULL
) {
75 lck_mtx_lock(sock_filter_lock
);
76 TAILQ_FOREACH(filter
, &proto
->pr_filter_head
, sf_protosw_next
) {
/* Attach with an explicit filter pointer; handle arg unused (0). */
77 sflt_attach_private(so
, filter
, 0, 0);
79 lck_mtx_unlock(sock_filter_lock
);
/*
 * sflt_termsock — detach all filter entries from a socket being torn
 * down, walking the socket's so_filt list.
 * NOTE(review): signature line missing from this extraction —
 * presumably "sflt_termsock(struct socket *so)".
 */
83 __private_extern__
void
87 struct socket_filter_entry
*filter
;
88 struct socket_filter_entry
*filter_next
;
/* Cache the next pointer before detaching: sflt_detach_private
 * frees the current entry, so filter->sfe_next_onsocket would dangle. */
90 for (filter
= so
->so_filt
; filter
; filter
= filter_next
) {
91 filter_next
= filter
->sfe_next_onsocket
;
92 sflt_detach_private(filter
, 0);
/*
 * NOTE(review): two functions are fused here by the extraction. The
 * first "__private_extern__ void" (original line 97) is the remnant of
 * one function whose body is entirely missing (presumably sflt_use);
 * the second begins the body visible below (presumably sflt_unuse),
 * which performs delayed filter detaches once so_filteruse reaches 0.
 */
97 __private_extern__
void
104 __private_extern__
void
/* Only run delayed detaches when no filter callback is in flight. */
109 if (so
->so_filteruse
== 0) {
110 struct socket_filter_entry
*filter
;
111 struct socket_filter_entry
*next_filter
;
112 // search for detaching filters
/* Cache next pointer: detaching frees the current entry. */
113 for (filter
= so
->so_filt
; filter
; filter
= next_filter
) {
114 next_filter
= filter
->sfe_next_onsocket
;
/* Entries flagged SFEF_DETACHUSEZERO deferred their detach until
 * the socket's filter-use count dropped to zero; do it now. */
116 if (filter
->sfe_flags
& SFEF_DETACHUSEZERO
) {
117 sflt_detach_private(filter
, 0);
/*
 * sflt_notify — deliver an event notification to every filter attached
 * to the socket that implements sf_notify.
 * NOTE(review): signature line missing — presumably
 * (struct socket *so, sflt_event_t event, void *param); the matching
 * socket_lock() after the callback is also missing from this extraction.
 */
123 __private_extern__
void
129 struct socket_filter_entry
*filter
;
132 for (filter
= so
->so_filt
; filter
;
133 filter
= filter
->sfe_next_onsocket
) {
134 if (filter
->sfe_filter
->sf_filter
.sf_notify
) {
/* Drop the socket lock before calling out to the filter so the
 * callback may safely re-enter socket KPIs. */
138 socket_unlock(so
, 0);
140 filter
->sfe_filter
->sf_filter
.sf_notify(
141 filter
->sfe_cookie
, so
, event
, param
);
/*
 * sflt_data_in — run inbound data through each attached filter's
 * sf_data_in callback, stopping on the first error. "filtered" is an
 * optional out-parameter recording whether any filter was invoked; a
 * local is substituted when the caller passes NULL.
 * NOTE(review): the full parameter list, the "error" declaration, and
 * several interior lines are missing from this extraction.
 */
151 __private_extern__
int
154 const struct sockaddr
*from
,
157 sflt_data_flag_t flags
,
160 struct socket_filter_entry
*filter
;
162 int filtered_storage
;
/* Allow callers that don't care about the filtered flag. */
164 if (filtered
== NULL
)
165 filtered
= &filtered_storage
;
/* Stop iterating as soon as a filter reports an error. */
168 for (filter
= so
->so_filt
; filter
&& (error
== 0);
169 filter
= filter
->sfe_next_onsocket
) {
170 if (filter
->sfe_filter
->sf_filter
.sf_data_in
) {
/* First callback invocation: drop the socket lock before
 * calling out (NOTE(review): the lines that set *filtered and
 * re-take the lock are missing from this extraction). */
171 if (*filtered
== 0) {
174 socket_unlock(so
, 0);
176 error
= filter
->sfe_filter
->sf_filter
.sf_data_in(
177 filter
->sfe_cookie
, so
, from
, data
, control
, flags
);
181 if (*filtered
!= 0) {
/*
 * sflt_attach_private — attach a filter to a socket. Either an explicit
 * filter pointer is supplied (caller holds sock_filter_lock) or the
 * filter is looked up by handle on the global list. Allocates a
 * socket_filter_entry, runs the filter's sf_attach callback, and links
 * the entry onto both the socket's and the filter's entry lists.
 * NOTE(review): signature, error declaration, several braces/returns,
 * and the list-link line for so_filt are missing from this extraction.
 */
189 /* sflt_attach_private
191 * Assumptions: If filter is not NULL, socket_filter_lock is held.
194 __private_extern__
int
197 struct socket_filter
*filter
,
201 struct socket_filter_entry
*entry
= NULL
;
205 if (filter
== NULL
) {
206 /* Find the filter by the handle */
207 lck_mtx_lock(sock_filter_lock
);
210 TAILQ_FOREACH(filter
, &sock_filter_head
, sf_global_next
) {
211 if (filter
->sf_filter
.sf_handle
== handle
)
220 /* allocate the socket filter entry */
221 MALLOC(entry
, struct socket_filter_entry
*, sizeof(*entry
), M_IFADDR
, M_WAITOK
);
228 /* Initialize the socket filter entry and call the attach function */
229 entry
->sfe_filter
= filter
;
230 entry
->sfe_socket
= so
;
231 entry
->sfe_cookie
= NULL
;
232 entry
->sfe_flags
= 0;
233 if (entry
->sfe_filter
->sf_filter
.sf_attach
) {
/* Hold a use on the filter across the unlocked callback so it
 * cannot be freed while sf_attach runs. */
234 filter
->sf_usecount
++;
/* Release the socket lock while calling out to the filter. */
237 socket_unlock(so
, 0);
238 error
= entry
->sfe_filter
->sf_filter
.sf_attach(&entry
->sfe_cookie
, so
);
242 filter
->sf_usecount
--;
244 /* If the attach function returns an error, this filter is not attached */
246 FREE(entry
, M_IFADDR
);
253 /* Put the entry in the socket list */
254 entry
->sfe_next_onsocket
= so
->so_filt
;
257 /* Put the entry in the filter list */
258 entry
->sfe_next_onfilter
= filter
->sf_entry_head
;
259 filter
->sf_entry_head
= entry
;
261 /* Increment the parent filter's usecount */
262 filter
->sf_usecount
++;
266 lck_mtx_unlock(sock_filter_lock
);
/*
 * sflt_detach_private — detach one filter entry from its socket and its
 * parent filter, running sf_detach (and sf_unregistered when this was
 * the filter's last entry during an unregister). The "unregistering"
 * parameter selects the locking contract, documented below.
 * NOTE(review): the signature line, several flow-control lines (braces,
 * returns, the "found"/"detached" declarations and assignments) are
 * missing from this extraction; the fragments below preserve the order
 * of what is visible.
 */
273 /* sflt_detach_private
275 * Assumptions: if you pass 0 in for the second parameter, you are holding the
276 * socket lock for the socket the entry is attached to. If you pass 1 in for
277 * the second parameter, it is assumed that the entry is not on the filter's
278 * list and the socket lock is not held.
283 struct socket_filter_entry
*entry
,
286 struct socket_filter_entry
**next_ptr
;
/* Unregister path: we must take the socket lock ourselves. */
291 socket_lock(entry
->sfe_socket
, 0);
295 * Attempt to find the entry on the filter's list and
296 * remove it. This prevents a filter detaching at the
297 * same time from attempting to remove the same entry.
299 lck_mtx_lock(sock_filter_lock
);
300 if (!unregistering
) {
/* Another thread owns the unregister; leave detaching to it. */
301 if ((entry
->sfe_flags
& SFEF_UNREGISTERING
) != 0) {
303 * Another thread is unregistering the filter, we
304 * need to avoid detaching the filter here so the
305 * socket won't go away. Bump up the socket's
306 * usecount so that it won't be freed until after
307 * the filter unregistration has been completed;
308 * at this point the caller has already held the
309 * socket's lock, so we can directly modify the
/* Take the extra socket reference only once per entry. */
312 if (!(entry
->sfe_flags
& SFEF_DETACHXREF
)) {
313 entry
->sfe_socket
->so_usecount
++;
314 entry
->sfe_flags
|= SFEF_DETACHXREF
;
316 lck_mtx_unlock(sock_filter_lock
);
/* Unlink the entry from the parent filter's singly-linked list
 * via a pointer-to-pointer walk. */
319 for (next_ptr
= &entry
->sfe_filter
->sf_entry_head
; *next_ptr
;
320 next_ptr
= &((*next_ptr
)->sfe_next_onfilter
)) {
321 if (*next_ptr
== entry
) {
323 *next_ptr
= entry
->sfe_next_onfilter
;
/* Not on the filter's list and not a deferred detach — someone
 * else already detached it; nothing for us to do. */
328 if (!found
&& (entry
->sfe_flags
& SFEF_DETACHUSEZERO
) == 0) {
329 lck_mtx_unlock(sock_filter_lock
);
334 * Clear the removing flag. We will perform the detach here or
335 * request a delayed detach. Since we do an extra ref release
336 * below, bump up the usecount if we haven't done so.
338 entry
->sfe_flags
&= ~SFEF_UNREGISTERING
;
339 if (!(entry
->sfe_flags
& SFEF_DETACHXREF
)) {
340 entry
->sfe_socket
->so_usecount
++;
341 entry
->sfe_flags
|= SFEF_DETACHXREF
;
/* A filter callback is in flight on this socket: defer the real
 * detach until sflt_unuse sees so_filteruse == 0. */
345 if (entry
->sfe_socket
->so_filteruse
!= 0) {
346 entry
->sfe_flags
|= SFEF_DETACHUSEZERO
;
347 lck_mtx_unlock(sock_filter_lock
);
351 printf("sflt_detach_private unregistering SFEF_DETACHUSEZERO "
352 "so%p so_filteruse %u so_usecount %d\n",
353 entry
->sfe_socket
, entry
->sfe_socket
->so_filteruse
,
354 entry
->sfe_socket
->so_usecount
);
356 socket_unlock(entry
->sfe_socket
, 0);
362 * Check if we are removing the last attached filter and
363 * the parent filter is being unregistered.
365 entry
->sfe_filter
->sf_usecount
--;
366 if ((entry
->sfe_filter
->sf_usecount
== 0) &&
367 (entry
->sfe_filter
->sf_flags
& SFF_DETACHING
) != 0)
370 lck_mtx_unlock(sock_filter_lock
);
372 /* Remove from the socket list */
373 for (next_ptr
= &entry
->sfe_socket
->so_filt
; *next_ptr
;
374 next_ptr
= &((*next_ptr
)->sfe_next_onsocket
)) {
375 if (*next_ptr
== entry
) {
376 *next_ptr
= entry
->sfe_next_onsocket
;
381 if (entry
->sfe_filter
->sf_filter
.sf_detach
)
382 entry
->sfe_filter
->sf_filter
.sf_detach(entry
->sfe_cookie
, entry
->sfe_socket
);
/* Last entry of a filter being unregistered: notify the owner and
 * free the filter itself. */
384 if (detached
&& entry
->sfe_filter
->sf_filter
.sf_unregistered
) {
385 entry
->sfe_filter
->sf_filter
.sf_unregistered(entry
->sfe_filter
->sf_filter
.sf_handle
);
386 FREE(entry
->sfe_filter
, M_IFADDR
);
/* Unlock with "1": drops the extra socket reference taken above. */
390 socket_unlock(entry
->sfe_socket
, 1);
392 FREE(entry
, M_IFADDR
);
/*
 * sflt_attach (public KPI) — validate arguments, then attach the filter
 * identified by "handle" via sflt_attach_private (NULL filter pointer
 * forces lookup by handle).
 * NOTE(review): the signature and the error return for the invalid-arg
 * case are missing from this extraction.
 */
400 if (socket
== NULL
|| handle
== 0)
403 return sflt_attach_private(socket
, NULL
, handle
, 0);
/*
 * sflt_detach (public KPI) — find the entry for "handle" on the
 * socket's filter list, under the socket lock, and detach it.
 * NOTE(review): signature, the invalid-arg return, and the not-found
 * error path are missing from this extraction; the "socket->so_filt =
 * NULL" fragment presumably belongs to a branch not fully visible here.
 */
411 struct socket_filter_entry
*filter
;
414 if (socket
== NULL
|| handle
== 0)
417 socket_lock(socket
, 1);
/* Linear search of the socket's attached filters by handle. */
419 for (filter
= socket
->so_filt
; filter
;
420 filter
= filter
->sfe_next_onsocket
) {
421 if (filter
->sfe_filter
->sf_filter
.sf_handle
== handle
)
425 if (filter
!= NULL
) {
426 sflt_detach_private(filter
, 0);
429 socket
->so_filt
= NULL
;
433 socket_unlock(socket
, 1);
/*
 * sflt_register (public KPI) — validate and copy the caller's
 * sflt_filter into a freshly allocated socket_filter, reject duplicate
 * handles, and insert it on the global list (and on the protocol's
 * pr_filter_head when SFLT_GLOBAL is set).
 * NOTE(review): signature, "len" declaration, several returns/braces,
 * and the MALLOC flags line are missing from this extraction.
 */
441 const struct sflt_filter
*filter
,
446 struct socket_filter
*sock_filt
= NULL
;
447 struct socket_filter
*match
= NULL
;
/* Resolve the protosw for (domain, protocol, type). */
449 struct protosw
*pr
= pffindproto(domain
, protocol
, type
);
/* Mandatory callbacks and identity fields must be supplied. */
455 if (filter
->sf_attach
== NULL
|| filter
->sf_detach
== NULL
||
456 filter
->sf_handle
== 0 || filter
->sf_name
== NULL
)
459 /* Allocate the socket filter */
460 MALLOC(sock_filt
, struct socket_filter
*, sizeof (*sock_filt
),
462 if (sock_filt
== NULL
) {
466 bzero(sock_filt
, sizeof (*sock_filt
));
468 /* Legacy sflt_filter length; current structure minus extended */
469 len
= sizeof (*filter
) - sizeof (struct sflt_filter_ext
);
471 * Include extended fields if filter defines SFLT_EXTENDED.
472 * We've zeroed out our internal sflt_filter placeholder,
473 * so any unused portion would have been taken care of.
475 if (filter
->sf_flags
& SFLT_EXTENDED
) {
/* Clamp the caller-declared extension length to what we know. */
476 unsigned int ext_len
= filter
->sf_len
;
478 if (ext_len
> sizeof (struct sflt_filter_ext
))
479 ext_len
= sizeof (struct sflt_filter_ext
);
483 bcopy(filter
, &sock_filt
->sf_filter
, len
);
485 lck_mtx_lock(sock_filter_lock
);
486 /* Look for an existing entry */
487 TAILQ_FOREACH(match
, &sock_filter_head
, sf_global_next
) {
488 if (match
->sf_filter
.sf_handle
==
489 sock_filt
->sf_filter
.sf_handle
) {
494 /* Add the entry only if there was no existing entry */
496 TAILQ_INSERT_TAIL(&sock_filter_head
, sock_filt
, sf_global_next
);
/* Global filters also hang off the protocol so sflt_initsock can
 * auto-attach them to new sockets. */
497 if ((sock_filt
->sf_filter
.sf_flags
& SFLT_GLOBAL
) != 0) {
498 TAILQ_INSERT_TAIL(&pr
->pr_filter_head
, sock_filt
,
500 sock_filt
->sf_proto
= pr
;
503 lck_mtx_unlock(sock_filter_lock
);
/* Duplicate-handle path: discard the allocation. */
506 FREE(sock_filt
, M_IFADDR
);
/*
 * sflt_unregister (public KPI) — remove the filter for "handle" from
 * the global (and protocol) lists, mark all its attached entries
 * SFEF_UNREGISTERING under the lock, then detach each entry (or call
 * sf_unregistered directly when nothing was attached).
 * NOTE(review): signature, not-found return, and loop braces are
 * missing from this extraction.
 */
517 struct socket_filter
*filter
;
518 struct socket_filter_entry
*entry_head
= NULL
;
519 struct socket_filter_entry
*next_entry
= NULL
;
521 /* Find the entry and remove it from the global and protosw lists */
522 lck_mtx_lock(sock_filter_lock
);
523 TAILQ_FOREACH(filter
, &sock_filter_head
, sf_global_next
) {
524 if (filter
->sf_filter
.sf_handle
== handle
)
529 TAILQ_REMOVE(&sock_filter_head
, filter
, sf_global_next
);
530 if ((filter
->sf_filter
.sf_flags
& SFLT_GLOBAL
) != 0) {
531 TAILQ_REMOVE(&filter
->sf_proto
->pr_filter_head
, filter
, sf_protosw_next
);
/* Steal the entry list; detaching happens after the lock drops. */
533 entry_head
= filter
->sf_entry_head
;
534 filter
->sf_entry_head
= NULL
;
535 filter
->sf_flags
|= SFF_DETACHING
;
537 for (next_entry
= entry_head
; next_entry
;
538 next_entry
= next_entry
->sfe_next_onfilter
) {
540 * Mark this as "unregistering"; upon dropping the
541 * lock, another thread may win the race and attempt
542 * to detach a socket from it (e.g. as part of close)
543 * before we get a chance to detach. Setting this
544 * flag practically tells the other thread to go away.
545 * If the other thread wins, this causes an extra
546 * reference hold on the socket so that it won't be
547 * deallocated until after we finish with the detach
548 * for it below. If we win the race, the extra
549 * reference hold is also taken to compensate for the
550 * extra reference release when detach is called
551 * with a "1" for its second parameter.
553 next_entry
->sfe_flags
|= SFEF_UNREGISTERING
;
557 lck_mtx_unlock(sock_filter_lock
);
562 /* We need to detach the filter from any sockets it's attached to */
563 if (entry_head
== 0) {
/* No attached sockets: report unregistration immediately. */
564 if (filter
->sf_filter
.sf_unregistered
)
565 filter
->sf_filter
.sf_unregistered(filter
->sf_filter
.sf_handle
);
/* Cache the next entry before detach frees the current one;
 * "1" tells sflt_detach_private we hold no socket lock. */
568 next_entry
= entry_head
->sfe_next_onfilter
;
569 sflt_detach_private(entry_head
, 1);
570 entry_head
= next_entry
;
/*
 * sock_inject_data_in (public KPI) — append data to a socket's receive
 * buffer as if it had arrived from the network, choosing the sockbuf
 * append routine by flags: OOB, with-address, with-control, record, or
 * plain stream append.
 * NOTE(review): the signature head, socket_lock call, error handling,
 * and the sorwakeup/return lines are missing from this extraction.
 */
580 const struct sockaddr
* from
,
583 sflt_data_flag_t flags
)
586 if (so
== NULL
|| data
== NULL
) return EINVAL
;
/* Out-of-band data is not supported by this injection path. */
588 if (flags
& sock_data_filt_flag_oob
) {
595 if (sbappendaddr(&so
->so_rcv
, (struct sockaddr
*)from
, data
,
602 if (sbappendcontrol(&so
->so_rcv
, data
, control
, NULL
))
607 if (flags
& sock_data_filt_flag_record
) {
/* Record append cannot also carry an address or control mbufs. */
608 if (control
|| from
) {
612 if (sbappendrecord(&so
->so_rcv
, (struct mbuf
*)data
))
617 if (sbappend(&so
->so_rcv
, data
))
620 socket_unlock(so
, 1);
/*
 * sock_inject_data_out (public KPI) — send data on the socket via
 * sosend(), translating the OOB filter flag to MSG_OOB.
 * NOTE(review): the return type, remaining parameters, and the
 * "sosendflags" declaration are missing from this extraction.
 */
625 sock_inject_data_out(
627 const struct sockaddr
* to
,
630 sflt_data_flag_t flags
)
633 if (flags
& sock_data_filt_flag_oob
) sosendflags
= MSG_OOB
;
634 return sosend(so
, (struct sockaddr
*)to
, NULL
,
635 data
, control
, sosendflags
);
/*
 * sockopt accessor KPIs. Each body below is one tiny getter whose
 * signature line is missing from this extraction; the fragments appear
 * in order: direction, level, name, valsize, copyin, copyout.
 */
/* Direction of the sockopt request: get vs. set. */
642 return (sopt
->sopt_dir
== SOPT_GET
) ? sockopt_get
: sockopt_set
;
/* Protocol level (e.g. SOL_SOCKET) of the option. */
649 return sopt
->sopt_level
;
/* Option name within the level. */
656 return sopt
->sopt_name
;
/* Size in bytes of the option value. */
663 return sopt
->sopt_valsize
;
/* Copy option data from the request into "data" (min/len bounds). */
672 return sooptcopyin(sopt
, data
, len
, len
);
/* Copy option data from "data" back to the requester. */
681 return sooptcopyout(sopt
, data
, len
);