/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>

#include <kern/locks.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <net/kext_net.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#define SFEF_ATTACHED   0x1     /* SFE is on socket list */
#define SFEF_NODETACH   0x2     /* Detach should not be called */
#define SFEF_NOSOCKET   0x4     /* Socket is gone */
struct socket_filter_entry {
    struct socket_filter_entry  *sfe_next_onsocket;
    struct socket_filter_entry  *sfe_next_onfilter;
    struct socket_filter_entry  *sfe_next_oncleanup;

    struct socket_filter        *sfe_filter;
    struct socket               *sfe_socket;
    void                        *sfe_cookie;

    uint32_t                    sfe_flags;
    int32_t                     sfe_refcount;
};

struct socket_filter {
    TAILQ_ENTRY(socket_filter)  sf_protosw_next;
    TAILQ_ENTRY(socket_filter)  sf_global_next;
    struct socket_filter_entry  *sf_entry_head;

    struct protosw              *sf_proto;
    struct sflt_filter          sf_filter;
    u_int32_t                   sf_refcount;
};

TAILQ_HEAD(socket_filter_list, socket_filter);

static struct socket_filter_list    sock_filter_head;
static lck_rw_t                     *sock_filter_lock = NULL;
static lck_mtx_t                    *sock_filter_cleanup_lock = NULL;
static struct socket_filter_entry   *sock_filter_cleanup_entries = NULL;
static thread_t                     sock_filter_cleanup_thread = NULL;

static void sflt_cleanup_thread(void *, wait_result_t);
static void sflt_detach_locked(struct socket_filter_entry *entry);
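/*
 * List topology: each socket_filter_entry is linked onto two
 * singly-linked lists at once -- the per-socket list rooted at
 * so->so_filt (via sfe_next_onsocket) and the per-filter list rooted
 * at sf_entry_head (via sfe_next_onfilter) -- plus a transient
 * cleanup list (sfe_next_oncleanup) used only while an entry awaits
 * deferred teardown. Registered filters themselves sit on the global
 * sock_filter_head list and, when marked SFLT_GLOBAL, on their
 * protosw's pr_filter_head list as well.
 */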
#pragma mark -- Internal State Management --

__private_extern__ void
sflt_init(void)
{
    lck_grp_attr_t  *grp_attrib = 0;
    lck_attr_t      *lck_attrib = 0;
    lck_grp_t       *lck_group = 0;

    TAILQ_INIT(&sock_filter_head);

    /* Allocate a rw lock */
    grp_attrib = lck_grp_attr_alloc_init();
    lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
    lck_grp_attr_free(grp_attrib);
    lck_attrib = lck_attr_alloc_init();
    sock_filter_lock = lck_rw_alloc_init(lck_group, lck_attrib);
    sock_filter_cleanup_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
    lck_grp_free(lck_group);
    lck_attr_free(lck_attrib);
}
static void
sflt_retain_locked(struct socket_filter *filter)
{
    filter->sf_refcount++;
}

static void
sflt_release_locked(struct socket_filter *filter)
{
    filter->sf_refcount--;
    if (filter->sf_refcount == 0) {
        // Call the unregistered function
        if (filter->sf_filter.sf_unregistered) {
            lck_rw_unlock_exclusive(sock_filter_lock);
            filter->sf_filter.sf_unregistered(filter->sf_filter.sf_handle);
            lck_rw_lock_exclusive(sock_filter_lock);
        }

        // Free the filter
        FREE(filter, M_IFADDR);
    }
}
static void
sflt_entry_retain(struct socket_filter_entry *entry)
{
    if (OSIncrementAtomic(&entry->sfe_refcount) <= 0)
        panic("sflt_entry_retain - sfe_refcount <= 0\n");
}
static void
sflt_entry_release(struct socket_filter_entry *entry)
{
    SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);
    if (old == 1) {
        // That was the last reference

        // Take the cleanup lock
        lck_mtx_lock(sock_filter_cleanup_lock);

        // Put this item on the cleanup list
        entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
        sock_filter_cleanup_entries = entry;

        // If the item is the first item in the list
        if (entry->sfe_next_oncleanup == NULL) {
            if (sock_filter_cleanup_thread == NULL) {
                // Create the cleanup thread
                kernel_thread_start(sflt_cleanup_thread, NULL,
                    &sock_filter_cleanup_thread);
            } else {
                // Wake up the cleanup thread
                wakeup(&sock_filter_cleanup_entries);
            }
        }

        // Drop the cleanup lock
        lck_mtx_unlock(sock_filter_cleanup_lock);
    } else if (old <= 0) {
        panic("sflt_entry_release - sfe_refcount (%d) <= 0\n", (int)old);
    }
}
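/*
 * Note on the deferred-cleanup design: the last reference to an entry
 * is often dropped while the caller still holds the socket filter lock
 * (shared or exclusive) or the socket lock, where calling sf_detach or
 * freeing the entry inline would be unsafe. The release path therefore
 * hands the entry to the dedicated thread below, which tears entries
 * down with the filter lock held exclusively and no socket lock held.
 */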
static void
sflt_cleanup_thread(
    __unused void *blah,
    __unused wait_result_t blah2)
{
    while (1) {
        lck_mtx_lock(sock_filter_cleanup_lock);
        while (sock_filter_cleanup_entries == NULL) {
            // Sleep until we've got something better to do
            msleep(&sock_filter_cleanup_entries, sock_filter_cleanup_lock,
                PWAIT, "sflt_cleanup", NULL);
        }

        // Pull the current list of dead items
        struct socket_filter_entry *dead = sock_filter_cleanup_entries;
        sock_filter_cleanup_entries = NULL;

        // Drop the cleanup lock
        lck_mtx_unlock(sock_filter_cleanup_lock);

        // Take the socket filter lock
        lck_rw_lock_exclusive(sock_filter_lock);

        // Cleanup every dead item
        struct socket_filter_entry *entry;
        for (entry = dead; entry; entry = dead) {
            struct socket_filter_entry **nextpp;

            dead = entry->sfe_next_oncleanup;

            // Call the detach function if necessary - drop the lock
            if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
                entry->sfe_filter->sf_filter.sf_detach) {
                entry->sfe_flags |= SFEF_NODETACH;
                lck_rw_unlock_exclusive(sock_filter_lock);

                // Warning - passing a potentially dead socket may be bad
                entry->sfe_filter->sf_filter.sf_detach(
                    entry->sfe_cookie, entry->sfe_socket);

                lck_rw_lock_exclusive(sock_filter_lock);
            }

            // Pull entry off the socket list -- if the socket still exists
            if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
                for (nextpp = &entry->sfe_socket->so_filt; *nextpp;
                    nextpp = &(*nextpp)->sfe_next_onsocket) {
                    if (*nextpp == entry) {
                        *nextpp = entry->sfe_next_onsocket;
                        break;
                    }
                }
            }

            // Pull entry off the filter list
            for (nextpp = &entry->sfe_filter->sf_entry_head; *nextpp;
                nextpp = &(*nextpp)->sfe_next_onfilter) {
                if (*nextpp == entry) {
                    *nextpp = entry->sfe_next_onfilter;
                    break;
                }
            }

            // Release the filter -- may drop lock, but that's okay
            sflt_release_locked(entry->sfe_filter);
            entry->sfe_socket = NULL;
            entry->sfe_filter = NULL;
            FREE(entry, M_IFADDR);
        }

        // Drop the socket filter lock
        lck_rw_unlock_exclusive(sock_filter_lock);
    }
}
static int
sflt_attach_locked(
    struct socket           *so,
    struct socket_filter    *filter,
    int                     socklocked)
{
    int error = 0;
    struct socket_filter_entry *entry = NULL;

    if (filter == NULL)
        error = ENOENT;

    if (error == 0) {
        /* allocate the socket filter entry */
        MALLOC(entry, struct socket_filter_entry *, sizeof(*entry),
            M_IFADDR, M_WAITOK);
        if (entry == NULL)
            error = ENOMEM;
    }

    if (error == 0) {
        /* Initialize the socket filter entry */
        entry->sfe_cookie = NULL;
        entry->sfe_flags = SFEF_ATTACHED;
        entry->sfe_refcount = 1; // corresponds to SFEF_ATTACHED flag set

        /* Put the entry in the filter list */
        sflt_retain_locked(filter);
        entry->sfe_filter = filter;
        entry->sfe_next_onfilter = filter->sf_entry_head;
        filter->sf_entry_head = entry;

        /* Put the entry on the socket filter list */
        entry->sfe_socket = so;
        entry->sfe_next_onsocket = so->so_filt;
        so->so_filt = entry;

        if (entry->sfe_filter->sf_filter.sf_attach) {
            // Retain the entry while we call attach
            sflt_entry_retain(entry);

            // Release the filter lock -- callers must be aware we will do this
            lck_rw_unlock_exclusive(sock_filter_lock);

            // Unlock the socket if the caller left it locked
            if (socklocked)
                socket_unlock(so, 0);

            // It's finally safe to call the filter function
            error = entry->sfe_filter->sf_filter.sf_attach(
                &entry->sfe_cookie, so);

            // Lock the socket again
            if (socklocked)
                socket_lock(so, 0);

            // Lock the filters again
            lck_rw_lock_exclusive(sock_filter_lock);

            // If the attach function returns an error, this filter must be detached
            if (error) {
                entry->sfe_flags |= SFEF_NODETACH; // don't call sf_detach
                sflt_detach_locked(entry);
            }

            // Release the retain we held through the attach call
            sflt_entry_release(entry);
        }
    }

    return error;
}
static errno_t
sflt_attach_internal(
    socket_t    socket,
    sflt_handle handle)
{
    if (socket == NULL || handle == 0)
        return EINVAL;

    int result = EINVAL;

    lck_rw_lock_exclusive(sock_filter_lock);

    struct socket_filter *filter = NULL;
    TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
        if (filter->sf_filter.sf_handle == handle)
            break;
    }

    if (filter) {
        result = sflt_attach_locked(socket, filter, 1);
    }

    lck_rw_unlock_exclusive(sock_filter_lock);

    return result;
}
static void
sflt_detach_locked(struct socket_filter_entry *entry)
{
    if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
        entry->sfe_flags &= ~SFEF_ATTACHED;
        sflt_entry_release(entry);
    }
}
#pragma mark -- Socket Layer Hooks --

__private_extern__ void
sflt_initsock(struct socket *so)
{
    struct protosw *proto = so->so_proto;

    lck_rw_lock_shared(sock_filter_lock);
    if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
        // Promote lock to exclusive
        if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock))
            lck_rw_lock_exclusive(sock_filter_lock);

        // Warning: A filter unregistering will be pulled out of the list.
        // This could happen while we drop the lock in sflt_attach_locked
        // or sflt_release_locked. For this reason we retain a reference
        // on the filter (or next_filter) while calling this function.
        //
        // This protects us from a panic, but it could result in a
        // socket being created without all of the global filters if
        // we're attaching a filter as it is removed, if that's possible.
        struct socket_filter *filter = TAILQ_FIRST(&proto->pr_filter_head);
        sflt_retain_locked(filter);

        while (filter) {
            struct socket_filter *filter_next;

            // Warning: sflt_attach_locked will drop the lock
            sflt_attach_locked(so, filter, 0);

            filter_next = TAILQ_NEXT(filter, sf_protosw_next);
            if (filter_next)
                sflt_retain_locked(filter_next);

            // Warning: sflt_release_locked may remove the filter from the queue
            sflt_release_locked(filter);
            filter = filter_next;
        }
    }
    lck_rw_done(sock_filter_lock);
}
/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.
 */
__private_extern__ void
sflt_termsock(struct socket *so)
{
    lck_rw_lock_exclusive(sock_filter_lock);

    struct socket_filter_entry *entry;

    while ((entry = so->so_filt) != NULL) {
        // Pull filter off the socket
        so->so_filt = entry->sfe_next_onsocket;
        entry->sfe_flags |= SFEF_NOSOCKET;

        // Detach the filter
        sflt_detach_locked(entry);

        // On sflt_termsock, we can't return until the detach function
        // has been called. Call the detach function - this is gross
        // because the socket filter entry could be freed when we drop
        // the lock, so we make copies on the stack and retain
        // everything we need before dropping the lock.
        if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
            entry->sfe_filter->sf_filter.sf_detach) {
            void *sfe_cookie = entry->sfe_cookie;
            struct socket_filter *sfe_filter = entry->sfe_filter;

            // Retain the socket filter
            sflt_retain_locked(sfe_filter);

            // Mark that we've called the detach function
            entry->sfe_flags |= SFEF_NODETACH;

            // Drop the lock around the call to the detach function
            lck_rw_unlock_exclusive(sock_filter_lock);
            sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
            lck_rw_lock_exclusive(sock_filter_lock);

            // Release the filter
            sflt_release_locked(sfe_filter);
        }
    }

    lck_rw_unlock_exclusive(sock_filter_lock);
}
__private_extern__ void
sflt_notify(struct socket *so, sflt_event_t event, void *param)
{
    if (so->so_filt == NULL)
        return;

    struct socket_filter_entry *entry;
    int unlocked = 0;

    lck_rw_lock_shared(sock_filter_lock);
    for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_notify) {
            // Retain the filter entry and release the socket filter lock
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(sock_filter_lock);

            // If the socket isn't already unlocked, unlock it
            if (unlocked == 0) {
                unlocked = 1;
                socket_unlock(so, 0);
            }

            // Finally call the filter
            entry->sfe_filter->sf_filter.sf_notify(
                entry->sfe_cookie, so, event, param);

            // Take the socket filter lock again and release the entry
            lck_rw_lock_shared(sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(sock_filter_lock);

    if (unlocked != 0) {
        socket_lock(so, 0);
    }
}
__private_extern__ int
sflt_ioctl(struct socket *so, u_long cmd, caddr_t data)
{
    if (so->so_filt == NULL)
        return 0;

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_ioctl) {
            // Retain the filter entry and release the socket filter lock
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(sock_filter_lock);

            // If the socket isn't already unlocked, unlock it
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            error = entry->sfe_filter->sf_filter.sf_ioctl(
                entry->sfe_cookie, so, cmd, data);

            // Take the socket filter lock again and release the entry
            lck_rw_lock_shared(sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}
__private_extern__ int
sflt_bind(struct socket *so, const struct sockaddr *nam)
{
    if (so->so_filt == NULL)
        return 0;

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_bind) {
            // Retain the filter entry and release the socket filter lock
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(sock_filter_lock);

            // If the socket isn't already unlocked, unlock it
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            error = entry->sfe_filter->sf_filter.sf_bind(
                entry->sfe_cookie, so, nam);

            // Take the socket filter lock again and release the entry
            lck_rw_lock_shared(sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}
__private_extern__ int
sflt_listen(struct socket *so)
{
    if (so->so_filt == NULL)
        return 0;

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_listen) {
            // Retain the filter entry and release the socket filter lock
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(sock_filter_lock);

            // If the socket isn't already unlocked, unlock it
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            error = entry->sfe_filter->sf_filter.sf_listen(
                entry->sfe_cookie, so);

            // Take the socket filter lock again and release the entry
            lck_rw_lock_shared(sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}
__private_extern__ int
sflt_accept(struct socket *head, struct socket *so,
    const struct sockaddr *local, const struct sockaddr *remote)
{
    if (so->so_filt == NULL)
        return 0;

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_accept) {
            // Retain the filter entry and release the socket filter lock
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(sock_filter_lock);

            // If the socket isn't already unlocked, unlock it
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            error = entry->sfe_filter->sf_filter.sf_accept(
                entry->sfe_cookie, head, so, local, remote);

            // Take the socket filter lock again and release the entry
            lck_rw_lock_shared(sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}
__private_extern__ int
sflt_getsockname(struct socket *so, struct sockaddr **local)
{
    if (so->so_filt == NULL)
        return 0;

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_getsockname) {
            // Retain the filter entry and release the socket filter lock
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(sock_filter_lock);

            // If the socket isn't already unlocked, unlock it
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            error = entry->sfe_filter->sf_filter.sf_getsockname(
                entry->sfe_cookie, so, local);

            // Take the socket filter lock again and release the entry
            lck_rw_lock_shared(sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}
__private_extern__ int
sflt_getpeername(struct socket *so, struct sockaddr **remote)
{
    if (so->so_filt == NULL)
        return 0;

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_getpeername) {
            // Retain the filter entry and release the socket filter lock
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(sock_filter_lock);

            // If the socket isn't already unlocked, unlock it
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            error = entry->sfe_filter->sf_filter.sf_getpeername(
                entry->sfe_cookie, so, remote);

            // Take the socket filter lock again and release the entry
            lck_rw_lock_shared(sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}
__private_extern__ int
sflt_connectin(struct socket *so, const struct sockaddr *remote)
{
    if (so->so_filt == NULL)
        return 0;

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_connect_in) {
            // Retain the filter entry and release the socket filter lock
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(sock_filter_lock);

            // If the socket isn't already unlocked, unlock it
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            error = entry->sfe_filter->sf_filter.sf_connect_in(
                entry->sfe_cookie, so, remote);

            // Take the socket filter lock again and release the entry
            lck_rw_lock_shared(sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}
__private_extern__ int
sflt_connectout(struct socket *so, const struct sockaddr *nam)
{
    if (so->so_filt == NULL)
        return 0;

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_connect_out) {
            // Retain the filter entry and release the socket filter lock
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(sock_filter_lock);

            // If the socket isn't already unlocked, unlock it
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            error = entry->sfe_filter->sf_filter.sf_connect_out(
                entry->sfe_cookie, so, nam);

            // Take the socket filter lock again and release the entry
            lck_rw_lock_shared(sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}
__private_extern__ int
sflt_setsockopt(struct socket *so, struct sockopt *sopt)
{
    if (so->so_filt == NULL)
        return 0;

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_setoption) {
            // Retain the filter entry and release the socket filter lock
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(sock_filter_lock);

            // If the socket isn't already unlocked, unlock it
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            error = entry->sfe_filter->sf_filter.sf_setoption(
                entry->sfe_cookie, so, sopt);

            // Take the socket filter lock again and release the entry
            lck_rw_lock_shared(sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}
__private_extern__ int
sflt_getsockopt(struct socket *so, struct sockopt *sopt)
{
    if (so->so_filt == NULL)
        return 0;

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_getoption) {
            // Retain the filter entry and release the socket filter lock
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(sock_filter_lock);

            // If the socket isn't already unlocked, unlock it
            if (unlocked == 0) {
                socket_unlock(so, 0);
                unlocked = 1;
            }

            error = entry->sfe_filter->sf_filter.sf_getoption(
                entry->sfe_cookie, so, sopt);

            // Take the socket filter lock again and release the entry
            lck_rw_lock_shared(sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}
__private_extern__ int
sflt_data_out(struct socket *so, const struct sockaddr *to,
    mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags)
{
    if (so->so_filt == NULL)
        return 0;

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int setsendthread = 0;
    int error = 0;

    lck_rw_lock_shared(sock_filter_lock);
    for (entry = so->so_filt; entry && error == 0;
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_data_out) {
            // Retain the filter entry and release the socket filter lock
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(sock_filter_lock);

            // If the socket isn't already unlocked, unlock it
            if (unlocked == 0) {
                if (so->so_send_filt_thread == NULL) {
                    setsendthread = 1;
                    so->so_send_filt_thread = current_thread();
                }
                socket_unlock(so, 0);
                unlocked = 1;
            }

            error = entry->sfe_filter->sf_filter.sf_data_out(
                entry->sfe_cookie, so, to, data, control, flags);

            // Take the socket filter lock again and release the entry
            lck_rw_lock_shared(sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
        if (setsendthread)
            so->so_send_filt_thread = NULL;
    }

    return error;
}
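/*
 * The so_send_filt_thread bookkeeping above records which thread is
 * currently inside an sf_data_out callback; this is presumably what
 * lets a filter that re-injects traffic on its own send path (for
 * example via sock_inject_data_out below) avoid being re-filtered and
 * deadlocking against itself.
 */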
__private_extern__ int
sflt_data_in(struct socket *so, const struct sockaddr *from,
    mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags)
{
    if (so->so_filt == NULL)
        return 0;

    struct socket_filter_entry *entry;
    int unlocked = 0;
    int error = 0;

    lck_rw_lock_shared(sock_filter_lock);
    for (entry = so->so_filt; entry && (error == 0);
        entry = entry->sfe_next_onsocket) {
        if ((entry->sfe_flags & SFEF_ATTACHED) &&
            entry->sfe_filter->sf_filter.sf_data_in) {
            // Retain the filter entry and release the socket filter lock
            sflt_entry_retain(entry);
            lck_rw_unlock_shared(sock_filter_lock);

            // If the socket isn't already unlocked, unlock it
            if (unlocked == 0) {
                unlocked = 1;
                socket_unlock(so, 0);
            }

            error = entry->sfe_filter->sf_filter.sf_data_in(
                entry->sfe_cookie, so, from, data, control, flags);

            // Take the socket filter lock again and release the entry
            lck_rw_lock_shared(sock_filter_lock);
            sflt_entry_release(entry);
        }
    }
    lck_rw_unlock_shared(sock_filter_lock);

    if (unlocked) {
        socket_lock(so, 0);
    }

    return error;
}
#pragma mark -- KPI --

errno_t
sflt_attach(socket_t socket, sflt_handle handle)
{
    socket_lock(socket, 1);
    errno_t result = sflt_attach_internal(socket, handle);
    socket_unlock(socket, 1);
    return result;
}
errno_t
sflt_detach(socket_t socket, sflt_handle handle)
{
    struct socket_filter_entry *entry;
    errno_t result = 0;

    if (socket == NULL || handle == 0)
        return EINVAL;

    lck_rw_lock_exclusive(sock_filter_lock);
    for (entry = socket->so_filt; entry;
        entry = entry->sfe_next_onsocket) {
        if (entry->sfe_filter->sf_filter.sf_handle == handle &&
            (entry->sfe_flags & SFEF_ATTACHED) != 0) {
            break;
        }
    }

    if (entry != NULL) {
        sflt_detach_locked(entry);
    }
    lck_rw_unlock_exclusive(sock_filter_lock);

    return result;
}
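/*
 * A minimal usage sketch (illustrative only; the myfilt_* names and
 * handle value are hypothetical): a filter that was registered without
 * SFLT_GLOBAL can be attached to and detached from an individual
 * socket by its handle.
 *
 *	#include <sys/kpi_socketfilter.h>
 *
 *	#define MYFILT_HANDLE 0x464c5401	// hypothetical registered handle
 *
 *	static errno_t
 *	myfilt_attach_to(socket_t so)
 *	{
 *		// The filter's sf_attach callback runs as part of this call.
 *		return sflt_attach(so, MYFILT_HANDLE);
 *	}
 *
 *	static errno_t
 *	myfilt_detach_from(socket_t so)
 *	{
 *		// Marks the entry detached; sf_detach runs when the last
 *		// reference on the entry is released.
 *		return sflt_detach(so, MYFILT_HANDLE);
 *	}
 */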
errno_t
sflt_register(
    const struct sflt_filter    *filter,
    int                         domain,
    int                         type,
    int                         protocol)
{
    struct socket_filter *sock_filt = NULL;
    struct socket_filter *match = NULL;
    int error = 0;
    struct protosw *pr = pffindproto(domain, protocol, type);
    unsigned int len;

    if (pr == NULL)
        return ENOENT;

    if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
        filter->sf_handle == 0 || filter->sf_name == NULL)
        return EINVAL;

    /* Allocate the socket filter */
    MALLOC(sock_filt, struct socket_filter *, sizeof (*sock_filt),
        M_IFADDR, M_WAITOK);
    if (sock_filt == NULL) {
        return ENOBUFS;
    }

    bzero(sock_filt, sizeof (*sock_filt));

    /* Legacy sflt_filter length; current structure minus extended */
    len = sizeof (*filter) - sizeof (struct sflt_filter_ext);

    /*
     * Include extended fields if filter defines SFLT_EXTENDED.
     * We've zeroed out our internal sflt_filter placeholder,
     * so any unused portion would have been taken care of.
     */
    if (filter->sf_flags & SFLT_EXTENDED) {
        unsigned int ext_len = filter->sf_len;

        if (ext_len > sizeof (struct sflt_filter_ext))
            ext_len = sizeof (struct sflt_filter_ext);

        len += ext_len;
    }
    bcopy(filter, &sock_filt->sf_filter, len);

    lck_rw_lock_exclusive(sock_filter_lock);
    /* Look for an existing entry */
    TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
        if (match->sf_filter.sf_handle ==
            sock_filt->sf_filter.sf_handle) {
            break;
        }
    }

    /* Add the entry only if there was no existing entry */
    if (match == NULL) {
        TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
        if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
            TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
                sf_protosw_next);
            sock_filt->sf_proto = pr;
        }
        sflt_retain_locked(sock_filt);
    }
    lck_rw_unlock_exclusive(sock_filter_lock);

    if (match != NULL) {
        FREE(sock_filt, M_IFADDR);
        return EEXIST;
    }

    return error;
}
errno_t
sflt_unregister(sflt_handle handle)
{
    struct socket_filter *filter;
    lck_rw_lock_exclusive(sock_filter_lock);

    /* Find the entry by the handle */
    TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
        if (filter->sf_filter.sf_handle == handle)
            break;
    }

    if (filter) {
        // Remove it from the global list
        TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

        // Remove it from the protosw list
        if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
            TAILQ_REMOVE(&filter->sf_proto->pr_filter_head,
                filter, sf_protosw_next);
        }

        // Detach from any sockets
        struct socket_filter_entry *entry = NULL;

        for (entry = filter->sf_entry_head; entry;
            entry = entry->sfe_next_onfilter) {
            sflt_detach_locked(entry);
        }

        // Release the filter
        sflt_release_locked(filter);
    }

    lck_rw_unlock_exclusive(sock_filter_lock);

    if (filter == NULL)
        return ENOENT;

    return 0;
}
errno_t
sock_inject_data_in(
    socket_t                so,
    const struct sockaddr   *from,
    mbuf_t                  data,
    mbuf_t                  control,
    sflt_data_flag_t        flags)
{
    int error = 0;

    if (so == NULL || data == NULL)
        return EINVAL;

    if (flags & sock_data_filt_flag_oob) {
        return ENOTSUP;
    }

    socket_lock(so, 1);

    if (from) {
        if (sbappendaddr(&so->so_rcv,
            (struct sockaddr *)(uintptr_t)from, data, control, NULL))
            sorwakeup(so);
        goto done;
    }

    if (control) {
        if (sbappendcontrol(&so->so_rcv, data, control, NULL))
            sorwakeup(so);
        goto done;
    }

    if (flags & sock_data_filt_flag_record) {
        if (control || from) {
            error = EINVAL;
            goto done;
        }
        if (sbappendrecord(&so->so_rcv, (struct mbuf *)data))
            sorwakeup(so);
        goto done;
    }

    if (sbappend(&so->so_rcv, data))
        sorwakeup(so);
done:
    socket_unlock(so, 1);
    return error;
}
errno_t
sock_inject_data_out(
    socket_t                so,
    const struct sockaddr   *to,
    mbuf_t                  data,
    mbuf_t                  control,
    sflt_data_flag_t        flags)
{
    int sosendflags = 0;

    if (flags & sock_data_filt_flag_oob)
        sosendflags = MSG_OOB;
    return sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
        data, control, sosendflags);
}
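/*
 * A re-injection sketch (illustrative; myfilt_data_in is hypothetical):
 * a data filter may swallow traffic by returning EJUSTRETURN, taking
 * ownership of the mbufs, and later hand them back to the stack with
 * the inject calls above -- sock_inject_data_in() for swallowed
 * inbound data, sock_inject_data_out() for swallowed outbound data.
 *
 *	static errno_t
 *	myfilt_data_in(void *cookie, socket_t so, const struct sockaddr *from,
 *	    mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags)
 *	{
 *		// Inspect *data here; to defer delivery, queue the mbufs,
 *		// return EJUSTRETURN, and later call:
 *		//	sock_inject_data_in(so, from, m, ctl, flags);
 *		return 0;	// pass through unchanged
 *	}
 */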
sockopt_dir
sockopt_direction(sockopt_t sopt)
{
    return (sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set;
}

int
sockopt_level(sockopt_t sopt)
{
    return sopt->sopt_level;
}

int
sockopt_name(sockopt_t sopt)
{
    return sopt->sopt_name;
}

size_t
sockopt_valsize(sockopt_t sopt)
{
    return sopt->sopt_valsize;
}

errno_t
sockopt_copyin(sockopt_t sopt, void *data, size_t len)
{
    return sooptcopyin(sopt, data, len, len);
}

errno_t
sockopt_copyout(sockopt_t sopt, void *data, size_t len)
{
    return sooptcopyout(sopt, data, len);
}
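/*
 * An accessor-usage sketch (illustrative; myfilt_setoption is
 * hypothetical): inside an sf_setoption callback the sockopt_t is
 * opaque, so the accessors above are the supported way to examine it.
 *
 *	static errno_t
 *	myfilt_setoption(void *cookie, socket_t so, sockopt_t opt)
 *	{
 *		if (sockopt_level(opt) == SOL_SOCKET &&
 *		    sockopt_name(opt) == SO_KEEPALIVE &&
 *		    sockopt_valsize(opt) >= sizeof (int)) {
 *			int val;
 *			// Copies the caller's option value into val.
 *			if (sockopt_copyin(opt, &val, sizeof (val)) == 0) {
 *				// inspect, or return an error to veto
 *			}
 *		}
 *		return 0;	// 0 lets the setsockopt proceed
 *	}
 */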