/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>

#include <kern/locks.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <net/kext_net.h>

#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>

#define	SFEF_ATTACHED	0x1	/* SFE is on socket list */
#define	SFEF_NODETACH	0x2	/* Detach should not be called */
#define	SFEF_NOSOCKET	0x4	/* Socket is gone */

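/*
 * Note: SFEF_NODETACH serves double duty: it is set immediately before a
 * filter's sf_detach is invoked (so detach is only ever called once), and
 * it is also set when an sf_attach callback fails, so a filter never sees
 * a detach for an attach it rejected.
 */
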
struct socket_filter_entry {
	struct socket_filter_entry	*sfe_next_onsocket;
	struct socket_filter_entry	*sfe_next_onfilter;
	struct socket_filter_entry	*sfe_next_oncleanup;

	struct socket_filter		*sfe_filter;
	struct socket			*sfe_socket;
	void				*sfe_cookie;

	uint32_t			sfe_flags;
	int32_t				sfe_refcount;
};

struct socket_filter {
	TAILQ_ENTRY(socket_filter)	sf_protosw_next;
	TAILQ_ENTRY(socket_filter)	sf_global_next;
	struct socket_filter_entry	*sf_entry_head;

	struct protosw			*sf_proto;
	struct sflt_filter		sf_filter;
	u_int32_t			sf_refcount;
};

TAILQ_HEAD(socket_filter_list, socket_filter);

static struct socket_filter_list	sock_filter_head;
static lck_rw_t				*sock_filter_lock = NULL;
static lck_mtx_t			*sock_filter_cleanup_lock = NULL;
static struct socket_filter_entry	*sock_filter_cleanup_entries = NULL;
static thread_t				sock_filter_cleanup_thread = NULL;

static void	sflt_cleanup_thread(void *, wait_result_t);
static void	sflt_detach_locked(struct socket_filter_entry *entry);

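/*
 * Locking and lifetime overview: sock_filter_lock (a rw lock) protects the
 * global filter list, the per-protosw filter lists, and every socket's
 * chain of socket_filter_entry structures. Filters are refcounted via
 * sf_refcount under the exclusive lock; entries are refcounted atomically
 * via sfe_refcount so the hook paths below can run holding the lock only
 * shared. An entry whose last reference drops is queued on
 * sock_filter_cleanup_entries and reclaimed by sflt_cleanup_thread.
 */
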
#pragma mark -- Internal State Management --

__private_extern__ void
sflt_init(void)
{
	lck_grp_attr_t	*grp_attrib = 0;
	lck_attr_t	*lck_attrib = 0;
	lck_grp_t	*lck_group = 0;

	TAILQ_INIT(&sock_filter_head);

	/* Allocate a rw lock */
	grp_attrib = lck_grp_attr_alloc_init();
	lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
	lck_grp_attr_free(grp_attrib);
	lck_attrib = lck_attr_alloc_init();
	sock_filter_lock = lck_rw_alloc_init(lck_group, lck_attrib);
	sock_filter_cleanup_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
	lck_grp_free(lck_group);
	lck_attr_free(lck_attrib);
}

static void
sflt_retain_locked(
	struct socket_filter	*filter)
{
	filter->sf_refcount++;
}

static void
sflt_release_locked(
	struct socket_filter	*filter)
{
	filter->sf_refcount--;
	if (filter->sf_refcount == 0) {
		// Call the unregistered function
		if (filter->sf_filter.sf_unregistered) {
			lck_rw_unlock_exclusive(sock_filter_lock);
			filter->sf_filter.sf_unregistered(
			    filter->sf_filter.sf_handle);
			lck_rw_lock_exclusive(sock_filter_lock);
		}

		FREE(filter, M_IFADDR);
	}
}

static void
sflt_entry_retain(
	struct socket_filter_entry	*entry)
{
	if (OSIncrementAtomic(&entry->sfe_refcount) <= 0)
		panic("sflt_entry_retain - sfe_refcount <= 0\n");
}

static void
sflt_entry_release(
	struct socket_filter_entry	*entry)
{
	SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);
	if (old == 1) {
		// That was the last reference

		// Take the cleanup lock
		lck_mtx_lock(sock_filter_cleanup_lock);

		// Put this item on the cleanup list
		entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = entry;

		// If the item is the first item in the list
		if (entry->sfe_next_oncleanup == NULL) {
			if (sock_filter_cleanup_thread == NULL) {
				// Create a thread
				kernel_thread_start(sflt_cleanup_thread,
				    NULL, &sock_filter_cleanup_thread);
			} else {
				// Wakeup the thread
				wakeup(&sock_filter_cleanup_entries);
			}
		}

		// Drop the cleanup lock
		lck_mtx_unlock(sock_filter_cleanup_lock);
	} else if (old <= 0) {
		panic("sflt_entry_release - sfe_refcount (%d) <= 0\n",
		    (int)old);
	}
}

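/*
 * Note: the final entry release is handed off to the cleanup thread rather
 * than freed inline because it can occur on a hook path that still holds
 * sock_filter_lock shared or the socket lock; reclaiming the entry needs
 * the exclusive lock and may have to call sf_detach, neither of which is
 * safe in that context.
 */
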
static void
sflt_cleanup_thread(
	__unused void *blah,
	__unused wait_result_t blah2)
{
	while (1) {
		lck_mtx_lock(sock_filter_cleanup_lock);
		while (sock_filter_cleanup_entries == NULL) {
			// Sleep until we've got something better to do
			msleep(&sock_filter_cleanup_entries,
			    sock_filter_cleanup_lock, PWAIT,
			    "sflt_cleanup", NULL);
		}

		// Pull the current list of dead items
		struct socket_filter_entry *dead = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = NULL;

		// Drop the lock
		lck_mtx_unlock(sock_filter_cleanup_lock);

		// Take the socket filter lock
		lck_rw_lock_exclusive(sock_filter_lock);

		// Cleanup every dead item
		struct socket_filter_entry *entry;
		for (entry = dead; entry; entry = dead) {
			struct socket_filter_entry **nextpp;

			dead = entry->sfe_next_oncleanup;

			// Call the detach function if necessary - drop the lock
			if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
			    entry->sfe_filter->sf_filter.sf_detach) {
				entry->sfe_flags |= SFEF_NODETACH;
				lck_rw_unlock_exclusive(sock_filter_lock);

				// Warning - passing a potentially dead socket may be bad
				entry->sfe_filter->sf_filter.
				    sf_detach(entry->sfe_cookie, entry->sfe_socket);

				lck_rw_lock_exclusive(sock_filter_lock);
			}

			// Pull entry off the socket list -- if the socket still exists
			if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
				for (nextpp = &entry->sfe_socket->so_filt; *nextpp;
				    nextpp = &(*nextpp)->sfe_next_onsocket) {
					if (*nextpp == entry) {
						*nextpp = entry->sfe_next_onsocket;
						break;
					}
				}
			}

			// Pull entry off the filter list
			for (nextpp = &entry->sfe_filter->sf_entry_head; *nextpp;
			    nextpp = &(*nextpp)->sfe_next_onfilter) {
				if (*nextpp == entry) {
					*nextpp = entry->sfe_next_onfilter;
					break;
				}
			}

			// Release the filter -- may drop lock, but that's okay
			sflt_release_locked(entry->sfe_filter);
			entry->sfe_socket = NULL;
			entry->sfe_filter = NULL;
			FREE(entry, M_IFADDR);
		}

		// Drop the socket filter lock
		lck_rw_unlock_exclusive(sock_filter_lock);
	}
}

static errno_t
sflt_attach_locked(
	struct socket		*so,
	struct socket_filter	*filter,
	int			socklocked)
{
	int error = 0;
	struct socket_filter_entry *entry = NULL;

	if (filter == NULL)
		return ENOENT;

	for (entry = so->so_filt; entry; entry = entry->sfe_next_onfilter)
		if (entry->sfe_filter->sf_filter.sf_handle ==
		    filter->sf_filter.sf_handle)
			return EEXIST;

	/* allocate the socket filter entry */
	MALLOC(entry, struct socket_filter_entry *, sizeof(*entry), M_IFADDR,
	    M_WAITOK);
	if (entry == NULL)
		return ENOMEM;

	/* Initialize the socket filter entry */
	entry->sfe_cookie = NULL;
	entry->sfe_flags = SFEF_ATTACHED;
	entry->sfe_refcount = 1; // corresponds to SFEF_ATTACHED flag set

	/* Put the entry in the filter list */
	sflt_retain_locked(filter);
	entry->sfe_filter = filter;
	entry->sfe_next_onfilter = filter->sf_entry_head;
	filter->sf_entry_head = entry;

	/* Put the entry on the socket filter list */
	entry->sfe_socket = so;
	entry->sfe_next_onsocket = so->so_filt;
	so->so_filt = entry;

	if (entry->sfe_filter->sf_filter.sf_attach) {
		// Retain the entry while we call attach
		sflt_entry_retain(entry);

		// Release the filter lock -- callers must be aware we will do this
		lck_rw_unlock_exclusive(sock_filter_lock);

		// Unlock the socket
		if (socklocked)
			socket_unlock(so, 0);

		// It's finally safe to call the filter function
		error = entry->sfe_filter->sf_filter.sf_attach(
		    &entry->sfe_cookie, so);

		// Lock the socket again
		if (socklocked)
			socket_lock(so, 0);

		// Lock the filters again
		lck_rw_lock_exclusive(sock_filter_lock);

		// If the attach function returns an error, this filter must be detached
		if (error) {
			entry->sfe_flags |= SFEF_NODETACH; // don't call sf_detach
			sflt_detach_locked(entry);
		}

		// Release the retain we held through the attach call
		sflt_entry_release(entry);
	}

	return error;
}

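/*
 * Note: sflt_attach_locked must be entered with sock_filter_lock held
 * exclusively, and it may temporarily drop both that lock and the socket
 * lock around the filter's sf_attach callback. socklocked is 1 when the
 * caller holds the socket lock (the sflt_attach KPI path) and 0 from
 * sflt_initsock, where the socket is presumed not yet visible or locked.
 */
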
errno_t
sflt_attach_internal(
	socket_t	socket,
	sflt_handle	handle)
{
	errno_t result = EINVAL;

	if (socket == NULL || handle == 0)
		return EINVAL;

	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter *filter = NULL;
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle) break;
	}

	if (filter) {
		result = sflt_attach_locked(socket, filter, 1);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	return result;
}

static void
sflt_detach_locked(
	struct socket_filter_entry	*entry)
{
	if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
		entry->sfe_flags &= ~SFEF_ATTACHED;
		sflt_entry_release(entry);
	}
}

#pragma mark -- Socket Layer Hooks --

__private_extern__ void
sflt_initsock(
	struct socket *so)
{
	struct protosw *proto = so->so_proto;

	lck_rw_lock_shared(sock_filter_lock);
	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		// Promote lock to exclusive
		if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock))
			lck_rw_lock_exclusive(sock_filter_lock);

		// Warning: A filter unregistering will be pulled out of the list.
		// This could happen while we drop the lock in sflt_attach_locked
		// or sflt_release_locked. For this reason we retain a reference
		// on the filter (or next_filter) while calling this function.
		//
		// This protects us from a panic, but it could result in a
		// socket being created without all of the global filters if
		// we're attaching a filter as it is removed, if that's possible.
		struct socket_filter *filter =
		    TAILQ_FIRST(&proto->pr_filter_head);
		sflt_retain_locked(filter);

		while (filter) {
			struct socket_filter *filter_next;

			// Warning: sflt_attach_locked will drop the lock
			sflt_attach_locked(so, filter, 0);

			filter_next = TAILQ_NEXT(filter, sf_protosw_next);
			if (filter_next)
				sflt_retain_locked(filter_next);

			// Warning: sflt_release_locked may remove the filter from the queue
			sflt_release_locked(filter);
			filter = filter_next;
		}
	}
	lck_rw_done(sock_filter_lock);
}

/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.
 */
__private_extern__ void
sflt_termsock(
	struct socket *so)
{
	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter_entry *entry;

	while ((entry = so->so_filt) != NULL) {
		// Pull filter off the socket
		so->so_filt = entry->sfe_next_onsocket;
		entry->sfe_flags |= SFEF_NOSOCKET;

		// Detach
		sflt_detach_locked(entry);

		// On sflt_termsock, we can't return until the detach function
		// has been called. Call the detach function - this is gross
		// because the socket filter entry could be freed when we drop
		// the lock, so we make copies on the stack and retain
		// everything we need before dropping the lock.
		if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
		    entry->sfe_filter->sf_filter.sf_detach) {
			void *sfe_cookie = entry->sfe_cookie;
			struct socket_filter *sfe_filter = entry->sfe_filter;

			// Retain the socket filter
			sflt_retain_locked(sfe_filter);

			// Mark that we've called the detach function
			entry->sfe_flags |= SFEF_NODETACH;

			// Drop the lock around the call to the detach function
			lck_rw_unlock_exclusive(sock_filter_lock);
			sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
			lck_rw_lock_exclusive(sock_filter_lock);

			// Release the filter
			sflt_release_locked(sfe_filter);
		}
	}

	lck_rw_unlock_exclusive(sock_filter_lock);
}

static void
sflt_notify_internal(
	struct socket	*so,
	sflt_event_t	event,
	void		*param,
	sflt_handle	handle)
{
	if (so->so_filt == NULL) return;

	struct socket_filter_entry	*entry;
	int				unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
		    && entry->sfe_filter->sf_filter.sf_notify &&
		    ((handle && entry->sfe_filter->sf_filter.sf_handle !=
		    handle) || !handle)) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			// Finally call the filter
			entry->sfe_filter->sf_filter.
			    sf_notify(entry->sfe_cookie, so, event, param);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked != 0) {
		socket_lock(so, 0);
	}
}

__private_extern__ void
sflt_notify(
	struct socket	*so,
	sflt_event_t	event,
	void		*param)
{
	sflt_notify_internal(so, event, param, 0);
}

static void
sflt_notify_after_register(
	struct socket	*so,
	sflt_event_t	event,
	sflt_handle	handle)
{
	sflt_notify_internal(so, event, NULL, handle);
}

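/*
 * Each hook below follows the same pattern as sflt_notify_internal above:
 * walk the socket's entry chain under the shared lock; for every attached
 * entry implementing the relevant callback, retain the entry, drop the
 * filter lock (and the socket lock, the first time through), call the
 * filter, then retake the shared lock and release the entry. A non-zero
 * error returned by a filter terminates the walk.
 */
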
__private_extern__ int
sflt_ioctl(
	struct socket	*so,
	u_long		cmd,
	caddr_t		data)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry	*entry;
	int				unlocked = 0;
	int				error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
		    && entry->sfe_filter->sf_filter.sf_ioctl) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
			    sf_ioctl(entry->sfe_cookie, so, cmd, data);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_bind(
	struct socket		*so,
	const struct sockaddr	*nam)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry	*entry;
	int				unlocked = 0;
	int				error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
		    && entry->sfe_filter->sf_filter.sf_bind) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
			    sf_bind(entry->sfe_cookie, so, nam);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_listen(
	struct socket	*so)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry	*entry;
	int				unlocked = 0;
	int				error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
		    && entry->sfe_filter->sf_filter.sf_listen) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
			    sf_listen(entry->sfe_cookie, so);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_accept(
	struct socket		*head,
	struct socket		*so,
	const struct sockaddr	*local,
	const struct sockaddr	*remote)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry	*entry;
	int				unlocked = 0;
	int				error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
		    && entry->sfe_filter->sf_filter.sf_accept) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
			    sf_accept(entry->sfe_cookie, head, so, local, remote);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_getsockname(
	struct socket	*so,
	struct sockaddr	**local)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry	*entry;
	int				unlocked = 0;
	int				error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
		    && entry->sfe_filter->sf_filter.sf_getsockname) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
			    sf_getsockname(entry->sfe_cookie, so, local);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_getpeername(
	struct socket	*so,
	struct sockaddr	**remote)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry	*entry;
	int				unlocked = 0;
	int				error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
		    && entry->sfe_filter->sf_filter.sf_getpeername) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
			    sf_getpeername(entry->sfe_cookie, so, remote);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_connectin(
	struct socket		*so,
	const struct sockaddr	*remote)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry	*entry;
	int				unlocked = 0;
	int				error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
		    && entry->sfe_filter->sf_filter.sf_connect_in) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
			    sf_connect_in(entry->sfe_cookie, so, remote);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_connectout(
	struct socket		*so,
	const struct sockaddr	*nam)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry	*entry;
	int				unlocked = 0;
	int				error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
		    && entry->sfe_filter->sf_filter.sf_connect_out) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
			    sf_connect_out(entry->sfe_cookie, so, nam);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_setsockopt(
	struct socket	*so,
	struct sockopt	*sopt)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry	*entry;
	int				unlocked = 0;
	int				error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
		    && entry->sfe_filter->sf_filter.sf_setoption) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
			    sf_setoption(entry->sfe_cookie, so, sopt);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_getsockopt(
	struct socket	*so,
	struct sockopt	*sopt)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry	*entry;
	int				unlocked = 0;
	int				error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
		    && entry->sfe_filter->sf_filter.sf_getoption) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
			    sf_getoption(entry->sfe_cookie, so, sopt);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_data_out(
	struct socket		*so,
	const struct sockaddr	*to,
	mbuf_t			*data,
	mbuf_t			*control,
	sflt_data_flag_t	flags)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry	*entry;
	int				unlocked = 0;
	int				setsendthread = 0;
	int				error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
		    && entry->sfe_filter->sf_filter.sf_data_out) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				if (so->so_send_filt_thread == NULL) {
					setsendthread = 1;
					so->so_send_filt_thread =
					    current_thread();
				}
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
			    sf_data_out(entry->sfe_cookie, so, to, data,
			    control, flags);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
		if (setsendthread) so->so_send_filt_thread = NULL;
	}

	return error;
}

__private_extern__ int
sflt_data_in(
	struct socket		*so,
	const struct sockaddr	*from,
	mbuf_t			*data,
	mbuf_t			*control,
	sflt_data_flag_t	flags)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry	*entry;
	int				error = 0;
	int				unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);

	for (entry = so->so_filt; entry && (error == 0);
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_in) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.sf_data_in(
			    entry->sfe_cookie, so, from, data, control, flags);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

#pragma mark -- KPI --

errno_t
sflt_attach(
	socket_t	socket,
	sflt_handle	handle)
{
	socket_lock(socket, 1);
	errno_t result = sflt_attach_internal(socket, handle);
	socket_unlock(socket, 1);
	return result;
}

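/*
 * Illustrative usage (a sketch, not part of this file): a kext might
 * register a global TCP filter roughly as follows; the handle, name, and
 * callbacks are hypothetical.
 *
 *	static struct sflt_filter my_filter = {
 *		.sf_handle	= 0x6d666c74,		// hypothetical handle
 *		.sf_flags	= SFLT_GLOBAL,
 *		.sf_name	= "com.example.myfilter",
 *		.sf_attach	= my_attach,		// hypothetical callback
 *		.sf_detach	= my_detach,		// hypothetical callback
 *	};
 *
 *	errno_t err = sflt_register(&my_filter, PF_INET, SOCK_STREAM,
 *	    IPPROTO_TCP);
 *	...
 *	err = sflt_unregister(0x6d666c74);
 *
 * sf_attach, sf_detach, sf_handle, and sf_name are mandatory; see the
 * validation at the top of sflt_register below.
 */
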
errno_t
sflt_detach(
	socket_t	socket,
	sflt_handle	handle)
{
	struct socket_filter_entry	*entry;
	errno_t				result = 0;

	if (socket == NULL || handle == 0)
		return EINVAL;

	lck_rw_lock_exclusive(sock_filter_lock);
	for (entry = socket->so_filt; entry;
	    entry = entry->sfe_next_onsocket) {
		if (entry->sfe_filter->sf_filter.sf_handle == handle &&
		    (entry->sfe_flags & SFEF_ATTACHED) != 0) {
			break;
		}
	}

	if (entry != NULL) {
		sflt_detach_locked(entry);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	return result;
}

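/*
 * sflt_register registers a filter against the protocol located by
 * pffindproto(domain, protocol, type). If the filter sets
 * SFLT_EXTENDED_REGISTRY, the registration also walks the existing TCP or
 * UDP sockets, attaches the filter to each one, and replays each socket's
 * current state to it as connecting/connected/disconnecting/disconnected/
 * cantsendmore/cantrecvmore events.
 */
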
struct solist {
	struct solist	*next;
	struct socket	*so;
};

errno_t
sflt_register(
	const struct sflt_filter	*filter,
	int				domain,
	int				type,
	int				protocol)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr = pffindproto(domain, protocol, type);
	unsigned int len;
	struct socket *so;
	struct inpcb *inp;
	struct solist *solisthead = NULL, *solist = NULL;

	if (pr == NULL)
		return ENOENT;

	if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
	    filter->sf_handle == 0 || filter->sf_name == NULL)
		return EINVAL;

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter *, sizeof (*sock_filt),
	    M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return ENOBUFS;
	}

	bzero(sock_filt, sizeof (*sock_filt));

	/* Legacy sflt_filter length; current structure minus extended */
	len = sizeof (*filter) - sizeof (struct sflt_filter_ext);

	/*
	 * Include extended fields if filter defines SFLT_EXTENDED.
	 * We've zeroed out our internal sflt_filter placeholder,
	 * so any unused portion would have been taken care of.
	 */
	if (filter->sf_flags & SFLT_EXTENDED) {
		unsigned int ext_len = filter->sf_len;

		if (ext_len > sizeof (struct sflt_filter_ext))
			ext_len = sizeof (struct sflt_filter_ext);

		len += ext_len;
	}
	bcopy(filter, &sock_filt->sf_filter, len);

	lck_rw_lock_exclusive(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle ==
		    sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
			    sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
		sflt_retain_locked(sock_filt);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	if (match != NULL) {
		FREE(sock_filt, M_IFADDR);
		return EEXIST;
	}

	if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY))
		return 0;

	/*
	 * Setup the filter on the TCP and UDP sockets already created.
	 */
#define	SOLIST_ADD(_so) do {						\
	solist->next = solisthead;					\
	sock_retain((_so));						\
	solist->so = (_so);						\
	solisthead = solist;						\
} while (0)
	if (protocol == IPPROTO_TCP) {
		lck_rw_lock_shared(tcbinfo.mtx);
		LIST_FOREACH(inp, tcbinfo.listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || so->so_state & SS_DEFUNCT ||
			    so->so_state & SS_NOFDREF ||
			    !INP_CHECK_SOCKAF(so, domain) ||
			    !INP_CHECK_SOCKTYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof(*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(tcbinfo.mtx);
	} else if (protocol == IPPROTO_UDP) {
		lck_rw_lock_shared(udbinfo.mtx);
		LIST_FOREACH(inp, udbinfo.listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || so->so_state & SS_DEFUNCT ||
			    so->so_state & SS_NOFDREF ||
			    !INP_CHECK_SOCKAF(so, domain) ||
			    !INP_CHECK_SOCKTYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof(*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(udbinfo.mtx);
	}
	/* XXX it's possible to walk the raw socket list as well */
#undef SOLIST_ADD

	while (solisthead) {
		sflt_handle handle = filter->sf_handle;

		so = solisthead->so;
		socket_lock(so, 0);
		sflt_initsock(so);
		if (so->so_state & SS_ISCONNECTING)
			sflt_notify_after_register(so, sock_evt_connecting,
			    handle);
		else if (so->so_state & SS_ISCONNECTED)
			sflt_notify_after_register(so, sock_evt_connected,
			    handle);
		else if ((so->so_state &
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE)) ==
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE))
			sflt_notify_after_register(so, sock_evt_disconnecting,
			    handle);
		else if ((so->so_state &
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED)) ==
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED))
			sflt_notify_after_register(so, sock_evt_disconnected,
			    handle);
		else if (so->so_state & SS_CANTSENDMORE)
			sflt_notify_after_register(so, sock_evt_cantsendmore,
			    handle);
		else if (so->so_state & SS_CANTRCVMORE)
			sflt_notify_after_register(so, sock_evt_cantrecvmore,
			    handle);
		socket_unlock(so, 0);
		/* XXX no easy way to post the sock_evt_closing event */
		sock_release(so);
		solist = solisthead;
		solisthead = solisthead->next;
		FREE(solist, M_IFADDR);
	}

	return error;
}

errno_t
sflt_unregister(
	sflt_handle	handle)
{
	struct socket_filter *filter;
	lck_rw_lock_exclusive(sock_filter_lock);

	/* Find the entry by the handle */
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		// Remove it from the global list
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

		// Remove it from the protosw list
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head,
			    filter, sf_protosw_next);
		}

		// Detach from any sockets
		struct socket_filter_entry *entry = NULL;

		for (entry = filter->sf_entry_head; entry;
		    entry = entry->sfe_next_onfilter) {
			sflt_detach_locked(entry);
		}

		// Release the filter
		sflt_release_locked(filter);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	if (filter == NULL)
		return ENOENT;

	return 0;
}

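/*
 * The injection KPIs below are for filters that swallowed data in their
 * sf_data_in/sf_data_out callbacks and later want to feed it back to the
 * socket: sock_inject_data_in appends directly to the receive buffer,
 * while sock_inject_data_out resends through sosend().
 */
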
errno_t
sock_inject_data_in(
	socket_t		so,
	const struct sockaddr	*from,
	mbuf_t			data,
	mbuf_t			control,
	sflt_data_flag_t	flags)
{
	int error = 0;
	if (so == NULL || data == NULL) return EINVAL;

	if (flags & sock_data_filt_flag_oob) {
		return ENOTSUP;
	}

	socket_lock(so, 1);

	if (from) {
		if (sbappendaddr(&so->so_rcv,
		    (struct sockaddr *)(uintptr_t)from, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf *)data))
			sorwakeup(so);
		goto done;
	}

	if (sbappend(&so->so_rcv, data))
		sorwakeup(so);
done:
	socket_unlock(so, 1);
	return error;
}

errno_t
sock_inject_data_out(
	socket_t		so,
	const struct sockaddr	*to,
	mbuf_t			data,
	mbuf_t			control,
	sflt_data_flag_t	flags)
{
	int sosendflags = 0;
	if (flags & sock_data_filt_flag_oob) sosendflags = MSG_OOB;
	return sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
	    data, control, sosendflags);
}

sockopt_dir
sockopt_direction(
	sockopt_t	sopt)
{
	return (sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set;
}

int
sockopt_level(
	sockopt_t	sopt)
{
	return sopt->sopt_level;
}

int
sockopt_name(
	sockopt_t	sopt)
{
	return sopt->sopt_name;
}

size_t
sockopt_valsize(
	sockopt_t	sopt)
{
	return sopt->sopt_valsize;
}

errno_t
sockopt_copyin(
	sockopt_t	sopt,
	void		*data,
	size_t		len)
{
	return sooptcopyin(sopt, data, len, len);
}

errno_t
sockopt_copyout(
	sockopt_t	sopt,
	void		*data,
	size_t		len)
{
	return sooptcopyout(sopt, data, len);
}