/*
 * Copyright (c) 2003-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>

#include <kern/locks.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <net/kext_net.h>

#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#define SFEF_ATTACHED	0x1	/* SFE is on socket list */
#define SFEF_NODETACH	0x2	/* Detach should not be called */
#define SFEF_NOSOCKET	0x4	/* Socket is gone */
struct socket_filter_entry {
	struct socket_filter_entry	*sfe_next_onsocket;
	struct socket_filter_entry	*sfe_next_onfilter;
	struct socket_filter_entry	*sfe_next_oncleanup;

	struct socket_filter		*sfe_filter;
	struct socket			*sfe_socket;
	void				*sfe_cookie;

	uint32_t			sfe_flags;
	int32_t				sfe_refcount;
};
struct socket_filter {
	TAILQ_ENTRY(socket_filter)	sf_protosw_next;
	TAILQ_ENTRY(socket_filter)	sf_global_next;
	struct socket_filter_entry	*sf_entry_head;

	struct protosw			*sf_proto;
	struct sflt_filter		sf_filter;
	u_int32_t			sf_refcount;
};

TAILQ_HEAD(socket_filter_list, socket_filter);

static struct socket_filter_list	sock_filter_head;
static lck_rw_t				*sock_filter_lock = NULL;
static lck_mtx_t			*sock_filter_cleanup_lock = NULL;
static struct socket_filter_entry	*sock_filter_cleanup_entries = NULL;
static thread_t				sock_filter_cleanup_thread = NULL;

static void	sflt_cleanup_thread(void *, wait_result_t);
static void	sflt_detach_locked(struct socket_filter_entry *entry);
#pragma mark -- Internal State Management --

__private_extern__ int
sflt_permission_check(struct inpcb *inp)
{
	/*
	 * All these permissions only apply to the co-processor interface,
	 * so sockets that are not IPv6 always pass.
	 */
	if (!(inp->inp_vflag & INP_IPV6)) {
		return (0);
	}
	/* Sockets that have this entitlement bypass socket filters. */
	if (INP_INTCOPROC_ALLOWED(inp)) {
		return (1);
	}
	/* Sockets bound to a co-processor interface bypass them as well. */
	if ((inp->inp_flags & INP_BOUND_IF) &&
	    IFNET_IS_INTCOPROC(inp->inp_boundifp)) {
		return (1);
	}
	return (0);
}
__private_extern__ void
sflt_init(void)
{
	lck_grp_attr_t	*grp_attrib = NULL;
	lck_attr_t	*lck_attrib = NULL;
	lck_grp_t	*lck_group = NULL;

	TAILQ_INIT(&sock_filter_head);

	/* Allocate a rw lock */
	grp_attrib = lck_grp_attr_alloc_init();
	lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
	lck_grp_attr_free(grp_attrib);
	lck_attrib = lck_attr_alloc_init();
	sock_filter_lock = lck_rw_alloc_init(lck_group, lck_attrib);
	sock_filter_cleanup_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
	lck_grp_free(lck_group);
	lck_attr_free(lck_attrib);
}
static void
sflt_retain_locked(struct socket_filter *filter)
{
	filter->sf_refcount++;
}

static void
sflt_release_locked(struct socket_filter *filter)
{
	filter->sf_refcount--;
	if (filter->sf_refcount == 0) {
		/* Call the unregistered function */
		if (filter->sf_filter.sf_unregistered) {
			lck_rw_unlock_exclusive(sock_filter_lock);
			filter->sf_filter.sf_unregistered(
			    filter->sf_filter.sf_handle);
			lck_rw_lock_exclusive(sock_filter_lock);
		}

		/* Free the filter */
		FREE(filter, M_IFADDR);
	}
}
static void
sflt_entry_retain(struct socket_filter_entry *entry)
{
	if (OSIncrementAtomic(&entry->sfe_refcount) <= 0) {
		panic("sflt_entry_retain - sfe_refcount <= 0\n");
		/* NOTREACHED */
	}
}

static void
sflt_entry_release(struct socket_filter_entry *entry)
{
	SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);
	if (old == 1) {
		/* That was the last reference */

		/* Take the cleanup lock */
		lck_mtx_lock(sock_filter_cleanup_lock);

		/* Put this item on the cleanup list */
		entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = entry;

		/* If the item is the first item in the list */
		if (entry->sfe_next_oncleanup == NULL) {
			if (sock_filter_cleanup_thread == NULL) {
				/* Create a thread */
				kernel_thread_start(sflt_cleanup_thread,
				    NULL, &sock_filter_cleanup_thread);
			} else {
				/* Wakeup the thread */
				wakeup(&sock_filter_cleanup_entries);
			}
		}

		/* Drop the cleanup lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);
	} else if (old <= 0) {
		panic("sflt_entry_release - sfe_refcount (%d) <= 0\n",
		    (int)old);
		/* NOTREACHED */
	}
}
__attribute__((noreturn))
static void
sflt_cleanup_thread(void *blah, wait_result_t blah2)
{
#pragma unused(blah, blah2)
	while (1) {
		lck_mtx_lock(sock_filter_cleanup_lock);
		while (sock_filter_cleanup_entries == NULL) {
			/* Sleep until we've got something better to do */
			msleep(&sock_filter_cleanup_entries,
			    sock_filter_cleanup_lock, PWAIT,
			    "sflt_cleanup", NULL);
		}

		/* Pull the current list of dead items */
		struct socket_filter_entry *dead = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = NULL;

		/* Drop the cleanup lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);

		/* Take the socket filter lock */
		lck_rw_lock_exclusive(sock_filter_lock);

		/* Cleanup every dead item */
		struct socket_filter_entry *entry;
		for (entry = dead; entry; entry = dead) {
			struct socket_filter_entry **nextpp;

			dead = entry->sfe_next_oncleanup;

			/* Call detach function if necessary - drop the lock */
			if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
			    entry->sfe_filter->sf_filter.sf_detach) {
				entry->sfe_flags |= SFEF_NODETACH;
				lck_rw_unlock_exclusive(sock_filter_lock);

				/*
				 * Warning - passing a potentially
				 * dead socket may be bad
				 */
				entry->sfe_filter->sf_filter.sf_detach(
				    entry->sfe_cookie, entry->sfe_socket);

				lck_rw_lock_exclusive(sock_filter_lock);
			}

			/*
			 * Pull entry off the socket list --
			 * if the socket still exists
			 */
			if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
				for (nextpp = &entry->sfe_socket->so_filt;
				    *nextpp;
				    nextpp = &(*nextpp)->sfe_next_onsocket) {
					if (*nextpp == entry) {
						*nextpp =
						    entry->sfe_next_onsocket;
						break;
					}
				}
			}

			/* Pull entry off the filter list */
			for (nextpp = &entry->sfe_filter->sf_entry_head;
			    *nextpp; nextpp = &(*nextpp)->sfe_next_onfilter) {
				if (*nextpp == entry) {
					*nextpp = entry->sfe_next_onfilter;
					break;
				}
			}

			/*
			 * Release the filter -- may drop lock, but that's okay
			 */
			sflt_release_locked(entry->sfe_filter);
			entry->sfe_socket = NULL;
			entry->sfe_filter = NULL;
			FREE(entry, M_IFADDR);
		}

		/* Drop the socket filter lock */
		lck_rw_unlock_exclusive(sock_filter_lock);
	}
	/* NOTREACHED */
}
static int
sflt_attach_locked(struct socket *so, struct socket_filter *filter,
    int socklocked)
{
	int error = 0;
	struct socket_filter_entry *entry = NULL;

	if (sflt_permission_check(sotoinpcb(so)))
		return (0);

	if (filter == NULL)
		return (ENOENT);

	for (entry = so->so_filt; entry; entry = entry->sfe_next_onfilter) {
		if (entry->sfe_filter->sf_filter.sf_handle ==
		    filter->sf_filter.sf_handle)
			return (EEXIST);
	}
	/* allocate the socket filter entry */
	MALLOC(entry, struct socket_filter_entry *, sizeof (*entry), M_IFADDR,
	    M_WAITOK);
	if (entry == NULL)
		return (ENOMEM);

	/* Initialize the socket filter entry */
	entry->sfe_cookie = NULL;
	entry->sfe_flags = SFEF_ATTACHED;
	entry->sfe_refcount = 1;	/* corresponds to SFEF_ATTACHED flag set */

	/* Put the entry in the filter list */
	sflt_retain_locked(filter);
	entry->sfe_filter = filter;
	entry->sfe_next_onfilter = filter->sf_entry_head;
	filter->sf_entry_head = entry;

	/* Put the entry on the socket filter list */
	entry->sfe_socket = so;
	entry->sfe_next_onsocket = so->so_filt;
	so->so_filt = entry;

	if (entry->sfe_filter->sf_filter.sf_attach) {
		/* Retain the entry while we call attach */
		sflt_entry_retain(entry);

		/*
		 * Release the filter lock --
		 * callers must be aware we will do this
		 */
		lck_rw_unlock_exclusive(sock_filter_lock);

		/* Unlock the socket */
		if (socklocked)
			socket_unlock(so, 0);

		/* It's finally safe to call the filter function */
		error = entry->sfe_filter->sf_filter.sf_attach(
		    &entry->sfe_cookie, so);

		/* Lock the socket again */
		if (socklocked)
			socket_lock(so, 0);

		/* Lock the filters again */
		lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * If the attach function returns an error,
		 * this filter must be detached
		 */
		if (error) {
			/* don't call sf_detach */
			entry->sfe_flags |= SFEF_NODETACH;
			sflt_detach_locked(entry);
		}

		/* Release the retain we held through the attach call */
		sflt_entry_release(entry);
	}

	return (error);
}
static errno_t
sflt_attach_internal(socket_t socket, sflt_handle handle)
{
	errno_t result = EINVAL;

	if (socket == NULL || handle == 0)
		return (result);

	result = ENOENT;

	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter *filter = NULL;
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		result = sflt_attach_locked(socket, filter, 1);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}
static void
sflt_detach_locked(struct socket_filter_entry *entry)
{
	if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
		entry->sfe_flags &= ~SFEF_ATTACHED;
		sflt_entry_release(entry);
	}
}
#pragma mark -- Socket Layer Hooks --

__private_extern__ void
sflt_initsock(struct socket *so)
{
	/*
	 * Point to the real protosw, as so_proto might have been
	 * pointed to a modified version.
	 */
	struct protosw *proto = so->so_proto->pr_protosw;

	lck_rw_lock_shared(sock_filter_lock);
	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		/* Promote lock to exclusive */
		if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock))
			lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * Warning: A filter unregistering will be pulled out of
		 * the list.  This could happen while we drop the lock in
		 * sflt_attach_locked or sflt_release_locked.  For this
		 * reason we retain a reference on the filter (or next_filter)
		 * while calling this function.  This protects us from a panic,
		 * but it could result in a socket being created without all
		 * of the global filters if we're attaching a filter as it
		 * is removed, if that's possible.
		 */
		struct socket_filter *filter =
		    TAILQ_FIRST(&proto->pr_filter_head);

		sflt_retain_locked(filter);

		while (filter) {
			struct socket_filter *filter_next;
			/*
			 * Warning: sflt_attach_locked
			 * will drop the lock
			 */
			sflt_attach_locked(so, filter, 0);

			filter_next = TAILQ_NEXT(filter, sf_protosw_next);
			if (filter_next)
				sflt_retain_locked(filter_next);

			/*
			 * Warning: sflt_release_locked may remove
			 * the filter from the queue
			 */
			sflt_release_locked(filter);
			filter = filter_next;
		}
	}
	lck_rw_done(sock_filter_lock);
}
/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.
 */
__private_extern__ void
sflt_termsock(struct socket *so)
{
	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter_entry *entry;

	while ((entry = so->so_filt) != NULL) {
		/* Pull filter off the socket */
		so->so_filt = entry->sfe_next_onsocket;
		entry->sfe_flags |= SFEF_NOSOCKET;

		/* Call detach */
		sflt_detach_locked(entry);

		/*
		 * On sflt_termsock, we can't return until the detach function
		 * has been called.  Call the detach function - this is gross
		 * because the socket filter entry could be freed when we drop
		 * the lock, so we make copies on the stack and retain
		 * everything we need before dropping the lock.
		 */
		if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
		    entry->sfe_filter->sf_filter.sf_detach) {
			void *sfe_cookie = entry->sfe_cookie;
			struct socket_filter *sfe_filter = entry->sfe_filter;

			/* Retain the socket filter */
			sflt_retain_locked(sfe_filter);

			/* Mark that we've called the detach function */
			entry->sfe_flags |= SFEF_NODETACH;

			/* Drop the lock before calling the detach function */
			lck_rw_unlock_exclusive(sock_filter_lock);
			sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
			lck_rw_lock_exclusive(sock_filter_lock);

			/* Release the filter */
			sflt_release_locked(sfe_filter);
		}
	}

	lck_rw_unlock_exclusive(sock_filter_lock);
}
static void
sflt_notify_internal(struct socket *so, sflt_event_t event, void *param,
    sflt_handle handle)
{
	if (so->so_filt == NULL)
		return;

	struct socket_filter_entry *entry;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_notify &&
		    ((handle && entry->sfe_filter->sf_filter.sf_handle !=
		    handle) || !handle)) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Finally call the filter */
			entry->sfe_filter->sf_filter.sf_notify(
			    entry->sfe_cookie, so, event, param);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked != 0) {
		socket_lock(so, 0);
	}
}

__private_extern__ void
sflt_notify(struct socket *so, sflt_event_t event, void *param)
{
	sflt_notify_internal(so, event, param, 0);
}

static void
sflt_notify_after_register(struct socket *so, sflt_event_t event,
    sflt_handle handle)
{
	sflt_notify_internal(so, event, NULL, handle);
}
__private_extern__ int
sflt_ioctl(struct socket *so, u_long cmd, caddr_t data)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_ioctl) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_ioctl(
			    entry->sfe_cookie, so, cmd, data);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_bind(struct socket *so, const struct sockaddr *nam)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_bind) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_bind(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again and
			 * release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_listen(struct socket *so)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_listen) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_listen(
			    entry->sfe_cookie, so);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_accept(struct socket *head, struct socket *so,
    const struct sockaddr *local, const struct sockaddr *remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_accept) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_accept(
			    entry->sfe_cookie, head, so, local, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_getsockname(struct socket *so, struct sockaddr **local)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getsockname) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getsockname(
			    entry->sfe_cookie, so, local);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_getpeername(struct socket *so, struct sockaddr **remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getpeername) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getpeername(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_connectin(struct socket *so, const struct sockaddr *remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_in) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_in(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
static int
sflt_connectout_common(struct socket *so, const struct sockaddr *nam)
{
	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_out) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_out(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_connectout(struct socket *so, const struct sockaddr *nam)
{
	char buf[SOCK_MAXADDRLEN];
	struct sockaddr *sa;
	int error;

	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	/*
	 * Workaround for rdar://23362120
	 * Always pass a buffer that can hold an IPv6 socket address
	 */
	bzero(buf, sizeof (buf));
	bcopy(nam, buf, nam->sa_len);
	sa = (struct sockaddr *)buf;

	error = sflt_connectout_common(so, sa);
	if (error != 0)
		return (error);

	/*
	 * If the address was modified, copy it back
	 */
	if (bcmp(sa, nam, nam->sa_len) != 0) {
		bcopy(sa, (struct sockaddr *)(uintptr_t)nam, nam->sa_len);
	}

	return (0);
}
__private_extern__ int
sflt_setsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_setoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_setoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_getsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int setsendthread = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_out) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				if (so->so_send_filt_thread == NULL) {
					setsendthread = 1;
					so->so_send_filt_thread =
					    current_thread();
				}
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_out(
			    entry->sfe_cookie, so, to, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
		if (setsendthread)
			so->so_send_filt_thread = NULL;
	}

	return (error);
}
__private_extern__ int
sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int error = 0;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);

	for (entry = so->so_filt; entry && (error == 0);
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_in) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_in(
			    entry->sfe_cookie, so, from, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
#pragma mark -- KPI --
errno_t
sflt_attach(socket_t socket, sflt_handle handle)
{
	socket_lock(socket, 1);
	errno_t result = sflt_attach_internal(socket, handle);
	socket_unlock(socket, 1);
	return (result);
}
errno_t
sflt_detach(socket_t socket, sflt_handle handle)
{
	struct socket_filter_entry *entry;
	errno_t result = 0;

	if (socket == NULL || handle == 0)
		return (EINVAL);

	lck_rw_lock_exclusive(sock_filter_lock);
	for (entry = socket->so_filt; entry;
	    entry = entry->sfe_next_onsocket) {
		if (entry->sfe_filter->sf_filter.sf_handle == handle &&
		    (entry->sfe_flags & SFEF_ATTACHED) != 0) {
			break;
		}
	}

	if (entry != NULL) {
		sflt_detach_locked(entry);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}
struct solist {
	struct solist *next;
	struct socket *so;
};
errno_t
sflt_register(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr;
	unsigned int len;
	struct socket *so;
	struct inpcb *inp;
	struct solist *solisthead = NULL, *solist = NULL;

	if ((domain != PF_INET) && (domain != PF_INET6))
		return (ENOTSUP);

	pr = pffindproto(domain, protocol, type);
	if (pr == NULL)
		return (ENOENT);

	if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
	    filter->sf_handle == 0 || filter->sf_name == NULL)
		return (EINVAL);

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter *, sizeof (*sock_filt),
	    M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return (ENOBUFS);
	}

	bzero(sock_filt, sizeof (*sock_filt));

	/* Legacy sflt_filter length; current structure minus extended */
	len = sizeof (*filter) - sizeof (struct sflt_filter_ext);
	/*
	 * Include extended fields if filter defines SFLT_EXTENDED.
	 * We've zeroed out our internal sflt_filter placeholder,
	 * so any unused portion would have been taken care of.
	 */
	if (filter->sf_flags & SFLT_EXTENDED) {
		unsigned int ext_len = filter->sf_len;

		if (ext_len > sizeof (struct sflt_filter_ext))
			ext_len = sizeof (struct sflt_filter_ext);

		len += ext_len;
	}
	bcopy(filter, &sock_filt->sf_filter, len);

	lck_rw_lock_exclusive(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle ==
		    sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt,
		    sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
			    sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
		sflt_retain_locked(sock_filt);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	if (match != NULL) {
		FREE(sock_filt, M_IFADDR);
		return (EEXIST);
	}

	if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY))
		return (error);

	/*
	 * Setup the filter on the TCP and UDP sockets already created.
	 */
#define SOLIST_ADD(_so) do {						\
	solist->next = solisthead;					\
	sock_retain((_so));						\
	solist->so = (_so);						\
	solisthead = solist;						\
} while (0)
	if (protocol == IPPROTO_TCP) {
		lck_rw_lock_shared(tcbinfo.ipi_lock);
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(tcbinfo.ipi_lock);
	} else if (protocol == IPPROTO_UDP) {
		lck_rw_lock_shared(udbinfo.ipi_lock);
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(udbinfo.ipi_lock);
	}
	/* XXX it's possible to walk the raw socket list as well */
#undef SOLIST_ADD

	while (solisthead) {
		sflt_handle handle = filter->sf_handle;

		so = solisthead->so;
		socket_lock(so, 0);
		sflt_initsock(so);
		if (so->so_state & SS_ISCONNECTING)
			sflt_notify_after_register(so, sock_evt_connecting,
			    handle);
		else if (so->so_state & SS_ISCONNECTED)
			sflt_notify_after_register(so, sock_evt_connected,
			    handle);
		else if ((so->so_state &
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE)) ==
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE))
			sflt_notify_after_register(so, sock_evt_disconnecting,
			    handle);
		else if ((so->so_state &
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED)) ==
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED))
			sflt_notify_after_register(so, sock_evt_disconnected,
			    handle);
		else if (so->so_state & SS_CANTSENDMORE)
			sflt_notify_after_register(so, sock_evt_cantsendmore,
			    handle);
		else if (so->so_state & SS_CANTRCVMORE)
			sflt_notify_after_register(so, sock_evt_cantrecvmore,
			    handle);
		socket_unlock(so, 0);
		/* XXX no easy way to post the sock_evt_closing event */
		sock_release(so);
		solist = solisthead;
		solisthead = solisthead->next;
		FREE(solist, M_IFADDR);
	}

	return (error);
}
errno_t
sflt_unregister(sflt_handle handle)
{
	struct socket_filter *filter;
	lck_rw_lock_exclusive(sock_filter_lock);

	/* Find the entry by the handle */
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		/* Remove it from the global list */
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

		/* Remove it from the protosw list */
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head,
			    filter, sf_protosw_next);
		}

		/* Detach from any sockets */
		struct socket_filter_entry *entry = NULL;

		for (entry = filter->sf_entry_head; entry;
		    entry = entry->sfe_next_onfilter) {
			sflt_detach_locked(entry);
		}

		/* Release the filter */
		sflt_release_locked(filter);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	if (filter == NULL)
		return (ENOENT);

	return (0);
}
errno_t
sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int error = 0;

	if (so == NULL || data == NULL)
		return (EINVAL);

	if (flags & sock_data_filt_flag_oob) {
		return (ENOTSUP);
	}

	socket_lock(so, 1);

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		error = ENOTSUP;
		goto done;
	}

	if (from) {
		if (sbappendaddr(&so->so_rcv,
		    (struct sockaddr *)(uintptr_t)from, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf *)data))
			sorwakeup(so);
		goto done;
	}

	if (sbappend(&so->so_rcv, data))
		sorwakeup(so);
done:
	socket_unlock(so, 1);
	return (error);
}
errno_t
sock_inject_data_out(socket_t so, const struct sockaddr *to, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int sosendflags = 0;

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW)
		return (ENOTSUP);

	if (flags & sock_data_filt_flag_oob)
		sosendflags = MSG_OOB;
	return (sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
	    data, control, sosendflags));
}
sockopt_dir
sockopt_direction(sockopt_t sopt)
{
	return ((sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set);
}

int
sockopt_level(sockopt_t sopt)
{
	return (sopt->sopt_level);
}

int
sockopt_name(sockopt_t sopt)
{
	return (sopt->sopt_name);
}

size_t
sockopt_valsize(sockopt_t sopt)
{
	return (sopt->sopt_valsize);
}

errno_t
sockopt_copyin(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyin(sopt, data, len, len));
}

errno_t
sockopt_copyout(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyout(sopt, data, len));
}