/*
 * Copyright (c) 2003-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/proc.h>
#include <kern/locks.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <net/kext_net.h>
#include <net/if.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>

#include <string.h>

#define	SFEF_ATTACHED		0x1	/* SFE is on socket list */
#define	SFEF_NODETACH		0x2	/* Detach should not be called */
#define	SFEF_NOSOCKET		0x4	/* Socket is gone */

struct socket_filter_entry {
	struct socket_filter_entry	*sfe_next_onsocket;
	struct socket_filter_entry	*sfe_next_onfilter;
	struct socket_filter_entry	*sfe_next_oncleanup;

	struct socket_filter		*sfe_filter;
	struct socket			*sfe_socket;
	void				*sfe_cookie;

	uint32_t			sfe_flags;
	int32_t				sfe_refcount;
};

struct socket_filter {
	TAILQ_ENTRY(socket_filter)	sf_protosw_next;
	TAILQ_ENTRY(socket_filter)	sf_global_next;
	struct socket_filter_entry	*sf_entry_head;

	struct protosw			*sf_proto;
	struct sflt_filter		sf_filter;
	u_int32_t			sf_refcount;
};

TAILQ_HEAD(socket_filter_list, socket_filter);

static struct socket_filter_list	sock_filter_head;
static lck_rw_t				*sock_filter_lock = NULL;
static lck_mtx_t			*sock_filter_cleanup_lock = NULL;
static struct socket_filter_entry	*sock_filter_cleanup_entries = NULL;
static thread_t				sock_filter_cleanup_thread = NULL;

static void	sflt_cleanup_thread(void *, wait_result_t);
static void	sflt_detach_locked(struct socket_filter_entry *entry);

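/*
 * A socket_filter_entry ties one filter to one socket and can sit on three
 * singly-linked lists at once: the per-socket list (sfe_next_onsocket,
 * rooted at so->so_filt), the per-filter list (sfe_next_onfilter, rooted
 * at sf_entry_head), and, once its refcount drops to zero, the cleanup
 * list (sfe_next_oncleanup) drained by sflt_cleanup_thread. List
 * manipulation happens under sock_filter_lock; the cleanup list is
 * guarded separately by sock_filter_cleanup_lock.
 */
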
#pragma mark -- Internal State Management --

__private_extern__ void
sflt_init(void)
{
	lck_grp_attr_t	*grp_attrib = NULL;
	lck_attr_t	*lck_attrib = NULL;
	lck_grp_t	*lck_group = NULL;

	TAILQ_INIT(&sock_filter_head);

	/* Allocate a rw lock */
	grp_attrib = lck_grp_attr_alloc_init();
	lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
	lck_grp_attr_free(grp_attrib);
	lck_attrib = lck_attr_alloc_init();
	sock_filter_lock = lck_rw_alloc_init(lck_group, lck_attrib);
	sock_filter_cleanup_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
	lck_grp_free(lck_group);
	lck_attr_free(lck_attrib);
}

static void
sflt_retain_locked(struct socket_filter *filter)
{
	filter->sf_refcount++;
}

static void
sflt_release_locked(struct socket_filter *filter)
{
	filter->sf_refcount--;
	if (filter->sf_refcount == 0) {
		/* Call the unregistered function */
		if (filter->sf_filter.sf_unregistered) {
			lck_rw_unlock_exclusive(sock_filter_lock);
			filter->sf_filter.sf_unregistered(
			    filter->sf_filter.sf_handle);
			lck_rw_lock_exclusive(sock_filter_lock);
		}

		/* Free the entry */
		FREE(filter, M_IFADDR);
	}
}

static void
sflt_entry_retain(struct socket_filter_entry *entry)
{
	if (OSIncrementAtomic(&entry->sfe_refcount) <= 0) {
		panic("sflt_entry_retain - sfe_refcount <= 0\n");
		/* NOTREACHED */
	}
}

static void
sflt_entry_release(struct socket_filter_entry *entry)
{
	SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);

	if (old == 1) {
		/* That was the last reference */

		/* Take the cleanup lock */
		lck_mtx_lock(sock_filter_cleanup_lock);

		/* Put this item on the cleanup list */
		entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = entry;

		/* If the item is the first item in the list */
		if (entry->sfe_next_oncleanup == NULL) {
			if (sock_filter_cleanup_thread == NULL) {
				/* Create a thread */
				kernel_thread_start(sflt_cleanup_thread,
				    NULL, &sock_filter_cleanup_thread);
			} else {
				/* Wakeup the thread */
				wakeup(&sock_filter_cleanup_entries);
			}
		}

		/* Drop the cleanup lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);
	} else if (old <= 0) {
		panic("sflt_entry_release - sfe_refcount (%d) <= 0\n",
		    (int)old);
		/* NOTREACHED */
	}
}

__attribute__((noreturn))
static void
sflt_cleanup_thread(void *blah, wait_result_t blah2)
{
#pragma unused(blah, blah2)
	while (1) {
		lck_mtx_lock(sock_filter_cleanup_lock);
		while (sock_filter_cleanup_entries == NULL) {
			/* Sleep until we've got something better to do */
			msleep(&sock_filter_cleanup_entries,
			    sock_filter_cleanup_lock, PWAIT,
			    "sflt_cleanup", NULL);
		}

		/* Pull the current list of dead items */
		struct socket_filter_entry	*dead =
		    sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = NULL;

		/* Drop the lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);

		/* Take the socket filter lock */
		lck_rw_lock_exclusive(sock_filter_lock);

		/* Cleanup every dead item */
		struct socket_filter_entry	*entry;
		for (entry = dead; entry; entry = dead) {
			struct socket_filter_entry	**nextpp;

			dead = entry->sfe_next_oncleanup;

			/* Call detach function if necessary - drop the lock */
			if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
			    entry->sfe_filter->sf_filter.sf_detach) {
				entry->sfe_flags |= SFEF_NODETACH;
				lck_rw_unlock_exclusive(sock_filter_lock);

				/*
				 * Warning - passing a potentially
				 * dead socket may be bad
				 */
				entry->sfe_filter->sf_filter.sf_detach(
				    entry->sfe_cookie, entry->sfe_socket);

				lck_rw_lock_exclusive(sock_filter_lock);
			}

			/*
			 * Pull entry off the socket list --
			 * if the socket still exists
			 */
			if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
				for (nextpp = &entry->sfe_socket->so_filt;
				    *nextpp;
				    nextpp = &(*nextpp)->sfe_next_onsocket) {
					if (*nextpp == entry) {
						*nextpp =
						    entry->sfe_next_onsocket;
						break;
					}
				}
			}

			/* Pull entry off the filter list */
			for (nextpp = &entry->sfe_filter->sf_entry_head;
			    *nextpp; nextpp = &(*nextpp)->sfe_next_onfilter) {
				if (*nextpp == entry) {
					*nextpp = entry->sfe_next_onfilter;
					break;
				}
			}

			/*
			 * Release the filter -- may drop lock, but that's okay
			 */
			sflt_release_locked(entry->sfe_filter);
			entry->sfe_socket = NULL;
			entry->sfe_filter = NULL;
			FREE(entry, M_IFADDR);
		}

		/* Drop the socket filter lock */
		lck_rw_unlock_exclusive(sock_filter_lock);
	}
	/* NOTREACHED */
}

static int
sflt_attach_locked(struct socket *so, struct socket_filter *filter,
    int socklocked)
{
	int error = 0;
	struct socket_filter_entry *entry = NULL;

	if (filter == NULL)
		return (ENOENT);

	for (entry = so->so_filt; entry; entry = entry->sfe_next_onfilter) {
		if (entry->sfe_filter->sf_filter.sf_handle ==
		    filter->sf_filter.sf_handle)
			return (EEXIST);
	}
	/* allocate the socket filter entry */
	MALLOC(entry, struct socket_filter_entry *, sizeof (*entry), M_IFADDR,
	    M_WAITOK);
	if (entry == NULL)
		return (ENOMEM);

	/* Initialize the socket filter entry */
	entry->sfe_cookie = NULL;
	entry->sfe_flags = SFEF_ATTACHED;
	entry->sfe_refcount = 1; /* corresponds to SFEF_ATTACHED flag set */

	/* Put the entry in the filter list */
	sflt_retain_locked(filter);
	entry->sfe_filter = filter;
	entry->sfe_next_onfilter = filter->sf_entry_head;
	filter->sf_entry_head = entry;

	/* Put the entry on the socket filter list */
	entry->sfe_socket = so;
	entry->sfe_next_onsocket = so->so_filt;
	so->so_filt = entry;

	if (entry->sfe_filter->sf_filter.sf_attach) {
		/* Retain the entry while we call attach */
		sflt_entry_retain(entry);

		/*
		 * Release the filter lock --
		 * callers must be aware we will do this
		 */
		lck_rw_unlock_exclusive(sock_filter_lock);

		/* Unlock the socket */
		if (socklocked)
			socket_unlock(so, 0);

		/* It's finally safe to call the filter function */
		error = entry->sfe_filter->sf_filter.sf_attach(
		    &entry->sfe_cookie, so);

		/* Lock the socket again */
		if (socklocked)
			socket_lock(so, 0);

		/* Lock the filters again */
		lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * If the attach function returns an error,
		 * this filter must be detached
		 */
		if (error) {
			/* don't call sf_detach */
			entry->sfe_flags |= SFEF_NODETACH;
			sflt_detach_locked(entry);
		}

		/* Release the retain we held through the attach call */
		sflt_entry_release(entry);
	}

	return (error);
}

static errno_t
sflt_attach_internal(socket_t socket, sflt_handle handle)
{
	if (socket == NULL || handle == 0)
		return (EINVAL);

	int result = EINVAL;

	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter *filter = NULL;
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		result = sflt_attach_locked(socket, filter, 1);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}

static void
sflt_detach_locked(struct socket_filter_entry *entry)
{
	if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
		entry->sfe_flags &= ~SFEF_ATTACHED;
		sflt_entry_release(entry);
	}
}

#pragma mark -- Socket Layer Hooks --

__private_extern__ void
sflt_initsock(struct socket *so)
{
	/*
	 * Point to the real protosw, as so_proto might have been
	 * pointed to a modified version.
	 */
	struct protosw *proto = so->so_proto->pr_protosw;

	lck_rw_lock_shared(sock_filter_lock);
	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		/* Promote lock to exclusive */
		if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock))
			lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * Warning: A filter unregistering will be pulled out of
		 * the list. This could happen while we drop the lock in
		 * sflt_attach_locked or sflt_release_locked. For this
		 * reason we retain a reference on the filter (or next_filter)
		 * while calling this function. This protects us from a panic,
		 * but it could result in a socket being created without all
		 * of the global filters if we're attaching a filter as it
		 * is removed, if that's possible.
		 */
		struct socket_filter *filter =
		    TAILQ_FIRST(&proto->pr_filter_head);

		sflt_retain_locked(filter);

		while (filter) {
			struct socket_filter *filter_next;
			/*
			 * Warning: sflt_attach_locked will drop the lock
			 */
			sflt_attach_locked(so, filter, 0);

			filter_next = TAILQ_NEXT(filter, sf_protosw_next);
			if (filter_next)
				sflt_retain_locked(filter_next);

			/*
			 * Warning: sflt_release_locked may remove
			 * the filter from the queue
			 */
			sflt_release_locked(filter);
			filter = filter_next;
		}
	}
	lck_rw_done(sock_filter_lock);
}

/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.
 */
__private_extern__ void
sflt_termsock(struct socket *so)
{
	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter_entry *entry;

	while ((entry = so->so_filt) != NULL) {
		/* Pull filter off the socket */
		so->so_filt = entry->sfe_next_onsocket;
		entry->sfe_flags |= SFEF_NOSOCKET;

		/* Call detach */
		sflt_detach_locked(entry);

		/*
		 * On sflt_termsock, we can't return until the detach function
		 * has been called. Call the detach function - this is gross
		 * because the socket filter entry could be freed when we drop
		 * the lock, so we make copies on the stack and retain
		 * everything we need before dropping the lock.
		 */
		if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
		    entry->sfe_filter->sf_filter.sf_detach) {
			void *sfe_cookie = entry->sfe_cookie;
			struct socket_filter *sfe_filter = entry->sfe_filter;

			/* Retain the socket filter */
			sflt_retain_locked(sfe_filter);

			/* Mark that we've called the detach function */
			entry->sfe_flags |= SFEF_NODETACH;

			/* Drop the lock before calling the detach function */
			lck_rw_unlock_exclusive(sock_filter_lock);
			sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
			lck_rw_lock_exclusive(sock_filter_lock);

			/* Release the filter */
			sflt_release_locked(sfe_filter);
		}
	}

	lck_rw_unlock_exclusive(sock_filter_lock);
}

static void
sflt_notify_internal(struct socket *so, sflt_event_t event, void *param,
    sflt_handle handle)
{
	if (so->so_filt == NULL)
		return;

	struct socket_filter_entry *entry;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_notify &&
		    ((handle && entry->sfe_filter->sf_filter.sf_handle !=
		    handle) || !handle)) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Finally call the filter */
			entry->sfe_filter->sf_filter.sf_notify(
			    entry->sfe_cookie, so, event, param);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked != 0) {
		socket_lock(so, 0);
	}
}

__private_extern__ void
sflt_notify(struct socket *so, sflt_event_t event, void *param)
{
	sflt_notify_internal(so, event, param, 0);
}

static void
sflt_notify_after_register(struct socket *so, sflt_event_t event,
    sflt_handle handle)
{
	sflt_notify_internal(so, event, NULL, handle);
}

__private_extern__ int
sflt_ioctl(struct socket *so, u_long cmd, caddr_t data)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_ioctl) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_ioctl(
			    entry->sfe_cookie, so, cmd, data);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_bind(struct socket *so, const struct sockaddr *nam)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_bind) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_bind(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_listen(struct socket *so)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_listen) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_listen(
			    entry->sfe_cookie, so);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_accept(struct socket *head, struct socket *so,
    const struct sockaddr *local, const struct sockaddr *remote)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_accept) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_accept(
			    entry->sfe_cookie, head, so, local, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getsockname(struct socket *so, struct sockaddr **local)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getsockname) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getsockname(
			    entry->sfe_cookie, so, local);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getpeername(struct socket *so, struct sockaddr **remote)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getpeername) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getpeername(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_connectin(struct socket *so, const struct sockaddr *remote)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_in) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_in(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

static int
sflt_connectout_common(struct socket *so, const struct sockaddr *nam)
{
	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_out) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_out(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_connectout(struct socket *so, const struct sockaddr *nam)
{
	char buf[SOCK_MAXADDRLEN];
	struct sockaddr *sa;
	int error;

	if (so->so_filt == NULL)
		return (0);

	/*
	 * Workaround for rdar://23362120
	 * Always pass a buffer that can hold an IPv6 socket address
	 */
	bzero(buf, sizeof (buf));
	bcopy(nam, buf, nam->sa_len);
	sa = (struct sockaddr *)buf;

	error = sflt_connectout_common(so, sa);
	if (error != 0)
		return (error);

	/*
	 * If the address was modified, copy it back
	 */
	if (bcmp(sa, nam, nam->sa_len) != 0) {
		bcopy(sa, (struct sockaddr *)(uintptr_t)nam, nam->sa_len);
	}

	return (0);
}

__private_extern__ int
sflt_connectxout(struct socket *so, struct sockaddr_list **dst_sl0)
{
	struct sockaddr_list *dst_sl;
	struct sockaddr_entry *se, *tse;
	int modified = 0;
	int error = 0;

	if (so->so_filt == NULL)
		return (0);

	/* make a copy as sflt_connectout() releases socket lock */
	dst_sl = sockaddrlist_dup(*dst_sl0, M_WAITOK);
	if (dst_sl == NULL)
		return (ENOBUFS);

	/*
	 * Hmm; we don't yet have a connectx socket filter callback,
	 * so the closest thing to do is to probably call sflt_connectout()
	 * as many times as there are addresses in the list, and bail
	 * as soon as we get an error.
	 */
	TAILQ_FOREACH_SAFE(se, &dst_sl->sl_head, se_link, tse) {
		char buf[SOCK_MAXADDRLEN];
		struct sockaddr *sa;

		VERIFY(se->se_addr != NULL);

		/*
		 * Workaround for rdar://23362120
		 * Always pass a buffer that can hold an IPv6 socket address
		 */
		bzero(buf, sizeof (buf));
		bcopy(se->se_addr, buf, se->se_addr->sa_len);
		sa = (struct sockaddr *)buf;

		error = sflt_connectout_common(so, sa);
		if (error != 0)
			break;

		/*
		 * If the address was modified, copy it back
		 */
		if (bcmp(se->se_addr, sa, se->se_addr->sa_len) != 0) {
			bcopy(sa, se->se_addr, se->se_addr->sa_len);
			modified = 1;
		}
	}

	if (error != 0 || !modified) {
		/* leave the original as is */
		sockaddrlist_free(dst_sl);
	} else {
		/*
		 * At least one address was modified and there were no errors;
		 * ditch the original and return the modified list.
		 */
		sockaddrlist_free(*dst_sl0);
		*dst_sl0 = dst_sl;
	}

	return (error);
}

__private_extern__ int
sflt_setsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_setoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_setoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int setsendthread = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_out) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				if (so->so_send_filt_thread == NULL) {
					setsendthread = 1;
					so->so_send_filt_thread =
					    current_thread();
				}
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_out(
			    entry->sfe_cookie, so, to, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
		if (setsendthread)
			so->so_send_filt_thread = NULL;
	}

	return (error);
}

__private_extern__ int
sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int error = 0;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);

	for (entry = so->so_filt; entry && (error == 0);
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_in) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_in(
			    entry->sfe_cookie, so, from, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

#pragma mark -- KPI --

errno_t
sflt_attach(socket_t socket, sflt_handle handle)
{
	socket_lock(socket, 1);
	errno_t result = sflt_attach_internal(socket, handle);
	socket_unlock(socket, 1);
	return (result);
}

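/*
 * Illustrative sketch (not part of this file): attaching a previously
 * registered filter to a socket from a kext. EXAMPLE_HANDLE is a
 * hypothetical handle value that the filter was registered under via
 * sflt_register().
 *
 *	#define EXAMPLE_HANDLE	0x12345678
 *
 *	errno_t
 *	example_attach_to(socket_t so)
 *	{
 *		errno_t err = sflt_attach(so, EXAMPLE_HANDLE);
 *		if (err == EEXIST)
 *			err = 0;	// already attached; treat as success
 *		return (err);
 *	}
 */
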
errno_t
sflt_detach(socket_t socket, sflt_handle handle)
{
	struct socket_filter_entry *entry;
	errno_t result = 0;

	if (socket == NULL || handle == 0)
		return (EINVAL);

	lck_rw_lock_exclusive(sock_filter_lock);
	for (entry = socket->so_filt; entry;
	    entry = entry->sfe_next_onsocket) {
		if (entry->sfe_filter->sf_filter.sf_handle == handle &&
		    (entry->sfe_flags & SFEF_ATTACHED) != 0) {
			break;
		}
	}

	if (entry != NULL) {
		sflt_detach_locked(entry);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}

struct solist {
	struct solist	*next;
	struct socket	*so;
};

errno_t
sflt_register(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr;
	unsigned int len;
	struct socket *so;
	struct inpcb *inp;
	struct solist *solisthead = NULL, *solist = NULL;

	if ((domain != PF_INET) && (domain != PF_INET6))
		return (ENOTSUP);

	pr = pffindproto(domain, protocol, type);
	if (pr == NULL)
		return (ENOENT);

	if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
	    filter->sf_handle == 0 || filter->sf_name == NULL)
		return (EINVAL);

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter *, sizeof (*sock_filt),
	    M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return (ENOBUFS);
	}

	bzero(sock_filt, sizeof (*sock_filt));

	/* Legacy sflt_filter length; current structure minus extended */
	len = sizeof (*filter) - sizeof (struct sflt_filter_ext);

	/*
	 * Include extended fields if filter defines SFLT_EXTENDED.
	 * We've zeroed out our internal sflt_filter placeholder,
	 * so any unused portion would have been taken care of.
	 */
	if (filter->sf_flags & SFLT_EXTENDED) {
		unsigned int ext_len = filter->sf_len;

		if (ext_len > sizeof (struct sflt_filter_ext))
			ext_len = sizeof (struct sflt_filter_ext);

		len += ext_len;
	}
	bcopy(filter, &sock_filt->sf_filter, len);

	lck_rw_lock_exclusive(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle ==
		    sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt,
		    sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
			    sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
		sflt_retain_locked(sock_filt);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	if (match != NULL) {
		FREE(sock_filt, M_IFADDR);
		return (EEXIST);
	}

	if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY))
		return (error);

	/*
	 * Setup the filter on the TCP and UDP sockets already created.
	 */
#define	SOLIST_ADD(_so) do {						\
	solist->next = solisthead;					\
	sock_retain((_so));						\
	solist->so = (_so);						\
	solisthead = solist;						\
} while (0)
	if (protocol == IPPROTO_TCP) {
		lck_rw_lock_shared(tcbinfo.ipi_lock);
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(tcbinfo.ipi_lock);
	} else if (protocol == IPPROTO_UDP) {
		lck_rw_lock_shared(udbinfo.ipi_lock);
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(udbinfo.ipi_lock);
	}
	/* XXX it's possible to walk the raw socket list as well */

	while (solisthead) {
		sflt_handle handle = filter->sf_handle;

		so = solisthead->so;
		socket_lock(so, 0);
		sflt_initsock(so);
		if (so->so_state & SS_ISCONNECTING)
			sflt_notify_after_register(so, sock_evt_connecting,
			    handle);
		else if (so->so_state & SS_ISCONNECTED)
			sflt_notify_after_register(so, sock_evt_connected,
			    handle);
		else if ((so->so_state &
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE)) ==
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE))
			sflt_notify_after_register(so, sock_evt_disconnecting,
			    handle);
		else if ((so->so_state &
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED)) ==
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED))
			sflt_notify_after_register(so, sock_evt_disconnected,
			    handle);
		else if (so->so_state & SS_CANTSENDMORE)
			sflt_notify_after_register(so, sock_evt_cantsendmore,
			    handle);
		else if (so->so_state & SS_CANTRCVMORE)
			sflt_notify_after_register(so, sock_evt_cantrecvmore,
			    handle);
		socket_unlock(so, 0);
		/* XXX no easy way to post the sock_evt_closing event */
		sock_release(so);
		solist = solisthead;
		solisthead = solisthead->next;
		FREE(solist, M_IFADDR);
	}

	return (error);
}

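/*
 * Illustrative sketch (not part of this file): the minimal registration a
 * caller of sflt_register() performs. sf_attach, sf_detach, a nonzero
 * sf_handle and sf_name are mandatory (enforced above); SFLT_GLOBAL asks
 * for automatic attachment to every new socket of the given triple. The
 * "example_" names and the handle value are hypothetical.
 *
 *	static errno_t
 *	example_sf_attach(void **cookie, socket_t so)
 *	{
 *		*cookie = NULL;		// per-socket state would go here
 *		return (0);
 *	}
 *
 *	static void
 *	example_sf_detach(void *cookie, socket_t so)
 *	{
 *		// free per-socket state
 *	}
 *
 *	static struct sflt_filter example_filter = {
 *		.sf_handle	= 0x12345678,
 *		.sf_flags	= SFLT_GLOBAL,
 *		.sf_name	= "com.example.filter",
 *		.sf_attach	= example_sf_attach,
 *		.sf_detach	= example_sf_detach,
 *	};
 *
 *	errno_t
 *	example_start(void)
 *	{
 *		return (sflt_register(&example_filter, PF_INET, SOCK_STREAM,
 *		    IPPROTO_TCP));
 *	}
 */
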
errno_t
sflt_unregister(sflt_handle handle)
{
	struct socket_filter *filter;
	lck_rw_lock_exclusive(sock_filter_lock);

	/* Find the entry by the handle */
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		/* Remove it from the global list */
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

		/* Remove it from the protosw list */
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head,
			    filter, sf_protosw_next);
		}

		/* Detach from any sockets */
		struct socket_filter_entry *entry = NULL;

		for (entry = filter->sf_entry_head; entry;
		    entry = entry->sfe_next_onfilter) {
			sflt_detach_locked(entry);
		}

		/* Release the filter */
		sflt_release_locked(filter);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	if (filter == NULL)
		return (ENOENT);

	return (0);
}

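/*
 * Illustrative sketch (not part of this file): unregistering at kext stop.
 * sflt_unregister() only begins the teardown; sf_detach fires for each
 * still-attached socket and sf_unregistered fires last, when the filter's
 * refcount drops to zero in sflt_release_locked(), so a kext typically
 * defers unloading its code until sf_unregistered has run.
 *
 *	static void
 *	example_unregistered(sflt_handle handle)
 *	{
 *		// safe to release globals tied to this filter now
 *	}
 *
 *	errno_t
 *	example_stop(void)
 *	{
 *		return (sflt_unregister(0x12345678));
 *	}
 */
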
errno_t
sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int error = 0;

	if (so == NULL || data == NULL)
		return (EINVAL);

	if (flags & sock_data_filt_flag_oob) {
		return (ENOTSUP);
	}

	socket_lock(so, 1);

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		error = ENOTSUP;
		goto done;
	}

	if (from) {
		if (sbappendaddr(&so->so_rcv,
		    (struct sockaddr *)(uintptr_t)from, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf *)data))
			sorwakeup(so);
		goto done;
	}

	if (sbappend(&so->so_rcv, data))
		sorwakeup(so);
done:
	socket_unlock(so, 1);
	return (error);
}

errno_t
sock_inject_data_out(socket_t so, const struct sockaddr *to, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int sosendflags = 0;

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW)
		return (ENOTSUP);

	if (flags & sock_data_filt_flag_oob)
		sosendflags = MSG_OOB;
	return (sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
	    data, control, sosendflags));
}

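/*
 * Illustrative sketch (not part of this file): the classic swallow and
 * re-inject pattern these injection KPIs exist for. A filter's sf_data_in
 * returns EJUSTRETURN to take ownership of the mbufs, then later re-injects
 * them with sock_inject_data_in(); the re-injected data passes through the
 * filter again, so a real filter needs some marker to avoid swallowing it
 * twice. example_is_reinjected() and example_defer() are hypothetical
 * helpers standing in for that bookkeeping.
 *
 *	static errno_t
 *	example_data_in(void *cookie, socket_t so, const struct sockaddr *from,
 *	    mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags)
 *	{
 *		if (example_is_reinjected(*data))	// hypothetical check
 *			return (0);			// let it through
 *
 *		example_defer(so, *data, *control);	// hypothetical queue
 *		return (EJUSTRETURN);			// we own the mbufs now
 *	}
 *
 *	// later, from the deferred-work path:
 *	//	sock_inject_data_in(so, NULL, data, control, 0);
 */
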
sockopt_dir
sockopt_direction(sockopt_t sopt)
{
	return ((sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set);
}

int
sockopt_level(sockopt_t sopt)
{
	return (sopt->sopt_level);
}

int
sockopt_name(sockopt_t sopt)
{
	return (sopt->sopt_name);
}

size_t
sockopt_valsize(sockopt_t sopt)
{
	return (sopt->sopt_valsize);
}

errno_t
sockopt_copyin(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyin(sopt, data, len, len));
}

errno_t
sockopt_copyout(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyout(sopt, data, len));
}
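
/*
 * Illustrative sketch (not part of this file): a filter's sf_setoption
 * callback using the accessors above to inspect a setsockopt() call.
 * The "example_" name is hypothetical.
 *
 *	static errno_t
 *	example_setoption(void *cookie, socket_t so, sockopt_t opt)
 *	{
 *		if (sockopt_level(opt) == SOL_SOCKET &&
 *		    sockopt_name(opt) == SO_NOSIGPIPE &&
 *		    sockopt_valsize(opt) == sizeof (int)) {
 *			int val;
 *			errno_t err = sockopt_copyin(opt, &val, sizeof (val));
 *			if (err == 0)
 *				printf("SO_NOSIGPIPE = %d\n", val);
 *		}
 *		return (0);	// 0 lets option processing continue
 *	}
 */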