/*
 * Copyright (c) 2003-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/proc.h>
#include <kern/locks.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <net/kext_net.h>
#include <net/if.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>

#include <string.h>
#define	SFEF_ATTACHED	0x1	/* SFE is on socket list */
#define	SFEF_NODETACH	0x2	/* Detach should not be called */
#define	SFEF_NOSOCKET	0x4	/* Socket is gone */
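
/*
 * A socket_filter_entry joins one socket to one filter.  Each entry sits
 * on two lists at once: the owning socket's so_filt chain (via
 * sfe_next_onsocket) and the owning filter's sf_entry_head chain (via
 * sfe_next_onfilter).  sfe_next_oncleanup is used only while the entry
 * is queued for the cleanup thread.
 */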
struct socket_filter_entry {
	struct socket_filter_entry	*sfe_next_onsocket;
	struct socket_filter_entry	*sfe_next_onfilter;
	struct socket_filter_entry	*sfe_next_oncleanup;

	struct socket_filter		*sfe_filter;
	struct socket			*sfe_socket;
	void				*sfe_cookie;

	uint32_t			sfe_flags;
	int32_t				sfe_refcount;
};

struct socket_filter {
	TAILQ_ENTRY(socket_filter)	sf_protosw_next;
	TAILQ_ENTRY(socket_filter)	sf_global_next;
	struct socket_filter_entry	*sf_entry_head;

	struct protosw			*sf_proto;
	struct sflt_filter		sf_filter;
	u_int32_t			sf_refcount;
};

TAILQ_HEAD(socket_filter_list, socket_filter);
static struct socket_filter_list	sock_filter_head;
static lck_rw_t				*sock_filter_lock = NULL;
static lck_mtx_t			*sock_filter_cleanup_lock = NULL;
static struct socket_filter_entry	*sock_filter_cleanup_entries = NULL;
static thread_t				sock_filter_cleanup_thread = NULL;

static void	sflt_cleanup_thread(void *, wait_result_t);
static void	sflt_detach_locked(struct socket_filter_entry *entry);

#pragma mark -- Internal State Management --
__private_extern__ void
sflt_init(void)
{
	lck_grp_attr_t	*grp_attrib = NULL;
	lck_attr_t	*lck_attrib = NULL;
	lck_grp_t	*lck_group = NULL;

	TAILQ_INIT(&sock_filter_head);

	/* Allocate a rw lock */
	grp_attrib = lck_grp_attr_alloc_init();
	lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
	lck_grp_attr_free(grp_attrib);
	lck_attrib = lck_attr_alloc_init();
	sock_filter_lock = lck_rw_alloc_init(lck_group, lck_attrib);
	sock_filter_cleanup_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
	lck_grp_free(lck_group);
	lck_attr_free(lck_attrib);
}
static void
sflt_retain_locked(struct socket_filter *filter)
{
	filter->sf_refcount++;
}
static void
sflt_release_locked(struct socket_filter *filter)
{
	filter->sf_refcount--;
	if (filter->sf_refcount == 0) {
		/* Call the unregistered function */
		if (filter->sf_filter.sf_unregistered) {
			lck_rw_unlock_exclusive(sock_filter_lock);
			filter->sf_filter.sf_unregistered(
			    filter->sf_filter.sf_handle);
			lck_rw_lock_exclusive(sock_filter_lock);
		}

		/* Free the entry */
		FREE(filter, M_IFADDR);
	}
}
static void
sflt_entry_retain(struct socket_filter_entry *entry)
{
	if (OSIncrementAtomic(&entry->sfe_refcount) <= 0) {
		panic("sflt_entry_retain - sfe_refcount <= 0\n");
		/* NOTREACHED */
	}
}
static void
sflt_entry_release(struct socket_filter_entry *entry)
{
	SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);
	if (old == 1) {
		/* That was the last reference */

		/* Take the cleanup lock */
		lck_mtx_lock(sock_filter_cleanup_lock);

		/* Put this item on the cleanup list */
		entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = entry;

		/* If the item is the first item in the list */
		if (entry->sfe_next_oncleanup == NULL) {
			if (sock_filter_cleanup_thread == NULL) {
				/* Create a thread */
				kernel_thread_start(sflt_cleanup_thread,
				    NULL, &sock_filter_cleanup_thread);
			} else {
				/* Wakeup the thread */
				wakeup(&sock_filter_cleanup_entries);
			}
		}

		/* Drop the cleanup lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);
	} else if (old <= 0) {
		panic("sflt_entry_release - sfe_refcount (%d) <= 0\n",
		    (int)old);
		/* NOTREACHED */
	}
}
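
/*
 * Note: entries are never freed at the point of the final release.
 * Instead, the last reference parks the entry on
 * sock_filter_cleanup_entries and hands the actual sf_detach call and
 * FREE to sflt_cleanup_thread below, which can safely drop and retake
 * the filter lock outside any caller's lock context.
 */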
static void
sflt_cleanup_thread(void *blah, wait_result_t blah2)
{
#pragma unused(blah, blah2)
	while (1) {
		lck_mtx_lock(sock_filter_cleanup_lock);
		while (sock_filter_cleanup_entries == NULL) {
			/* Sleep until we've got something better to do */
			msleep(&sock_filter_cleanup_entries,
			    sock_filter_cleanup_lock, PWAIT,
			    "sflt_cleanup", NULL);
		}

		/* Pull the current list of dead items */
		struct socket_filter_entry *dead = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = NULL;

		/* Drop the lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);

		/* Take the socket filter lock */
		lck_rw_lock_exclusive(sock_filter_lock);

		/* Cleanup every dead item */
		struct socket_filter_entry *entry;
		for (entry = dead; entry; entry = dead) {
			struct socket_filter_entry **nextpp;

			dead = entry->sfe_next_oncleanup;

			/* Call detach function if necessary - drop the lock */
			if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
			    entry->sfe_filter->sf_filter.sf_detach) {
				entry->sfe_flags |= SFEF_NODETACH;
				lck_rw_unlock_exclusive(sock_filter_lock);

				/*
				 * Warning - passing a potentially
				 * dead socket may be bad
				 */
				entry->sfe_filter->sf_filter.sf_detach(
				    entry->sfe_cookie, entry->sfe_socket);

				lck_rw_lock_exclusive(sock_filter_lock);
			}

			/*
			 * Pull entry off the socket list --
			 * if the socket still exists
			 */
			if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
				for (nextpp = &entry->sfe_socket->so_filt;
				    *nextpp;
				    nextpp = &(*nextpp)->sfe_next_onsocket) {
					if (*nextpp == entry) {
						*nextpp =
						    entry->sfe_next_onsocket;
						break;
					}
				}
			}

			/* Pull entry off the filter list */
			for (nextpp = &entry->sfe_filter->sf_entry_head;
			    *nextpp; nextpp = &(*nextpp)->sfe_next_onfilter) {
				if (*nextpp == entry) {
					*nextpp = entry->sfe_next_onfilter;
					break;
				}
			}

			/*
			 * Release the filter -- may drop lock, but that's okay
			 */
			sflt_release_locked(entry->sfe_filter);
			entry->sfe_socket = NULL;
			entry->sfe_filter = NULL;
			FREE(entry, M_IFADDR);
		}

		/* Drop the socket filter lock */
		lck_rw_unlock_exclusive(sock_filter_lock);
	}
	/* NOTREACHED */
}
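
/*
 * sflt_attach_locked expects to be called with the filter lock held
 * exclusive and, when socklocked is set, with the socket lock held.
 * Both locks are dropped and retaken around the filter's sf_attach
 * callback, so the socket's filter chain may change across this call.
 */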
static int
sflt_attach_locked(struct socket *so, struct socket_filter *filter,
    int socklocked)
{
	int error = 0;
	struct socket_filter_entry *entry = NULL;

	if (sock_filter_lock == NULL || filter == NULL)
		return (ENOENT);

	for (entry = so->so_filt; entry; entry = entry->sfe_next_onfilter) {
		if (entry->sfe_filter->sf_filter.sf_handle ==
		    filter->sf_filter.sf_handle)
			return (EEXIST);
	}
	/* allocate the socket filter entry */
	MALLOC(entry, struct socket_filter_entry *, sizeof (*entry), M_IFADDR,
	    M_WAITOK);
	if (entry == NULL)
		return (ENOMEM);

	/* Initialize the socket filter entry */
	entry->sfe_cookie = NULL;
	entry->sfe_flags = SFEF_ATTACHED;
	entry->sfe_refcount = 1; /* corresponds to SFEF_ATTACHED flag set */

	/* Put the entry in the filter list */
	sflt_retain_locked(filter);
	entry->sfe_filter = filter;
	entry->sfe_next_onfilter = filter->sf_entry_head;
	filter->sf_entry_head = entry;

	/* Put the entry on the socket filter list */
	entry->sfe_socket = so;
	entry->sfe_next_onsocket = so->so_filt;
	so->so_filt = entry;

	if (entry->sfe_filter->sf_filter.sf_attach) {
		/* Retain the entry while we call attach */
		sflt_entry_retain(entry);

		/*
		 * Release the filter lock --
		 * callers must be aware we will do this
		 */
		lck_rw_unlock_exclusive(sock_filter_lock);

		/* Unlock the socket */
		if (socklocked)
			socket_unlock(so, 0);

		/* It's finally safe to call the filter function */
		error = entry->sfe_filter->sf_filter.sf_attach(
		    &entry->sfe_cookie, so);

		/* Lock the socket again */
		if (socklocked)
			socket_lock(so, 0);

		/* Lock the filters again */
		lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * If the attach function returns an error,
		 * this filter must be detached
		 */
		if (error) {
			/* don't call sf_detach */
			entry->sfe_flags |= SFEF_NODETACH;
			sflt_detach_locked(entry);
		}

		/* Release the retain we held through the attach call */
		sflt_entry_release(entry);
	}

	return (error);
}
static errno_t
sflt_attach_internal(socket_t socket, sflt_handle handle)
{
	if (socket == NULL || handle == 0)
		return (EINVAL);

	int result = EINVAL;

	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter *filter = NULL;
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		result = sflt_attach_locked(socket, filter, 1);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}
static void
sflt_detach_locked(struct socket_filter_entry *entry)
{
	if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
		entry->sfe_flags &= ~SFEF_ATTACHED;
		sflt_entry_release(entry);
	}
}
#pragma mark -- Socket Layer Hooks --
__private_extern__ void
sflt_initsock(struct socket *so)
{
	/*
	 * Point to the real protosw, as so_proto might have been
	 * pointed to a modified version.
	 */
	struct protosw *proto = so->so_proto->pr_protosw;

	lck_rw_lock_shared(sock_filter_lock);
	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		/* Promote lock to exclusive */
		if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock))
			lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * Warning: A filter unregistering will be pulled out of
		 * the list.  This could happen while we drop the lock in
		 * sflt_attach_locked or sflt_release_locked.  For this
		 * reason we retain a reference on the filter (or next_filter)
		 * while calling this function.  This protects us from a panic,
		 * but it could result in a socket being created without all
		 * of the global filters if we're attaching a filter as it
		 * is removed, if that's possible.
		 */
		struct socket_filter *filter =
		    TAILQ_FIRST(&proto->pr_filter_head);

		sflt_retain_locked(filter);

		while (filter) {
			struct socket_filter *filter_next;
			/*
			 * Warning: sflt_attach_locked
			 * will drop the lock
			 */
			sflt_attach_locked(so, filter, 0);

			filter_next = TAILQ_NEXT(filter, sf_protosw_next);
			if (filter_next)
				sflt_retain_locked(filter_next);

			/*
			 * Warning: sflt_release_locked may remove
			 * the filter from the queue
			 */
			sflt_release_locked(filter);
			filter = filter_next;
		}
	}
	lck_rw_done(sock_filter_lock);
}
/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.
 */
__private_extern__ void
sflt_termsock(struct socket *so)
{
	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter_entry *entry;

	while ((entry = so->so_filt) != NULL) {
		/* Pull filter off the socket */
		so->so_filt = entry->sfe_next_onsocket;
		entry->sfe_flags |= SFEF_NOSOCKET;

		/* Call detach */
		sflt_detach_locked(entry);

		/*
		 * On sflt_termsock, we can't return until the detach function
		 * has been called.  Call the detach function - this is gross
		 * because the socket filter entry could be freed when we drop
		 * the lock, so we make copies on the stack and retain
		 * everything we need before dropping the lock.
		 */
		if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
		    entry->sfe_filter->sf_filter.sf_detach) {
			void *sfe_cookie = entry->sfe_cookie;
			struct socket_filter *sfe_filter = entry->sfe_filter;

			/* Retain the socket filter */
			sflt_retain_locked(sfe_filter);

			/* Mark that we've called the detach function */
			entry->sfe_flags |= SFEF_NODETACH;

			/* Drop the lock before calling the detach function */
			lck_rw_unlock_exclusive(sock_filter_lock);
			sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
			lck_rw_lock_exclusive(sock_filter_lock);

			/* Release the filter */
			sflt_release_locked(sfe_filter);
		}
	}

	lck_rw_unlock_exclusive(sock_filter_lock);
}
static void
sflt_notify_internal(struct socket *so, sflt_event_t event, void *param,
    sflt_handle handle)
{
	if (so->so_filt == NULL)
		return;

	struct socket_filter_entry *entry;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_notify &&
		    ((handle && entry->sfe_filter->sf_filter.sf_handle !=
		    handle) || !handle)) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Finally call the filter */
			entry->sfe_filter->sf_filter.sf_notify(
			    entry->sfe_cookie, so, event, param);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked != 0) {
		socket_lock(so, 0);
	}
}
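
/*
 * Every hook below follows the same shape as sflt_notify_internal: walk
 * so_filt under the shared filter lock, and for each attached entry that
 * implements the callback, retain the entry, drop the filter lock (and
 * the socket lock, once), invoke the callback, then retake the lock and
 * release the entry.
 */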
__private_extern__ void
sflt_notify(struct socket *so, sflt_event_t event, void *param)
{
	sflt_notify_internal(so, event, param, 0);
}
static void
sflt_notify_after_register(struct socket *so, sflt_event_t event,
    sflt_handle handle)
{
	sflt_notify_internal(so, event, NULL, handle);
}
__private_extern__ int
sflt_ioctl(struct socket *so, u_long cmd, caddr_t data)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_ioctl) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_ioctl(
			    entry->sfe_cookie, so, cmd, data);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_bind(struct socket *so, const struct sockaddr *nam)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_bind) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_bind(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again and
			 * release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_listen(struct socket *so)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_listen) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_listen(
			    entry->sfe_cookie, so);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_accept(struct socket *head, struct socket *so,
    const struct sockaddr *local, const struct sockaddr *remote)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_accept) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_accept(
			    entry->sfe_cookie, head, so, local, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_getsockname(struct socket *so, struct sockaddr **local)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getsockname) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getsockname(
			    entry->sfe_cookie, so, local);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_getpeername(struct socket *so, struct sockaddr **remote)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getpeername) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getpeername(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_connectin(struct socket *so, const struct sockaddr *remote)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_in) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_in(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
static int
sflt_connectout_common(struct socket *so, const struct sockaddr *nam)
{
	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_out) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_out(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_connectout(struct socket *so, const struct sockaddr *nam)
{
	char buf[SOCK_MAXADDRLEN];
	struct sockaddr *sa;
	int error;

	if (so->so_filt == NULL)
		return (0);

	/*
	 * Workaround for rdar://23362120
	 * Always pass a buffer that can hold an IPv6 socket address
	 */
	bzero(buf, sizeof (buf));
	bcopy(nam, buf, nam->sa_len);
	sa = (struct sockaddr *)buf;

	error = sflt_connectout_common(so, sa);
	if (error != 0)
		return (error);

	/*
	 * If the address was modified, copy it back
	 */
	if (bcmp(sa, nam, nam->sa_len) != 0) {
		bcopy(sa, (struct sockaddr *)(uintptr_t)nam, nam->sa_len);
	}

	return (0);
}
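
/*
 * The SOCK_MAXADDRLEN scratch buffer above gives an sf_connect_out filter
 * room to rewrite the address in place, presumably including growing an
 * IPv4 sockaddr into an IPv6 one, without overrunning the caller's
 * storage; sflt_connectxout below uses the same trick per address.
 */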
__private_extern__ int
sflt_connectxout(struct socket *so, struct sockaddr_list **dst_sl0)
{
	struct sockaddr_list *dst_sl;
	struct sockaddr_entry *se, *tse;
	int modified = 0;
	int error = 0;

	if (so->so_filt == NULL)
		return (0);

	/* make a copy as sflt_connectout() releases socket lock */
	dst_sl = sockaddrlist_dup(*dst_sl0, M_WAITOK);
	if (dst_sl == NULL)
		return (ENOBUFS);

	/*
	 * Hmm; we don't yet have a connectx socket filter callback,
	 * so the closest thing to do is to probably call sflt_connectout()
	 * as many times as there are addresses in the list, and bail
	 * as soon as we get an error.
	 */
	TAILQ_FOREACH_SAFE(se, &dst_sl->sl_head, se_link, tse) {
		char buf[SOCK_MAXADDRLEN];
		struct sockaddr *sa;

		VERIFY(se->se_addr != NULL);

		/*
		 * Workaround for rdar://23362120
		 * Always pass a buffer that can hold an IPv6 socket address
		 */
		bzero(buf, sizeof (buf));
		bcopy(se->se_addr, buf, se->se_addr->sa_len);
		sa = (struct sockaddr *)buf;

		error = sflt_connectout_common(so, sa);
		if (error != 0)
			break;

		/*
		 * If the address was modified, copy it back
		 */
		if (bcmp(se->se_addr, sa, se->se_addr->sa_len) != 0) {
			bcopy(sa, se->se_addr, se->se_addr->sa_len);
			modified = 1;
		}
	}

	if (error != 0 || !modified) {
		/* leave the original as is */
		sockaddrlist_free(dst_sl);
	} else {
		/*
		 * At least one address was modified and there were no errors;
		 * ditch the original and return the modified list.
		 */
		sockaddrlist_free(*dst_sl0);
		*dst_sl0 = dst_sl;
	}

	return (error);
}
__private_extern__ int
sflt_setsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_setoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_setoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_getsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int setsendthread = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_out) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				if (so->so_send_filt_thread == NULL) {
					setsendthread = 1;
					so->so_send_filt_thread =
					    current_thread();
				}
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_out(
			    entry->sfe_cookie, so, to, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
		if (setsendthread)
			so->so_send_filt_thread = NULL;
	}

	return (error);
}
__private_extern__ int
sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int error = 0;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);

	for (entry = so->so_filt; entry && (error == 0);
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_in) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_in(
			    entry->sfe_cookie, so, from, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
#pragma mark -- KPI --
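
/*
 * Illustrative sketch (not part of this file) of how a kext is expected
 * to use the KPI below.  The handle value and the my_attach/my_detach
 * callbacks are hypothetical; a real filter registers a handle assigned
 * to it and fills in whichever sflt_filter callbacks it needs.
 *
 *	static errno_t
 *	my_attach(void **cookie, socket_t so)
 *	{
 *		*cookie = NULL;			// per-socket state goes here
 *		return (0);
 *	}
 *
 *	static void
 *	my_detach(void *cookie, socket_t so)
 *	{
 *		// free per-socket state
 *	}
 *
 *	static struct sflt_filter my_filter = {
 *		.sf_handle = 0x12345678,	// hypothetical handle
 *		.sf_flags = SFLT_GLOBAL,	// auto-attach to new sockets
 *		.sf_name = "my_filter",
 *		.sf_attach = my_attach,
 *		.sf_detach = my_detach,
 *	};
 *
 *	// attach to future IPv4/TCP sockets:
 *	sflt_register(&my_filter, PF_INET, SOCK_STREAM, IPPROTO_TCP);
 *	...
 *	sflt_unregister(0x12345678);
 */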
errno_t
sflt_attach(socket_t socket, sflt_handle handle)
{
	socket_lock(socket, 1);
	errno_t result = sflt_attach_internal(socket, handle);
	socket_unlock(socket, 1);
	return (result);
}
errno_t
sflt_detach(socket_t socket, sflt_handle handle)
{
	struct socket_filter_entry *entry;
	errno_t result = 0;

	if (socket == NULL || handle == 0)
		return (EINVAL);

	lck_rw_lock_exclusive(sock_filter_lock);
	for (entry = socket->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if (entry->sfe_filter->sf_filter.sf_handle == handle &&
		    (entry->sfe_flags & SFEF_ATTACHED) != 0) {
			break;
		}
	}

	if (entry != NULL) {
		sflt_detach_locked(entry);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}
struct solist {
	struct solist *next;
	struct socket *so;
};
errno_t
sflt_register(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr;
	unsigned int len;
	struct socket *so;
	struct inpcb *inp;
	struct solist *solisthead = NULL, *solist = NULL;

	if ((domain != PF_INET) && (domain != PF_INET6))
		return (ENOTSUP);

	pr = pffindproto(domain, protocol, type);
	if (pr == NULL)
		return (ENOENT);

	if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
	    filter->sf_handle == 0 || filter->sf_name == NULL)
		return (EINVAL);

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter *, sizeof (*sock_filt),
	    M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return (ENOBUFS);
	}

	bzero(sock_filt, sizeof (*sock_filt));

	/* Legacy sflt_filter length; current structure minus extended */
	len = sizeof (*filter) - sizeof (struct sflt_filter_ext);
	/*
	 * Include extended fields if filter defines SFLT_EXTENDED.
	 * We've zeroed out our internal sflt_filter placeholder,
	 * so any unused portion would have been taken care of.
	 */
	if (filter->sf_flags & SFLT_EXTENDED) {
		unsigned int ext_len = filter->sf_len;

		if (ext_len > sizeof (struct sflt_filter_ext))
			ext_len = sizeof (struct sflt_filter_ext);

		len += ext_len;
	}
	bcopy(filter, &sock_filt->sf_filter, len);

	lck_rw_lock_exclusive(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle ==
		    sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
			    sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
		sflt_retain_locked(sock_filt);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	if (match != NULL) {
		FREE(sock_filt, M_IFADDR);
		return (EEXIST);
	}

	if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY))
		return (error);

	/*
	 * Setup the filter on the TCP and UDP sockets already created.
	 */
#define	SOLIST_ADD(_so) do {						\
	solist->next = solisthead;					\
	sock_retain((_so));						\
	solist->so = (_so);						\
	solisthead = solist;						\
} while (0)
	if (protocol == IPPROTO_TCP) {
		lck_rw_lock_shared(tcbinfo.ipi_lock);
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(tcbinfo.ipi_lock);
	} else if (protocol == IPPROTO_UDP) {
		lck_rw_lock_shared(udbinfo.ipi_lock);
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(udbinfo.ipi_lock);
	}
	/* XXX it's possible to walk the raw socket list as well */
#undef SOLIST_ADD

	while (solisthead) {
		sflt_handle handle = filter->sf_handle;

		so = solisthead->so;
		socket_lock(so, 0);
		sflt_initsock(so);
		if (so->so_state & SS_ISCONNECTING)
			sflt_notify_after_register(so, sock_evt_connecting,
			    handle);
		else if (so->so_state & SS_ISCONNECTED)
			sflt_notify_after_register(so, sock_evt_connected,
			    handle);
		else if ((so->so_state &
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE)) ==
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE))
			sflt_notify_after_register(so, sock_evt_disconnecting,
			    handle);
		else if ((so->so_state &
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED)) ==
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED))
			sflt_notify_after_register(so, sock_evt_disconnected,
			    handle);
		else if (so->so_state & SS_CANTSENDMORE)
			sflt_notify_after_register(so, sock_evt_cantsendmore,
			    handle);
		else if (so->so_state & SS_CANTRCVMORE)
			sflt_notify_after_register(so, sock_evt_cantrecvmore,
			    handle);
		socket_unlock(so, 0);
		/* XXX no easy way to post the sock_evt_closing event */
		sock_release(so);
		solist = solisthead;
		solisthead = solisthead->next;
		FREE(solist, M_IFADDR);
	}

	return (error);
}
errno_t
sflt_unregister(sflt_handle handle)
{
	struct socket_filter *filter;
	lck_rw_lock_exclusive(sock_filter_lock);

	/* Find the entry by the handle */
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		/* Remove it from the global list */
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

		/* Remove it from the protosw list */
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head,
			    filter, sf_protosw_next);
		}

		/* Detach from any sockets */
		struct socket_filter_entry *entry = NULL;

		for (entry = filter->sf_entry_head; entry;
		    entry = entry->sfe_next_onfilter) {
			sflt_detach_locked(entry);
		}

		/* Release the filter */
		sflt_release_locked(filter);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	if (filter == NULL)
		return (ENOENT);

	return (0);
}
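
/*
 * Data injection KPI: sock_inject_data_in appends straight to the
 * socket's receive buffer and wakes any reader, while
 * sock_inject_data_out simply funnels the mbufs through sosend().
 */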
errno_t
sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int error = 0;

	if (so == NULL || data == NULL)
		return (EINVAL);

	if (flags & sock_data_filt_flag_oob) {
		return (ENOTSUP);
	}

	socket_lock(so, 1);

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		error = ENOTSUP;
		goto done;
	}

	if (from) {
		if (sbappendaddr(&so->so_rcv,
		    (struct sockaddr *)(uintptr_t)from, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf *)data))
			sorwakeup(so);
		goto done;
	}

	if (sbappend(&so->so_rcv, data))
		sorwakeup(so);
done:
	socket_unlock(so, 1);
	return (error);
}
errno_t
sock_inject_data_out(socket_t so, const struct sockaddr *to, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int sosendflags = 0;

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW)
		return (ENOTSUP);

	if (flags & sock_data_filt_flag_oob)
		sosendflags = MSG_OOB;
	return (sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
	    data, control, sosendflags));
}
sockopt_dir
sockopt_direction(sockopt_t sopt)
{
	return ((sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set);
}

int
sockopt_level(sockopt_t sopt)
{
	return (sopt->sopt_level);
}

int
sockopt_name(sockopt_t sopt)
{
	return (sopt->sopt_name);
}

size_t
sockopt_valsize(sockopt_t sopt)
{
	return (sopt->sopt_valsize);
}

errno_t
sockopt_copyin(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyin(sopt, data, len, len));
}

errno_t
sockopt_copyout(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyout(sopt, data, len));
}