/* bsd/kern/kpi_socketfilter.c (xnu-6153.81.5) */
/*
 * Copyright (c) 2003-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/proc.h>
#include <kern/locks.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <net/kext_net.h>
#include <net/if.h>
#include <net/net_api_stats.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <os/refcnt.h>

#include <stdbool.h>
#include <string.h>

#define SFEF_ATTACHED	0x1	/* SFE is on socket list */
#define SFEF_NODETACH	0x2	/* Detach should not be called */
#define SFEF_NOSOCKET	0x4	/* Socket is gone */

struct socket_filter_entry {
	struct socket_filter_entry	*sfe_next_onsocket;
	struct socket_filter_entry	*sfe_next_onfilter;
	struct socket_filter_entry	*sfe_next_oncleanup;

	struct socket_filter		*sfe_filter;
	struct socket			*sfe_socket;
	void				*sfe_cookie;

	uint32_t			sfe_flags;
	int32_t				sfe_refcount;
};

struct socket_filter {
	TAILQ_ENTRY(socket_filter)	sf_protosw_next;
	TAILQ_ENTRY(socket_filter)	sf_global_next;
	struct socket_filter_entry	*sf_entry_head;

	struct protosw			*sf_proto;
	struct sflt_filter		sf_filter;
	struct os_refcnt		sf_refcount;
};
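
/*
 * Editorial note: each socket_filter_entry records one (filter, socket)
 * attachment and is threaded onto two singly linked lists at once: the
 * owning socket's so_filt chain (sfe_next_onsocket) and the owning
 * filter's sf_entry_head chain (sfe_next_onfilter). sfe_next_oncleanup
 * is used only while the entry is queued for the cleanup thread below.
 */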

TAILQ_HEAD(socket_filter_list, socket_filter);

static struct socket_filter_list	sock_filter_head;
static lck_rw_t				*sock_filter_lock = NULL;
static lck_mtx_t			*sock_filter_cleanup_lock = NULL;
static struct socket_filter_entry	*sock_filter_cleanup_entries = NULL;
static thread_t				sock_filter_cleanup_thread = NULL;

static void	sflt_cleanup_thread(void *, wait_result_t);
static void	sflt_detach_locked(struct socket_filter_entry *entry);

#undef sflt_register
static errno_t sflt_register_common(const struct sflt_filter *filter, int domain,
    int type, int protocol, bool is_internal);
errno_t sflt_register(const struct sflt_filter *filter, int domain,
    int type, int protocol);


#pragma mark -- Internal State Management --

__private_extern__ int
sflt_permission_check(struct inpcb *inp)
{
	/* Only IPv4 or IPv6 sockets can bypass filters */
	if (!(inp->inp_vflag & INP_IPV4) &&
	    !(inp->inp_vflag & INP_IPV6)) {
		return 0;
	}
	/* Sockets that have this entitlement bypass socket filters. */
	if (INP_INTCOPROC_ALLOWED(inp)) {
		return 1;
	}
	/* Sockets bound to an intcoproc interface bypass socket filters. */
	if ((inp->inp_flags & INP_BOUND_IF) &&
	    IFNET_IS_INTCOPROC(inp->inp_boundifp)) {
		return 1;
	}
#if NECP
	/*
	 * Make sure that the NECP policy is populated.
	 * If result is not populated, the policy ID will be
	 * NECP_KERNEL_POLICY_ID_NONE. Note that if the result
	 * is populated, but there was no match, it will be
	 * NECP_KERNEL_POLICY_ID_NO_MATCH.
	 * Do not call inp_update_necp_policy() to avoid scoping
	 * a socket prior to calls to bind().
	 */
	if (inp->inp_policyresult.policy_id == NECP_KERNEL_POLICY_ID_NONE) {
		necp_socket_find_policy_match(inp, NULL, NULL, 0);
	}

	/* If the filter unit is marked to be "no filter", bypass filters */
	if (inp->inp_policyresult.results.filter_control_unit ==
	    NECP_FILTER_UNIT_NO_FILTER) {
		return 1;
	}
#endif /* NECP */
	return 0;
}

__private_extern__ void
sflt_init(void)
{
	lck_grp_attr_t *grp_attrib = NULL;
	lck_attr_t *lck_attrib = NULL;
	lck_grp_t *lck_group = NULL;

	TAILQ_INIT(&sock_filter_head);

	/* Allocate a rw lock */
	grp_attrib = lck_grp_attr_alloc_init();
	lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
	lck_grp_attr_free(grp_attrib);
	lck_attrib = lck_attr_alloc_init();
	sock_filter_lock = lck_rw_alloc_init(lck_group, lck_attrib);
	sock_filter_cleanup_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
	lck_grp_free(lck_group);
	lck_attr_free(lck_attrib);
}

static void
sflt_retain_locked(struct socket_filter *filter)
{
	os_ref_retain_locked(&filter->sf_refcount);
}

static void
sflt_release_locked(struct socket_filter *filter)
{
	if (os_ref_release_locked(&filter->sf_refcount) == 0) {
		/* Call the unregistered function */
		if (filter->sf_filter.sf_unregistered) {
			lck_rw_unlock_exclusive(sock_filter_lock);
			filter->sf_filter.sf_unregistered(
				filter->sf_filter.sf_handle);
			lck_rw_lock_exclusive(sock_filter_lock);
		}

		/* Free the entry */
		FREE(filter, M_IFADDR);
	}
}

static void
sflt_entry_retain(struct socket_filter_entry *entry)
{
	if (OSIncrementAtomic(&entry->sfe_refcount) <= 0) {
		panic("sflt_entry_retain - sfe_refcount <= 0\n");
		/* NOTREACHED */
	}
}

static void
sflt_entry_release(struct socket_filter_entry *entry)
{
	SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);
	if (old == 1) {
		/* That was the last reference */

		/* Take the cleanup lock */
		lck_mtx_lock(sock_filter_cleanup_lock);

		/* Put this item on the cleanup list */
		entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = entry;

		/* If the item is the first item in the list */
		if (entry->sfe_next_oncleanup == NULL) {
			if (sock_filter_cleanup_thread == NULL) {
				/* Create a thread */
				kernel_thread_start(sflt_cleanup_thread,
				    NULL, &sock_filter_cleanup_thread);
			} else {
				/* Wakeup the thread */
				wakeup(&sock_filter_cleanup_entries);
			}
		}

		/* Drop the cleanup lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);
	} else if (old <= 0) {
		panic("sflt_entry_release - sfe_refcount (%d) <= 0\n",
		    (int)old);
		/* NOTREACHED */
	}
}
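
/*
 * Editorial note: the final reference on an entry can be dropped from
 * contexts where it is unsafe to call sf_detach inline (locks held,
 * possibly a dead socket), so entries are queued above and the dedicated
 * kernel thread below runs the detach callbacks and unlinks the entries
 * outside those contexts.
 */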

__attribute__((noreturn))
static void
sflt_cleanup_thread(void *blah, wait_result_t blah2)
{
#pragma unused(blah, blah2)
	while (1) {
		lck_mtx_lock(sock_filter_cleanup_lock);
		while (sock_filter_cleanup_entries == NULL) {
			/* Sleep until we've got something better to do */
			msleep(&sock_filter_cleanup_entries,
			    sock_filter_cleanup_lock, PWAIT,
			    "sflt_cleanup", NULL);
		}

		/* Pull the current list of dead items */
		struct socket_filter_entry *dead = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = NULL;

		/* Drop the lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);

		/* Take the socket filter lock */
		lck_rw_lock_exclusive(sock_filter_lock);

		/* Cleanup every dead item */
		struct socket_filter_entry *entry;
		for (entry = dead; entry; entry = dead) {
			struct socket_filter_entry **nextpp;

			dead = entry->sfe_next_oncleanup;

			/* Call detach function if necessary - drop the lock */
			if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
			    entry->sfe_filter->sf_filter.sf_detach) {
				entry->sfe_flags |= SFEF_NODETACH;
				lck_rw_unlock_exclusive(sock_filter_lock);

				/*
				 * Warning - passing a potentially
				 * dead socket may be bad
				 */
				entry->sfe_filter->sf_filter.sf_detach(
					entry->sfe_cookie, entry->sfe_socket);

				lck_rw_lock_exclusive(sock_filter_lock);
			}

			/*
			 * Pull entry off the socket list --
			 * if the socket still exists
			 */
			if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
				for (nextpp = &entry->sfe_socket->so_filt;
				    *nextpp;
				    nextpp = &(*nextpp)->sfe_next_onsocket) {
					if (*nextpp == entry) {
						*nextpp =
						    entry->sfe_next_onsocket;
						break;
					}
				}
			}

			/* Pull entry off the filter list */
			for (nextpp = &entry->sfe_filter->sf_entry_head;
			    *nextpp; nextpp = &(*nextpp)->sfe_next_onfilter) {
				if (*nextpp == entry) {
					*nextpp = entry->sfe_next_onfilter;
					break;
				}
			}

			/*
			 * Release the filter -- may drop lock, but that's okay
			 */
			sflt_release_locked(entry->sfe_filter);
			entry->sfe_socket = NULL;
			entry->sfe_filter = NULL;
			FREE(entry, M_IFADDR);
		}

		/* Drop the socket filter lock */
		lck_rw_unlock_exclusive(sock_filter_lock);
	}
	/* NOTREACHED */
}

static int
sflt_attach_locked(struct socket *so, struct socket_filter *filter,
    int socklocked)
{
	int error = 0;
	struct socket_filter_entry *entry = NULL;

	if (sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	if (filter == NULL) {
		return ENOENT;
	}

	for (entry = so->so_filt; entry; entry = entry->sfe_next_onfilter) {
		if (entry->sfe_filter->sf_filter.sf_handle ==
		    filter->sf_filter.sf_handle) {
			return EEXIST;
		}
	}
	/* allocate the socket filter entry */
	MALLOC(entry, struct socket_filter_entry *, sizeof(*entry), M_IFADDR,
	    M_WAITOK);
	if (entry == NULL) {
		return ENOMEM;
	}

	/* Initialize the socket filter entry */
	entry->sfe_cookie = NULL;
	entry->sfe_flags = SFEF_ATTACHED;
	entry->sfe_refcount = 1; /* corresponds to SFEF_ATTACHED flag set */

	/* Put the entry in the filter list */
	sflt_retain_locked(filter);
	entry->sfe_filter = filter;
	entry->sfe_next_onfilter = filter->sf_entry_head;
	filter->sf_entry_head = entry;

	/* Put the entry on the socket filter list */
	entry->sfe_socket = so;
	entry->sfe_next_onsocket = so->so_filt;
	so->so_filt = entry;

	if (entry->sfe_filter->sf_filter.sf_attach) {
		/* Retain the entry while we call attach */
		sflt_entry_retain(entry);

		/*
		 * Release the filter lock --
		 * callers must be aware we will do this
		 */
		lck_rw_unlock_exclusive(sock_filter_lock);

		/* Unlock the socket */
		if (socklocked) {
			socket_unlock(so, 0);
		}

		/* It's finally safe to call the filter function */
		error = entry->sfe_filter->sf_filter.sf_attach(
			&entry->sfe_cookie, so);

		/* Lock the socket again */
		if (socklocked) {
			socket_lock(so, 0);
		}

		/* Lock the filters again */
		lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * If the attach function returns an error,
		 * this filter must be detached
		 */
		if (error) {
			/* don't call sf_detach */
			entry->sfe_flags |= SFEF_NODETACH;
			sflt_detach_locked(entry);
		}

		/* Release the retain we held through the attach call */
		sflt_entry_release(entry);
	}

	return error;
}

errno_t
sflt_attach_internal(socket_t socket, sflt_handle handle)
{
	if (socket == NULL || handle == 0) {
		return EINVAL;
	}

	int result = EINVAL;

	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter *filter = NULL;
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle) {
			break;
		}
	}

	if (filter) {
		result = sflt_attach_locked(socket, filter, 1);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	return result;
}

static void
sflt_detach_locked(struct socket_filter_entry *entry)
{
	if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
		entry->sfe_flags &= ~SFEF_ATTACHED;
		sflt_entry_release(entry);
	}
}

#pragma mark -- Socket Layer Hooks --

__private_extern__ void
sflt_initsock(struct socket *so)
{
	/*
	 * Point to the real protosw, as so_proto might have been
	 * pointed to a modified version.
	 */
	struct protosw *proto = so->so_proto->pr_protosw;

	lck_rw_lock_shared(sock_filter_lock);
	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		/* Promote lock to exclusive */
		if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock)) {
			lck_rw_lock_exclusive(sock_filter_lock);
		}

		/*
		 * Warning: A filter unregistering will be pulled out of
		 * the list. This could happen while we drop the lock in
		 * sflt_attach_locked or sflt_release_locked. For this
		 * reason we retain a reference on the filter (or next_filter)
		 * while calling this function. This protects us from a panic,
		 * but it could result in a socket being created without all
		 * of the global filters if we're attaching a filter as it
		 * is removed, if that's possible.
		 */
		struct socket_filter *filter =
		    TAILQ_FIRST(&proto->pr_filter_head);

		sflt_retain_locked(filter);

		while (filter) {
			struct socket_filter *filter_next;
			/*
			 * Warning: sflt_attach_locked
			 * will drop the lock
			 */
			sflt_attach_locked(so, filter, 0);

			filter_next = TAILQ_NEXT(filter, sf_protosw_next);
			if (filter_next) {
				sflt_retain_locked(filter_next);
			}

			/*
			 * Warning: sflt_release_locked may remove
			 * the filter from the queue
			 */
			sflt_release_locked(filter);
			filter = filter_next;
		}
	}
	lck_rw_done(sock_filter_lock);
}

/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.
 */
__private_extern__ void
sflt_termsock(struct socket *so)
{
	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter_entry *entry;

	while ((entry = so->so_filt) != NULL) {
		/* Pull filter off the socket */
		so->so_filt = entry->sfe_next_onsocket;
		entry->sfe_flags |= SFEF_NOSOCKET;

		/* Call detach */
		sflt_detach_locked(entry);

		/*
		 * On sflt_termsock, we can't return until the detach function
		 * has been called. Call the detach function - this is gross
		 * because the socket filter entry could be freed when we drop
		 * the lock, so we make copies on the stack and retain
		 * everything we need before dropping the lock.
		 */
		if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
		    entry->sfe_filter->sf_filter.sf_detach) {
			void *sfe_cookie = entry->sfe_cookie;
			struct socket_filter *sfe_filter = entry->sfe_filter;

			/* Retain the socket filter */
			sflt_retain_locked(sfe_filter);

			/* Mark that we've called the detach function */
			entry->sfe_flags |= SFEF_NODETACH;

			/* Drop the lock before calling the detach function */
			lck_rw_unlock_exclusive(sock_filter_lock);
			sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
			lck_rw_lock_exclusive(sock_filter_lock);

			/* Release the filter */
			sflt_release_locked(sfe_filter);
		}
	}

	lck_rw_unlock_exclusive(sock_filter_lock);
}


static void
sflt_notify_internal(struct socket *so, sflt_event_t event, void *param,
    sflt_handle handle)
{
	if (so->so_filt == NULL) {
		return;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_notify &&
		    ((handle && entry->sfe_filter->sf_filter.sf_handle !=
		    handle) || !handle)) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Finally call the filter */
			entry->sfe_filter->sf_filter.sf_notify(
				entry->sfe_cookie, so, event, param);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked != 0) {
		socket_lock(so, 0);
	}
}

__private_extern__ void
sflt_notify(struct socket *so, sflt_event_t event, void *param)
{
	sflt_notify_internal(so, event, param, 0);
}

static void
sflt_notify_after_register(struct socket *so, sflt_event_t event,
    sflt_handle handle)
{
	sflt_notify_internal(so, event, NULL, handle);
}

__private_extern__ int
sflt_ioctl(struct socket *so, u_long cmd, caddr_t data)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_ioctl) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_ioctl(
				entry->sfe_cookie, so, cmd, data);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_bind(struct socket *so, const struct sockaddr *nam)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_bind) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_bind(
				entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again and
			 * release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_listen(struct socket *so)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_listen) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_listen(
				entry->sfe_cookie, so);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_accept(struct socket *head, struct socket *so,
    const struct sockaddr *local, const struct sockaddr *remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_accept) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_accept(
				entry->sfe_cookie, head, so, local, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_getsockname(struct socket *so, struct sockaddr **local)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getsockname) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getsockname(
				entry->sfe_cookie, so, local);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_getpeername(struct socket *so, struct sockaddr **remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getpeername) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getpeername(
				entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_connectin(struct socket *so, const struct sockaddr *remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_in) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_in(
				entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

static int
sflt_connectout_common(struct socket *so, const struct sockaddr *nam)
{
	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_out) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_out(
				entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_connectout(struct socket *so, const struct sockaddr *nam)
{
	char buf[SOCK_MAXADDRLEN];
	struct sockaddr *sa;
	int error;

	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	/*
	 * Workaround for rdar://23362120
	 * Always pass a buffer that can hold an IPv6 socket address
	 */
	bzero(buf, sizeof(buf));
	bcopy(nam, buf, nam->sa_len);
	sa = (struct sockaddr *)buf;

	error = sflt_connectout_common(so, sa);
	if (error != 0) {
		return error;
	}

	/*
	 * If the address was modified, copy it back
	 */
	if (bcmp(sa, nam, nam->sa_len) != 0) {
		bcopy(sa, (struct sockaddr *)(uintptr_t)nam, nam->sa_len);
	}

	return 0;
}
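
/*
 * Illustrative sketch (not part of this file): the copy-back logic above
 * appears to exist so that an sf_connect_out callback can rewrite the
 * destination in place, since the filter is handed a SOCK_MAXADDRLEN
 * buffer rather than the caller's sockaddr. MYPROXY_PORT and the callback
 * name are hypothetical.
 *
 *	static errno_t
 *	my_connect_out(void *cookie, socket_t so, const struct sockaddr *to)
 *	{
 *		struct sockaddr_in *sin = (struct sockaddr_in *)(uintptr_t)to;
 *
 *		if (sin->sin_family == AF_INET) {
 *			sin->sin_port = htons(MYPROXY_PORT);
 *		}
 *		return 0;	// a non-zero errno would block the connect
 *	}
 */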

__private_extern__ int
sflt_setsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_setoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_setoption(
				entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_getsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getoption(
				entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int setsendthread = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW) {
			continue;
		}
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_out) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				if (so->so_send_filt_thread == NULL) {
					setsendthread = 1;
					so->so_send_filt_thread =
					    current_thread();
				}
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_out(
				entry->sfe_cookie, so, to, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
		if (setsendthread) {
			so->so_send_filt_thread = NULL;
		}
	}

	return error;
}

__private_extern__ int
sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int error = 0;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);

	for (entry = so->so_filt; entry && (error == 0);
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW) {
			continue;
		}
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_in) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_in(
				entry->sfe_cookie, so, from, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

#pragma mark -- KPI --

errno_t
sflt_attach(socket_t socket, sflt_handle handle)
{
	socket_lock(socket, 1);
	errno_t result = sflt_attach_internal(socket, handle);
	socket_unlock(socket, 1);
	return result;
}
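
/*
 * Example (sketch, assuming a filter was already registered under the
 * hypothetical handle MYFILT_HANDLE): a kext can attach that filter to a
 * socket it created itself.
 *
 *	socket_t so;
 *	errno_t err = sock_socket(PF_INET, SOCK_STREAM, IPPROTO_TCP,
 *	    NULL, NULL, &so);
 *	if (err == 0) {
 *		err = sflt_attach(so, MYFILT_HANDLE);
 *	}
 */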

errno_t
sflt_detach(socket_t socket, sflt_handle handle)
{
	struct socket_filter_entry *entry;
	errno_t result = 0;

	if (socket == NULL || handle == 0) {
		return EINVAL;
	}

	lck_rw_lock_exclusive(sock_filter_lock);
	for (entry = socket->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if (entry->sfe_filter->sf_filter.sf_handle == handle &&
		    (entry->sfe_flags & SFEF_ATTACHED) != 0) {
			break;
		}
	}

	if (entry != NULL) {
		sflt_detach_locked(entry);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	return result;
}

struct solist {
	struct solist *next;
	struct socket *so;
};

static errno_t
sflt_register_common(const struct sflt_filter *filter, int domain, int type,
    int protocol, bool is_internal)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr;
	unsigned int len;
	struct socket *so;
	struct inpcb *inp;
	struct solist *solisthead = NULL, *solist = NULL;

	if ((domain != PF_INET) && (domain != PF_INET6)) {
		return ENOTSUP;
	}

	pr = pffindproto(domain, protocol, type);
	if (pr == NULL) {
		return ENOENT;
	}

	if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
	    filter->sf_handle == 0 || filter->sf_name == NULL) {
		return EINVAL;
	}

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter *, sizeof(*sock_filt),
	    M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return ENOBUFS;
	}

	bzero(sock_filt, sizeof(*sock_filt));

	/* Legacy sflt_filter length; current structure minus extended */
	len = sizeof(*filter) - sizeof(struct sflt_filter_ext);
	/*
	 * Include extended fields if filter defines SFLT_EXTENDED.
	 * We've zeroed out our internal sflt_filter placeholder,
	 * so any unused portion would have been taken care of.
	 */
	if (filter->sf_flags & SFLT_EXTENDED) {
		unsigned int ext_len = filter->sf_len;

		if (ext_len > sizeof(struct sflt_filter_ext)) {
			ext_len = sizeof(struct sflt_filter_ext);
		}

		len += ext_len;
	}
	bcopy(filter, &sock_filt->sf_filter, len);

	lck_rw_lock_exclusive(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle ==
		    sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
			    sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
		os_ref_init(&sock_filt->sf_refcount, NULL);

		OSIncrementAtomic64(&net_api_stats.nas_sfltr_register_count);
		INC_ATOMIC_INT64_LIM(net_api_stats.nas_sfltr_register_total);
		if (is_internal) {
			INC_ATOMIC_INT64_LIM(net_api_stats.nas_sfltr_register_os_total);
		}
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	if (match != NULL) {
		FREE(sock_filt, M_IFADDR);
		return EEXIST;
	}

	if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY)) {
		return error;
	}

	/*
	 * Setup the filter on the TCP and UDP sockets already created.
	 */
#define SOLIST_ADD(_so) do {						\
	solist->next = solisthead;					\
	sock_retain((_so));						\
	solist->so = (_so);						\
	solisthead = solist;						\
} while (0)
	if (protocol == IPPROTO_TCP) {
		lck_rw_lock_shared(tcbinfo.ipi_lock);
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type)) {
				continue;
			}
			MALLOC(solist, struct solist *, sizeof(*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist) {
				continue;
			}
			SOLIST_ADD(so);
		}
		lck_rw_done(tcbinfo.ipi_lock);
	} else if (protocol == IPPROTO_UDP) {
		lck_rw_lock_shared(udbinfo.ipi_lock);
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type)) {
				continue;
			}
			MALLOC(solist, struct solist *, sizeof(*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist) {
				continue;
			}
			SOLIST_ADD(so);
		}
		lck_rw_done(udbinfo.ipi_lock);
	}
	/* XXX it's possible to walk the raw socket list as well */
#undef SOLIST_ADD

	while (solisthead) {
		sflt_handle handle = filter->sf_handle;

		so = solisthead->so;
		socket_lock(so, 0);
		sflt_initsock(so);
		if (so->so_state & SS_ISCONNECTING) {
			sflt_notify_after_register(so, sock_evt_connecting,
			    handle);
		} else if (so->so_state & SS_ISCONNECTED) {
			sflt_notify_after_register(so, sock_evt_connected,
			    handle);
		} else if ((so->so_state &
		    (SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE)) ==
		    (SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE)) {
			sflt_notify_after_register(so, sock_evt_disconnecting,
			    handle);
		} else if ((so->so_state &
		    (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED)) ==
		    (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED)) {
			sflt_notify_after_register(so, sock_evt_disconnected,
			    handle);
		} else if (so->so_state & SS_CANTSENDMORE) {
			sflt_notify_after_register(so, sock_evt_cantsendmore,
			    handle);
		} else if (so->so_state & SS_CANTRCVMORE) {
			sflt_notify_after_register(so, sock_evt_cantrecvmore,
			    handle);
		}
		socket_unlock(so, 0);
		/* XXX no easy way to post the sock_evt_closing event */
		sock_release(so);
		solist = solisthead;
		solisthead = solisthead->next;
		FREE(solist, M_IFADDR);
	}

	return error;
}

errno_t
sflt_register_internal(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	return sflt_register_common(filter, domain, type, protocol, true);
}

errno_t
sflt_register(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	return sflt_register_common(filter, domain, type, protocol, false);
}
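
/*
 * Example (illustrative sketch): minimal registration from a kext.
 * MYFILT_HANDLE, my_attach and my_detach are hypothetical; sf_attach and
 * sf_detach are mandatory (see the checks in sflt_register_common), and
 * SFLT_GLOBAL asks for the filter to be attached to every new socket of
 * this protocol.
 *
 *	static errno_t
 *	my_attach(void **cookie, socket_t so)
 *	{
 *		*cookie = NULL;		// per-socket state could go here
 *		return 0;		// non-zero rejects the attachment
 *	}
 *
 *	static void
 *	my_detach(void *cookie, socket_t so)
 *	{
 *		// free any per-socket state held in cookie
 *	}
 *
 *	static struct sflt_filter my_filter = {
 *		.sf_handle = MYFILT_HANDLE,
 *		.sf_flags  = SFLT_GLOBAL,
 *		.sf_name   = (char *)"com.example.myfilter",
 *		.sf_attach = my_attach,
 *		.sf_detach = my_detach,
 *	};
 *
 *	errno_t err = sflt_register(&my_filter, PF_INET, SOCK_STREAM,
 *	    IPPROTO_TCP);
 */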

errno_t
sflt_unregister(sflt_handle handle)
{
	struct socket_filter *filter;
	lck_rw_lock_exclusive(sock_filter_lock);

	/* Find the entry by the handle */
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle) {
			break;
		}
	}

	if (filter) {
		VERIFY(OSDecrementAtomic64(&net_api_stats.nas_sfltr_register_count) > 0);

		/* Remove it from the global list */
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

		/* Remove it from the protosw list */
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head,
			    filter, sf_protosw_next);
		}

		/* Detach from any sockets */
		struct socket_filter_entry *entry = NULL;

		for (entry = filter->sf_entry_head; entry;
		    entry = entry->sfe_next_onfilter) {
			sflt_detach_locked(entry);
		}

		/* Release the filter */
		sflt_release_locked(filter);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	if (filter == NULL) {
		return ENOENT;
	}

	return 0;
}

errno_t
sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int error = 0;

	if (so == NULL || data == NULL) {
		return EINVAL;
	}

	if (flags & sock_data_filt_flag_oob) {
		return ENOTSUP;
	}

	socket_lock(so, 1);

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		error = ENOTSUP;
		goto done;
	}

	if (from) {
		if (sbappendaddr(&so->so_rcv,
		    (struct sockaddr *)(uintptr_t)from, data, control, NULL)) {
			sorwakeup(so);
		}
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL)) {
			sorwakeup(so);
		}
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf *)data)) {
			sorwakeup(so);
		}
		goto done;
	}

	if (sbappend(&so->so_rcv, data)) {
		sorwakeup(so);
	}
done:
	socket_unlock(so, 1);
	return error;
}
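
/*
 * Example (sketch): the classic swallow-and-reinject pattern. An
 * sf_data_in callback can return EJUSTRETURN to take ownership of the
 * mbufs, then later hand them back with sock_inject_data_in(); passing
 * sock_data_filt_flag_record reinjects the data as a complete record via
 * sbappendrecord() above. my_queue_for_later() is hypothetical.
 *
 *	static errno_t
 *	my_data_in(void *cookie, socket_t so, const struct sockaddr *from,
 *	    mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags)
 *	{
 *		my_queue_for_later(cookie, *data, *control, flags);
 *		return EJUSTRETURN;	// the filter now owns the mbufs
 *	}
 */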

errno_t
sock_inject_data_out(socket_t so, const struct sockaddr *to, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int sosendflags = 0;

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		return ENOTSUP;
	}

	if (flags & sock_data_filt_flag_oob) {
		sosendflags = MSG_OOB;
	}
	return sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
	    data, control, sosendflags);
}

sockopt_dir
sockopt_direction(sockopt_t sopt)
{
	return (sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set;
}

int
sockopt_level(sockopt_t sopt)
{
	return sopt->sopt_level;
}

int
sockopt_name(sockopt_t sopt)
{
	return sopt->sopt_name;
}

size_t
sockopt_valsize(sockopt_t sopt)
{
	return sopt->sopt_valsize;
}

errno_t
sockopt_copyin(sockopt_t sopt, void *data, size_t len)
{
	return sooptcopyin(sopt, data, len, len);
}

errno_t
sockopt_copyout(sockopt_t sopt, void *data, size_t len)
{
	return sooptcopyout(sopt, data, len);
}
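
/*
 * Example (sketch): the accessors above are intended for sf_setoption /
 * sf_getoption callbacks, which receive an opaque sockopt_t. A handler
 * might look like this (illustrative only):
 *
 *	static errno_t
 *	my_setoption(void *cookie, socket_t so, sockopt_t opt)
 *	{
 *		int val;
 *
 *		if (sockopt_level(opt) == SOL_SOCKET &&
 *		    sockopt_name(opt) == SO_KEEPALIVE &&
 *		    sockopt_valsize(opt) == sizeof(val) &&
 *		    sockopt_copyin(opt, &val, sizeof(val)) == 0) {
 *			// inspect or veto the option here
 *		}
 *		return 0;	// non-zero makes setsockopt(2) fail
 *	}
 */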