/*
 * Copyright (c) 2003-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/proc.h>
#include <kern/locks.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <net/kext_net.h>
#include <net/if.h>
#include <net/net_api_stats.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <os/refcnt.h>

#include <stdbool.h>
#include <string.h>

#define SFEF_ATTACHED	0x1	/* SFE is on socket list */
#define SFEF_NODETACH	0x2	/* Detach should not be called */
#define SFEF_NOSOCKET	0x4	/* Socket is gone */

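/*
 * A socket_filter_entry ties one filter to one socket and can sit on three
 * singly-linked lists at once: the owning socket's so_filt list, the owning
 * filter's sf_entry_head list and, once its last reference is dropped, the
 * global cleanup list drained by sflt_cleanup_thread below. sfe_refcount
 * pins the entry while a callback runs with the locks dropped;
 * SFEF_ATTACHED accounts for the reference held by the lists themselves.
 */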
struct socket_filter_entry {
	struct socket_filter_entry *sfe_next_onsocket;
	struct socket_filter_entry *sfe_next_onfilter;
	struct socket_filter_entry *sfe_next_oncleanup;

	struct socket_filter *sfe_filter;
	struct socket *sfe_socket;
	void *sfe_cookie;

	uint32_t sfe_flags;
	int32_t sfe_refcount;
};

struct socket_filter {
	TAILQ_ENTRY(socket_filter) sf_protosw_next;
	TAILQ_ENTRY(socket_filter) sf_global_next;
	struct socket_filter_entry *sf_entry_head;

	struct protosw *sf_proto;
	struct sflt_filter sf_filter;
	struct os_refcnt sf_refcount;
};

TAILQ_HEAD(socket_filter_list, socket_filter);

static struct socket_filter_list sock_filter_head;
static lck_rw_t *sock_filter_lock = NULL;
static lck_mtx_t *sock_filter_cleanup_lock = NULL;
static struct socket_filter_entry *sock_filter_cleanup_entries = NULL;
static thread_t sock_filter_cleanup_thread = NULL;

static void sflt_cleanup_thread(void *, wait_result_t);
static void sflt_detach_locked(struct socket_filter_entry *entry);

#undef sflt_register
static errno_t sflt_register_common(const struct sflt_filter *filter, int domain,
    int type, int protocol, bool is_internal);
errno_t sflt_register(const struct sflt_filter *filter, int domain,
    int type, int protocol);


#pragma mark -- Internal State Management --

__private_extern__ int
sflt_permission_check(struct inpcb *inp)
{
	/*
	 * All these permissions only apply to the co-processor interface,
	 * so ignore IPv4.
	 */
	if (!(inp->inp_vflag & INP_IPV6)) {
		return 0;
	}
	/* Sockets that have this entitlement bypass socket filters. */
	if (INP_INTCOPROC_ALLOWED(inp)) {
		return 1;
	}
	if ((inp->inp_flags & INP_BOUND_IF) &&
	    IFNET_IS_INTCOPROC(inp->inp_boundifp)) {
		return 1;
	}
	return 0;
}

__private_extern__ void
sflt_init(void)
{
	lck_grp_attr_t *grp_attrib = NULL;
	lck_attr_t *lck_attrib = NULL;
	lck_grp_t *lck_group = NULL;

	TAILQ_INIT(&sock_filter_head);

	/* Allocate a rw lock */
	grp_attrib = lck_grp_attr_alloc_init();
	lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
	lck_grp_attr_free(grp_attrib);
	lck_attrib = lck_attr_alloc_init();
	sock_filter_lock = lck_rw_alloc_init(lck_group, lck_attrib);
	sock_filter_cleanup_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
	lck_grp_free(lck_group);
	lck_attr_free(lck_attrib);
}

static void
sflt_retain_locked(struct socket_filter *filter)
{
	os_ref_retain_locked(&filter->sf_refcount);
}

static void
sflt_release_locked(struct socket_filter *filter)
{
	if (os_ref_release_locked(&filter->sf_refcount) == 0) {
		/* Call the unregistered function */
		if (filter->sf_filter.sf_unregistered) {
			lck_rw_unlock_exclusive(sock_filter_lock);
			filter->sf_filter.sf_unregistered(
				filter->sf_filter.sf_handle);
			lck_rw_lock_exclusive(sock_filter_lock);
		}

		/* Free the entry */
		FREE(filter, M_IFADDR);
	}
}

static void
sflt_entry_retain(struct socket_filter_entry *entry)
{
	if (OSIncrementAtomic(&entry->sfe_refcount) <= 0) {
		panic("sflt_entry_retain - sfe_refcount <= 0\n");
		/* NOTREACHED */
	}
}

static void
sflt_entry_release(struct socket_filter_entry *entry)
{
	SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);
	if (old == 1) {
		/* That was the last reference */

		/* Take the cleanup lock */
		lck_mtx_lock(sock_filter_cleanup_lock);

		/* Put this item on the cleanup list */
		entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = entry;

		/* If the item is the first item in the list */
		if (entry->sfe_next_oncleanup == NULL) {
			if (sock_filter_cleanup_thread == NULL) {
				/* Create a thread */
				kernel_thread_start(sflt_cleanup_thread,
				    NULL, &sock_filter_cleanup_thread);
			} else {
				/* Wakeup the thread */
				wakeup(&sock_filter_cleanup_entries);
			}
		}

		/* Drop the cleanup lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);
	} else if (old <= 0) {
		panic("sflt_entry_release - sfe_refcount (%d) <= 0\n",
		    (int)old);
		/* NOTREACHED */
	}
}
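
/*
 * Every hook below (sflt_ioctl, sflt_bind, ...) wraps its filter callbacks
 * in the same retain/unlock/call/relock/release pattern; a sketch of one
 * loop iteration, with sf_xxx standing in for the particular callback:
 *
 *	sflt_entry_retain(entry);		// pin entry across the lock drop
 *	lck_rw_unlock_shared(sock_filter_lock);
 *	if (unlocked == 0) {			// drop the socket lock only once
 *		socket_unlock(so, 0);
 *		unlocked = 1;
 *	}
 *	error = entry->sfe_filter->sf_filter.sf_xxx(entry->sfe_cookie, so, ...);
 *	lck_rw_lock_shared(sock_filter_lock);
 *	sflt_entry_release(entry);		// last release defers cleanup here
 */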

__attribute__((noreturn))
static void
sflt_cleanup_thread(void *blah, wait_result_t blah2)
{
#pragma unused(blah, blah2)
	while (1) {
		lck_mtx_lock(sock_filter_cleanup_lock);
		while (sock_filter_cleanup_entries == NULL) {
			/* Sleep until we've got something better to do */
			msleep(&sock_filter_cleanup_entries,
			    sock_filter_cleanup_lock, PWAIT,
			    "sflt_cleanup", NULL);
		}

		/* Pull the current list of dead items */
		struct socket_filter_entry *dead = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = NULL;

		/* Drop the lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);

		/* Take the socket filter lock */
		lck_rw_lock_exclusive(sock_filter_lock);

		/* Cleanup every dead item */
		struct socket_filter_entry *entry;
		for (entry = dead; entry; entry = dead) {
			struct socket_filter_entry **nextpp;

			dead = entry->sfe_next_oncleanup;

			/* Call detach function if necessary - drop the lock */
			if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
			    entry->sfe_filter->sf_filter.sf_detach) {
				entry->sfe_flags |= SFEF_NODETACH;
				lck_rw_unlock_exclusive(sock_filter_lock);

				/*
				 * Warning - passing a potentially
				 * dead socket may be bad
				 */
				entry->sfe_filter->sf_filter.sf_detach(
					entry->sfe_cookie, entry->sfe_socket);

				lck_rw_lock_exclusive(sock_filter_lock);
			}

			/*
			 * Pull entry off the socket list --
			 * if the socket still exists
			 */
			if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
				for (nextpp = &entry->sfe_socket->so_filt;
				    *nextpp;
				    nextpp = &(*nextpp)->sfe_next_onsocket) {
					if (*nextpp == entry) {
						*nextpp =
						    entry->sfe_next_onsocket;
						break;
					}
				}
			}

			/* Pull entry off the filter list */
			for (nextpp = &entry->sfe_filter->sf_entry_head;
			    *nextpp; nextpp = &(*nextpp)->sfe_next_onfilter) {
				if (*nextpp == entry) {
					*nextpp = entry->sfe_next_onfilter;
					break;
				}
			}

			/*
			 * Release the filter -- may drop lock, but that's okay
			 */
			sflt_release_locked(entry->sfe_filter);
			entry->sfe_socket = NULL;
			entry->sfe_filter = NULL;
			FREE(entry, M_IFADDR);
		}

		/* Drop the socket filter lock */
		lck_rw_unlock_exclusive(sock_filter_lock);
	}
	/* NOTREACHED */
}

static int
sflt_attach_locked(struct socket *so, struct socket_filter *filter,
    int socklocked)
{
	int error = 0;
	struct socket_filter_entry *entry = NULL;

	if (sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	if (filter == NULL) {
		return ENOENT;
	}

	for (entry = so->so_filt; entry; entry = entry->sfe_next_onfilter) {
		if (entry->sfe_filter->sf_filter.sf_handle ==
		    filter->sf_filter.sf_handle) {
			return EEXIST;
		}
	}
	/* allocate the socket filter entry */
	MALLOC(entry, struct socket_filter_entry *, sizeof(*entry), M_IFADDR,
	    M_WAITOK);
	if (entry == NULL) {
		return ENOMEM;
	}

	/* Initialize the socket filter entry */
	entry->sfe_cookie = NULL;
	entry->sfe_flags = SFEF_ATTACHED;
	entry->sfe_refcount = 1;	/* corresponds to SFEF_ATTACHED flag set */

	/* Put the entry in the filter list */
	sflt_retain_locked(filter);
	entry->sfe_filter = filter;
	entry->sfe_next_onfilter = filter->sf_entry_head;
	filter->sf_entry_head = entry;

	/* Put the entry on the socket filter list */
	entry->sfe_socket = so;
	entry->sfe_next_onsocket = so->so_filt;
	so->so_filt = entry;

	if (entry->sfe_filter->sf_filter.sf_attach) {
		/* Retain the entry while we call attach */
		sflt_entry_retain(entry);

		/*
		 * Release the filter lock --
		 * callers must be aware we will do this
		 */
		lck_rw_unlock_exclusive(sock_filter_lock);

		/* Unlock the socket */
		if (socklocked) {
			socket_unlock(so, 0);
		}

		/* It's finally safe to call the filter function */
		error = entry->sfe_filter->sf_filter.sf_attach(
			&entry->sfe_cookie, so);

		/* Lock the socket again */
		if (socklocked) {
			socket_lock(so, 0);
		}

		/* Lock the filters again */
		lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * If the attach function returns an error,
		 * this filter must be detached
		 */
		if (error) {
			/* don't call sf_detach */
			entry->sfe_flags |= SFEF_NODETACH;
			sflt_detach_locked(entry);
		}

		/* Release the retain we held through the attach call */
		sflt_entry_release(entry);
	}

	return error;
}

errno_t
sflt_attach_internal(socket_t socket, sflt_handle handle)
{
	if (socket == NULL || handle == 0) {
		return EINVAL;
	}

	int result = EINVAL;

	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter *filter = NULL;
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle) {
			break;
		}
	}

	if (filter) {
		result = sflt_attach_locked(socket, filter, 1);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	return result;
}

static void
sflt_detach_locked(struct socket_filter_entry *entry)
{
	if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
		entry->sfe_flags &= ~SFEF_ATTACHED;
		sflt_entry_release(entry);
	}
}
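
/*
 * Note that clearing SFEF_ATTACHED drops the reference taken when the entry
 * was created in sflt_attach_locked. If that was the last reference,
 * sflt_entry_release queues the entry on the cleanup list, and the sf_detach
 * callback and the free itself happen later on the cleanup thread.
 */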

#pragma mark -- Socket Layer Hooks --

__private_extern__ void
sflt_initsock(struct socket *so)
{
	/*
	 * Point to the real protosw, as so_proto might have been
	 * pointed to a modified version.
	 */
	struct protosw *proto = so->so_proto->pr_protosw;

	lck_rw_lock_shared(sock_filter_lock);
	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		/* Promote lock to exclusive */
		if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock)) {
			lck_rw_lock_exclusive(sock_filter_lock);
		}

		/*
		 * Warning: A filter unregistering will be pulled out of
		 * the list. This could happen while we drop the lock in
		 * sflt_attach_locked or sflt_release_locked. For this
		 * reason we retain a reference on the filter (or next_filter)
		 * while calling this function. This protects us from a panic,
		 * but it could result in a socket being created without all
		 * of the global filters if we're attaching a filter as it
		 * is removed, if that's possible.
		 */
		struct socket_filter *filter =
		    TAILQ_FIRST(&proto->pr_filter_head);

		sflt_retain_locked(filter);

		while (filter) {
			struct socket_filter *filter_next;
			/*
			 * Warning: sflt_attach_locked
			 * will drop the lock
			 */
			sflt_attach_locked(so, filter, 0);

			filter_next = TAILQ_NEXT(filter, sf_protosw_next);
			if (filter_next) {
				sflt_retain_locked(filter_next);
			}

			/*
			 * Warning: sflt_release_locked may remove
			 * the filter from the queue
			 */
			sflt_release_locked(filter);
			filter = filter_next;
		}
	}
	lck_rw_done(sock_filter_lock);
}

/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.
 */
__private_extern__ void
sflt_termsock(struct socket *so)
{
	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter_entry *entry;

	while ((entry = so->so_filt) != NULL) {
		/* Pull filter off the socket */
		so->so_filt = entry->sfe_next_onsocket;
		entry->sfe_flags |= SFEF_NOSOCKET;

		/* Call detach */
		sflt_detach_locked(entry);

		/*
		 * On sflt_termsock, we can't return until the detach function
		 * has been called. Call the detach function - this is gross
		 * because the socket filter entry could be freed when we drop
		 * the lock, so we make copies on the stack and retain
		 * everything we need before dropping the lock.
		 */
		if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
		    entry->sfe_filter->sf_filter.sf_detach) {
			void *sfe_cookie = entry->sfe_cookie;
			struct socket_filter *sfe_filter = entry->sfe_filter;

			/* Retain the socket filter */
			sflt_retain_locked(sfe_filter);

			/* Mark that we've called the detach function */
			entry->sfe_flags |= SFEF_NODETACH;

			/* Drop the lock before calling the detach function */
			lck_rw_unlock_exclusive(sock_filter_lock);
			sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
			lck_rw_lock_exclusive(sock_filter_lock);

			/* Release the filter */
			sflt_release_locked(sfe_filter);
		}
	}

	lck_rw_unlock_exclusive(sock_filter_lock);
}


static void
sflt_notify_internal(struct socket *so, sflt_event_t event, void *param,
    sflt_handle handle)
{
	if (so->so_filt == NULL) {
		return;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_notify &&
		    ((handle && entry->sfe_filter->sf_filter.sf_handle !=
		    handle) || !handle)) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Finally call the filter */
			entry->sfe_filter->sf_filter.sf_notify(
				entry->sfe_cookie, so, event, param);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked != 0) {
		socket_lock(so, 0);
	}
}

__private_extern__ void
sflt_notify(struct socket *so, sflt_event_t event, void *param)
{
	sflt_notify_internal(so, event, param, 0);
}

static void
sflt_notify_after_register(struct socket *so, sflt_event_t event,
    sflt_handle handle)
{
	sflt_notify_internal(so, event, NULL, handle);
}

__private_extern__ int
sflt_ioctl(struct socket *so, u_long cmd, caddr_t data)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_ioctl) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_ioctl(
				entry->sfe_cookie, so, cmd, data);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_bind(struct socket *so, const struct sockaddr *nam)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_bind) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_bind(
				entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again and
			 * release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_listen(struct socket *so)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_listen) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_listen(
				entry->sfe_cookie, so);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_accept(struct socket *head, struct socket *so,
    const struct sockaddr *local, const struct sockaddr *remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_accept) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_accept(
				entry->sfe_cookie, head, so, local, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_getsockname(struct socket *so, struct sockaddr **local)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getsockname) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getsockname(
				entry->sfe_cookie, so, local);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_getpeername(struct socket *so, struct sockaddr **remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getpeername) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getpeername(
				entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_connectin(struct socket *so, const struct sockaddr *remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_in) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_in(
				entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

static int
sflt_connectout_common(struct socket *so, const struct sockaddr *nam)
{
	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_out) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_out(
				entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_connectout(struct socket *so, const struct sockaddr *nam)
{
	char buf[SOCK_MAXADDRLEN];
	struct sockaddr *sa;
	int error;

	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	/*
	 * Workaround for rdar://23362120
	 * Always pass a buffer that can hold an IPv6 socket address
	 */
	bzero(buf, sizeof(buf));
	bcopy(nam, buf, nam->sa_len);
	sa = (struct sockaddr *)buf;

	error = sflt_connectout_common(so, sa);
	if (error != 0) {
		return error;
	}

	/*
	 * If the address was modified, copy it back
	 */
	if (bcmp(sa, nam, nam->sa_len) != 0) {
		bcopy(sa, (struct sockaddr *)(uintptr_t)nam, nam->sa_len);
	}

	return 0;
}

__private_extern__ int
sflt_setsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_setoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_setoption(
				entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_getsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getoption(
				entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int setsendthread = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW) {
			continue;
		}
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_out) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				if (so->so_send_filt_thread == NULL) {
					setsendthread = 1;
					so->so_send_filt_thread =
					    current_thread();
				}
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_out(
				entry->sfe_cookie, so, to, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
		if (setsendthread) {
			so->so_send_filt_thread = NULL;
		}
	}

	return error;
}

__private_extern__ int
sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int error = 0;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);

	for (entry = so->so_filt; entry && (error == 0);
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW) {
			continue;
		}
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_in) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_in(
				entry->sfe_cookie, so, from, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

#pragma mark -- KPI --

errno_t
sflt_attach(socket_t socket, sflt_handle handle)
{
	socket_lock(socket, 1);
	errno_t result = sflt_attach_internal(socket, handle);
	socket_unlock(socket, 1);
	return result;
}

errno_t
sflt_detach(socket_t socket, sflt_handle handle)
{
	struct socket_filter_entry *entry;
	errno_t result = 0;

	if (socket == NULL || handle == 0) {
		return EINVAL;
	}

	lck_rw_lock_exclusive(sock_filter_lock);
	for (entry = socket->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if (entry->sfe_filter->sf_filter.sf_handle == handle &&
		    (entry->sfe_flags & SFEF_ATTACHED) != 0) {
			break;
		}
	}

	if (entry != NULL) {
		sflt_detach_locked(entry);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	return result;
}
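
/*
 * Illustrative sketch only: for a filter registered without SFLT_GLOBAL,
 * attachment is explicit and per socket. MYFILTER_HANDLE is a hypothetical
 * name for the caller's registered sflt_handle:
 *
 *	errno_t err = sflt_attach(so, MYFILTER_HANDLE);
 *	...
 *	(void) sflt_detach(so, MYFILTER_HANDLE);
 */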

struct solist {
	struct solist *next;
	struct socket *so;
};

static errno_t
sflt_register_common(const struct sflt_filter *filter, int domain, int type,
    int protocol, bool is_internal)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr;
	unsigned int len;
	struct socket *so;
	struct inpcb *inp;
	struct solist *solisthead = NULL, *solist = NULL;

	if ((domain != PF_INET) && (domain != PF_INET6)) {
		return ENOTSUP;
	}

	pr = pffindproto(domain, protocol, type);
	if (pr == NULL) {
		return ENOENT;
	}

	if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
	    filter->sf_handle == 0 || filter->sf_name == NULL) {
		return EINVAL;
	}

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter *, sizeof(*sock_filt),
	    M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return ENOBUFS;
	}

	bzero(sock_filt, sizeof(*sock_filt));

	/* Legacy sflt_filter length; current structure minus extended */
	len = sizeof(*filter) - sizeof(struct sflt_filter_ext);
	/*
	 * Include extended fields if filter defines SFLT_EXTENDED.
	 * We've zeroed out our internal sflt_filter placeholder,
	 * so any unused portion would have been taken care of.
	 */
	if (filter->sf_flags & SFLT_EXTENDED) {
		unsigned int ext_len = filter->sf_len;

		if (ext_len > sizeof(struct sflt_filter_ext)) {
			ext_len = sizeof(struct sflt_filter_ext);
		}

		len += ext_len;
	}
	bcopy(filter, &sock_filt->sf_filter, len);

	lck_rw_lock_exclusive(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle ==
		    sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
			    sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
		os_ref_init(&sock_filt->sf_refcount, NULL);

		OSIncrementAtomic64(&net_api_stats.nas_sfltr_register_count);
		INC_ATOMIC_INT64_LIM(net_api_stats.nas_sfltr_register_total);
		if (is_internal) {
			INC_ATOMIC_INT64_LIM(net_api_stats.nas_sfltr_register_os_total);
		}
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	if (match != NULL) {
		FREE(sock_filt, M_IFADDR);
		return EEXIST;
	}

	if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY)) {
		return error;
	}

	/*
	 * Setup the filter on the TCP and UDP sockets already created.
	 */
#define SOLIST_ADD(_so) do {						\
	solist->next = solisthead;					\
	sock_retain((_so));						\
	solist->so = (_so);						\
	solisthead = solist;						\
} while (0)
	if (protocol == IPPROTO_TCP) {
		lck_rw_lock_shared(tcbinfo.ipi_lock);
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type)) {
				continue;
			}
			MALLOC(solist, struct solist *, sizeof(*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist) {
				continue;
			}
			SOLIST_ADD(so);
		}
		lck_rw_done(tcbinfo.ipi_lock);
	} else if (protocol == IPPROTO_UDP) {
		lck_rw_lock_shared(udbinfo.ipi_lock);
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type)) {
				continue;
			}
			MALLOC(solist, struct solist *, sizeof(*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist) {
				continue;
			}
			SOLIST_ADD(so);
		}
		lck_rw_done(udbinfo.ipi_lock);
	}
	/* XXX it's possible to walk the raw socket list as well */
#undef SOLIST_ADD

	while (solisthead) {
		sflt_handle handle = filter->sf_handle;

		so = solisthead->so;
		socket_lock(so, 0);
		sflt_initsock(so);
		if (so->so_state & SS_ISCONNECTING) {
			sflt_notify_after_register(so, sock_evt_connecting,
			    handle);
		} else if (so->so_state & SS_ISCONNECTED) {
			sflt_notify_after_register(so, sock_evt_connected,
			    handle);
		} else if ((so->so_state &
		    (SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE)) ==
		    (SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE)) {
			sflt_notify_after_register(so, sock_evt_disconnecting,
			    handle);
		} else if ((so->so_state &
		    (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED)) ==
		    (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED)) {
			sflt_notify_after_register(so, sock_evt_disconnected,
			    handle);
		} else if (so->so_state & SS_CANTSENDMORE) {
			sflt_notify_after_register(so, sock_evt_cantsendmore,
			    handle);
		} else if (so->so_state & SS_CANTRCVMORE) {
			sflt_notify_after_register(so, sock_evt_cantrecvmore,
			    handle);
		}
		socket_unlock(so, 0);
		/* XXX no easy way to post the sock_evt_closing event */
		sock_release(so);
		solist = solisthead;
		solisthead = solisthead->next;
		FREE(solist, M_IFADDR);
	}

	return error;
}

errno_t
sflt_register_internal(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	return sflt_register_common(filter, domain, type, protocol, true);
}

errno_t
sflt_register(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	return sflt_register_common(filter, domain, type, protocol, false);
}
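
/*
 * Illustrative sketch only (MYFILTER_HANDLE and the my_* callbacks are
 * hypothetical): a kext registers a global TCP filter roughly like this.
 * sf_attach, sf_detach, a non-zero sf_handle and sf_name are mandatory,
 * as enforced by sflt_register_common above.
 *
 *	static errno_t
 *	my_attach(void **cookie, socket_t so)
 *	{
 *		*cookie = NULL;		// per-socket state would go here
 *		return 0;
 *	}
 *
 *	static void
 *	my_detach(void *cookie, socket_t so)
 *	{
 *	}
 *
 *	static struct sflt_filter my_filter = {
 *		.sf_handle = MYFILTER_HANDLE,
 *		.sf_flags = SFLT_GLOBAL,	// attach to new sockets automatically
 *		.sf_name = (char *)"com.example.myfilter",
 *		.sf_attach = my_attach,
 *		.sf_detach = my_detach,
 *	};
 *
 *	errno_t err = sflt_register(&my_filter, PF_INET, SOCK_STREAM,
 *	    IPPROTO_TCP);
 *	...
 *	(void) sflt_unregister(MYFILTER_HANDLE);
 */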

errno_t
sflt_unregister(sflt_handle handle)
{
	struct socket_filter *filter;
	lck_rw_lock_exclusive(sock_filter_lock);

	/* Find the entry by the handle */
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle) {
			break;
		}
	}

	if (filter) {
		VERIFY(OSDecrementAtomic64(&net_api_stats.nas_sfltr_register_count) > 0);

		/* Remove it from the global list */
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

		/* Remove it from the protosw list */
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head,
			    filter, sf_protosw_next);
		}

		/* Detach from any sockets */
		struct socket_filter_entry *entry = NULL;

		for (entry = filter->sf_entry_head; entry;
		    entry = entry->sfe_next_onfilter) {
			sflt_detach_locked(entry);
		}

		/* Release the filter */
		sflt_release_locked(filter);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	if (filter == NULL) {
		return ENOENT;
	}

	return 0;
}

errno_t
sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int error = 0;

	if (so == NULL || data == NULL) {
		return EINVAL;
	}

	if (flags & sock_data_filt_flag_oob) {
		return ENOTSUP;
	}

	socket_lock(so, 1);

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		error = ENOTSUP;
		goto done;
	}

	if (from) {
		if (sbappendaddr(&so->so_rcv,
		    (struct sockaddr *)(uintptr_t)from, data, control, NULL)) {
			sorwakeup(so);
		}
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL)) {
			sorwakeup(so);
		}
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf *)data)) {
			sorwakeup(so);
		}
		goto done;
	}

	if (sbappend(&so->so_rcv, data)) {
		sorwakeup(so);
	}
done:
	socket_unlock(so, 1);
	return error;
}
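
/*
 * Illustrative sketch only: a filter that swallowed inbound data (by saving
 * the mbufs and returning EJUSTRETURN from its sf_data_in callback) can
 * reinject them later. The error paths above never append the chain, so on
 * a non-zero return the caller still owns the mbufs:
 *
 *	errno_t err = sock_inject_data_in(so, NULL, data, NULL, 0);
 *	if (err != 0) {
 *		mbuf_freem(data);
 *	}
 */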

errno_t
sock_inject_data_out(socket_t so, const struct sockaddr *to, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int sosendflags = 0;

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		return ENOTSUP;
	}

	if (flags & sock_data_filt_flag_oob) {
		sosendflags = MSG_OOB;
	}
	return sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
	    data, control, sosendflags);
}
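
/*
 * Illustrative sketch only: the outbound counterpart goes back through
 * sosend(), which takes ownership of the mbufs, so unlike
 * sock_inject_data_in there is nothing for the caller to free on failure:
 *
 *	errno_t err = sock_inject_data_out(so, NULL, data, NULL, 0);
 */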

sockopt_dir
sockopt_direction(sockopt_t sopt)
{
	return (sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set;
}

int
sockopt_level(sockopt_t sopt)
{
	return sopt->sopt_level;
}

int
sockopt_name(sockopt_t sopt)
{
	return sopt->sopt_name;
}

size_t
sockopt_valsize(sockopt_t sopt)
{
	return sopt->sopt_valsize;
}

errno_t
sockopt_copyin(sockopt_t sopt, void *data, size_t len)
{
	return sooptcopyin(sopt, data, len, len);
}

errno_t
sockopt_copyout(sockopt_t sopt, void *data, size_t len)
{
	return sooptcopyout(sopt, data, len);
}
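
/*
 * Illustrative sketch only (my_setoption is a hypothetical sf_setoption
 * callback): the accessors above let a filter inspect a socket option
 * without reaching into the sockopt structure directly:
 *
 *	static errno_t
 *	my_setoption(void *cookie, socket_t so, sockopt_t opt)
 *	{
 *		int val;
 *
 *		if (sockopt_direction(opt) != sockopt_set ||
 *		    sockopt_level(opt) != SOL_SOCKET ||
 *		    sockopt_valsize(opt) != sizeof(val)) {
 *			return 0;	// not of interest; continue processing
 *		}
 *		return sockopt_copyin(opt, &val, sizeof(val));
 *	}
 */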