/*
 * Copyright (c) 2003-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/proc.h>
#include <kern/locks.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <net/kext_net.h>
#include <net/if.h>
#include <net/net_api_stats.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>

#include <stdbool.h>
#include <string.h>

#define	SFEF_ATTACHED	0x1	/* SFE is on socket list */
#define	SFEF_NODETACH	0x2	/* Detach should not be called */
#define	SFEF_NOSOCKET	0x4	/* Socket is gone */

struct socket_filter_entry {
	struct socket_filter_entry	*sfe_next_onsocket;
	struct socket_filter_entry	*sfe_next_onfilter;
	struct socket_filter_entry	*sfe_next_oncleanup;

	struct socket_filter		*sfe_filter;
	struct socket			*sfe_socket;
	void				*sfe_cookie;

	uint32_t			sfe_flags;
	int32_t				sfe_refcount;
};

struct socket_filter {
	TAILQ_ENTRY(socket_filter)	sf_protosw_next;
	TAILQ_ENTRY(socket_filter)	sf_global_next;
	struct socket_filter_entry	*sf_entry_head;

	struct protosw			*sf_proto;
	struct sflt_filter		sf_filter;
	u_int32_t			sf_refcount;
};

TAILQ_HEAD(socket_filter_list, socket_filter);

static struct socket_filter_list	sock_filter_head;
static lck_rw_t				*sock_filter_lock = NULL;
static lck_mtx_t			*sock_filter_cleanup_lock = NULL;
static struct socket_filter_entry	*sock_filter_cleanup_entries = NULL;
static thread_t				sock_filter_cleanup_thread = NULL;

static void	sflt_cleanup_thread(void *, wait_result_t);
static void	sflt_detach_locked(struct socket_filter_entry *entry);

#undef sflt_register
static errno_t sflt_register_common(const struct sflt_filter *filter, int domain,
    int type, int protocol, bool is_internal);
errno_t sflt_register(const struct sflt_filter *filter, int domain,
    int type, int protocol);

#pragma mark -- Internal State Management --

__private_extern__ int
sflt_permission_check(struct inpcb *inp)
{
	/*
	 * All these permissions only apply to the co-processor interface,
	 * so ignore IPv4.
	 */
	if (!(inp->inp_vflag & INP_IPV6)) {
		return (0);
	}
	/* Sockets that have this entitlement bypass socket filters. */
	if (INP_INTCOPROC_ALLOWED(inp)) {
		return (1);
	}
	if ((inp->inp_flags & INP_BOUND_IF) &&
	    IFNET_IS_INTCOPROC(inp->inp_boundifp)) {
		return (1);
	}
	return (0);
}

__private_extern__ void
sflt_init(void)
{
	lck_grp_attr_t	*grp_attrib = NULL;
	lck_attr_t	*lck_attrib = NULL;
	lck_grp_t	*lck_group = NULL;

	TAILQ_INIT(&sock_filter_head);

	/* Allocate a rw lock */
	grp_attrib = lck_grp_attr_alloc_init();
	lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
	lck_grp_attr_free(grp_attrib);
	lck_attrib = lck_attr_alloc_init();
	sock_filter_lock = lck_rw_alloc_init(lck_group, lck_attrib);
	sock_filter_cleanup_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
	lck_grp_free(lck_group);
	lck_attr_free(lck_attrib);
}

static void
sflt_retain_locked(struct socket_filter *filter)
{
	filter->sf_refcount++;
}

static void
sflt_release_locked(struct socket_filter *filter)
{
	filter->sf_refcount--;
	if (filter->sf_refcount == 0) {
		/* Call the unregistered function */
		if (filter->sf_filter.sf_unregistered) {
			lck_rw_unlock_exclusive(sock_filter_lock);
			filter->sf_filter.sf_unregistered(
			    filter->sf_filter.sf_handle);
			lck_rw_lock_exclusive(sock_filter_lock);
		}

		/* Free the entry */
		FREE(filter, M_IFADDR);
	}
}

static void
sflt_entry_retain(struct socket_filter_entry *entry)
{
	if (OSIncrementAtomic(&entry->sfe_refcount) <= 0) {
		panic("sflt_entry_retain - sfe_refcount <= 0\n");
		/* NOTREACHED */
	}
}

static void
sflt_entry_release(struct socket_filter_entry *entry)
{
	SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);
	if (old == 1) {
		/* That was the last reference */

		/* Take the cleanup lock */
		lck_mtx_lock(sock_filter_cleanup_lock);

		/* Put this item on the cleanup list */
		entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = entry;

		/* If the item is the first item in the list */
		if (entry->sfe_next_oncleanup == NULL) {
			if (sock_filter_cleanup_thread == NULL) {
				/* Create a thread */
				kernel_thread_start(sflt_cleanup_thread,
				    NULL, &sock_filter_cleanup_thread);
			} else {
				/* Wakeup the thread */
				wakeup(&sock_filter_cleanup_entries);
			}
		}

		/* Drop the cleanup lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);
	} else if (old <= 0) {
		panic("sflt_entry_release - sfe_refcount (%d) <= 0\n",
		    (int)old);
		/* NOTREACHED */
	}
}

__attribute__((noreturn))
static void
sflt_cleanup_thread(void *blah, wait_result_t blah2)
{
#pragma unused(blah, blah2)
	while (1) {
		lck_mtx_lock(sock_filter_cleanup_lock);
		while (sock_filter_cleanup_entries == NULL) {
			/* Sleep until we've got something better to do */
			msleep(&sock_filter_cleanup_entries,
			    sock_filter_cleanup_lock, PWAIT,
			    "sflt_cleanup", NULL);
		}

		/* Pull the current list of dead items */
		struct socket_filter_entry *dead = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = NULL;

		/* Drop the lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);

		/* Take the socket filter lock */
		lck_rw_lock_exclusive(sock_filter_lock);

		/* Cleanup every dead item */
		struct socket_filter_entry *entry;
		for (entry = dead; entry; entry = dead) {
			struct socket_filter_entry **nextpp;

			dead = entry->sfe_next_oncleanup;

			/* Call detach function if necessary - drop the lock */
			if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
			    entry->sfe_filter->sf_filter.sf_detach) {
				entry->sfe_flags |= SFEF_NODETACH;
				lck_rw_unlock_exclusive(sock_filter_lock);

				/*
				 * Warning - passing a potentially
				 * dead socket may be bad
				 */
				entry->sfe_filter->sf_filter.sf_detach(
				    entry->sfe_cookie, entry->sfe_socket);

				lck_rw_lock_exclusive(sock_filter_lock);
			}

			/*
			 * Pull entry off the socket list --
			 * if the socket still exists
			 */
			if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
				for (nextpp = &entry->sfe_socket->so_filt;
				    *nextpp;
				    nextpp = &(*nextpp)->sfe_next_onsocket) {
					if (*nextpp == entry) {
						*nextpp =
						    entry->sfe_next_onsocket;
						break;
					}
				}
			}

			/* Pull entry off the filter list */
			for (nextpp = &entry->sfe_filter->sf_entry_head;
			    *nextpp; nextpp = &(*nextpp)->sfe_next_onfilter) {
				if (*nextpp == entry) {
					*nextpp = entry->sfe_next_onfilter;
					break;
				}
			}

			/*
			 * Release the filter -- may drop lock, but that's okay
			 */
			sflt_release_locked(entry->sfe_filter);
			entry->sfe_socket = NULL;
			entry->sfe_filter = NULL;
			FREE(entry, M_IFADDR);
		}

		/* Drop the socket filter lock */
		lck_rw_unlock_exclusive(sock_filter_lock);
	}
	/* NOTREACHED */
}

static int
sflt_attach_locked(struct socket *so, struct socket_filter *filter,
    int socklocked)
{
	int error = 0;
	struct socket_filter_entry *entry = NULL;

	if (sflt_permission_check(sotoinpcb(so)))
		return (0);

	if (filter == NULL)
		return (ENOENT);

	for (entry = so->so_filt; entry; entry = entry->sfe_next_onfilter) {
		if (entry->sfe_filter->sf_filter.sf_handle ==
		    filter->sf_filter.sf_handle)
			return (EEXIST);
	}
	/* allocate the socket filter entry */
	MALLOC(entry, struct socket_filter_entry *, sizeof (*entry), M_IFADDR,
	    M_WAITOK);
	if (entry == NULL)
		return (ENOMEM);

	/* Initialize the socket filter entry */
	entry->sfe_cookie = NULL;
	entry->sfe_flags = SFEF_ATTACHED;
	entry->sfe_refcount = 1;	/* corresponds to SFEF_ATTACHED flag set */

	/* Put the entry in the filter list */
	sflt_retain_locked(filter);
	entry->sfe_filter = filter;
	entry->sfe_next_onfilter = filter->sf_entry_head;
	filter->sf_entry_head = entry;

	/* Put the entry on the socket filter list */
	entry->sfe_socket = so;
	entry->sfe_next_onsocket = so->so_filt;
	so->so_filt = entry;

	if (entry->sfe_filter->sf_filter.sf_attach) {
		/* Retain the entry while we call attach */
		sflt_entry_retain(entry);

		/*
		 * Release the filter lock --
		 * callers must be aware we will do this
		 */
		lck_rw_unlock_exclusive(sock_filter_lock);

		/* Unlock the socket */
		if (socklocked)
			socket_unlock(so, 0);

		/* It's finally safe to call the filter function */
		error = entry->sfe_filter->sf_filter.sf_attach(
		    &entry->sfe_cookie, so);

		/* Lock the socket again */
		if (socklocked)
			socket_lock(so, 0);

		/* Lock the filters again */
		lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * If the attach function returns an error,
		 * this filter must be detached
		 */
		if (error) {
			/* don't call sf_detach */
			entry->sfe_flags |= SFEF_NODETACH;
			sflt_detach_locked(entry);
		}

		/* Release the retain we held through the attach call */
		sflt_entry_release(entry);
	}

	return (error);
}

errno_t
sflt_attach_internal(socket_t socket, sflt_handle handle)
{
	if (socket == NULL || handle == 0)
		return (EINVAL);

	int result = EINVAL;

	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter *filter = NULL;
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		result = sflt_attach_locked(socket, filter, 1);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}

static void
sflt_detach_locked(struct socket_filter_entry *entry)
{
	if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
		entry->sfe_flags &= ~SFEF_ATTACHED;
		sflt_entry_release(entry);
	}
}

#pragma mark -- Socket Layer Hooks --

__private_extern__ void
sflt_initsock(struct socket *so)
{
	/*
	 * Point to the real protosw, as so_proto might have been
	 * pointed to a modified version.
	 */
	struct protosw *proto = so->so_proto->pr_protosw;

	lck_rw_lock_shared(sock_filter_lock);
	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		/* Promote lock to exclusive */
		if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock))
			lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * Warning: A filter unregistering will be pulled out of
		 * the list. This could happen while we drop the lock in
		 * sflt_attach_locked or sflt_release_locked. For this
		 * reason we retain a reference on the filter (or next_filter)
		 * while calling this function. This protects us from a panic,
		 * but it could result in a socket being created without all
		 * of the global filters if we're attaching a filter as it
		 * is removed, if that's possible.
		 */
		struct socket_filter *filter =
		    TAILQ_FIRST(&proto->pr_filter_head);

		sflt_retain_locked(filter);

		while (filter) {
			struct socket_filter *filter_next;
			/*
			 * Warning: sflt_attach_locked
			 * will drop the lock
			 */
			sflt_attach_locked(so, filter, 0);

			filter_next = TAILQ_NEXT(filter, sf_protosw_next);
			if (filter_next)
				sflt_retain_locked(filter_next);

			/*
			 * Warning: sflt_release_locked may remove
			 * the filter from the queue
			 */
			sflt_release_locked(filter);
			filter = filter_next;
		}
	}
	lck_rw_done(sock_filter_lock);
}

/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.
 */
__private_extern__ void
sflt_termsock(struct socket *so)
{
	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter_entry *entry;

	while ((entry = so->so_filt) != NULL) {
		/* Pull filter off the socket */
		so->so_filt = entry->sfe_next_onsocket;
		entry->sfe_flags |= SFEF_NOSOCKET;

		/* Call detach */
		sflt_detach_locked(entry);

		/*
		 * On sflt_termsock, we can't return until the detach function
		 * has been called. Call the detach function - this is gross
		 * because the socket filter entry could be freed when we drop
		 * the lock, so we make copies on the stack and retain
		 * everything we need before dropping the lock.
		 */
		if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
		    entry->sfe_filter->sf_filter.sf_detach) {
			void *sfe_cookie = entry->sfe_cookie;
			struct socket_filter *sfe_filter = entry->sfe_filter;

			/* Retain the socket filter */
			sflt_retain_locked(sfe_filter);

			/* Mark that we've called the detach function */
			entry->sfe_flags |= SFEF_NODETACH;

			/* Drop the lock before calling the detach function */
			lck_rw_unlock_exclusive(sock_filter_lock);
			sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
			lck_rw_lock_exclusive(sock_filter_lock);

			/* Release the filter */
			sflt_release_locked(sfe_filter);
		}
	}

	lck_rw_unlock_exclusive(sock_filter_lock);
}

static void
sflt_notify_internal(struct socket *so, sflt_event_t event, void *param,
    sflt_handle handle)
{
	if (so->so_filt == NULL)
		return;

	struct socket_filter_entry *entry;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
		/*
		 * Notify every attached filter with an sf_notify callback;
		 * if a handle was given, notify only the filter it names.
		 */
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_notify &&
		    (handle == 0 ||
		    entry->sfe_filter->sf_filter.sf_handle == handle)) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Finally call the filter */
			entry->sfe_filter->sf_filter.sf_notify(
			    entry->sfe_cookie, so, event, param);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked != 0) {
		socket_lock(so, 0);
	}
}

__private_extern__ void
sflt_notify(struct socket *so, sflt_event_t event, void *param)
{
	sflt_notify_internal(so, event, param, 0);
}

static void
sflt_notify_after_register(struct socket *so, sflt_event_t event,
    sflt_handle handle)
{
	sflt_notify_internal(so, event, NULL, handle);
}

__private_extern__ int
sflt_ioctl(struct socket *so, u_long cmd, caddr_t data)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_ioctl) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_ioctl(
			    entry->sfe_cookie, so, cmd, data);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_bind(struct socket *so, const struct sockaddr *nam)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_bind) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_bind(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again and
			 * release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_listen(struct socket *so)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_listen) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_listen(
			    entry->sfe_cookie, so);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_accept(struct socket *head, struct socket *so,
    const struct sockaddr *local, const struct sockaddr *remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_accept) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_accept(
			    entry->sfe_cookie, head, so, local, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getsockname(struct socket *so, struct sockaddr **local)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getsockname) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getsockname(
			    entry->sfe_cookie, so, local);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getpeername(struct socket *so, struct sockaddr **remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getpeername) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getpeername(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_connectin(struct socket *so, const struct sockaddr *remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_in) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_in(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

static int
sflt_connectout_common(struct socket *so, const struct sockaddr *nam)
{
	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_out) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_out(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_connectout(struct socket *so, const struct sockaddr *nam)
{
	char buf[SOCK_MAXADDRLEN];
	struct sockaddr *sa;
	int error;

	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	/*
	 * Workaround for rdar://23362120
	 * Always pass a buffer that can hold an IPv6 socket address
	 */
	bzero(buf, sizeof (buf));
	bcopy(nam, buf, nam->sa_len);
	sa = (struct sockaddr *)buf;

	error = sflt_connectout_common(so, sa);
	if (error != 0)
		return (error);

	/*
	 * If the address was modified, copy it back
	 */
	if (bcmp(sa, nam, nam->sa_len) != 0) {
		bcopy(sa, (struct sockaddr *)(uintptr_t)nam, nam->sa_len);
	}

	return (0);
}

__private_extern__ int
sflt_setsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_setoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_setoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int setsendthread = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_out) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				if (so->so_send_filt_thread == NULL) {
					setsendthread = 1;
					so->so_send_filt_thread =
					    current_thread();
				}
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_out(
			    entry->sfe_cookie, so, to, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
		if (setsendthread)
			so->so_send_filt_thread = NULL;
	}

	return (error);
}

__private_extern__ int
sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int error = 0;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);

	for (entry = so->so_filt; entry && (error == 0);
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_in) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_in(
			    entry->sfe_cookie, so, from, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

#pragma mark -- KPI --

errno_t
sflt_attach(socket_t socket, sflt_handle handle)
{
	socket_lock(socket, 1);
	errno_t result = sflt_attach_internal(socket, handle);
	socket_unlock(socket, 1);
	return (result);
}

errno_t
sflt_detach(socket_t socket, sflt_handle handle)
{
	struct socket_filter_entry *entry;
	errno_t result = 0;

	if (socket == NULL || handle == 0)
		return (EINVAL);

	lck_rw_lock_exclusive(sock_filter_lock);
	for (entry = socket->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if (entry->sfe_filter->sf_filter.sf_handle == handle &&
		    (entry->sfe_flags & SFEF_ATTACHED) != 0) {
			break;
		}
	}

	if (entry != NULL) {
		sflt_detach_locked(entry);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}

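/*
 * Example (illustrative sketch only, not part of this file): a kext can
 * attach an already-registered filter to one specific socket with
 * sflt_attach() and remove it again with sflt_detach().  The handle
 * value and function names below are hypothetical; the handle must
 * match the sf_handle that was passed to sflt_register().
 */
#if 0
#define	MYFILTER_HANDLE	0x2E78AB42	/* hypothetical handle value */

static errno_t
myfilter_watch_socket(socket_t so)
{
	errno_t err;

	/* Runs the filter's sf_attach callback for this socket */
	err = sflt_attach(so, MYFILTER_HANDLE);
	if (err != 0)
		return (err);

	/* ... later, to stop filtering just this socket: */
	return (sflt_detach(so, MYFILTER_HANDLE));
}
#endif
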
struct solist {
	struct solist *next;
	struct socket *so;
};

static errno_t
sflt_register_common(const struct sflt_filter *filter, int domain, int type,
    int protocol, bool is_internal)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr;
	unsigned int len;
	struct socket *so;
	struct inpcb *inp;
	struct solist *solisthead = NULL, *solist = NULL;

	if ((domain != PF_INET) && (domain != PF_INET6))
		return (ENOTSUP);

	pr = pffindproto(domain, protocol, type);
	if (pr == NULL)
		return (ENOENT);

	if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
	    filter->sf_handle == 0 || filter->sf_name == NULL)
		return (EINVAL);

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter *, sizeof (*sock_filt),
	    M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return (ENOBUFS);
	}

	bzero(sock_filt, sizeof (*sock_filt));

	/* Legacy sflt_filter length; current structure minus extended */
	len = sizeof (*filter) - sizeof (struct sflt_filter_ext);
	/*
	 * Include extended fields if filter defines SFLT_EXTENDED.
	 * We've zeroed out our internal sflt_filter placeholder,
	 * so any unused portion would have been taken care of.
	 */
	if (filter->sf_flags & SFLT_EXTENDED) {
		unsigned int ext_len = filter->sf_len;

		if (ext_len > sizeof (struct sflt_filter_ext))
			ext_len = sizeof (struct sflt_filter_ext);

		len += ext_len;
	}
	bcopy(filter, &sock_filt->sf_filter, len);

	lck_rw_lock_exclusive(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle ==
		    sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
			    sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
		sflt_retain_locked(sock_filt);

		OSIncrementAtomic64(&net_api_stats.nas_sfltr_register_count);
		INC_ATOMIC_INT64_LIM(net_api_stats.nas_sfltr_register_total);
		if (is_internal) {
			INC_ATOMIC_INT64_LIM(net_api_stats.nas_sfltr_register_os_total);
		}
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	if (match != NULL) {
		FREE(sock_filt, M_IFADDR);
		return (EEXIST);
	}

	if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY))
		return (error);

	/*
	 * Setup the filter on the TCP and UDP sockets already created.
	 */
#define	SOLIST_ADD(_so) do {						\
	solist->next = solisthead;					\
	sock_retain((_so));						\
	solist->so = (_so);						\
	solisthead = solist;						\
} while (0)
	if (protocol == IPPROTO_TCP) {
		lck_rw_lock_shared(tcbinfo.ipi_lock);
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(tcbinfo.ipi_lock);
	} else if (protocol == IPPROTO_UDP) {
		lck_rw_lock_shared(udbinfo.ipi_lock);
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(udbinfo.ipi_lock);
	}
	/* XXX it's possible to walk the raw socket list as well */
#undef SOLIST_ADD

	while (solisthead) {
		sflt_handle handle = filter->sf_handle;

		so = solisthead->so;
		socket_lock(so, 0);
		sflt_initsock(so);
		if (so->so_state & SS_ISCONNECTING)
			sflt_notify_after_register(so, sock_evt_connecting,
			    handle);
		else if (so->so_state & SS_ISCONNECTED)
			sflt_notify_after_register(so, sock_evt_connected,
			    handle);
		else if ((so->so_state &
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE)) ==
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE))
			sflt_notify_after_register(so, sock_evt_disconnecting,
			    handle);
		else if ((so->so_state &
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED)) ==
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED))
			sflt_notify_after_register(so, sock_evt_disconnected,
			    handle);
		else if (so->so_state & SS_CANTSENDMORE)
			sflt_notify_after_register(so, sock_evt_cantsendmore,
			    handle);
		else if (so->so_state & SS_CANTRCVMORE)
			sflt_notify_after_register(so, sock_evt_cantrecvmore,
			    handle);
		socket_unlock(so, 0);
		/* XXX no easy way to post the sock_evt_closing event */
		sock_release(so);
		solist = solisthead;
		solisthead = solisthead->next;
		FREE(solist, M_IFADDR);
	}

	return (error);
}

errno_t
sflt_register_internal(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	return (sflt_register_common(filter, domain, type, protocol, true));
}

errno_t
sflt_register(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	return (sflt_register_common(filter, domain, type, protocol, false));
}

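/*
 * Example (illustrative sketch only, not part of this file): minimal
 * registration of a global TCP filter from a kext start routine, using
 * the hypothetical MYFILTER_HANDLE from the earlier sketch.  A real
 * filter would usually also supply data and connection callbacks.
 */
#if 0
static errno_t
myfilter_attach(void **cookie, socket_t so)
{
	*cookie = NULL;		/* per-socket state would be allocated here */
	return (0);		/* returning an error skips this socket */
}

static void
myfilter_detach(void *cookie, socket_t so)
{
	/* free any per-socket state allocated in myfilter_attach() */
}

static const struct sflt_filter myfilter = {
	.sf_handle	= MYFILTER_HANDLE,
	.sf_flags	= SFLT_GLOBAL,	/* auto-attach to new sockets */
	.sf_name	= (char *)"com.example.myfilter",
	.sf_attach	= myfilter_attach,
	.sf_detach	= myfilter_detach,
};

static errno_t
myfilter_start(void)
{
	/* IPv4 TCP; a second call with PF_INET6 would cover IPv6 */
	return (sflt_register(&myfilter, PF_INET, SOCK_STREAM, IPPROTO_TCP));
}
#endif
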
errno_t
sflt_unregister(sflt_handle handle)
{
	struct socket_filter *filter;
	lck_rw_lock_exclusive(sock_filter_lock);

	/* Find the entry by the handle */
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		VERIFY(OSDecrementAtomic64(&net_api_stats.nas_sfltr_register_count) > 0);

		/* Remove it from the global list */
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

		/* Remove it from the protosw list */
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head,
			    filter, sf_protosw_next);
		}

		/* Detach from any sockets */
		struct socket_filter_entry *entry = NULL;

		for (entry = filter->sf_entry_head; entry;
		    entry = entry->sfe_next_onfilter) {
			sflt_detach_locked(entry);
		}

		/* Release the filter */
		sflt_release_locked(filter);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	if (filter == NULL)
		return (ENOENT);

	return (0);
}

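/*
 * Example (illustrative sketch only, not part of this file): because
 * sf_detach and sf_unregistered can still run after sflt_unregister()
 * returns, a kext normally blocks its stop routine until the
 * sf_unregistered callback has fired before allowing unload.  The
 * synchronization variable and names below are hypothetical.
 */
#if 0
static volatile SInt32 myfilter_registered = 1;

static void
myfilter_unregistered(sflt_handle handle)
{
	/* last callback from the stack; safe to unload after this */
	OSDecrementAtomic(&myfilter_registered);
	wakeup((void *)&myfilter_registered);
}

static errno_t
myfilter_stop(void)
{
	errno_t err = sflt_unregister(MYFILTER_HANDLE);

	if (err != 0)
		return (err);
	while (myfilter_registered != 0)
		(void) msleep((void *)&myfilter_registered, NULL, PWAIT,
		    "myfltstop", NULL);
	return (0);
}
#endif
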
errno_t
sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int error = 0;

	if (so == NULL || data == NULL)
		return (EINVAL);

	if (flags & sock_data_filt_flag_oob) {
		return (ENOTSUP);
	}

	socket_lock(so, 1);

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		error = ENOTSUP;
		goto done;
	}

	if (from) {
		if (sbappendaddr(&so->so_rcv,
		    (struct sockaddr *)(uintptr_t)from, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf *)data))
			sorwakeup(so);
		goto done;
	}

	if (sbappend(&so->so_rcv, data))
		sorwakeup(so);
done:
	socket_unlock(so, 1);
	return (error);
}

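/*
 * Example (illustrative sketch only, not part of this file): a passive
 * sf_data_in callback.  Returning 0 lets the chain continue up the
 * stack unchanged; a filter that takes ownership of the chain would
 * return EJUSTRETURN and later hand the data back to the socket via
 * sock_inject_data_in().  The counter and names are hypothetical.
 */
#if 0
static volatile SInt64 myfilter_rx_bytes;

static errno_t
myfilter_data_in(void *cookie, socket_t so, const struct sockaddr *from,
    mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags)
{
	/* account for the inbound bytes, then pass them through */
	if (data != NULL && *data != NULL)
		OSAddAtomic64((SInt64)mbuf_pkthdr_len(*data),
		    &myfilter_rx_bytes);
	return (0);
}
#endif
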
errno_t
sock_inject_data_out(socket_t so, const struct sockaddr *to, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int sosendflags = 0;

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW)
		return (ENOTSUP);

	if (flags & sock_data_filt_flag_oob)
		sosendflags = MSG_OOB;
	return (sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
	    data, control, sosendflags));
}

sockopt_dir
sockopt_direction(sockopt_t sopt)
{
	return ((sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set);
}

int
sockopt_level(sockopt_t sopt)
{
	return (sopt->sopt_level);
}

int
sockopt_name(sockopt_t sopt)
{
	return (sopt->sopt_name);
}

size_t
sockopt_valsize(sockopt_t sopt)
{
	return (sopt->sopt_valsize);
}

errno_t
sockopt_copyin(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyin(sopt, data, len, len));
}

errno_t
sockopt_copyout(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyout(sopt, data, len));
}
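
/*
 * Example (illustrative sketch only, not part of this file): an
 * sf_setoption callback built on the accessors above.  It observes
 * setsockopt(SOL_SOCKET, SO_SNDBUF) without changing it; returning 0
 * lets option processing continue, while a filter that fully handled
 * the option would return EJUSTRETURN.  The names are hypothetical.
 */
#if 0
static errno_t
myfilter_setoption(void *cookie, socket_t so, sockopt_t opt)
{
	int val;

	if (sockopt_direction(opt) == sockopt_set &&
	    sockopt_level(opt) == SOL_SOCKET &&
	    sockopt_name(opt) == SO_SNDBUF &&
	    sockopt_valsize(opt) == sizeof (val) &&
	    sockopt_copyin(opt, &val, sizeof (val)) == 0)
		printf("myfilter: SO_SNDBUF set to %d\n", val);
	return (0);
}
#endif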