/*
 * Copyright (c) 2003-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/proc.h>
#include <kern/locks.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <net/kext_net.h>
#include <net/if.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>

#include <string.h>

#define	SFEF_ATTACHED		0x1	/* SFE is on socket list */
#define	SFEF_NODETACH		0x2	/* Detach should not be called */
#define	SFEF_NOSOCKET		0x4	/* Socket is gone */

struct socket_filter_entry {
	struct socket_filter_entry	*sfe_next_onsocket;
	struct socket_filter_entry	*sfe_next_onfilter;
	struct socket_filter_entry	*sfe_next_oncleanup;

	struct socket_filter		*sfe_filter;
	struct socket			*sfe_socket;
	void				*sfe_cookie;

	uint32_t			sfe_flags;
	int32_t				sfe_refcount;
};

struct socket_filter {
	TAILQ_ENTRY(socket_filter)	sf_protosw_next;
	TAILQ_ENTRY(socket_filter)	sf_global_next;
	struct socket_filter_entry	*sf_entry_head;

	struct protosw			*sf_proto;
	struct sflt_filter		sf_filter;
	u_int32_t			sf_refcount;
};

TAILQ_HEAD(socket_filter_list, socket_filter);

static struct socket_filter_list	sock_filter_head;
static lck_rw_t				*sock_filter_lock = NULL;
static lck_mtx_t			*sock_filter_cleanup_lock = NULL;
static struct socket_filter_entry	*sock_filter_cleanup_entries = NULL;
static thread_t				sock_filter_cleanup_thread = NULL;

static void	sflt_cleanup_thread(void *, wait_result_t);
static void	sflt_detach_locked(struct socket_filter_entry *entry);

#pragma mark -- Internal State Management --

__private_extern__ void
sflt_init(void)
{
	lck_grp_attr_t *grp_attrib = NULL;
	lck_attr_t *lck_attrib = NULL;
	lck_grp_t *lck_group = NULL;

	TAILQ_INIT(&sock_filter_head);

	/* Allocate a rw lock */
	grp_attrib = lck_grp_attr_alloc_init();
	lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
	lck_grp_attr_free(grp_attrib);
	lck_attrib = lck_attr_alloc_init();
	sock_filter_lock = lck_rw_alloc_init(lck_group, lck_attrib);
	sock_filter_cleanup_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
	lck_grp_free(lck_group);
	lck_attr_free(lck_attrib);
}

static void
sflt_retain_locked(struct socket_filter *filter)
{
	filter->sf_refcount++;
}

static void
sflt_release_locked(struct socket_filter *filter)
{
	filter->sf_refcount--;
	if (filter->sf_refcount == 0) {
		/* Call the unregistered function */
		if (filter->sf_filter.sf_unregistered) {
			lck_rw_unlock_exclusive(sock_filter_lock);
			filter->sf_filter.sf_unregistered(
			    filter->sf_filter.sf_handle);
			lck_rw_lock_exclusive(sock_filter_lock);
		}

		/* Free the entry */
		FREE(filter, M_IFADDR);
	}
}

static void
sflt_entry_retain(struct socket_filter_entry *entry)
{
	if (OSIncrementAtomic(&entry->sfe_refcount) <= 0) {
		panic("sflt_entry_retain - sfe_refcount <= 0\n");
		/* NOTREACHED */
	}
}

static void
sflt_entry_release(struct socket_filter_entry *entry)
{
	SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);
	if (old == 1) {
		/* That was the last reference */

		/* Take the cleanup lock */
		lck_mtx_lock(sock_filter_cleanup_lock);

		/* Put this item on the cleanup list */
		entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = entry;

		/* If the list was empty, create or wake the cleanup thread */
		if (entry->sfe_next_oncleanup == NULL) {
			if (sock_filter_cleanup_thread == NULL) {
				/* Create a thread */
				kernel_thread_start(sflt_cleanup_thread,
				    NULL, &sock_filter_cleanup_thread);
			} else {
				/* Wakeup the thread */
				wakeup(&sock_filter_cleanup_entries);
			}
		}

		/* Drop the cleanup lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);
	} else if (old <= 0) {
		panic("sflt_entry_release - sfe_refcount (%d) <= 0\n",
		    (int)old);
		/* NOTREACHED */
	}
}
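
/*
 * Reference-counting protocol for socket_filter_entry (editor's summary,
 * derived from the code in this file): the SFEF_ATTACHED flag owns one
 * reference, and every hook that calls out to a filter takes a transient
 * reference so the entry cannot be freed while the locks are dropped:
 *
 *	sflt_entry_retain(entry);
 *	lck_rw_unlock_shared(sock_filter_lock);
 *	... invoke the filter callback with both locks dropped ...
 *	lck_rw_lock_shared(sock_filter_lock);
 *	sflt_entry_release(entry);
 *
 * The final release cannot free the entry directly because the caller may
 * still hold the socket lock; instead it queues the entry for the cleanup
 * thread below.
 */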

__attribute__((noreturn))
static void
sflt_cleanup_thread(void *blah, wait_result_t blah2)
{
#pragma unused(blah, blah2)
	while (1) {
		lck_mtx_lock(sock_filter_cleanup_lock);
		while (sock_filter_cleanup_entries == NULL) {
			/* Sleep until we've got something better to do */
			msleep(&sock_filter_cleanup_entries,
			    sock_filter_cleanup_lock, PWAIT,
			    "sflt_cleanup", NULL);
		}

		/* Pull the current list of dead items */
		struct socket_filter_entry *dead = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = NULL;

		/* Drop the lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);

		/* Take the socket filter lock */
		lck_rw_lock_exclusive(sock_filter_lock);

		/* Cleanup every dead item */
		struct socket_filter_entry *entry;
		for (entry = dead; entry; entry = dead) {
			struct socket_filter_entry **nextpp;

			dead = entry->sfe_next_oncleanup;

			/* Call detach function if necessary - drop the lock */
			if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
			    entry->sfe_filter->sf_filter.sf_detach) {
				entry->sfe_flags |= SFEF_NODETACH;
				lck_rw_unlock_exclusive(sock_filter_lock);

				/*
				 * Warning - passing a potentially
				 * dead socket may be bad
				 */
				entry->sfe_filter->sf_filter.sf_detach(
				    entry->sfe_cookie, entry->sfe_socket);

				lck_rw_lock_exclusive(sock_filter_lock);
			}

			/*
			 * Pull entry off the socket list --
			 * if the socket still exists
			 */
			if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
				for (nextpp = &entry->sfe_socket->so_filt;
				    *nextpp;
				    nextpp = &(*nextpp)->sfe_next_onsocket) {
					if (*nextpp == entry) {
						*nextpp =
						    entry->sfe_next_onsocket;
						break;
					}
				}
			}

			/* Pull entry off the filter list */
			for (nextpp = &entry->sfe_filter->sf_entry_head;
			    *nextpp; nextpp = &(*nextpp)->sfe_next_onfilter) {
				if (*nextpp == entry) {
					*nextpp = entry->sfe_next_onfilter;
					break;
				}
			}

			/*
			 * Release the filter -- may drop lock, but that's okay
			 */
			sflt_release_locked(entry->sfe_filter);
			entry->sfe_socket = NULL;
			entry->sfe_filter = NULL;
			FREE(entry, M_IFADDR);
		}

		/* Drop the socket filter lock */
		lck_rw_unlock_exclusive(sock_filter_lock);
	}
	/* NOTREACHED */
}

static int
sflt_attach_locked(struct socket *so, struct socket_filter *filter,
    int socklocked)
{
	int error = 0;
	struct socket_filter_entry *entry = NULL;

	if (filter == NULL)
		return (ENOENT);

	for (entry = so->so_filt; entry; entry = entry->sfe_next_onfilter) {
		if (entry->sfe_filter->sf_filter.sf_handle ==
		    filter->sf_filter.sf_handle)
			return (EEXIST);
	}
	/* allocate the socket filter entry */
	MALLOC(entry, struct socket_filter_entry *, sizeof (*entry), M_IFADDR,
	    M_WAITOK);
	if (entry == NULL)
		return (ENOMEM);

	/* Initialize the socket filter entry */
	entry->sfe_cookie = NULL;
	entry->sfe_flags = SFEF_ATTACHED;
	entry->sfe_refcount = 1; /* corresponds to SFEF_ATTACHED flag set */

	/* Put the entry in the filter list */
	sflt_retain_locked(filter);
	entry->sfe_filter = filter;
	entry->sfe_next_onfilter = filter->sf_entry_head;
	filter->sf_entry_head = entry;

	/* Put the entry on the socket filter list */
	entry->sfe_socket = so;
	entry->sfe_next_onsocket = so->so_filt;
	so->so_filt = entry;

	if (entry->sfe_filter->sf_filter.sf_attach) {
		/* Retain the entry while we call attach */
		sflt_entry_retain(entry);

		/*
		 * Release the filter lock --
		 * callers must be aware we will do this
		 */
		lck_rw_unlock_exclusive(sock_filter_lock);

		/* Unlock the socket */
		if (socklocked)
			socket_unlock(so, 0);

		/* It's finally safe to call the filter function */
		error = entry->sfe_filter->sf_filter.sf_attach(
		    &entry->sfe_cookie, so);

		/* Lock the socket again */
		if (socklocked)
			socket_lock(so, 0);

		/* Lock the filters again */
		lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * If the attach function returns an error,
		 * this filter must be detached
		 */
		if (error) {
			/* don't call sf_detach */
			entry->sfe_flags |= SFEF_NODETACH;
			sflt_detach_locked(entry);
		}

		/* Release the retain we held through the attach call */
		sflt_entry_release(entry);
	}

	return (error);
}
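
/*
 * For illustration, a minimal sf_attach callback as a filter might supply
 * it (an editor's sketch with hypothetical names, not part of this file):
 * the cookie stored through the attach path above is whatever per-socket
 * state the filter allocates here.
 *
 *	static errno_t
 *	example_attach(void **cookie, socket_t so)
 *	{
 *		struct example_state *state;
 *
 *		MALLOC(state, struct example_state *, sizeof (*state),
 *		    M_TEMP, M_WAITOK | M_ZERO);
 *		if (state == NULL)
 *			return (ENOMEM);
 *		*cookie = state;	// handed to every later callback
 *		return (0);		// non-zero detaches this entry
 *	}
 */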

errno_t
sflt_attach_internal(socket_t socket, sflt_handle handle)
{
	if (socket == NULL || handle == 0)
		return (EINVAL);

	int result = EINVAL;

	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter *filter = NULL;
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		result = sflt_attach_locked(socket, filter, 1);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}

static void
sflt_detach_locked(struct socket_filter_entry *entry)
{
	if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
		entry->sfe_flags &= ~SFEF_ATTACHED;
		sflt_entry_release(entry);
	}
}

#pragma mark -- Socket Layer Hooks --

__private_extern__ void
sflt_initsock(struct socket *so)
{
	/*
	 * Point to the real protosw, as so_proto might have been
	 * pointed to a modified version.
	 */
	struct protosw *proto = so->so_proto->pr_protosw;

	lck_rw_lock_shared(sock_filter_lock);
	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		/* Promote lock to exclusive */
		if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock))
			lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * Warning: A filter unregistering will be pulled out of
		 * the list. This could happen while we drop the lock in
		 * sflt_attach_locked or sflt_release_locked. For this
		 * reason we retain a reference on the filter (or next_filter)
		 * while calling this function. This protects us from a panic,
		 * but it could result in a socket being created without all
		 * of the global filters if we're attaching a filter as it
		 * is removed, if that's possible.
		 */
		struct socket_filter *filter =
		    TAILQ_FIRST(&proto->pr_filter_head);

		sflt_retain_locked(filter);

		while (filter) {
			struct socket_filter *filter_next;
			/*
			 * Warning: sflt_attach_locked
			 * will drop the lock
			 */
			sflt_attach_locked(so, filter, 0);

			filter_next = TAILQ_NEXT(filter, sf_protosw_next);
			if (filter_next)
				sflt_retain_locked(filter_next);

			/*
			 * Warning: sflt_release_locked may remove
			 * the filter from the queue
			 */
			sflt_release_locked(filter);
			filter = filter_next;
		}
	}
	lck_rw_done(sock_filter_lock);
}

/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.
 */
__private_extern__ void
sflt_termsock(struct socket *so)
{
	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter_entry *entry;

	while ((entry = so->so_filt) != NULL) {
		/* Pull filter off the socket */
		so->so_filt = entry->sfe_next_onsocket;
		entry->sfe_flags |= SFEF_NOSOCKET;

		/* Call detach */
		sflt_detach_locked(entry);

		/*
		 * On sflt_termsock, we can't return until the detach function
		 * has been called. Call the detach function - this is gross
		 * because the socket filter entry could be freed when we drop
		 * the lock, so we make copies on the stack and retain
		 * everything we need before dropping the lock.
		 */
		if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
		    entry->sfe_filter->sf_filter.sf_detach) {
			void *sfe_cookie = entry->sfe_cookie;
			struct socket_filter *sfe_filter = entry->sfe_filter;

			/* Retain the socket filter */
			sflt_retain_locked(sfe_filter);

			/* Mark that we've called the detach function */
			entry->sfe_flags |= SFEF_NODETACH;

			/* Drop the lock before calling the detach function */
			lck_rw_unlock_exclusive(sock_filter_lock);
			sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
			lck_rw_lock_exclusive(sock_filter_lock);

			/* Release the filter */
			sflt_release_locked(sfe_filter);
		}
	}

	lck_rw_unlock_exclusive(sock_filter_lock);
}


static void
sflt_notify_internal(struct socket *so, sflt_event_t event, void *param,
    sflt_handle handle)
{
	if (so->so_filt == NULL)
		return;

	struct socket_filter_entry *entry;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_notify &&
		    ((handle && entry->sfe_filter->sf_filter.sf_handle !=
		    handle) || !handle)) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Finally call the filter */
			entry->sfe_filter->sf_filter.sf_notify(
			    entry->sfe_cookie, so, event, param);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked != 0) {
		socket_lock(so, 0);
	}
}
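
/*
 * Every hook below follows the same shape as sflt_notify_internal() above
 * (editor's note): walk so->so_filt under the shared filter lock, and for
 * each attached entry that implements the callback:
 *
 *	sflt_entry_retain(entry);		// pin the entry
 *	lck_rw_unlock_shared(sock_filter_lock);	// no locks across callbacks
 *	socket_unlock(so, 0);			// first time through only
 *	error = entry->sfe_filter->sf_filter.sf_xxx(...);
 *	lck_rw_lock_shared(sock_filter_lock);
 *	sflt_entry_release(entry);
 *
 * and re-take the socket lock once the walk is done.  A non-zero return
 * from any filter stops the walk and is propagated to the caller.
 */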

__private_extern__ void
sflt_notify(struct socket *so, sflt_event_t event, void *param)
{
	sflt_notify_internal(so, event, param, 0);
}

static void
sflt_notify_after_register(struct socket *so, sflt_event_t event,
    sflt_handle handle)
{
	sflt_notify_internal(so, event, NULL, handle);
}

__private_extern__ int
sflt_ioctl(struct socket *so, u_long cmd, caddr_t data)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_ioctl) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_ioctl(
			    entry->sfe_cookie, so, cmd, data);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_bind(struct socket *so, const struct sockaddr *nam)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_bind) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_bind(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again and
			 * release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_listen(struct socket *so)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_listen) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_listen(
			    entry->sfe_cookie, so);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_accept(struct socket *head, struct socket *so,
    const struct sockaddr *local, const struct sockaddr *remote)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_accept) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_accept(
			    entry->sfe_cookie, head, so, local, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getsockname(struct socket *so, struct sockaddr **local)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getsockname) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getsockname(
			    entry->sfe_cookie, so, local);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getpeername(struct socket *so, struct sockaddr **remote)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getpeername) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getpeername(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_connectin(struct socket *so, const struct sockaddr *remote)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_in) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_in(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

static int
sflt_connectout_common(struct socket *so, const struct sockaddr *nam)
{
	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_out) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_out(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_connectout(struct socket *so, const struct sockaddr *nam)
{
	char buf[SOCK_MAXADDRLEN];
	struct sockaddr *sa;
	int error;

	if (so->so_filt == NULL)
		return (0);

	/*
	 * Workaround for rdar://23362120
	 * Always pass a buffer that can hold an IPv6 socket address
	 */
	bzero(buf, sizeof (buf));
	bcopy(nam, buf, nam->sa_len);
	sa = (struct sockaddr *)buf;

	error = sflt_connectout_common(so, sa);
	if (error != 0)
		return (error);

	/*
	 * If the address was modified, copy it back
	 */
	if (bcmp(sa, nam, nam->sa_len) != 0) {
		bcopy(sa, (struct sockaddr *)(uintptr_t)nam, nam->sa_len);
	}

	return (0);
}

__private_extern__ int
sflt_connectxout(struct socket *so, struct sockaddr_list **dst_sl0)
{
	struct sockaddr_list *dst_sl;
	struct sockaddr_entry *se, *tse;
	int modified = 0;
	int error = 0;

	if (so->so_filt == NULL)
		return (0);

	/* make a copy as sflt_connectout_common() releases the socket lock */
	dst_sl = sockaddrlist_dup(*dst_sl0, M_WAITOK);
	if (dst_sl == NULL)
		return (ENOBUFS);

	/*
	 * We don't yet have a connectx socket filter callback, so the
	 * closest approximation is to call sflt_connectout_common() once
	 * for each address in the list, and bail as soon as we get an
	 * error.
	 */
	TAILQ_FOREACH_SAFE(se, &dst_sl->sl_head, se_link, tse) {
		char buf[SOCK_MAXADDRLEN];
		struct sockaddr *sa;

		VERIFY(se->se_addr != NULL);

		/*
		 * Workaround for rdar://23362120
		 * Always pass a buffer that can hold an IPv6 socket address
		 */
		bzero(buf, sizeof (buf));
		bcopy(se->se_addr, buf, se->se_addr->sa_len);
		sa = (struct sockaddr *)buf;

		error = sflt_connectout_common(so, sa);
		if (error != 0)
			break;

		/*
		 * If the address was modified, copy it back
		 */
		if (bcmp(se->se_addr, sa, se->se_addr->sa_len) != 0) {
			bcopy(sa, se->se_addr, se->se_addr->sa_len);
			modified = 1;
		}
	}

	if (error != 0 || !modified) {
		/* leave the original as is */
		sockaddrlist_free(dst_sl);
	} else {
		/*
		 * At least one address was modified and there were no errors;
		 * ditch the original and return the modified list.
		 */
		sockaddrlist_free(*dst_sl0);
		*dst_sl0 = dst_sl;
	}

	return (error);
}

__private_extern__ int
sflt_setsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_setoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_setoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int setsendthread = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_out) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				if (so->so_send_filt_thread == NULL) {
					setsendthread = 1;
					so->so_send_filt_thread =
					    current_thread();
				}
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_out(
			    entry->sfe_cookie, so, to, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
		if (setsendthread)
			so->so_send_filt_thread = NULL;
	}

	return (error);
}

__private_extern__ int
sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int error = 0;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);

	for (entry = so->so_filt; entry && (error == 0);
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_in) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_in(
			    entry->sfe_cookie, so, from, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

#pragma mark -- KPI --

errno_t
sflt_attach(socket_t socket, sflt_handle handle)
{
	socket_lock(socket, 1);
	errno_t result = sflt_attach_internal(socket, handle);
	socket_unlock(socket, 1);
	return (result);
}
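
/*
 * Usage sketch (editor's illustration; the handle below is hypothetical).
 * A filter registered without SFLT_GLOBAL is attached to individual
 * sockets programmatically:
 *
 *	#define EXAMPLE_HANDLE	0x454D504C	// chosen by the filter author
 *
 *	errno_t err = sflt_attach(so, EXAMPLE_HANDLE);
 *	if (err == EEXIST)
 *		;	// the filter is already attached to this socket
 */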

errno_t
sflt_detach(socket_t socket, sflt_handle handle)
{
	struct socket_filter_entry *entry;
	errno_t result = 0;

	if (socket == NULL || handle == 0)
		return (EINVAL);

	lck_rw_lock_exclusive(sock_filter_lock);
	for (entry = socket->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if (entry->sfe_filter->sf_filter.sf_handle == handle &&
		    (entry->sfe_flags & SFEF_ATTACHED) != 0) {
			break;
		}
	}

	if (entry != NULL) {
		sflt_detach_locked(entry);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}

struct solist {
	struct solist *next;
	struct socket *so;
};

errno_t
sflt_register(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr;
	unsigned int len;
	struct socket *so;
	struct inpcb *inp;
	struct solist *solisthead = NULL, *solist = NULL;

	if ((domain != PF_INET) && (domain != PF_INET6))
		return (ENOTSUP);

	pr = pffindproto(domain, protocol, type);
	if (pr == NULL)
		return (ENOENT);

	if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
	    filter->sf_handle == 0 || filter->sf_name == NULL)
		return (EINVAL);

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter *, sizeof (*sock_filt),
	    M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return (ENOBUFS);
	}

	bzero(sock_filt, sizeof (*sock_filt));

	/* Legacy sflt_filter length; current structure minus extended */
	len = sizeof (*filter) - sizeof (struct sflt_filter_ext);
	/*
	 * Include extended fields if filter defines SFLT_EXTENDED.
	 * We've zeroed out our internal sflt_filter placeholder,
	 * so any unused portion would have been taken care of.
	 */
	if (filter->sf_flags & SFLT_EXTENDED) {
		unsigned int ext_len = filter->sf_len;

		if (ext_len > sizeof (struct sflt_filter_ext))
			ext_len = sizeof (struct sflt_filter_ext);

		len += ext_len;
	}
	bcopy(filter, &sock_filt->sf_filter, len);

	lck_rw_lock_exclusive(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle ==
		    sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
			    sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
		sflt_retain_locked(sock_filt);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	if (match != NULL) {
		FREE(sock_filt, M_IFADDR);
		return (EEXIST);
	}

	if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY))
		return (error);

	/*
	 * Setup the filter on the TCP and UDP sockets already created.
	 */
#define	SOLIST_ADD(_so) do {						\
	solist->next = solisthead;					\
	sock_retain((_so));						\
	solist->so = (_so);						\
	solisthead = solist;						\
} while (0)
	if (protocol == IPPROTO_TCP) {
		lck_rw_lock_shared(tcbinfo.ipi_lock);
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(tcbinfo.ipi_lock);
	} else if (protocol == IPPROTO_UDP) {
		lck_rw_lock_shared(udbinfo.ipi_lock);
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(udbinfo.ipi_lock);
	}
	/* XXX it's possible to walk the raw socket list as well */
#undef SOLIST_ADD

	while (solisthead) {
		sflt_handle handle = filter->sf_handle;

		so = solisthead->so;
		socket_lock(so, 0);
		sflt_initsock(so);
		if (so->so_state & SS_ISCONNECTING)
			sflt_notify_after_register(so, sock_evt_connecting,
			    handle);
		else if (so->so_state & SS_ISCONNECTED)
			sflt_notify_after_register(so, sock_evt_connected,
			    handle);
		else if ((so->so_state &
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE)) ==
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE))
			sflt_notify_after_register(so, sock_evt_disconnecting,
			    handle);
		else if ((so->so_state &
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED)) ==
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED))
			sflt_notify_after_register(so, sock_evt_disconnected,
			    handle);
		else if (so->so_state & SS_CANTSENDMORE)
			sflt_notify_after_register(so, sock_evt_cantsendmore,
			    handle);
		else if (so->so_state & SS_CANTRCVMORE)
			sflt_notify_after_register(so, sock_evt_cantrecvmore,
			    handle);
		socket_unlock(so, 0);
		/* XXX no easy way to post the sock_evt_closing event */
		sock_release(so);
		solist = solisthead;
		solisthead = solisthead->next;
		FREE(solist, M_IFADDR);
	}

	return (error);
}
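
/*
 * Registration sketch (editor's illustration; every name below except the
 * KPI calls is hypothetical).  A kext typically registers one global
 * filter per protocol it wants to see:
 *
 *	static struct sflt_filter example_filter = {
 *		.sf_handle  = EXAMPLE_HANDLE,	// unique, non-zero
 *		.sf_flags   = SFLT_GLOBAL,	// attach to new sockets
 *		.sf_name    = "com.example.filter",
 *		.sf_attach  = example_attach,	// required
 *		.sf_detach  = example_detach,	// required
 *		.sf_data_in = example_data_in,	// optional hooks as needed
 *	};
 *
 *	errno_t err = sflt_register(&example_filter, PF_INET, SOCK_STREAM,
 *	    IPPROTO_TCP);
 *
 * Adding SFLT_EXTENDED_REGISTRY (together with SFLT_EXTENDED and sf_len)
 * also attaches the filter to matching sockets that already exist, as the
 * code above shows.
 */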

errno_t
sflt_unregister(sflt_handle handle)
{
	struct socket_filter *filter;
	lck_rw_lock_exclusive(sock_filter_lock);

	/* Find the entry by the handle */
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		/* Remove it from the global list */
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

		/* Remove it from the protosw list */
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head,
			    filter, sf_protosw_next);
		}

		/* Detach from any sockets */
		struct socket_filter_entry *entry = NULL;

		for (entry = filter->sf_entry_head; entry;
		    entry = entry->sfe_next_onfilter) {
			sflt_detach_locked(entry);
		}

		/* Release the filter */
		sflt_release_locked(filter);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	if (filter == NULL)
		return (ENOENT);

	return (0);
}

errno_t
sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int error = 0;

	if (so == NULL || data == NULL)
		return (EINVAL);

	if (flags & sock_data_filt_flag_oob) {
		return (ENOTSUP);
	}

	socket_lock(so, 1);

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		error = ENOTSUP;
		goto done;
	}

	if (from) {
		if (sbappendaddr(&so->so_rcv,
		    (struct sockaddr *)(uintptr_t)from, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf *)data))
			sorwakeup(so);
		goto done;
	}

	if (sbappend(&so->so_rcv, data))
		sorwakeup(so);
done:
	socket_unlock(so, 1);
	return (error);
}
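
/*
 * Re-injection sketch (editor's illustration with hypothetical names).  A
 * filter that defers inbound data returns EJUSTRETURN from sf_data_in to
 * take ownership of the mbufs, then hands them back later:
 *
 *	static errno_t
 *	example_data_in(void *cookie, socket_t so, const struct sockaddr *from,
 *	    mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags)
 *	{
 *		example_enqueue(cookie, *data, *control, from, flags);
 *		return (EJUSTRETURN);	// we now own *data and *control
 *	}
 *
 *	// later, from a deferred context:
 *	error = sock_inject_data_in(so, from, data, control, flags);
 *
 * Note that sock_inject_data_in() appends to the receive buffer directly
 * without revisiting sflt_data_in(), so this does not loop.
 */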

errno_t
sock_inject_data_out(socket_t so, const struct sockaddr *to, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int sosendflags = 0;

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW)
		return (ENOTSUP);

	if (flags & sock_data_filt_flag_oob)
		sosendflags = MSG_OOB;
	return (sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
	    data, control, sosendflags));
}

sockopt_dir
sockopt_direction(sockopt_t sopt)
{
	return ((sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set);
}

int
sockopt_level(sockopt_t sopt)
{
	return (sopt->sopt_level);
}

int
sockopt_name(sockopt_t sopt)
{
	return (sopt->sopt_name);
}

size_t
sockopt_valsize(sockopt_t sopt)
{
	return (sopt->sopt_valsize);
}

errno_t
sockopt_copyin(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyin(sopt, data, len, len));
}

errno_t
sockopt_copyout(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyout(sopt, data, len));
}
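
/*
 * Accessor sketch (editor's illustration; example_* names and
 * SO_EXAMPLE_OPTION are hypothetical).  An sf_setoption hook inspects and
 * copies option data through the accessors above rather than touching
 * struct sockopt directly:
 *
 *	static errno_t
 *	example_setoption(void *cookie, socket_t so, sockopt_t opt)
 *	{
 *		int val;
 *
 *		if (sockopt_direction(opt) != sockopt_set ||
 *		    sockopt_level(opt) != SOL_SOCKET ||
 *		    sockopt_name(opt) != SO_EXAMPLE_OPTION ||
 *		    sockopt_valsize(opt) != sizeof (val))
 *			return (0);	// not ours; continue processing
 *		if (sockopt_copyin(opt, &val, sizeof (val)) == 0)
 *			example_apply(cookie, val);
 *		return (EJUSTRETURN);	// consume the option ourselves
 *	}
 */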