/*
 * Copyright (c) 2003-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/proc.h>
#include <kern/locks.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <net/kext_net.h>
#include <net/if.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>

#include <string.h>

#define	SFEF_ATTACHED	0x1	/* SFE is on socket list */
#define	SFEF_NODETACH	0x2	/* Detach should not be called */
#define	SFEF_NOSOCKET	0x4	/* Socket is gone */

struct socket_filter_entry {
	struct socket_filter_entry	*sfe_next_onsocket;
	struct socket_filter_entry	*sfe_next_onfilter;
	struct socket_filter_entry	*sfe_next_oncleanup;

	struct socket_filter		*sfe_filter;
	struct socket			*sfe_socket;
	void				*sfe_cookie;

	uint32_t			sfe_flags;
	int32_t				sfe_refcount;
};

struct socket_filter {
	TAILQ_ENTRY(socket_filter)	sf_protosw_next;
	TAILQ_ENTRY(socket_filter)	sf_global_next;
	struct socket_filter_entry	*sf_entry_head;

	struct protosw			*sf_proto;
	struct sflt_filter		sf_filter;
	u_int32_t			sf_refcount;
};

TAILQ_HEAD(socket_filter_list, socket_filter);

static struct socket_filter_list	sock_filter_head;
static lck_rw_t				*sock_filter_lock = NULL;
static lck_mtx_t			*sock_filter_cleanup_lock = NULL;
static struct socket_filter_entry	*sock_filter_cleanup_entries = NULL;
static thread_t				sock_filter_cleanup_thread = NULL;

static void	sflt_cleanup_thread(void *, wait_result_t);
static void	sflt_detach_locked(struct socket_filter_entry *entry);

#pragma mark -- Internal State Management --

__private_extern__ void
sflt_init(void)
{
	lck_grp_attr_t	*grp_attrib = NULL;
	lck_attr_t	*lck_attrib = NULL;
	lck_grp_t	*lck_group = NULL;

	TAILQ_INIT(&sock_filter_head);

	/* Allocate a rw lock */
	grp_attrib = lck_grp_attr_alloc_init();
	lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
	lck_grp_attr_free(grp_attrib);
	lck_attrib = lck_attr_alloc_init();
	sock_filter_lock = lck_rw_alloc_init(lck_group, lck_attrib);
	sock_filter_cleanup_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
	lck_grp_free(lck_group);
	lck_attr_free(lck_attrib);
}

static void
sflt_retain_locked(struct socket_filter *filter)
{
	filter->sf_refcount++;
}

static void
sflt_release_locked(struct socket_filter *filter)
{
	filter->sf_refcount--;
	if (filter->sf_refcount == 0) {
		/* Call the unregistered function */
		if (filter->sf_filter.sf_unregistered) {
			lck_rw_unlock_exclusive(sock_filter_lock);
			filter->sf_filter.sf_unregistered(
			    filter->sf_filter.sf_handle);
			lck_rw_lock_exclusive(sock_filter_lock);
		}

		/* Free the entry */
		FREE(filter, M_IFADDR);
	}
}

static void
sflt_entry_retain(struct socket_filter_entry *entry)
{
	if (OSIncrementAtomic(&entry->sfe_refcount) <= 0) {
		panic("sflt_entry_retain - sfe_refcount <= 0\n");
		/* NOTREACHED */
	}
}

static void
sflt_entry_release(struct socket_filter_entry *entry)
{
	SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);
	if (old == 1) {
		/* That was the last reference */

		/* Take the cleanup lock */
		lck_mtx_lock(sock_filter_cleanup_lock);

		/* Put this item on the cleanup list */
		entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = entry;

		/* If the item is the first item in the list */
		if (entry->sfe_next_oncleanup == NULL) {
			if (sock_filter_cleanup_thread == NULL) {
				/* Create a thread */
				kernel_thread_start(sflt_cleanup_thread,
				    NULL, &sock_filter_cleanup_thread);
			} else {
				/* Wakeup the thread */
				wakeup(&sock_filter_cleanup_entries);
			}
		}

		/* Drop the cleanup lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);
	} else if (old <= 0) {
		panic("sflt_entry_release - sfe_refcount (%d) <= 0\n",
		    (int)old);
		/* NOTREACHED */
	}
}
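
/*
 * Note on the cleanup list: entry reference counts are dropped on the
 * data path without the socket filter lock held, so the final release
 * cannot tear the entry down in place.  Instead it parks the entry on
 * sock_filter_cleanup_entries and wakes (or creates) the cleanup
 * thread below, which reacquires the exclusive filter lock, calls any
 * pending sf_detach, and unlinks the entry from both lists before
 * freeing it.
 */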

static void
sflt_cleanup_thread(void *blah, wait_result_t blah2)
{
#pragma unused(blah, blah2)
	while (1) {
		lck_mtx_lock(sock_filter_cleanup_lock);
		while (sock_filter_cleanup_entries == NULL) {
			/* Sleep until we've got something better to do */
			msleep(&sock_filter_cleanup_entries,
			    sock_filter_cleanup_lock, PWAIT,
			    "sflt_cleanup", NULL);
		}

		/* Pull the current list of dead items */
		struct socket_filter_entry *dead = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = NULL;

		/* Drop the lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);

		/* Take the socket filter lock */
		lck_rw_lock_exclusive(sock_filter_lock);

		/* Cleanup every dead item */
		struct socket_filter_entry *entry;
		for (entry = dead; entry; entry = dead) {
			struct socket_filter_entry **nextpp;

			dead = entry->sfe_next_oncleanup;

			/* Call detach function if necessary - drop the lock */
			if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
			    entry->sfe_filter->sf_filter.sf_detach) {
				entry->sfe_flags |= SFEF_NODETACH;
				lck_rw_unlock_exclusive(sock_filter_lock);

				/*
				 * Warning - passing a potentially
				 * dead socket may be bad
				 */
				entry->sfe_filter->sf_filter.sf_detach(
				    entry->sfe_cookie, entry->sfe_socket);

				lck_rw_lock_exclusive(sock_filter_lock);
			}

			/*
			 * Pull entry off the socket list --
			 * if the socket still exists
			 */
			if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
				for (nextpp = &entry->sfe_socket->so_filt;
				    *nextpp;
				    nextpp = &(*nextpp)->sfe_next_onsocket) {
					if (*nextpp == entry) {
						*nextpp =
						    entry->sfe_next_onsocket;
						break;
					}
				}
			}

			/* Pull entry off the filter list */
			for (nextpp = &entry->sfe_filter->sf_entry_head;
			    *nextpp; nextpp = &(*nextpp)->sfe_next_onfilter) {
				if (*nextpp == entry) {
					*nextpp = entry->sfe_next_onfilter;
					break;
				}
			}

			/*
			 * Release the filter -- may drop lock, but that's okay
			 */
			sflt_release_locked(entry->sfe_filter);
			entry->sfe_socket = NULL;
			entry->sfe_filter = NULL;
			FREE(entry, M_IFADDR);
		}

		/* Drop the socket filter lock */
		lck_rw_unlock_exclusive(sock_filter_lock);
	}
	/* NOTREACHED */
}

static int
sflt_attach_locked(struct socket *so, struct socket_filter *filter,
    int socklocked)
{
	int error = 0;
	struct socket_filter_entry *entry = NULL;

	if (filter == NULL)
		return (ENOENT);

	for (entry = so->so_filt; entry; entry = entry->sfe_next_onfilter) {
		if (entry->sfe_filter->sf_filter.sf_handle ==
		    filter->sf_filter.sf_handle)
			return (EEXIST);
	}
	/* allocate the socket filter entry */
	MALLOC(entry, struct socket_filter_entry *, sizeof (*entry), M_IFADDR,
	    M_WAITOK);
	if (entry == NULL)
		return (ENOMEM);

	/* Initialize the socket filter entry */
	entry->sfe_cookie = NULL;
	entry->sfe_flags = SFEF_ATTACHED;
	entry->sfe_refcount = 1;	/* corresponds to SFEF_ATTACHED flag set */

	/* Put the entry in the filter list */
	sflt_retain_locked(filter);
	entry->sfe_filter = filter;
	entry->sfe_next_onfilter = filter->sf_entry_head;
	filter->sf_entry_head = entry;

	/* Put the entry on the socket filter list */
	entry->sfe_socket = so;
	entry->sfe_next_onsocket = so->so_filt;
	so->so_filt = entry;

	if (entry->sfe_filter->sf_filter.sf_attach) {
		/* Retain the entry while we call attach */
		sflt_entry_retain(entry);

		/*
		 * Release the filter lock --
		 * callers must be aware we will do this
		 */
		lck_rw_unlock_exclusive(sock_filter_lock);

		/* Unlock the socket */
		if (socklocked)
			socket_unlock(so, 0);

		/* It's finally safe to call the filter function */
		error = entry->sfe_filter->sf_filter.sf_attach(
		    &entry->sfe_cookie, so);

		/* Lock the socket again */
		if (socklocked)
			socket_lock(so, 0);

		/* Lock the filters again */
		lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * If the attach function returns an error,
		 * this filter must be detached
		 */
		if (error) {
			/* don't call sf_detach */
			entry->sfe_flags |= SFEF_NODETACH;
			sflt_detach_locked(entry);
		}

		/* Release the retain we held through the attach call */
		sflt_entry_release(entry);
	}

	return (error);
}

errno_t
sflt_attach_internal(socket_t socket, sflt_handle handle)
{
	if (socket == NULL || handle == 0)
		return (EINVAL);

	int result = EINVAL;

	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter *filter = NULL;
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		result = sflt_attach_locked(socket, filter, 1);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}

static void
sflt_detach_locked(struct socket_filter_entry *entry)
{
	if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
		entry->sfe_flags &= ~SFEF_ATTACHED;
		sflt_entry_release(entry);
	}
}

#pragma mark -- Socket Layer Hooks --

__private_extern__ void
sflt_initsock(struct socket *so)
{
	/*
	 * Point to the real protosw, as so_proto might have been
	 * pointed to a modified version.
	 */
	struct protosw *proto = so->so_proto->pr_protosw;

	lck_rw_lock_shared(sock_filter_lock);
	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		/* Promote lock to exclusive */
		if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock))
			lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * Warning: A filter unregistering will be pulled out of
		 * the list. This could happen while we drop the lock in
		 * sflt_attach_locked or sflt_release_locked. For this
		 * reason we retain a reference on the filter (or next_filter)
		 * while calling this function. This protects us from a panic,
		 * but it could result in a socket being created without all
		 * of the global filters if we're attaching a filter as it
		 * is removed, if that's possible.
		 */
		struct socket_filter *filter =
		    TAILQ_FIRST(&proto->pr_filter_head);

		sflt_retain_locked(filter);

		while (filter) {
			struct socket_filter *filter_next;
			/*
			 * Warning: sflt_attach_locked
			 * will drop the lock
			 */
			sflt_attach_locked(so, filter, 0);

			filter_next = TAILQ_NEXT(filter, sf_protosw_next);
			if (filter_next)
				sflt_retain_locked(filter_next);

			/*
			 * Warning: sflt_release_locked may remove
			 * the filter from the queue
			 */
			sflt_release_locked(filter);
			filter = filter_next;
		}
	}
	lck_rw_done(sock_filter_lock);
}

/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.
 */
__private_extern__ void
sflt_termsock(struct socket *so)
{
	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter_entry *entry;

	while ((entry = so->so_filt) != NULL) {
		/* Pull filter off the socket */
		so->so_filt = entry->sfe_next_onsocket;
		entry->sfe_flags |= SFEF_NOSOCKET;

		/* Call detach */
		sflt_detach_locked(entry);

		/*
		 * On sflt_termsock, we can't return until the detach function
		 * has been called. Call the detach function - this is gross
		 * because the socket filter entry could be freed when we drop
		 * the lock, so we make copies on the stack and retain
		 * everything we need before dropping the lock.
		 */
		if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
		    entry->sfe_filter->sf_filter.sf_detach) {
			void *sfe_cookie = entry->sfe_cookie;
			struct socket_filter *sfe_filter = entry->sfe_filter;

			/* Retain the socket filter */
			sflt_retain_locked(sfe_filter);

			/* Mark that we've called the detach function */
			entry->sfe_flags |= SFEF_NODETACH;

			/* Drop the lock before calling the detach function */
			lck_rw_unlock_exclusive(sock_filter_lock);
			sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
			lck_rw_lock_exclusive(sock_filter_lock);

			/* Release the filter */
			sflt_release_locked(sfe_filter);
		}
	}

	lck_rw_unlock_exclusive(sock_filter_lock);
}


static void
sflt_notify_internal(struct socket *so, sflt_event_t event, void *param,
    sflt_handle handle)
{
	if (so->so_filt == NULL)
		return;

	struct socket_filter_entry *entry;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_notify &&
		    ((handle && entry->sfe_filter->sf_filter.sf_handle !=
		    handle) || !handle)) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Finally call the filter */
			entry->sfe_filter->sf_filter.sf_notify(
			    entry->sfe_cookie, so, event, param);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked != 0) {
		socket_lock(so, 0);
	}
}

__private_extern__ void
sflt_notify(struct socket *so, sflt_event_t event, void *param)
{
	sflt_notify_internal(so, event, param, 0);
}

static void
sflt_notify_after_register(struct socket *so, sflt_event_t event,
    sflt_handle handle)
{
	sflt_notify_internal(so, event, NULL, handle);
}

__private_extern__ int
sflt_ioctl(struct socket *so, u_long cmd, caddr_t data)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_ioctl) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_ioctl(
			    entry->sfe_cookie, so, cmd, data);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_bind(struct socket *so, const struct sockaddr *nam)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_bind) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_bind(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again and
			 * release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_listen(struct socket *so)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_listen) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_listen(
			    entry->sfe_cookie, so);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_accept(struct socket *head, struct socket *so,
    const struct sockaddr *local, const struct sockaddr *remote)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_accept) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_accept(
			    entry->sfe_cookie, head, so, local, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getsockname(struct socket *so, struct sockaddr **local)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getsockname) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getsockname(
			    entry->sfe_cookie, so, local);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getpeername(struct socket *so, struct sockaddr **remote)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getpeername) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getpeername(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_connectin(struct socket *so, const struct sockaddr *remote)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_in) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_in(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

static int
sflt_connectout_common(struct socket *so, const struct sockaddr *nam)
{
	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_out) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_out(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_connectout(struct socket *so, const struct sockaddr *nam)
{
	char buf[SOCK_MAXADDRLEN];
	struct sockaddr *sa;
	int error;

	if (so->so_filt == NULL)
		return (0);

	/*
	 * Workaround for rdar://23362120
	 * Always pass a buffer that can hold an IPv6 socket address
	 */
	bzero(buf, sizeof (buf));
	bcopy(nam, buf, nam->sa_len);
	sa = (struct sockaddr *)buf;

	error = sflt_connectout_common(so, sa);
	if (error != 0)
		return (error);

	/*
	 * If the address was modified, copy it back
	 */
	if (bcmp(sa, nam, nam->sa_len) != 0) {
		bcopy(sa, (struct sockaddr *)(uintptr_t)nam, nam->sa_len);
	}

	return (0);
}

__private_extern__ int
sflt_connectxout(struct socket *so, struct sockaddr_list **dst_sl0)
{
	struct sockaddr_list *dst_sl;
	struct sockaddr_entry *se, *tse;
	int modified = 0;
	int error = 0;

	if (so->so_filt == NULL)
		return (0);

	/* make a copy as sflt_connectout() releases socket lock */
	dst_sl = sockaddrlist_dup(*dst_sl0, M_WAITOK);
	if (dst_sl == NULL)
		return (ENOBUFS);

	/*
	 * We don't yet have a connectx socket filter callback, so the
	 * closest approximation is to call sflt_connectout() once per
	 * address in the list, and bail as soon as we get an error.
	 */
	TAILQ_FOREACH_SAFE(se, &dst_sl->sl_head, se_link, tse) {
		char buf[SOCK_MAXADDRLEN];
		struct sockaddr *sa;

		VERIFY(se->se_addr != NULL);

		/*
		 * Workaround for rdar://23362120
		 * Always pass a buffer that can hold an IPv6 socket address
		 */
		bzero(buf, sizeof (buf));
		bcopy(se->se_addr, buf, se->se_addr->sa_len);
		sa = (struct sockaddr *)buf;

		error = sflt_connectout_common(so, sa);
		if (error != 0)
			break;

		/*
		 * If the address was modified, copy it back
		 */
		if (bcmp(se->se_addr, sa, se->se_addr->sa_len) != 0) {
			bcopy(sa, se->se_addr, se->se_addr->sa_len);
			modified = 1;
		}
	}

	if (error != 0 || !modified) {
		/* leave the original as is */
		sockaddrlist_free(dst_sl);
	} else {
		/*
		 * At least one address was modified and there were no errors;
		 * ditch the original and return the modified list.
		 */
		sockaddrlist_free(*dst_sl0);
		*dst_sl0 = dst_sl;
	}

	return (error);
}

__private_extern__ int
sflt_setsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_setoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_setoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int setsendthread = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_out) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				if (so->so_send_filt_thread == NULL) {
					setsendthread = 1;
					so->so_send_filt_thread =
					    current_thread();
				}
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_out(
			    entry->sfe_cookie, so, to, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
		if (setsendthread)
			so->so_send_filt_thread = NULL;
	}

	return (error);
}

__private_extern__ int
sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int error = 0;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);

	for (entry = so->so_filt; entry && (error == 0);
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_in) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_in(
			    entry->sfe_cookie, so, from, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

#pragma mark -- KPI --

errno_t
sflt_attach(socket_t socket, sflt_handle handle)
{
	socket_lock(socket, 1);
	errno_t result = sflt_attach_internal(socket, handle);
	socket_unlock(socket, 1);
	return (result);
}

errno_t
sflt_detach(socket_t socket, sflt_handle handle)
{
	struct socket_filter_entry *entry;
	errno_t result = 0;

	if (socket == NULL || handle == 0)
		return (EINVAL);

	lck_rw_lock_exclusive(sock_filter_lock);
	for (entry = socket->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if (entry->sfe_filter->sf_filter.sf_handle == handle &&
		    (entry->sfe_flags & SFEF_ATTACHED) != 0) {
			break;
		}
	}

	if (entry != NULL) {
		sflt_detach_locked(entry);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}

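/*
 * Usage sketch (not part of this file): once a filter is registered
 * under a handle, a kext can attach it to one specific socket with
 * sflt_attach() and remove it with sflt_detach().  The handle value
 * and helper below are hypothetical.
 */
#if 0
#include <sys/kpi_socket.h>

#define	MYFILT_HANDLE	0x4d594654	/* hypothetical registered handle */

static errno_t
myfilt_attach_to_new_socket(void)
{
	socket_t so = NULL;
	errno_t error;

	/* Create a kernel TCP socket, then attach the filter by handle */
	error = sock_socket(PF_INET, SOCK_STREAM, IPPROTO_TCP,
	    NULL, NULL, &so);
	if (error != 0)
		return (error);

	error = sflt_attach(so, MYFILT_HANDLE);
	if (error != 0) {
		sock_close(so);
		return (error);
	}

	/* ... use the socket; the filter now sees its traffic ... */

	/* Explicit detach is optional; sflt_termsock() detaches at close */
	(void) sflt_detach(so, MYFILT_HANDLE);
	sock_close(so);
	return (0);
}
#endif
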
struct solist {
	struct solist	*next;
	struct socket	*so;
};

errno_t
sflt_register(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr;
	unsigned int len;
	struct socket *so;
	struct inpcb *inp;
	struct solist *solisthead = NULL, *solist = NULL;

	if ((domain != PF_INET) && (domain != PF_INET6))
		return (ENOTSUP);

	pr = pffindproto(domain, protocol, type);
	if (pr == NULL)
		return (ENOENT);

	if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
	    filter->sf_handle == 0 || filter->sf_name == NULL)
		return (EINVAL);

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter *, sizeof (*sock_filt),
	    M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return (ENOBUFS);
	}

	bzero(sock_filt, sizeof (*sock_filt));

	/* Legacy sflt_filter length; current structure minus extended */
	len = sizeof (*filter) - sizeof (struct sflt_filter_ext);
	/*
	 * Include extended fields if filter defines SFLT_EXTENDED.
	 * We've zeroed out our internal sflt_filter placeholder,
	 * so any unused portion would have been taken care of.
	 */
	if (filter->sf_flags & SFLT_EXTENDED) {
		unsigned int ext_len = filter->sf_len;

		if (ext_len > sizeof (struct sflt_filter_ext))
			ext_len = sizeof (struct sflt_filter_ext);

		len += ext_len;
	}
	bcopy(filter, &sock_filt->sf_filter, len);

	lck_rw_lock_exclusive(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle ==
		    sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
			    sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
		sflt_retain_locked(sock_filt);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	if (match != NULL) {
		FREE(sock_filt, M_IFADDR);
		return (EEXIST);
	}

	if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY))
		return (error);

	/*
	 * Setup the filter on the TCP and UDP sockets already created.
	 */
#define	SOLIST_ADD(_so) do {						\
	solist->next = solisthead;					\
	sock_retain((_so));						\
	solist->so = (_so);						\
	solisthead = solist;						\
} while (0)
	if (protocol == IPPROTO_TCP) {
		lck_rw_lock_shared(tcbinfo.ipi_lock);
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(tcbinfo.ipi_lock);
	} else if (protocol == IPPROTO_UDP) {
		lck_rw_lock_shared(udbinfo.ipi_lock);
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(udbinfo.ipi_lock);
	}
	/* XXX it's possible to walk the raw socket list as well */
#undef SOLIST_ADD

	while (solisthead) {
		sflt_handle handle = filter->sf_handle;

		so = solisthead->so;
		socket_lock(so, 0);
		sflt_initsock(so);
		if (so->so_state & SS_ISCONNECTING)
			sflt_notify_after_register(so, sock_evt_connecting,
			    handle);
		else if (so->so_state & SS_ISCONNECTED)
			sflt_notify_after_register(so, sock_evt_connected,
			    handle);
		else if ((so->so_state &
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE)) ==
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE))
			sflt_notify_after_register(so, sock_evt_disconnecting,
			    handle);
		else if ((so->so_state &
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED)) ==
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED))
			sflt_notify_after_register(so, sock_evt_disconnected,
			    handle);
		else if (so->so_state & SS_CANTSENDMORE)
			sflt_notify_after_register(so, sock_evt_cantsendmore,
			    handle);
		else if (so->so_state & SS_CANTRCVMORE)
			sflt_notify_after_register(so, sock_evt_cantrecvmore,
			    handle);
		socket_unlock(so, 0);
		/* XXX no easy way to post the sock_evt_closing event */
		sock_release(so);
		solist = solisthead;
		solisthead = solisthead->next;
		FREE(solist, M_IFADDR);
	}

	return (error);
}

errno_t
sflt_unregister(sflt_handle handle)
{
	struct socket_filter *filter;
	lck_rw_lock_exclusive(sock_filter_lock);

	/* Find the entry by the handle */
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		/* Remove it from the global list */
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

		/* Remove it from the protosw list */
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head,
			    filter, sf_protosw_next);
		}

		/* Detach from any sockets */
		struct socket_filter_entry *entry = NULL;

		for (entry = filter->sf_entry_head; entry;
		    entry = entry->sfe_next_onfilter) {
			sflt_detach_locked(entry);
		}

		/* Release the filter */
		sflt_release_locked(filter);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	if (filter == NULL)
		return (ENOENT);

	return (0);
}

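/*
 * Registration sketch (not part of this file): the smallest filter a
 * kext could register against TCP/IPv4.  sflt_register() above enforces
 * exactly what this relies on: a nonzero sf_handle, a name, and both
 * sf_attach and sf_detach.  The handle, name, and callback bodies are
 * hypothetical.
 */
#if 0
#define	MYFILT_HANDLE	0x4d594654	/* hypothetical */

static errno_t
myfilt_attach(void **cookie, socket_t so)
{
	*cookie = NULL;		/* per-socket state would be set up here */
	return (0);		/* a nonzero return rejects the attach */
}

static void
myfilt_detach(void *cookie, socket_t so)
{
	/* per-socket state would be torn down here */
}

static struct sflt_filter myfilt = {
	.sf_handle	= MYFILT_HANDLE,
	.sf_flags	= SFLT_GLOBAL,	/* attach to every matching socket */
	.sf_name	= "com.example.myfilt",
	.sf_attach	= myfilt_attach,
	.sf_detach	= myfilt_detach,
};

/* In the kext start routine: */
/*	error = sflt_register(&myfilt, PF_INET, SOCK_STREAM, IPPROTO_TCP); */
/* And in the stop routine: */
/*	error = sflt_unregister(MYFILT_HANDLE); */
#endif
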
errno_t
sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int error = 0;

	if (so == NULL || data == NULL)
		return (EINVAL);

	if (flags & sock_data_filt_flag_oob) {
		return (ENOTSUP);
	}

	socket_lock(so, 1);

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		error = ENOTSUP;
		goto done;
	}

	if (from) {
		if (sbappendaddr(&so->so_rcv,
		    (struct sockaddr *)(uintptr_t)from, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf *)data))
			sorwakeup(so);
		goto done;
	}

	if (sbappend(&so->so_rcv, data))
		sorwakeup(so);
done:
	socket_unlock(so, 1);
	return (error);
}

errno_t
sock_inject_data_out(socket_t so, const struct sockaddr *to, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int sosendflags = 0;

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW)
		return (ENOTSUP);

	if (flags & sock_data_filt_flag_oob)
		sosendflags = MSG_OOB;
	return (sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
	    data, control, sosendflags));
}

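/*
 * Injection sketch (not part of this file): a filter that swallowed a
 * packet in its sf_data_in callback (by returning EJUSTRETURN after
 * taking ownership of the mbufs) can later hand it back to the socket
 * with sock_inject_data_in().  Re-injected data passes through the
 * filter chain again, so a real filter needs some way to recognize its
 * own injections; the cookie flag here is one hypothetical way to do
 * that, and the error-path ownership assumption is noted in a comment.
 */
#if 0
struct myfilt_state {
	mbuf_t	held_data;	/* data we swallowed */
	int	injecting;	/* set while re-injecting */
};

static errno_t
myfilt_data_in(void *cookie, socket_t so, const struct sockaddr *from,
    mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags)
{
	struct myfilt_state *state = cookie;

	if (state->injecting)
		return (0);		/* our own re-injection; pass it */

	/* Take ownership of the data and tell the stack we swallowed it */
	state->held_data = *data;
	if (control != NULL && *control != NULL)
		mbuf_freem(*control);	/* sketch: discard control data */
	return (EJUSTRETURN);
}

static void
myfilt_release_held(socket_t so, struct myfilt_state *state)
{
	/* Hand the held data back to the socket's receive buffer */
	state->injecting = 1;
	if (sock_inject_data_in(so, NULL, state->held_data, NULL, 0) != 0)
		mbuf_freem(state->held_data);	/* assumed: not consumed on error */
	state->injecting = 0;
	state->held_data = NULL;
}
#endif
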
sockopt_dir
sockopt_direction(sockopt_t sopt)
{
	return ((sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set);
}

int
sockopt_level(sockopt_t sopt)
{
	return (sopt->sopt_level);
}

int
sockopt_name(sockopt_t sopt)
{
	return (sopt->sopt_name);
}

size_t
sockopt_valsize(sockopt_t sopt)
{
	return (sopt->sopt_valsize);
}

errno_t
sockopt_copyin(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyin(sopt, data, len, len));
}

errno_t
sockopt_copyout(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyout(sopt, data, len));
}
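
/*
 * Accessor sketch (not part of this file): the sockopt accessors above
 * exist so a filter's sf_setoption/sf_getoption callbacks can inspect
 * an option without reaching into struct sockopt.  The level/name pair
 * is real (SOL_SOCKET/SO_SNDBUF); the policy it enforces and the size
 * cap are hypothetical.
 */
#if 0
static errno_t
myfilt_setoption(void *cookie, socket_t so, sockopt_t opt)
{
	int val;
	errno_t error;

	if (sockopt_direction(opt) != sockopt_set ||
	    sockopt_level(opt) != SOL_SOCKET ||
	    sockopt_name(opt) != SO_SNDBUF)
		return (0);	/* not ours; let processing continue */

	if (sockopt_valsize(opt) < sizeof (val))
		return (EINVAL);

	/* Copy the caller's value in and veto oversized send buffers */
	error = sockopt_copyin(opt, &val, sizeof (val));
	if (error == 0 && val > (1 << 20))
		error = EPERM;	/* hypothetical 1 MB cap */

	return (error);		/* nonzero fails the setsockopt() call */
}
#endif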