/*
 * Copyright (c) 2003-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/proc.h>
#include <kern/locks.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <net/kext_net.h>
#include <net/if.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>

#include <string.h>

#define	SFEF_ATTACHED	0x1	/* SFE is on socket list */
#define	SFEF_NODETACH	0x2	/* Detach should not be called */
#define	SFEF_NOSOCKET	0x4	/* Socket is gone */

struct socket_filter_entry {
	struct socket_filter_entry	*sfe_next_onsocket;
	struct socket_filter_entry	*sfe_next_onfilter;
	struct socket_filter_entry	*sfe_next_oncleanup;

	struct socket_filter		*sfe_filter;
	struct socket			*sfe_socket;
	void				*sfe_cookie;

	uint32_t			sfe_flags;
	int32_t				sfe_refcount;
};

struct socket_filter {
	TAILQ_ENTRY(socket_filter)	sf_protosw_next;
	TAILQ_ENTRY(socket_filter)	sf_global_next;
	struct socket_filter_entry	*sf_entry_head;

	struct protosw			*sf_proto;
	struct sflt_filter		sf_filter;
	u_int32_t			sf_refcount;
};

TAILQ_HEAD(socket_filter_list, socket_filter);

static struct socket_filter_list	sock_filter_head;
static lck_rw_t				*sock_filter_lock = NULL;
static lck_mtx_t			*sock_filter_cleanup_lock = NULL;
static struct socket_filter_entry	*sock_filter_cleanup_entries = NULL;
static thread_t				sock_filter_cleanup_thread = NULL;

static void sflt_cleanup_thread(void *, wait_result_t);
static void sflt_detach_locked(struct socket_filter_entry *entry);

#pragma mark -- Internal State Management --

__private_extern__ int
sflt_permission_check(struct inpcb *inp)
{
	/*
	 * All these permissions only apply to the co-processor interface,
	 * so ignore IPv4.
	 */
	if (!(inp->inp_vflag & INP_IPV6)) {
		return (0);
	}
	/* Sockets that have this entitlement bypass socket filters. */
	if (INP_INTCOPROC_ALLOWED(inp)) {
		return (1);
	}
	if ((inp->inp_flags & INP_BOUND_IF) &&
	    IFNET_IS_INTCOPROC(inp->inp_boundifp)) {
		return (1);
	}
	return (0);
}
117
__private_extern__ void
sflt_init(void)
{
	lck_grp_attr_t *grp_attrib = NULL;
	lck_attr_t *lck_attrib = NULL;
	lck_grp_t *lck_group = NULL;

	TAILQ_INIT(&sock_filter_head);

	/* Allocate the rw lock and the cleanup mutex */
	grp_attrib = lck_grp_attr_alloc_init();
	lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
	lck_grp_attr_free(grp_attrib);
	lck_attrib = lck_attr_alloc_init();
	sock_filter_lock = lck_rw_alloc_init(lck_group, lck_attrib);
	sock_filter_cleanup_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
	lck_grp_free(lck_group);
	lck_attr_free(lck_attrib);
}

static void
sflt_retain_locked(struct socket_filter *filter)
{
	filter->sf_refcount++;
}

static void
sflt_release_locked(struct socket_filter *filter)
{
	filter->sf_refcount--;
	if (filter->sf_refcount == 0) {
		/* Call the filter's sf_unregistered callback */
		if (filter->sf_filter.sf_unregistered) {
			lck_rw_unlock_exclusive(sock_filter_lock);
			filter->sf_filter.sf_unregistered(
			    filter->sf_filter.sf_handle);
			lck_rw_lock_exclusive(sock_filter_lock);
		}

		/* Free the entry */
		FREE(filter, M_IFADDR);
	}
}

static void
sflt_entry_retain(struct socket_filter_entry *entry)
{
	if (OSIncrementAtomic(&entry->sfe_refcount) <= 0) {
		panic("sflt_entry_retain - sfe_refcount <= 0\n");
		/* NOTREACHED */
	}
}

static void
sflt_entry_release(struct socket_filter_entry *entry)
{
	SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);
	if (old == 1) {
		/* That was the last reference */

		/* Take the cleanup lock */
		lck_mtx_lock(sock_filter_cleanup_lock);

		/* Put this item on the cleanup list */
		entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = entry;

		/* If the item is the first item in the list */
		if (entry->sfe_next_oncleanup == NULL) {
			if (sock_filter_cleanup_thread == NULL) {
				/* Create a thread */
				kernel_thread_start(sflt_cleanup_thread,
				    NULL, &sock_filter_cleanup_thread);
			} else {
				/* Wakeup the thread */
				wakeup(&sock_filter_cleanup_entries);
			}
		}

		/* Drop the cleanup lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);
	} else if (old <= 0) {
		panic("sflt_entry_release - sfe_refcount (%d) <= 0\n",
		    (int)old);
		/* NOTREACHED */
	}
}
__attribute__((noreturn))
static void
sflt_cleanup_thread(void *blah, wait_result_t blah2)
{
#pragma unused(blah, blah2)
	while (1) {
		lck_mtx_lock(sock_filter_cleanup_lock);
		while (sock_filter_cleanup_entries == NULL) {
			/* Sleep until we've got something better to do */
			msleep(&sock_filter_cleanup_entries,
			    sock_filter_cleanup_lock, PWAIT,
			    "sflt_cleanup", NULL);
		}

		/* Pull the current list of dead items */
		struct socket_filter_entry *dead = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = NULL;

		/* Drop the lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);

		/* Take the socket filter lock */
		lck_rw_lock_exclusive(sock_filter_lock);

		/* Cleanup every dead item */
		struct socket_filter_entry *entry;
		for (entry = dead; entry; entry = dead) {
			struct socket_filter_entry **nextpp;

			dead = entry->sfe_next_oncleanup;

			/* Call detach function if necessary - drop the lock */
			if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
			    entry->sfe_filter->sf_filter.sf_detach) {
				entry->sfe_flags |= SFEF_NODETACH;
				lck_rw_unlock_exclusive(sock_filter_lock);

				/*
				 * Warning - passing a potentially
				 * dead socket may be bad
				 */
				entry->sfe_filter->sf_filter.sf_detach(
				    entry->sfe_cookie, entry->sfe_socket);

				lck_rw_lock_exclusive(sock_filter_lock);
			}

			/*
			 * Pull entry off the socket list --
			 * if the socket still exists
			 */
			if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
				for (nextpp = &entry->sfe_socket->so_filt;
				    *nextpp;
				    nextpp = &(*nextpp)->sfe_next_onsocket) {
					if (*nextpp == entry) {
						*nextpp =
						    entry->sfe_next_onsocket;
						break;
					}
				}
			}

			/* Pull entry off the filter list */
			for (nextpp = &entry->sfe_filter->sf_entry_head;
			    *nextpp; nextpp = &(*nextpp)->sfe_next_onfilter) {
				if (*nextpp == entry) {
					*nextpp = entry->sfe_next_onfilter;
					break;
				}
			}

			/*
			 * Release the filter -- may drop lock, but that's okay
			 */
			sflt_release_locked(entry->sfe_filter);
			entry->sfe_socket = NULL;
			entry->sfe_filter = NULL;
			FREE(entry, M_IFADDR);
		}

		/* Drop the socket filter lock */
		lck_rw_unlock_exclusive(sock_filter_lock);
	}
	/* NOTREACHED */
}
static int
sflt_attach_locked(struct socket *so, struct socket_filter *filter,
    int socklocked)
{
	int error = 0;
	struct socket_filter_entry *entry = NULL;

	if (sflt_permission_check(sotoinpcb(so)))
		return (0);

	if (filter == NULL)
		return (ENOENT);

	for (entry = so->so_filt; entry; entry = entry->sfe_next_onfilter) {
		if (entry->sfe_filter->sf_filter.sf_handle ==
		    filter->sf_filter.sf_handle)
			return (EEXIST);
	}
	/* allocate the socket filter entry */
	MALLOC(entry, struct socket_filter_entry *, sizeof (*entry), M_IFADDR,
	    M_WAITOK);
	if (entry == NULL)
		return (ENOMEM);

	/* Initialize the socket filter entry */
	entry->sfe_cookie = NULL;
	entry->sfe_flags = SFEF_ATTACHED;
	entry->sfe_refcount = 1;	/* corresponds to SFEF_ATTACHED flag set */

	/* Put the entry in the filter list */
	sflt_retain_locked(filter);
	entry->sfe_filter = filter;
	entry->sfe_next_onfilter = filter->sf_entry_head;
	filter->sf_entry_head = entry;

	/* Put the entry on the socket filter list */
	entry->sfe_socket = so;
	entry->sfe_next_onsocket = so->so_filt;
	so->so_filt = entry;

	if (entry->sfe_filter->sf_filter.sf_attach) {
		/* Retain the entry while we call attach */
		sflt_entry_retain(entry);

		/*
		 * Release the filter lock --
		 * callers must be aware we will do this
		 */
		lck_rw_unlock_exclusive(sock_filter_lock);

		/* Unlock the socket */
		if (socklocked)
			socket_unlock(so, 0);

		/* It's finally safe to call the filter function */
		error = entry->sfe_filter->sf_filter.sf_attach(
		    &entry->sfe_cookie, so);

		/* Lock the socket again */
		if (socklocked)
			socket_lock(so, 0);

		/* Lock the filters again */
		lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * If the attach function returns an error,
		 * this filter must be detached
		 */
		if (error) {
			/* don't call sf_detach */
			entry->sfe_flags |= SFEF_NODETACH;
			sflt_detach_locked(entry);
		}

		/* Release the retain we held through the attach call */
		sflt_entry_release(entry);
	}

	return (error);
}

errno_t
sflt_attach_internal(socket_t socket, sflt_handle handle)
{
	if (socket == NULL || handle == 0)
		return (EINVAL);

	int result = EINVAL;

	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter *filter = NULL;
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		result = sflt_attach_locked(socket, filter, 1);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}

static void
sflt_detach_locked(struct socket_filter_entry *entry)
{
	if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
		entry->sfe_flags &= ~SFEF_ATTACHED;
		sflt_entry_release(entry);
	}
}
#pragma mark -- Socket Layer Hooks --

__private_extern__ void
sflt_initsock(struct socket *so)
{
	/*
	 * Point to the real protosw, as so_proto might have been
	 * pointed to a modified version.
	 */
	struct protosw *proto = so->so_proto->pr_protosw;

	lck_rw_lock_shared(sock_filter_lock);
	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		/* Promote lock to exclusive */
		if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock))
			lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * Warning: A filter unregistering will be pulled out of
		 * the list. This could happen while we drop the lock in
		 * sflt_attach_locked or sflt_release_locked. For this
		 * reason we retain a reference on the filter (or next_filter)
		 * while calling this function. This protects us from a panic,
		 * but it could result in a socket being created without all
		 * of the global filters if a filter is unregistered while
		 * we are attaching it.
		 */
		struct socket_filter *filter =
		    TAILQ_FIRST(&proto->pr_filter_head);

		sflt_retain_locked(filter);

		while (filter) {
			struct socket_filter *filter_next;
			/*
			 * Warning: sflt_attach_locked
			 * will drop the lock
			 */
			sflt_attach_locked(so, filter, 0);

			filter_next = TAILQ_NEXT(filter, sf_protosw_next);
			if (filter_next)
				sflt_retain_locked(filter_next);

			/*
			 * Warning: sflt_release_locked may remove
			 * the filter from the queue
			 */
			sflt_release_locked(filter);
			filter = filter_next;
		}
	}
	lck_rw_done(sock_filter_lock);
}
/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.
 */
__private_extern__ void
sflt_termsock(struct socket *so)
{
	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter_entry *entry;

	while ((entry = so->so_filt) != NULL) {
		/* Pull filter off the socket */
		so->so_filt = entry->sfe_next_onsocket;
		entry->sfe_flags |= SFEF_NOSOCKET;

		/* Call detach */
		sflt_detach_locked(entry);

		/*
		 * On sflt_termsock, we can't return until the detach function
		 * has been called. Call the detach function - this is gross
		 * because the socket filter entry could be freed when we drop
		 * the lock, so we make copies on the stack and retain
		 * everything we need before dropping the lock.
		 */
		if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
		    entry->sfe_filter->sf_filter.sf_detach) {
			void *sfe_cookie = entry->sfe_cookie;
			struct socket_filter *sfe_filter = entry->sfe_filter;

			/* Retain the socket filter */
			sflt_retain_locked(sfe_filter);

			/* Mark that we've called the detach function */
			entry->sfe_flags |= SFEF_NODETACH;

			/* Drop the lock before calling the detach function */
			lck_rw_unlock_exclusive(sock_filter_lock);
			sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
			lck_rw_lock_exclusive(sock_filter_lock);

			/* Release the filter */
			sflt_release_locked(sfe_filter);
		}
	}

	lck_rw_unlock_exclusive(sock_filter_lock);
}
static void
sflt_notify_internal(struct socket *so, sflt_event_t event, void *param,
    sflt_handle handle)
{
	if (so->so_filt == NULL)
		return;

	struct socket_filter_entry *entry;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_notify &&
		    ((handle && entry->sfe_filter->sf_filter.sf_handle !=
		    handle) || !handle)) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Finally call the filter */
			entry->sfe_filter->sf_filter.sf_notify(
			    entry->sfe_cookie, so, event, param);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked != 0) {
		socket_lock(so, 0);
	}
}

__private_extern__ void
sflt_notify(struct socket *so, sflt_event_t event, void *param)
{
	sflt_notify_internal(so, event, param, 0);
}

static void
sflt_notify_after_register(struct socket *so, sflt_event_t event,
    sflt_handle handle)
{
	sflt_notify_internal(so, event, NULL, handle);
}
__private_extern__ int
sflt_ioctl(struct socket *so, u_long cmd, caddr_t data)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_ioctl) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_ioctl(
			    entry->sfe_cookie, so, cmd, data);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_bind(struct socket *so, const struct sockaddr *nam)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_bind) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_bind(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again and
			 * release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_listen(struct socket *so)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_listen) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_listen(
			    entry->sfe_cookie, so);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_accept(struct socket *head, struct socket *so,
    const struct sockaddr *local, const struct sockaddr *remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_accept) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_accept(
			    entry->sfe_cookie, head, so, local, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_getsockname(struct socket *so, struct sockaddr **local)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getsockname) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getsockname(
			    entry->sfe_cookie, so, local);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_getpeername(struct socket *so, struct sockaddr **remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getpeername) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getpeername(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_connectin(struct socket *so, const struct sockaddr *remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_in) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_in(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
static int
sflt_connectout_common(struct socket *so, const struct sockaddr *nam)
{
	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_out) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_out(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_connectout(struct socket *so, const struct sockaddr *nam)
{
	char buf[SOCK_MAXADDRLEN];
	struct sockaddr *sa;
	int error;

	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	/*
	 * Workaround for rdar://23362120
	 * Always pass a buffer that can hold an IPv6 socket address
	 */
	bzero(buf, sizeof (buf));
	bcopy(nam, buf, nam->sa_len);
	sa = (struct sockaddr *)buf;

	error = sflt_connectout_common(so, sa);
	if (error != 0)
		return (error);

	/*
	 * If the address was modified, copy it back
	 */
	if (bcmp(sa, nam, nam->sa_len) != 0) {
		bcopy(sa, (struct sockaddr *)(uintptr_t)nam, nam->sa_len);
	}

	return (0);
}
__private_extern__ int
sflt_connectxout(struct socket *so, struct sockaddr_list **dst_sl0)
{
	struct sockaddr_list *dst_sl;
	struct sockaddr_entry *se, *tse;
	int modified = 0;
	int error = 0;

	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	/* make a copy as sflt_connectout() releases socket lock */
	dst_sl = sockaddrlist_dup(*dst_sl0, M_WAITOK);
	if (dst_sl == NULL)
		return (ENOBUFS);

	/*
	 * We don't yet have a connectx socket filter callback, so the
	 * closest equivalent is to call sflt_connectout_common() once
	 * for each address in the list and bail as soon as we get an
	 * error.
	 */
	TAILQ_FOREACH_SAFE(se, &dst_sl->sl_head, se_link, tse) {
		char buf[SOCK_MAXADDRLEN];
		struct sockaddr *sa;

		VERIFY(se->se_addr != NULL);

		/*
		 * Workaround for rdar://23362120
		 * Always pass a buffer that can hold an IPv6 socket address
		 */
		bzero(buf, sizeof (buf));
		bcopy(se->se_addr, buf, se->se_addr->sa_len);
		sa = (struct sockaddr *)buf;

		error = sflt_connectout_common(so, sa);
		if (error != 0)
			break;

		/*
		 * If the address was modified, copy it back
		 */
		if (bcmp(se->se_addr, sa, se->se_addr->sa_len) != 0) {
			bcopy(sa, se->se_addr, se->se_addr->sa_len);
			modified = 1;
		}
	}

	if (error != 0 || !modified) {
		/* leave the original as is */
		sockaddrlist_free(dst_sl);
	} else {
		/*
		 * At least one address was modified and there were no errors;
		 * ditch the original and return the modified list.
		 */
		sockaddrlist_free(*dst_sl0);
		*dst_sl0 = dst_sl;
	}

	return (error);
}
__private_extern__ int
sflt_setsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_setoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_setoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_getsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
__private_extern__ int
sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int setsendthread = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_out) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				if (so->so_send_filt_thread == NULL) {
					setsendthread = 1;
					so->so_send_filt_thread =
					    current_thread();
				}
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_out(
			    entry->sfe_cookie, so, to, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
		if (setsendthread)
			so->so_send_filt_thread = NULL;
	}

	return (error);
}
__private_extern__ int
sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int error = 0;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);

	for (entry = so->so_filt; entry && (error == 0);
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_in) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_in(
			    entry->sfe_cookie, so, from, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}
#pragma mark -- KPI --

errno_t
sflt_attach(socket_t socket, sflt_handle handle)
{
	socket_lock(socket, 1);
	errno_t result = sflt_attach_internal(socket, handle);
	socket_unlock(socket, 1);
	return (result);
}
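
/*
 * Illustrative sketch (not part of this KPI): a kext that has already
 * registered a filter under some handle -- MYFILT_HANDLE here is a
 * placeholder the kext defines -- could attach it to a single socket it
 * owns and later detach it by the same handle:
 *
 *	errno_t err = sflt_attach(so, MYFILT_HANDLE);
 *	...
 *	if (err == 0)
 *		(void) sflt_detach(so, MYFILT_HANDLE);
 */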

errno_t
sflt_detach(socket_t socket, sflt_handle handle)
{
	struct socket_filter_entry *entry;
	errno_t result = 0;

	if (socket == NULL || handle == 0)
		return (EINVAL);

	lck_rw_lock_exclusive(sock_filter_lock);
	for (entry = socket->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if (entry->sfe_filter->sf_filter.sf_handle == handle &&
		    (entry->sfe_flags & SFEF_ATTACHED) != 0) {
			break;
		}
	}

	if (entry != NULL) {
		sflt_detach_locked(entry);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}

struct solist {
	struct solist *next;
	struct socket *so;
};

errno_t
sflt_register(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr;
	unsigned int len;
	struct socket *so;
	struct inpcb *inp;
	struct solist *solisthead = NULL, *solist = NULL;

	if ((domain != PF_INET) && (domain != PF_INET6))
		return (ENOTSUP);

	pr = pffindproto(domain, protocol, type);
	if (pr == NULL)
		return (ENOENT);

	if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
	    filter->sf_handle == 0 || filter->sf_name == NULL)
		return (EINVAL);

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter *, sizeof (*sock_filt),
	    M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return (ENOBUFS);
	}

	bzero(sock_filt, sizeof (*sock_filt));

	/* Legacy sflt_filter length; current structure minus extended */
	len = sizeof (*filter) - sizeof (struct sflt_filter_ext);
	/*
	 * Include extended fields if filter defines SFLT_EXTENDED.
	 * We've zeroed out our internal sflt_filter placeholder,
	 * so any unused portion would have been taken care of.
	 */
	if (filter->sf_flags & SFLT_EXTENDED) {
		unsigned int ext_len = filter->sf_len;

		if (ext_len > sizeof (struct sflt_filter_ext))
			ext_len = sizeof (struct sflt_filter_ext);

		len += ext_len;
	}
	bcopy(filter, &sock_filt->sf_filter, len);

	lck_rw_lock_exclusive(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle ==
		    sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
			    sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
		sflt_retain_locked(sock_filt);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	if (match != NULL) {
		FREE(sock_filt, M_IFADDR);
		return (EEXIST);
	}

	if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY))
		return (error);

	/*
	 * Setup the filter on the TCP and UDP sockets already created.
	 */
#define	SOLIST_ADD(_so) do {						\
	solist->next = solisthead;					\
	sock_retain((_so));						\
	solist->so = (_so);						\
	solisthead = solist;						\
} while (0)
	if (protocol == IPPROTO_TCP) {
		lck_rw_lock_shared(tcbinfo.ipi_lock);
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(tcbinfo.ipi_lock);
	} else if (protocol == IPPROTO_UDP) {
		lck_rw_lock_shared(udbinfo.ipi_lock);
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(udbinfo.ipi_lock);
	}
	/* XXX it's possible to walk the raw socket list as well */
#undef SOLIST_ADD

	while (solisthead) {
		sflt_handle handle = filter->sf_handle;

		so = solisthead->so;
		socket_lock(so, 0);
		sflt_initsock(so);
		if (so->so_state & SS_ISCONNECTING)
			sflt_notify_after_register(so, sock_evt_connecting,
			    handle);
		else if (so->so_state & SS_ISCONNECTED)
			sflt_notify_after_register(so, sock_evt_connected,
			    handle);
		else if ((so->so_state &
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE)) ==
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE))
			sflt_notify_after_register(so, sock_evt_disconnecting,
			    handle);
		else if ((so->so_state &
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED)) ==
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED))
			sflt_notify_after_register(so, sock_evt_disconnected,
			    handle);
		else if (so->so_state & SS_CANTSENDMORE)
			sflt_notify_after_register(so, sock_evt_cantsendmore,
			    handle);
		else if (so->so_state & SS_CANTRCVMORE)
			sflt_notify_after_register(so, sock_evt_cantrecvmore,
			    handle);
		socket_unlock(so, 0);
		/* XXX no easy way to post the sock_evt_closing event */
		sock_release(so);
		solist = solisthead;
		solisthead = solisthead->next;
		FREE(solist, M_IFADDR);
	}

	return (error);
}
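
/*
 * Illustrative sketch, not part of this file: a kext would typically
 * register a global filter for new IPv4 TCP sockets roughly like this.
 * MYFILT_HANDLE, my_attach, my_detach and my_data_in are placeholders
 * supplied by the kext; only sf_attach, sf_detach, a nonzero sf_handle
 * and sf_name are mandatory (see the checks in sflt_register above).
 * At unload time the filter would be torn down with
 * sflt_unregister(MYFILT_HANDLE).
 *
 *	static struct sflt_filter my_filter = {
 *		.sf_handle = MYFILT_HANDLE,
 *		.sf_flags = SFLT_GLOBAL,
 *		.sf_name = "com.example.myfilter",
 *		.sf_attach = my_attach,
 *		.sf_detach = my_detach,
 *		.sf_data_in = my_data_in,
 *	};
 *
 *	errno_t err = sflt_register(&my_filter, PF_INET, SOCK_STREAM,
 *	    IPPROTO_TCP);
 */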

errno_t
sflt_unregister(sflt_handle handle)
{
	struct socket_filter *filter;
	lck_rw_lock_exclusive(sock_filter_lock);

	/* Find the entry by the handle */
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		/* Remove it from the global list */
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

		/* Remove it from the protosw list */
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head,
			    filter, sf_protosw_next);
		}

		/* Detach from any sockets */
		struct socket_filter_entry *entry = NULL;

		for (entry = filter->sf_entry_head; entry;
		    entry = entry->sfe_next_onfilter) {
			sflt_detach_locked(entry);
		}

		/* Release the filter */
		sflt_release_locked(filter);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	if (filter == NULL)
		return (ENOENT);

	return (0);
}

errno_t
sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int error = 0;

	if (so == NULL || data == NULL)
		return (EINVAL);

	if (flags & sock_data_filt_flag_oob) {
		return (ENOTSUP);
	}

	socket_lock(so, 1);

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		error = ENOTSUP;
		goto done;
	}

	if (from) {
		if (sbappendaddr(&so->so_rcv,
		    (struct sockaddr *)(uintptr_t)from, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf *)data))
			sorwakeup(so);
		goto done;
	}

	if (sbappend(&so->so_rcv, data))
		sorwakeup(so);
done:
	socket_unlock(so, 1);
	return (error);
}
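
/*
 * Illustrative sketch, not part of this file: a filter's sf_data_in
 * callback can swallow traffic by returning EJUSTRETURN and hand it
 * back later with sock_inject_data_in. my_data_in and queue_for_later
 * are placeholders; a real filter must also take care not to re-filter
 * the data it injects (e.g. by tagging the mbuf).
 *
 *	static errno_t
 *	my_data_in(void *cookie, socket_t so, const struct sockaddr *from,
 *	    mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags)
 *	{
 *		queue_for_later(cookie, *data, *control);
 *		return (EJUSTRETURN);	// the filter now owns the mbufs
 *	}
 *
 *	// ... later, from another context:
 *	(void) sock_inject_data_in(so, NULL, data, control, 0);
 */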

errno_t
sock_inject_data_out(socket_t so, const struct sockaddr *to, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int sosendflags = 0;

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW)
		return (ENOTSUP);

	if (flags & sock_data_filt_flag_oob)
		sosendflags = MSG_OOB;
	return (sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
	    data, control, sosendflags));
}

sockopt_dir
sockopt_direction(sockopt_t sopt)
{
	return ((sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set);
}

int
sockopt_level(sockopt_t sopt)
{
	return (sopt->sopt_level);
}

int
sockopt_name(sockopt_t sopt)
{
	return (sopt->sopt_name);
}

size_t
sockopt_valsize(sockopt_t sopt)
{
	return (sopt->sopt_valsize);
}

errno_t
sockopt_copyin(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyin(sopt, data, len, len));
}

errno_t
sockopt_copyout(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyout(sopt, data, len));
}
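
/*
 * Illustrative sketch, not part of this file: an sf_setoption callback
 * would typically use the accessors above to inspect the option and
 * copy in its value. my_setoption and MY_SOL_MARK are placeholders;
 * returning 0 lets normal option processing continue, while a filter
 * that wanted to consume the option would return EJUSTRETURN instead.
 *
 *	static errno_t
 *	my_setoption(void *cookie, socket_t so, sockopt_t sopt)
 *	{
 *		int val;
 *
 *		if (sockopt_level(sopt) != SOL_SOCKET ||
 *		    sockopt_name(sopt) != MY_SOL_MARK)
 *			return (0);	// not ours; continue processing
 *		if (sockopt_valsize(sopt) < sizeof (val))
 *			return (EINVAL);
 *		return (sockopt_copyin(sopt, &val, sizeof (val)));
 *	}
 */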