/*
 * Copyright (c) 2003-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/proc.h>
#include <kern/locks.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <net/kext_net.h>
#include <net/if.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>

#include <string.h>

#define	SFEF_ATTACHED	0x1	/* SFE is on socket list */
#define	SFEF_NODETACH	0x2	/* Detach should not be called */
#define	SFEF_NOSOCKET	0x4	/* Socket is gone */

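/*
 * Each socket_filter_entry ties one registered filter to one socket.
 * An entry lives on up to three singly-linked lists at once: the
 * owning socket's list (sfe_next_onsocket), the owning filter's list
 * (sfe_next_onfilter), and, once its last reference is dropped, the
 * global cleanup list (sfe_next_oncleanup).
 */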
struct socket_filter_entry {
	struct socket_filter_entry	*sfe_next_onsocket;
	struct socket_filter_entry	*sfe_next_onfilter;
	struct socket_filter_entry	*sfe_next_oncleanup;

	struct socket_filter		*sfe_filter;
	struct socket			*sfe_socket;
	void				*sfe_cookie;

	uint32_t			sfe_flags;
	int32_t				sfe_refcount;
};

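/*
 * One socket_filter exists per registered filter.  It is kept on the
 * global sock_filter_head list and, for SFLT_GLOBAL filters, on the
 * owning protosw's filter list; sf_refcount is manipulated only while
 * sock_filter_lock is held.
 */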
struct socket_filter {
	TAILQ_ENTRY(socket_filter)	sf_protosw_next;
	TAILQ_ENTRY(socket_filter)	sf_global_next;
	struct socket_filter_entry	*sf_entry_head;

	struct protosw			*sf_proto;
	struct sflt_filter		sf_filter;
	u_int32_t			sf_refcount;
};

TAILQ_HEAD(socket_filter_list, socket_filter);

static struct socket_filter_list	sock_filter_head;
static lck_rw_t				*sock_filter_lock = NULL;
static lck_mtx_t			*sock_filter_cleanup_lock = NULL;
static struct socket_filter_entry	*sock_filter_cleanup_entries = NULL;
static thread_t				sock_filter_cleanup_thread = NULL;

static void	sflt_cleanup_thread(void *, wait_result_t);
static void	sflt_detach_locked(struct socket_filter_entry *entry);

#pragma mark -- Internal State Management --

__private_extern__ int
sflt_permission_check(struct inpcb *inp)
{
	/*
	 * All these permissions only apply to the co-processor interface,
	 * so ignore IPv4.
	 */
	if (!(inp->inp_vflag & INP_IPV6)) {
		return (0);
	}
	/* Sockets that have this entitlement bypass socket filters. */
	if (INP_INTCOPROC_ALLOWED(inp)) {
		return (1);
	}
	if ((inp->inp_flags & INP_BOUND_IF) &&
	    IFNET_IS_INTCOPROC(inp->inp_boundifp)) {
		return (1);
	}
	return (0);
}

__private_extern__ void
sflt_init(void)
{
	lck_grp_attr_t *grp_attrib = NULL;
	lck_attr_t *lck_attrib = NULL;
	lck_grp_t *lck_group = NULL;

	TAILQ_INIT(&sock_filter_head);

	/* Allocate a rw lock */
	grp_attrib = lck_grp_attr_alloc_init();
	lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
	lck_grp_attr_free(grp_attrib);
	lck_attrib = lck_attr_alloc_init();
	sock_filter_lock = lck_rw_alloc_init(lck_group, lck_attrib);
	sock_filter_cleanup_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
	lck_grp_free(lck_group);
	lck_attr_free(lck_attrib);
}

static void
sflt_retain_locked(struct socket_filter *filter)
{
	filter->sf_refcount++;
}

static void
sflt_release_locked(struct socket_filter *filter)
{
	filter->sf_refcount--;
	if (filter->sf_refcount == 0) {
		/* Call the unregistered function */
		if (filter->sf_filter.sf_unregistered) {
			lck_rw_unlock_exclusive(sock_filter_lock);
			filter->sf_filter.sf_unregistered(
			    filter->sf_filter.sf_handle);
			lck_rw_lock_exclusive(sock_filter_lock);
		}

		/* Free the entry */
		FREE(filter, M_IFADDR);
	}
}

static void
sflt_entry_retain(struct socket_filter_entry *entry)
{
	if (OSIncrementAtomic(&entry->sfe_refcount) <= 0) {
		panic("sflt_entry_retain - sfe_refcount <= 0\n");
		/* NOTREACHED */
	}
}

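/*
 * Dropping the last reference does not free the entry in place.
 * Instead the entry is queued on sock_filter_cleanup_entries for the
 * cleanup thread, so the teardown work (including any outstanding
 * sf_detach call) runs from a known thread context rather than from
 * whatever context happened to release the final reference.
 */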
static void
sflt_entry_release(struct socket_filter_entry *entry)
{
	SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);
	if (old == 1) {
		/* That was the last reference */

		/* Take the cleanup lock */
		lck_mtx_lock(sock_filter_cleanup_lock);

		/* Put this item on the cleanup list */
		entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = entry;

		/* If the item is the first item in the list */
		if (entry->sfe_next_oncleanup == NULL) {
			if (sock_filter_cleanup_thread == NULL) {
				/* Create a thread */
				kernel_thread_start(sflt_cleanup_thread,
				    NULL, &sock_filter_cleanup_thread);
			} else {
				/* Wakeup the thread */
				wakeup(&sock_filter_cleanup_entries);
			}
		}

		/* Drop the cleanup lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);
	} else if (old <= 0) {
		panic("sflt_entry_release - sfe_refcount (%d) <= 0\n",
		    (int)old);
		/* NOTREACHED */
	}
}

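/*
 * Body of the cleanup thread: drain sock_filter_cleanup_entries and,
 * for each dead entry, call sf_detach if it has not been called yet,
 * unlink the entry from its socket and filter lists, release the
 * filter reference, and free the entry.
 */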
__attribute__((noreturn))
static void
sflt_cleanup_thread(void *blah, wait_result_t blah2)
{
#pragma unused(blah, blah2)
	while (1) {
		lck_mtx_lock(sock_filter_cleanup_lock);
		while (sock_filter_cleanup_entries == NULL) {
			/* Sleep until we've got something better to do */
			msleep(&sock_filter_cleanup_entries,
			    sock_filter_cleanup_lock, PWAIT,
			    "sflt_cleanup", NULL);
		}

		/* Pull the current list of dead items */
		struct socket_filter_entry *dead = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = NULL;

		/* Drop the lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);

		/* Take the socket filter lock */
		lck_rw_lock_exclusive(sock_filter_lock);

		/* Cleanup every dead item */
		struct socket_filter_entry *entry;
		for (entry = dead; entry; entry = dead) {
			struct socket_filter_entry **nextpp;

			dead = entry->sfe_next_oncleanup;

			/* Call detach function if necessary - drop the lock */
			if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
			    entry->sfe_filter->sf_filter.sf_detach) {
				entry->sfe_flags |= SFEF_NODETACH;
				lck_rw_unlock_exclusive(sock_filter_lock);

				/*
				 * Warning - passing a potentially
				 * dead socket may be bad
				 */
				entry->sfe_filter->sf_filter.sf_detach(
				    entry->sfe_cookie, entry->sfe_socket);

				lck_rw_lock_exclusive(sock_filter_lock);
			}

			/*
			 * Pull entry off the socket list --
			 * if the socket still exists
			 */
			if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
				for (nextpp = &entry->sfe_socket->so_filt;
				    *nextpp;
				    nextpp = &(*nextpp)->sfe_next_onsocket) {
					if (*nextpp == entry) {
						*nextpp =
						    entry->sfe_next_onsocket;
						break;
					}
				}
			}

			/* Pull entry off the filter list */
			for (nextpp = &entry->sfe_filter->sf_entry_head;
			    *nextpp; nextpp = &(*nextpp)->sfe_next_onfilter) {
				if (*nextpp == entry) {
					*nextpp = entry->sfe_next_onfilter;
					break;
				}
			}

			/*
			 * Release the filter -- may drop lock, but that's okay
			 */
			sflt_release_locked(entry->sfe_filter);
			entry->sfe_socket = NULL;
			entry->sfe_filter = NULL;
			FREE(entry, M_IFADDR);
		}

		/* Drop the socket filter lock */
		lck_rw_unlock_exclusive(sock_filter_lock);
	}
	/* NOTREACHED */
}

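/*
 * Attach a filter to a socket.  Called with sock_filter_lock held
 * exclusively; the lock (and the socket lock, when socklocked is set)
 * is dropped and reacquired around the filter's sf_attach callback.
 */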
static int
sflt_attach_locked(struct socket *so, struct socket_filter *filter,
    int socklocked)
{
	int error = 0;
	struct socket_filter_entry *entry = NULL;

	if (sflt_permission_check(sotoinpcb(so)))
		return (0);

	if (filter == NULL)
		return (ENOENT);

	for (entry = so->so_filt; entry; entry = entry->sfe_next_onfilter) {
		if (entry->sfe_filter->sf_filter.sf_handle ==
		    filter->sf_filter.sf_handle)
			return (EEXIST);
	}
	/* allocate the socket filter entry */
	MALLOC(entry, struct socket_filter_entry *, sizeof (*entry), M_IFADDR,
	    M_WAITOK);
	if (entry == NULL)
		return (ENOMEM);

	/* Initialize the socket filter entry */
	entry->sfe_cookie = NULL;
	entry->sfe_flags = SFEF_ATTACHED;
	entry->sfe_refcount = 1;	/* corresponds to SFEF_ATTACHED flag set */

	/* Put the entry in the filter list */
	sflt_retain_locked(filter);
	entry->sfe_filter = filter;
	entry->sfe_next_onfilter = filter->sf_entry_head;
	filter->sf_entry_head = entry;

	/* Put the entry on the socket filter list */
	entry->sfe_socket = so;
	entry->sfe_next_onsocket = so->so_filt;
	so->so_filt = entry;

	if (entry->sfe_filter->sf_filter.sf_attach) {
		/* Retain the entry while we call attach */
		sflt_entry_retain(entry);

		/*
		 * Release the filter lock --
		 * callers must be aware we will do this
		 */
		lck_rw_unlock_exclusive(sock_filter_lock);

		/* Unlock the socket */
		if (socklocked)
			socket_unlock(so, 0);

		/* It's finally safe to call the filter function */
		error = entry->sfe_filter->sf_filter.sf_attach(
		    &entry->sfe_cookie, so);

		/* Lock the socket again */
		if (socklocked)
			socket_lock(so, 0);

		/* Lock the filters again */
		lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * If the attach function returns an error,
		 * this filter must be detached
		 */
		if (error) {
			/* don't call sf_detach */
			entry->sfe_flags |= SFEF_NODETACH;
			sflt_detach_locked(entry);
		}

		/* Release the retain we held through the attach call */
		sflt_entry_release(entry);
	}

	return (error);
}

errno_t
sflt_attach_internal(socket_t socket, sflt_handle handle)
{
	if (socket == NULL || handle == 0)
		return (EINVAL);

	int result = EINVAL;

	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter *filter = NULL;
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		result = sflt_attach_locked(socket, filter, 1);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}

static void
sflt_detach_locked(struct socket_filter_entry *entry)
{
	if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
		entry->sfe_flags &= ~SFEF_ATTACHED;
		sflt_entry_release(entry);
	}
}

#pragma mark -- Socket Layer Hooks --

__private_extern__ void
sflt_initsock(struct socket *so)
{
	/*
	 * Point to the real protosw, as so_proto might have been
	 * pointed to a modified version.
	 */
	struct protosw *proto = so->so_proto->pr_protosw;

	lck_rw_lock_shared(sock_filter_lock);
	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		/* Promote lock to exclusive */
		if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock))
			lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * Warning: A filter unregistering will be pulled out of
		 * the list. This could happen while we drop the lock in
		 * sflt_attach_locked or sflt_release_locked. For this
		 * reason we retain a reference on the filter (or next_filter)
		 * while calling this function. This protects us from a panic,
		 * but it could result in a socket being created without all
		 * of the global filters if we're attaching a filter as it
		 * is removed, if that's possible.
		 */
		struct socket_filter *filter =
		    TAILQ_FIRST(&proto->pr_filter_head);

		sflt_retain_locked(filter);

		while (filter) {
			struct socket_filter *filter_next;
			/*
			 * Warning: sflt_attach_locked
			 * will drop the lock
			 */
			sflt_attach_locked(so, filter, 0);

			filter_next = TAILQ_NEXT(filter, sf_protosw_next);
			if (filter_next)
				sflt_retain_locked(filter_next);

			/*
			 * Warning: sflt_release_locked may remove
			 * the filter from the queue
			 */
			sflt_release_locked(filter);
			filter = filter_next;
		}
	}
	lck_rw_done(sock_filter_lock);
}

/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.
 */
__private_extern__ void
sflt_termsock(struct socket *so)
{
	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter_entry *entry;

	while ((entry = so->so_filt) != NULL) {
		/* Pull filter off the socket */
		so->so_filt = entry->sfe_next_onsocket;
		entry->sfe_flags |= SFEF_NOSOCKET;

		/* Call detach */
		sflt_detach_locked(entry);

		/*
		 * On sflt_termsock, we can't return until the detach function
		 * has been called. Call the detach function - this is gross
		 * because the socket filter entry could be freed when we drop
		 * the lock, so we make copies on the stack and retain
		 * everything we need before dropping the lock.
		 */
		if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
		    entry->sfe_filter->sf_filter.sf_detach) {
			void *sfe_cookie = entry->sfe_cookie;
			struct socket_filter *sfe_filter = entry->sfe_filter;

			/* Retain the socket filter */
			sflt_retain_locked(sfe_filter);

			/* Mark that we've called the detach function */
			entry->sfe_flags |= SFEF_NODETACH;

			/* Drop the lock before calling the detach function */
			lck_rw_unlock_exclusive(sock_filter_lock);
			sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
			lck_rw_lock_exclusive(sock_filter_lock);

			/* Release the filter */
			sflt_release_locked(sfe_filter);
		}
	}

	lck_rw_unlock_exclusive(sock_filter_lock);
}

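/*
 * Deliver an event to the filters attached to a socket.  With a zero
 * handle every filter that implements sf_notify is called; with a
 * non-zero handle the test below skips the entry carrying that handle
 * and notifies the rest.
 */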
static void
sflt_notify_internal(struct socket *so, sflt_event_t event, void *param,
    sflt_handle handle)
{
	if (so->so_filt == NULL)
		return;

	struct socket_filter_entry *entry;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_notify &&
		    ((handle && entry->sfe_filter->sf_filter.sf_handle !=
		    handle) || !handle)) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Finally call the filter */
			entry->sfe_filter->sf_filter.sf_notify(
			    entry->sfe_cookie, so, event, param);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked != 0) {
		socket_lock(so, 0);
	}
}

__private_extern__ void
sflt_notify(struct socket *so, sflt_event_t event, void *param)
{
	sflt_notify_internal(so, event, param, 0);
}

static void
sflt_notify_after_register(struct socket *so, sflt_event_t event,
    sflt_handle handle)
{
	sflt_notify_internal(so, event, NULL, handle);
}

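/*
 * The per-operation hooks below (ioctl, bind, listen, accept,
 * getsockname, getpeername, connect in/out, setsockopt, getsockopt,
 * data in/out) all share one shape: walk the socket's entry list under
 * a shared sock_filter_lock and, for each attached entry implementing
 * the operation, retain the entry, drop the filter and socket locks,
 * call the filter, then relock and release.  The first non-zero return
 * value ends the walk and is handed back to the caller.
 */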
__private_extern__ int
sflt_ioctl(struct socket *so, u_long cmd, caddr_t data)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_ioctl) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_ioctl(
			    entry->sfe_cookie, so, cmd, data);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_bind(struct socket *so, const struct sockaddr *nam)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_bind) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_bind(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again and
			 * release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_listen(struct socket *so)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_listen) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_listen(
			    entry->sfe_cookie, so);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_accept(struct socket *head, struct socket *so,
    const struct sockaddr *local, const struct sockaddr *remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_accept) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_accept(
			    entry->sfe_cookie, head, so, local, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getsockname(struct socket *so, struct sockaddr **local)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getsockname) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getsockname(
			    entry->sfe_cookie, so, local);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getpeername(struct socket *so, struct sockaddr **remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getpeername) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getpeername(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_connectin(struct socket *so, const struct sockaddr *remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_in) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_in(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

static int
sflt_connectout_common(struct socket *so, const struct sockaddr *nam)
{
	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_out) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_out(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_connectout(struct socket *so, const struct sockaddr *nam)
{
	char buf[SOCK_MAXADDRLEN];
	struct sockaddr *sa;
	int error;

	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	/*
	 * Workaround for rdar://23362120
	 * Always pass a buffer that can hold an IPv6 socket address
	 */
	bzero(buf, sizeof (buf));
	bcopy(nam, buf, nam->sa_len);
	sa = (struct sockaddr *)buf;

	error = sflt_connectout_common(so, sa);
	if (error != 0)
		return (error);

	/*
	 * If the address was modified, copy it back
	 */
	if (bcmp(sa, nam, nam->sa_len) != 0) {
		bcopy(sa, (struct sockaddr *)(uintptr_t)nam, nam->sa_len);
	}

	return (0);
}

__private_extern__ int
sflt_setsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_setoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_setoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int setsendthread = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_out) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				if (so->so_send_filt_thread == NULL) {
					setsendthread = 1;
					so->so_send_filt_thread =
					    current_thread();
				}
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_out(
			    entry->sfe_cookie, so, to, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
		if (setsendthread)
			so->so_send_filt_thread = NULL;
	}

	return (error);
}

__private_extern__ int
sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so)))
		return (0);

	struct socket_filter_entry *entry;
	int error = 0;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);

	for (entry = so->so_filt; entry && (error == 0);
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_in) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_in(
			    entry->sfe_cookie, so, from, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

#pragma mark -- KPI --

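/*
 * Sketch of how a kext might drive this KPI (illustrative only: the
 * callback names, handle value, and bundle-style name below are
 * invented for the example, not part of xnu):
 *
 *	static errno_t
 *	example_attach(void **cookie, socket_t so)
 *	{
 *		*cookie = NULL;		// hang per-socket state here
 *		return (0);		// non-zero rejects the attach
 *	}
 *
 *	static void
 *	example_detach(void *cookie, socket_t so)
 *	{
 *		// free whatever was hung off cookie
 *	}
 *
 *	static struct sflt_filter example_filter = {
 *		.sf_handle	= 0x45584d50,	// must be unique
 *		.sf_flags	= SFLT_GLOBAL,	// attach to new sockets
 *		.sf_name	= "com.example.sflt",
 *		.sf_attach	= example_attach,
 *		.sf_detach	= example_detach,
 *	};
 *
 *	// register for TCP/IPv4; sf_attach, sf_detach, sf_handle and
 *	// sf_name are mandatory, per the checks in sflt_register()
 *	errno_t err = sflt_register(&example_filter, PF_INET, SOCK_STREAM,
 *	    IPPROTO_TCP);
 *	...
 *	sflt_unregister(example_filter.sf_handle);
 */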
errno_t
sflt_attach(socket_t socket, sflt_handle handle)
{
	socket_lock(socket, 1);
	errno_t result = sflt_attach_internal(socket, handle);
	socket_unlock(socket, 1);
	return (result);
}

errno_t
sflt_detach(socket_t socket, sflt_handle handle)
{
	struct socket_filter_entry *entry;
	errno_t result = 0;

	if (socket == NULL || handle == 0)
		return (EINVAL);

	lck_rw_lock_exclusive(sock_filter_lock);
	for (entry = socket->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if (entry->sfe_filter->sf_filter.sf_handle == handle &&
		    (entry->sfe_flags & SFEF_ATTACHED) != 0) {
			break;
		}
	}

	if (entry != NULL) {
		sflt_detach_locked(entry);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}

struct solist {
	struct solist *next;
	struct socket *so;
};

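/*
 * Register a filter for a protocol.  The caller's sflt_filter is copied
 * into a private socket_filter, which goes on the global list and, for
 * SFLT_GLOBAL filters, on the protosw list consulted by sflt_initsock.
 * When SFLT_EXTENDED_REGISTRY is set the filter is also attached to the
 * TCP or UDP sockets that already exist; sflt_notify_after_register then
 * posts each socket's current state as an event.
 */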
errno_t
sflt_register(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr;
	unsigned int len;
	struct socket *so;
	struct inpcb *inp;
	struct solist *solisthead = NULL, *solist = NULL;

	if ((domain != PF_INET) && (domain != PF_INET6))
		return (ENOTSUP);

	pr = pffindproto(domain, protocol, type);
	if (pr == NULL)
		return (ENOENT);

	if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
	    filter->sf_handle == 0 || filter->sf_name == NULL)
		return (EINVAL);

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter *, sizeof (*sock_filt),
	    M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return (ENOBUFS);
	}

	bzero(sock_filt, sizeof (*sock_filt));

	/* Legacy sflt_filter length; current structure minus extended */
	len = sizeof (*filter) - sizeof (struct sflt_filter_ext);
	/*
	 * Include extended fields if filter defines SFLT_EXTENDED.
	 * We've zeroed out our internal sflt_filter placeholder,
	 * so any unused portion would have been taken care of.
	 */
	if (filter->sf_flags & SFLT_EXTENDED) {
		unsigned int ext_len = filter->sf_len;

		if (ext_len > sizeof (struct sflt_filter_ext))
			ext_len = sizeof (struct sflt_filter_ext);

		len += ext_len;
	}
	bcopy(filter, &sock_filt->sf_filter, len);

	lck_rw_lock_exclusive(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle ==
		    sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
			    sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
		sflt_retain_locked(sock_filt);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	if (match != NULL) {
		FREE(sock_filt, M_IFADDR);
		return (EEXIST);
	}

	if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY))
		return (error);

	/*
	 * Setup the filter on the TCP and UDP sockets already created.
	 */
#define	SOLIST_ADD(_so) do {						\
	solist->next = solisthead;					\
	sock_retain((_so));						\
	solist->so = (_so);						\
	solisthead = solist;						\
} while (0)
	if (protocol == IPPROTO_TCP) {
		lck_rw_lock_shared(tcbinfo.ipi_lock);
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(tcbinfo.ipi_lock);
	} else if (protocol == IPPROTO_UDP) {
		lck_rw_lock_shared(udbinfo.ipi_lock);
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(udbinfo.ipi_lock);
	}
	/* XXX it's possible to walk the raw socket list as well */
#undef SOLIST_ADD

	while (solisthead) {
		sflt_handle handle = filter->sf_handle;

		so = solisthead->so;
		socket_lock(so, 0);
		sflt_initsock(so);
		if (so->so_state & SS_ISCONNECTING)
			sflt_notify_after_register(so, sock_evt_connecting,
			    handle);
		else if (so->so_state & SS_ISCONNECTED)
			sflt_notify_after_register(so, sock_evt_connected,
			    handle);
		else if ((so->so_state &
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE)) ==
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE))
			sflt_notify_after_register(so, sock_evt_disconnecting,
			    handle);
		else if ((so->so_state &
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED)) ==
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED))
			sflt_notify_after_register(so, sock_evt_disconnected,
			    handle);
		else if (so->so_state & SS_CANTSENDMORE)
			sflt_notify_after_register(so, sock_evt_cantsendmore,
			    handle);
		else if (so->so_state & SS_CANTRCVMORE)
			sflt_notify_after_register(so, sock_evt_cantrecvmore,
			    handle);
		socket_unlock(so, 0);
		/* XXX no easy way to post the sock_evt_closing event */
		sock_release(so);
		solist = solisthead;
		solisthead = solisthead->next;
		FREE(solist, M_IFADDR);
	}

	return (error);
}

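/*
 * Unregister a filter by handle: remove it from the global and protosw
 * lists and detach it from every socket it is attached to.  The
 * sf_unregistered callback fires from sflt_release_locked once the
 * last reference is gone.
 */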
errno_t
sflt_unregister(sflt_handle handle)
{
	struct socket_filter *filter;

	lck_rw_lock_exclusive(sock_filter_lock);

	/* Find the entry by the handle */
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		/* Remove it from the global list */
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

		/* Remove it from the protosw list */
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head,
			    filter, sf_protosw_next);
		}

		/* Detach from any sockets */
		struct socket_filter_entry *entry = NULL;

		for (entry = filter->sf_entry_head; entry;
		    entry = entry->sfe_next_onfilter) {
			sflt_detach_locked(entry);
		}

		/* Release the filter */
		sflt_release_locked(filter);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	if (filter == NULL)
		return (ENOENT);

	return (0);
}

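/*
 * Inject data into a socket's receive buffer as though it had arrived
 * from the network, picking the sbappend variant that matches the
 * supplied address, control mbuf, and record flag.
 */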
errno_t
sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int error = 0;

	if (so == NULL || data == NULL)
		return (EINVAL);

	if (flags & sock_data_filt_flag_oob) {
		return (ENOTSUP);
	}

	socket_lock(so, 1);

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		error = ENOTSUP;
		goto done;
	}

	if (from) {
		if (sbappendaddr(&so->so_rcv,
		    (struct sockaddr *)(uintptr_t)from, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf *)data))
			sorwakeup(so);
		goto done;
	}

	if (sbappend(&so->so_rcv, data))
		sorwakeup(so);
done:
	socket_unlock(so, 1);
	return (error);
}

errno_t
sock_inject_data_out(socket_t so, const struct sockaddr *to, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int sosendflags = 0;

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW)
		return (ENOTSUP);

	if (flags & sock_data_filt_flag_oob)
		sosendflags = MSG_OOB;
	return (sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
	    data, control, sosendflags));
}

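/*
 * Thin accessors that expose the relevant struct sockopt fields to
 * filter writers without requiring the full definition.
 */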
sockopt_dir
sockopt_direction(sockopt_t sopt)
{
	return ((sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set);
}

int
sockopt_level(sockopt_t sopt)
{
	return (sopt->sopt_level);
}

int
sockopt_name(sockopt_t sopt)
{
	return (sopt->sopt_name);
}

size_t
sockopt_valsize(sockopt_t sopt)
{
	return (sopt->sopt_valsize);
}

errno_t
sockopt_copyin(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyin(sopt, data, len, len));
}

errno_t
sockopt_copyout(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyout(sopt, data, len));
}