/* bsd/kern/kpi_socketfilter.c (from the xnu-2422.115.4 source distribution) */
/*
 * Copyright (c) 2003-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/proc.h>
#include <kern/locks.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <net/kext_net.h>
#include <net/if.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>

#include <string.h>

#define	SFEF_ATTACHED	0x1	/* SFE is on socket list */
#define	SFEF_NODETACH	0x2	/* Detach should not be called */
#define	SFEF_NOSOCKET	0x4	/* Socket is gone */

struct socket_filter_entry {
	struct socket_filter_entry	*sfe_next_onsocket;
	struct socket_filter_entry	*sfe_next_onfilter;
	struct socket_filter_entry	*sfe_next_oncleanup;

	struct socket_filter		*sfe_filter;
	struct socket			*sfe_socket;
	void				*sfe_cookie;

	uint32_t			sfe_flags;
	int32_t				sfe_refcount;
};

struct socket_filter {
	TAILQ_ENTRY(socket_filter)	sf_protosw_next;
	TAILQ_ENTRY(socket_filter)	sf_global_next;
	struct socket_filter_entry	*sf_entry_head;

	struct protosw			*sf_proto;
	struct sflt_filter		sf_filter;
	u_int32_t			sf_refcount;
};

TAILQ_HEAD(socket_filter_list, socket_filter);

static struct socket_filter_list	sock_filter_head;
static lck_rw_t				*sock_filter_lock = NULL;
static lck_mtx_t			*sock_filter_cleanup_lock = NULL;
static struct socket_filter_entry	*sock_filter_cleanup_entries = NULL;
static thread_t				sock_filter_cleanup_thread = NULL;

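/*
 * Concurrency model (as implemented below): the global filter list and
 * every filter's entry list are protected by sock_filter_lock, a rw lock.
 * Filters and entries are reference counted; sflt_release_locked() and
 * sflt_entry_release() drop the last reference. An entry whose last
 * reference goes away is handed to a dedicated cleanup thread, because
 * the releasing context may hold the socket lock or the filter lock and
 * so cannot safely call the filter's detach function itself.
 */
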
static void	sflt_cleanup_thread(void *, wait_result_t);
static void	sflt_detach_locked(struct socket_filter_entry *entry);

#pragma mark -- Internal State Management --

__private_extern__ void
sflt_init(void)
{
	lck_grp_attr_t *grp_attrib = NULL;
	lck_attr_t *lck_attrib = NULL;
	lck_grp_t *lck_group = NULL;

	TAILQ_INIT(&sock_filter_head);

	/* Allocate a rw lock */
	grp_attrib = lck_grp_attr_alloc_init();
	lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
	lck_grp_attr_free(grp_attrib);
	lck_attrib = lck_attr_alloc_init();
	sock_filter_lock = lck_rw_alloc_init(lck_group, lck_attrib);
	sock_filter_cleanup_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
	lck_grp_free(lck_group);
	lck_attr_free(lck_attrib);
}

static void
sflt_retain_locked(struct socket_filter *filter)
{
	filter->sf_refcount++;
}

static void
sflt_release_locked(struct socket_filter *filter)
{
	filter->sf_refcount--;
	if (filter->sf_refcount == 0) {
		/* Call the unregistered function */
		if (filter->sf_filter.sf_unregistered) {
			lck_rw_unlock_exclusive(sock_filter_lock);
			filter->sf_filter.sf_unregistered(
			    filter->sf_filter.sf_handle);
			lck_rw_lock_exclusive(sock_filter_lock);
		}

		/* Free the entry */
		FREE(filter, M_IFADDR);
	}
}

static void
sflt_entry_retain(struct socket_filter_entry *entry)
{
	if (OSIncrementAtomic(&entry->sfe_refcount) <= 0) {
		panic("sflt_entry_retain - sfe_refcount <= 0\n");
		/* NOTREACHED */
	}
}

static void
sflt_entry_release(struct socket_filter_entry *entry)
{
	SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);
	if (old == 1) {
		/* That was the last reference */

		/* Take the cleanup lock */
		lck_mtx_lock(sock_filter_cleanup_lock);

		/* Put this item on the cleanup list */
		entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = entry;

		/* If the item is the first item in the list */
		if (entry->sfe_next_oncleanup == NULL) {
			if (sock_filter_cleanup_thread == NULL) {
				/* Create a thread */
				kernel_thread_start(sflt_cleanup_thread,
				    NULL, &sock_filter_cleanup_thread);
			} else {
				/* Wakeup the thread */
				wakeup(&sock_filter_cleanup_entries);
			}
		}

		/* Drop the cleanup lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);
	} else if (old <= 0) {
		panic("sflt_entry_release - sfe_refcount (%d) <= 0\n",
		    (int)old);
		/* NOTREACHED */
	}
}

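/*
 * The cleanup thread, once started, runs for the lifetime of the system.
 * It sleeps until entries appear on the cleanup list, then calls each
 * entry's detach function (unless SFEF_NODETACH is already set), unlinks
 * the entry from its socket and filter lists, and frees it.
 */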
static void
sflt_cleanup_thread(void *blah, wait_result_t blah2)
{
#pragma unused(blah, blah2)
	while (1) {
		lck_mtx_lock(sock_filter_cleanup_lock);
		while (sock_filter_cleanup_entries == NULL) {
			/* Sleep until we've got something better to do */
			msleep(&sock_filter_cleanup_entries,
			    sock_filter_cleanup_lock, PWAIT,
			    "sflt_cleanup", NULL);
		}

		/* Pull the current list of dead items */
		struct socket_filter_entry *dead = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = NULL;

		/* Drop the lock */
		lck_mtx_unlock(sock_filter_cleanup_lock);

		/* Take the socket filter lock */
		lck_rw_lock_exclusive(sock_filter_lock);

		/* Cleanup every dead item */
		struct socket_filter_entry *entry;
		for (entry = dead; entry; entry = dead) {
			struct socket_filter_entry **nextpp;

			dead = entry->sfe_next_oncleanup;

			/* Call detach function if necessary - drop the lock */
			if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
			    entry->sfe_filter->sf_filter.sf_detach) {
				entry->sfe_flags |= SFEF_NODETACH;
				lck_rw_unlock_exclusive(sock_filter_lock);

				/*
				 * Warning - passing a potentially
				 * dead socket may be bad
				 */
				entry->sfe_filter->sf_filter.sf_detach(
				    entry->sfe_cookie, entry->sfe_socket);

				lck_rw_lock_exclusive(sock_filter_lock);
			}

			/*
			 * Pull entry off the socket list --
			 * if the socket still exists
			 */
			if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
				for (nextpp = &entry->sfe_socket->so_filt;
				    *nextpp;
				    nextpp = &(*nextpp)->sfe_next_onsocket) {
					if (*nextpp == entry) {
						*nextpp =
						    entry->sfe_next_onsocket;
						break;
					}
				}
			}

			/* Pull entry off the filter list */
			for (nextpp = &entry->sfe_filter->sf_entry_head;
			    *nextpp; nextpp = &(*nextpp)->sfe_next_onfilter) {
				if (*nextpp == entry) {
					*nextpp = entry->sfe_next_onfilter;
					break;
				}
			}

			/*
			 * Release the filter -- may drop lock, but that's okay
			 */
			sflt_release_locked(entry->sfe_filter);
			entry->sfe_socket = NULL;
			entry->sfe_filter = NULL;
			FREE(entry, M_IFADDR);
		}

		/* Drop the socket filter lock */
		lck_rw_unlock_exclusive(sock_filter_lock);
	}
	/* NOTREACHED */
}

static int
sflt_attach_locked(struct socket *so, struct socket_filter *filter,
    int socklocked)
{
	int error = 0;
	struct socket_filter_entry *entry = NULL;

	if (filter == NULL)
		return (ENOENT);

	for (entry = so->so_filt; entry; entry = entry->sfe_next_onfilter) {
		if (entry->sfe_filter->sf_filter.sf_handle ==
		    filter->sf_filter.sf_handle)
			return (EEXIST);
	}
	/* allocate the socket filter entry */
	MALLOC(entry, struct socket_filter_entry *, sizeof (*entry), M_IFADDR,
	    M_WAITOK);
	if (entry == NULL)
		return (ENOMEM);

	/* Initialize the socket filter entry */
	entry->sfe_cookie = NULL;
	entry->sfe_flags = SFEF_ATTACHED;
	entry->sfe_refcount = 1;	/* corresponds to SFEF_ATTACHED flag set */

	/* Put the entry in the filter list */
	sflt_retain_locked(filter);
	entry->sfe_filter = filter;
	entry->sfe_next_onfilter = filter->sf_entry_head;
	filter->sf_entry_head = entry;

	/* Put the entry on the socket filter list */
	entry->sfe_socket = so;
	entry->sfe_next_onsocket = so->so_filt;
	so->so_filt = entry;

	if (entry->sfe_filter->sf_filter.sf_attach) {
		/* Retain the entry while we call attach */
		sflt_entry_retain(entry);

		/*
		 * Release the filter lock --
		 * callers must be aware we will do this
		 */
		lck_rw_unlock_exclusive(sock_filter_lock);

		/* Unlock the socket */
		if (socklocked)
			socket_unlock(so, 0);

		/* It's finally safe to call the filter function */
		error = entry->sfe_filter->sf_filter.sf_attach(
		    &entry->sfe_cookie, so);

		/* Lock the socket again */
		if (socklocked)
			socket_lock(so, 0);

		/* Lock the filters again */
		lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * If the attach function returns an error,
		 * this filter must be detached
		 */
		if (error) {
			/* don't call sf_detach */
			entry->sfe_flags |= SFEF_NODETACH;
			sflt_detach_locked(entry);
		}

		/* Release the retain we held through the attach call */
		sflt_entry_release(entry);
	}

	return (error);
}

errno_t
sflt_attach_internal(socket_t socket, sflt_handle handle)
{
	if (socket == NULL || handle == 0)
		return (EINVAL);

	int result = EINVAL;

	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter *filter = NULL;
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		result = sflt_attach_locked(socket, filter, 1);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}

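/*
 * Clears SFEF_ATTACHED and drops the reference that the attached state
 * holds on the entry; if that was the last reference, the entry migrates
 * to the cleanup thread, which performs the actual detach callback.
 */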
static void
sflt_detach_locked(struct socket_filter_entry *entry)
{
	if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
		entry->sfe_flags &= ~SFEF_ATTACHED;
		sflt_entry_release(entry);
	}
}

#pragma mark -- Socket Layer Hooks --

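/*
 * Attach every global filter registered for this protocol to a newly
 * created socket; the socket layer calls this at socket creation time.
 */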
__private_extern__ void
sflt_initsock(struct socket *so)
{
	/*
	 * Point to the real protosw, as so_proto might have been
	 * pointed to a modified version.
	 */
	struct protosw *proto = so->so_proto->pr_protosw;

	lck_rw_lock_shared(sock_filter_lock);
	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		/* Promote lock to exclusive */
		if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock))
			lck_rw_lock_exclusive(sock_filter_lock);

		/*
		 * Warning: A filter unregistering will be pulled out of
		 * the list. This could happen while we drop the lock in
		 * sflt_attach_locked or sflt_release_locked. For this
		 * reason we retain a reference on the filter (or next_filter)
		 * while calling this function. This protects us from a panic,
		 * but it could result in a socket being created without all
		 * of the global filters if we're attaching a filter as it
		 * is removed, if that's possible.
		 */
		struct socket_filter *filter =
		    TAILQ_FIRST(&proto->pr_filter_head);

		sflt_retain_locked(filter);

		while (filter) {
			struct socket_filter *filter_next;
			/*
			 * Warning: sflt_attach_locked
			 * will drop the lock
			 */
			sflt_attach_locked(so, filter, 0);

			filter_next = TAILQ_NEXT(filter, sf_protosw_next);
			if (filter_next)
				sflt_retain_locked(filter_next);

			/*
			 * Warning: sflt_release_locked may remove
			 * the filter from the queue
			 */
			sflt_release_locked(filter);
			filter = filter_next;
		}
	}
	lck_rw_done(sock_filter_lock);
}

/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.
 */
__private_extern__ void
sflt_termsock(struct socket *so)
{
	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter_entry *entry;

	while ((entry = so->so_filt) != NULL) {
		/* Pull filter off the socket */
		so->so_filt = entry->sfe_next_onsocket;
		entry->sfe_flags |= SFEF_NOSOCKET;

		/* Call detach */
		sflt_detach_locked(entry);

		/*
		 * On sflt_termsock, we can't return until the detach function
		 * has been called. Call the detach function - this is gross
		 * because the socket filter entry could be freed when we drop
		 * the lock, so we make copies on the stack and retain
		 * everything we need before dropping the lock.
		 */
		if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
		    entry->sfe_filter->sf_filter.sf_detach) {
			void *sfe_cookie = entry->sfe_cookie;
			struct socket_filter *sfe_filter = entry->sfe_filter;

			/* Retain the socket filter */
			sflt_retain_locked(sfe_filter);

			/* Mark that we've called the detach function */
			entry->sfe_flags |= SFEF_NODETACH;

			/* Drop the lock before calling the detach function */
			lck_rw_unlock_exclusive(sock_filter_lock);
			sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
			lck_rw_lock_exclusive(sock_filter_lock);

			/* Release the filter */
			sflt_release_locked(sfe_filter);
		}
	}

	lck_rw_unlock_exclusive(sock_filter_lock);
}

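/*
 * The hooks below all follow the same pattern: walk the socket's entry
 * list under the shared filter lock; for each attached entry implementing
 * the callback, retain the entry, drop the filter lock (and the socket
 * lock, the first time around), invoke the callback, then retake the
 * filter lock and release the entry. The socket lock is reacquired once
 * the walk is complete.
 */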
static void
sflt_notify_internal(struct socket *so, sflt_event_t event, void *param,
    sflt_handle handle)
{
	if (so->so_filt == NULL)
		return;

	struct socket_filter_entry *entry;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_notify &&
		    ((handle && entry->sfe_filter->sf_filter.sf_handle !=
		    handle) || !handle)) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Finally call the filter */
			entry->sfe_filter->sf_filter.sf_notify(
			    entry->sfe_cookie, so, event, param);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked != 0) {
		socket_lock(so, 0);
	}
}

__private_extern__ void
sflt_notify(struct socket *so, sflt_event_t event, void *param)
{
	sflt_notify_internal(so, event, param, 0);
}

static void
sflt_notify_after_register(struct socket *so, sflt_event_t event,
    sflt_handle handle)
{
	sflt_notify_internal(so, event, NULL, handle);
}

__private_extern__ int
sflt_ioctl(struct socket *so, u_long cmd, caddr_t data)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_ioctl) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_ioctl(
			    entry->sfe_cookie, so, cmd, data);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_bind(struct socket *so, const struct sockaddr *nam)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_bind) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_bind(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again and
			 * release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_listen(struct socket *so)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_listen) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_listen(
			    entry->sfe_cookie, so);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_accept(struct socket *head, struct socket *so,
    const struct sockaddr *local, const struct sockaddr *remote)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_accept) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_accept(
			    entry->sfe_cookie, head, so, local, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getsockname(struct socket *so, struct sockaddr **local)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getsockname) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getsockname(
			    entry->sfe_cookie, so, local);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getpeername(struct socket *so, struct sockaddr **remote)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getpeername) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getpeername(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_connectin(struct socket *so, const struct sockaddr *remote)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_in) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_in(
			    entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_connectout(struct socket *so, const struct sockaddr *nam)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_out) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_out(
			    entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_connectxout(struct socket *so, struct sockaddr_list **dst_sl0)
{
	char buf[SOCK_MAXADDRLEN];
	struct sockaddr_list *dst_sl;
	struct sockaddr_entry *se, *tse;
	int modified = 0;
	int error = 0;

	if (so->so_filt == NULL)
		return (0);

	/* make a copy as sflt_connectout() releases socket lock */
	dst_sl = sockaddrlist_dup(*dst_sl0, M_WAITOK);
	if (dst_sl == NULL)
		return (ENOBUFS);

	/*
	 * Hmm; we don't yet have a connectx socket filter callback,
	 * so the closest thing to do is to probably call sflt_connectout()
	 * as many times as there are addresses in the list, and bail
	 * as soon as we get an error.
	 */
	TAILQ_FOREACH_SAFE(se, &dst_sl->sl_head, se_link, tse) {
		int sa_len;

		/* verify before dereferencing, then remember the original */
		VERIFY(se->se_addr != NULL);
		sa_len = se->se_addr->sa_len;
		bzero(buf, sizeof (buf));
		bcopy(se->se_addr, buf, sa_len);

		error = sflt_connectout(so, se->se_addr);
		if (error != 0)
			break;

		/* see if the address was modified */
		if (bcmp(se->se_addr, buf, sa_len) != 0)
			modified = 1;
	}

	if (error != 0 || !modified) {
		/* leave the original as is */
		sockaddrlist_free(dst_sl);
	} else {
		/*
		 * At least one address was modified and there were no errors;
		 * ditch the original and return the modified list.
		 */
		sockaddrlist_free(*dst_sl0);
		*dst_sl0 = dst_sl;
	}

	return (error);
}

__private_extern__ int
sflt_setsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_setoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_setoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_getsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getoption(
			    entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

__private_extern__ int
sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int setsendthread = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_out) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				if (so->so_send_filt_thread == NULL) {
					setsendthread = 1;
					so->so_send_filt_thread =
					    current_thread();
				}
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_out(
			    entry->sfe_cookie, so, to, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
		if (setsendthread)
			so->so_send_filt_thread = NULL;
	}

	return (error);
}

__private_extern__ int
sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL)
		return (0);

	struct socket_filter_entry *entry;
	int error = 0;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);

	for (entry = so->so_filt; entry && (error == 0);
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW)
			continue;
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_in) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_in(
			    entry->sfe_cookie, so, from, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return (error);
}

#pragma mark -- KPI --

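/*
 * Typical use from a kext (an illustrative sketch only; the handle value
 * and callback names below are hypothetical, not part of this file):
 * fill out a struct sflt_filter and register it for a protocol, e.g.
 *
 *	static struct sflt_filter my_filter = {
 *		.sf_handle = 0x12345678,	// hypothetical handle
 *		.sf_flags = SFLT_GLOBAL,	// auto-attach to new sockets
 *		.sf_name = "com.example.filter",
 *		.sf_attach = my_attach,		// required
 *		.sf_detach = my_detach,		// required
 *	};
 *	errno_t err = sflt_register(&my_filter, PF_INET, SOCK_STREAM,
 *	    IPPROTO_TCP);
 *
 * sflt_attach() below additionally lets a caller attach an already
 * registered filter to one specific socket by handle.
 */
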
errno_t
sflt_attach(socket_t socket, sflt_handle handle)
{
	socket_lock(socket, 1);
	errno_t result = sflt_attach_internal(socket, handle);
	socket_unlock(socket, 1);
	return (result);
}

errno_t
sflt_detach(socket_t socket, sflt_handle handle)
{
	struct socket_filter_entry *entry;
	errno_t result = 0;

	if (socket == NULL || handle == 0)
		return (EINVAL);

	lck_rw_lock_exclusive(sock_filter_lock);
	for (entry = socket->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if (entry->sfe_filter->sf_filter.sf_handle == handle &&
		    (entry->sfe_flags & SFEF_ATTACHED) != 0) {
			break;
		}
	}

	if (entry != NULL) {
		sflt_detach_locked(entry);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	return (result);
}

struct solist {
	struct solist *next;
	struct socket *so;
};

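/*
 * Register a filter and, if SFLT_EXTENDED_REGISTRY is set, also attach it
 * to the matching TCP or UDP sockets that already exist; struct solist
 * above carries the retained sockets from the pcb walk to the attach loop.
 */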
errno_t
sflt_register(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr = pffindproto(domain, protocol, type);
	unsigned int len;
	struct socket *so;
	struct inpcb *inp;
	struct solist *solisthead = NULL, *solist = NULL;

	if (pr == NULL)
		return (ENOENT);

	if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
	    filter->sf_handle == 0 || filter->sf_name == NULL)
		return (EINVAL);

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter *, sizeof (*sock_filt),
	    M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return (ENOBUFS);
	}

	bzero(sock_filt, sizeof (*sock_filt));

	/* Legacy sflt_filter length; current structure minus extended */
	len = sizeof (*filter) - sizeof (struct sflt_filter_ext);
	/*
	 * Include extended fields if filter defines SFLT_EXTENDED.
	 * We've zeroed out our internal sflt_filter placeholder,
	 * so any unused portion would have been taken care of.
	 */
	if (filter->sf_flags & SFLT_EXTENDED) {
		unsigned int ext_len = filter->sf_len;

		if (ext_len > sizeof (struct sflt_filter_ext))
			ext_len = sizeof (struct sflt_filter_ext);

		len += ext_len;
	}
	bcopy(filter, &sock_filt->sf_filter, len);

	lck_rw_lock_exclusive(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle ==
		    sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
			    sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
		sflt_retain_locked(sock_filt);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	if (match != NULL) {
		FREE(sock_filt, M_IFADDR);
		return (EEXIST);
	}

	if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY))
		return (error);

	/*
	 * Setup the filter on the TCP and UDP sockets already created.
	 */
#define	SOLIST_ADD(_so) do {						\
	solist->next = solisthead;					\
	sock_retain((_so));						\
	solist->so = (_so);						\
	solisthead = solist;						\
} while (0)
	if (protocol == IPPROTO_TCP) {
		lck_rw_lock_shared(tcbinfo.ipi_lock);
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(tcbinfo.ipi_lock);
	} else if (protocol == IPPROTO_UDP) {
		lck_rw_lock_shared(udbinfo.ipi_lock);
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type))
				continue;
			MALLOC(solist, struct solist *, sizeof (*solist),
			    M_IFADDR, M_NOWAIT);
			if (!solist)
				continue;
			SOLIST_ADD(so);
		}
		lck_rw_done(udbinfo.ipi_lock);
	}
	/* XXX it's possible to walk the raw socket list as well */
#undef SOLIST_ADD

	while (solisthead) {
		sflt_handle handle = filter->sf_handle;

		so = solisthead->so;
		sflt_initsock(so);

		if (so->so_state & SS_ISCONNECTING)
			sflt_notify_after_register(so, sock_evt_connecting,
			    handle);
		else if (so->so_state & SS_ISCONNECTED)
			sflt_notify_after_register(so, sock_evt_connected,
			    handle);
		else if ((so->so_state &
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE)) ==
		    (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE))
			sflt_notify_after_register(so, sock_evt_disconnecting,
			    handle);
		else if ((so->so_state &
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED)) ==
		    (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED))
			sflt_notify_after_register(so, sock_evt_disconnected,
			    handle);
		else if (so->so_state & SS_CANTSENDMORE)
			sflt_notify_after_register(so, sock_evt_cantsendmore,
			    handle);
		else if (so->so_state & SS_CANTRCVMORE)
			sflt_notify_after_register(so, sock_evt_cantrecvmore,
			    handle);
		/* XXX no easy way to post the sock_evt_closing event */
		sock_release(so);
		solist = solisthead;
		solisthead = solisthead->next;
		FREE(solist, M_IFADDR);
	}

	return (error);
}

errno_t
sflt_unregister(sflt_handle handle)
{
	struct socket_filter *filter;
	lck_rw_lock_exclusive(sock_filter_lock);

	/* Find the entry by the handle */
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		/* Remove it from the global list */
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

		/* Remove it from the protosw list */
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head,
			    filter, sf_protosw_next);
		}

		/* Detach from any sockets */
		struct socket_filter_entry *entry = NULL;

		for (entry = filter->sf_entry_head; entry;
		    entry = entry->sfe_next_onfilter) {
			sflt_detach_locked(entry);
		}

		/* Release the filter */
		sflt_release_locked(filter);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	if (filter == NULL)
		return (ENOENT);

	return (0);
}

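/*
 * Data injection: a filter that takes ownership of an mbuf chain in its
 * sf_data_in/sf_data_out callback (typically by returning EJUSTRETURN
 * after retaining the mbufs) can later feed it back to the socket with
 * the two functions below.
 */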
errno_t
sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int error = 0;

	if (so == NULL || data == NULL)
		return (EINVAL);

	if (flags & sock_data_filt_flag_oob) {
		return (ENOTSUP);
	}

	socket_lock(so, 1);

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		error = ENOTSUP;
		goto done;
	}

	if (from) {
		if (sbappendaddr(&so->so_rcv,
		    (struct sockaddr *)(uintptr_t)from, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf *)data))
			sorwakeup(so);
		goto done;
	}

	if (sbappend(&so->so_rcv, data))
		sorwakeup(so);
done:
	socket_unlock(so, 1);
	return (error);
}

errno_t
sock_inject_data_out(socket_t so, const struct sockaddr *to, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int sosendflags = 0;

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW)
		return (ENOTSUP);

	if (flags & sock_data_filt_flag_oob)
		sosendflags = MSG_OOB;
	return (sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
	    data, control, sosendflags));
}

sockopt_dir
sockopt_direction(sockopt_t sopt)
{
	return ((sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set);
}

int
sockopt_level(sockopt_t sopt)
{
	return (sopt->sopt_level);
}

int
sockopt_name(sockopt_t sopt)
{
	return (sopt->sopt_name);
}

size_t
sockopt_valsize(sockopt_t sopt)
{
	return (sopt->sopt_valsize);
}

errno_t
sockopt_copyin(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyin(sopt, data, len, len));
}

errno_t
sockopt_copyout(sockopt_t sopt, void *data, size_t len)
{
	return (sooptcopyout(sopt, data, len));
}