]> git.saurik.com Git - apple/xnu.git/blame - bsd/kern/kpi_socketfilter.c
xnu-1699.24.8.tar.gz
[apple/xnu.git] / bsd / kern / kpi_socketfilter.c
CommitLineData
91447636 1/*
2d21ac55 2 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
5d5c5d0d 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
91447636 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
91447636
A
27 */
28
29#include <sys/kpi_socketfilter.h>
30
31#include <sys/socket.h>
32#include <sys/param.h>
33#include <sys/errno.h>
34#include <sys/malloc.h>
35#include <sys/protosw.h>
6d2010ae 36#include <sys/proc.h>
91447636 37#include <kern/locks.h>
6d2010ae
A
38#include <kern/thread.h>
39#include <kern/debug.h>
91447636
A
40#include <net/kext_net.h>
41
c910b4d9 42#include <libkern/libkern.h>
6d2010ae 43#include <libkern/OSAtomic.h>
c910b4d9 44
2d21ac55
A
45#include <string.h>
46
6d2010ae
A
#define SFEF_ATTACHED 0x1 /* SFE is on socket list */
#define SFEF_NODETACH 0x2 /* Detach should not be called */
#define SFEF_NOSOCKET 0x4 /* Socket is gone */

/*
 * One attachment of one socket filter to one socket.  An entry is linked
 * on up to three singly linked lists at once: the per-socket list
 * (so->so_filt), the per-filter list (sf_entry_head), and — once its
 * last reference is dropped — the global deferred-cleanup list.
 */
struct socket_filter_entry {
	struct socket_filter_entry *sfe_next_onsocket;	/* next on so->so_filt list */
	struct socket_filter_entry *sfe_next_onfilter;	/* next on filter's sf_entry_head list */
	struct socket_filter_entry *sfe_next_oncleanup;	/* next on sock_filter_cleanup_entries */

	struct socket_filter *sfe_filter;	/* owning filter; entry holds a sf_refcount reference */
	struct socket *sfe_socket;	/* socket this entry is attached to */
	void *sfe_cookie;	/* per-attachment cookie produced by the filter's sf_attach */

	uint32_t sfe_flags;	/* SFEF_* flags above */
	int32_t sfe_refcount;	/* updated atomically; final release queues entry for cleanup */
};
63
/*
 * A registered socket filter.  Lives on the global sock_filter_head list
 * and, for SFLT_GLOBAL filters, on its protocol's pr_filter_head list.
 * Reference counted via sflt_retain_locked()/sflt_release_locked().
 */
struct socket_filter {
	TAILQ_ENTRY(socket_filter) sf_protosw_next;	/* linkage on pr->pr_filter_head */
	TAILQ_ENTRY(socket_filter) sf_global_next;	/* linkage on sock_filter_head */
	struct socket_filter_entry *sf_entry_head;	/* entries (attachments) using this filter */

	struct protosw *sf_proto;	/* protocol, set only for SFLT_GLOBAL filters */
	struct sflt_filter sf_filter;	/* caller-supplied callbacks and handle */
	u_int32_t sf_refcount;	/* protected by sock_filter_lock */
};
73
TAILQ_HEAD(socket_filter_list, socket_filter);

/* All registered filters; protected by sock_filter_lock. */
static struct socket_filter_list sock_filter_head;
/* Guards the filter list, per-filter entry lists, and per-socket lists. */
static lck_rw_t *sock_filter_lock = NULL;
/* Guards the deferred-cleanup list and the cleanup thread pointer. */
static lck_mtx_t *sock_filter_cleanup_lock = NULL;
/* Entries whose last reference was dropped, awaiting detach/free. */
static struct socket_filter_entry *sock_filter_cleanup_entries = NULL;
/* Lazily created thread that drains sock_filter_cleanup_entries. */
static thread_t sock_filter_cleanup_thread = NULL;

static void sflt_cleanup_thread(void *, wait_result_t);
static void sflt_detach_locked(struct socket_filter_entry *entry);

#pragma mark -- Internal State Management --
3a60a9f5 86
91447636
A
/*
 * sflt_init
 *
 * One-time initialization of the socket filter subsystem: sets up the
 * global filter list and allocates the rw lock and cleanup mutex.
 * Called once during kernel startup, before any filter can register.
 */
__private_extern__ void
sflt_init(void)
{
	lck_grp_attr_t *grp_attrib = 0;
	lck_attr_t *lck_attrib = 0;
	lck_grp_t *lck_group = 0;

	TAILQ_INIT(&sock_filter_head);

	/* Allocate a rw lock */
	grp_attrib = lck_grp_attr_alloc_init();
	lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
	lck_grp_attr_free(grp_attrib);
	lck_attrib = lck_attr_alloc_init();
	sock_filter_lock = lck_rw_alloc_init(lck_group, lck_attrib);
	sock_filter_cleanup_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
	/* Both locks hold their own group reference; drop ours. */
	lck_grp_free(lck_group);
	lck_attr_free(lck_attrib);
}
106
6d2010ae
A
107static void
108sflt_retain_locked(
109 struct socket_filter *filter)
91447636 110{
6d2010ae
A
111 filter->sf_refcount++;
112}
113
/*
 * sflt_release_locked
 *
 * Drop one reference on a filter.  On the last reference, notify the
 * filter owner via sf_unregistered and free the filter.  Caller must
 * hold sock_filter_lock exclusively; NOTE the lock is dropped around
 * the sf_unregistered callout, so list state may change across a call.
 */
static void
sflt_release_locked(
	struct socket_filter *filter)
{
	filter->sf_refcount--;
	if (filter->sf_refcount == 0)
	{
		// Call the unregistered function
		if (filter->sf_filter.sf_unregistered) {
			/* Callouts must not be made with the lock held. */
			lck_rw_unlock_exclusive(sock_filter_lock);
			filter->sf_filter.sf_unregistered(filter->sf_filter.sf_handle);
			lck_rw_lock_exclusive(sock_filter_lock);
		}

		// Free the entry
		FREE(filter, M_IFADDR);
	}
}
132
133static void
134sflt_entry_retain(
135 struct socket_filter_entry *entry)
136{
137 if (OSIncrementAtomic(&entry->sfe_refcount) <= 0)
138 panic("sflt_entry_retain - sfe_refcount <= 0\n");
139}
140
/*
 * sflt_entry_release
 *
 * Atomically drop a reference on a filter entry.  The final release
 * must not free in place (callers may hold sock_filter_lock shared),
 * so the entry is pushed on the cleanup list and the cleanup thread is
 * created or woken to detach and free it.
 */
static void
sflt_entry_release(
	struct socket_filter_entry *entry)
{
	SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);
	if (old == 1) {
		// That was the last reference

		// Take the cleanup lock
		lck_mtx_lock(sock_filter_cleanup_lock);

		// Put this item on the cleanup list
		entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = entry;

		// If the item is the first item in the list
		if (entry->sfe_next_oncleanup == NULL) {
			if (sock_filter_cleanup_thread == NULL) {
				// Create a thread
				kernel_thread_start(sflt_cleanup_thread, NULL, &sock_filter_cleanup_thread);
			} else {
				// Wakeup the thread
				wakeup(&sock_filter_cleanup_entries);
			}
		}
		/* If the list was non-empty the thread is already awake or
		 * will re-check before sleeping; no wakeup needed. */

		// Drop the cleanup lock
		lck_mtx_unlock(sock_filter_cleanup_lock);
	}
	else if (old <= 0)
	{
		panic("sflt_entry_release - sfe_refcount (%d) <= 0\n", (int)old);
	}
}
175
/*
 * sflt_cleanup_thread
 *
 * Body of the deferred-cleanup thread.  Loops forever: sleeps until
 * entries appear on sock_filter_cleanup_entries, then for each dead
 * entry calls sf_detach (unless SFEF_NODETACH), unlinks it from the
 * socket and filter lists, releases its filter reference, and frees it.
 */
static void
sflt_cleanup_thread(
	__unused void * blah,
	__unused wait_result_t blah2)
{
	while (1) {
		lck_mtx_lock(sock_filter_cleanup_lock);
		while (sock_filter_cleanup_entries == NULL) {
			// Sleep until we've got something better to do
			msleep(&sock_filter_cleanup_entries, sock_filter_cleanup_lock, PWAIT, "sflt_cleanup", NULL);
		}

		// Pull the current list of dead items
		struct socket_filter_entry *dead = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = NULL;

		// Drop the lock
		lck_mtx_unlock(sock_filter_cleanup_lock);

		// Take the socket filter lock
		lck_rw_lock_exclusive(sock_filter_lock);

		// Cleanup every dead item
		struct socket_filter_entry *entry;
		for (entry = dead; entry; entry = dead) {
			struct socket_filter_entry **nextpp;

			dead = entry->sfe_next_oncleanup;

			// Call the detach function if necessary - drop the lock
			if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
				entry->sfe_filter->sf_filter.sf_detach) {
				/* Mark first so nobody else calls sf_detach again. */
				entry->sfe_flags |= SFEF_NODETACH;
				lck_rw_unlock_exclusive(sock_filter_lock);

				// Warning - passing a potentially dead socket may be bad
				entry->sfe_filter->sf_filter.
					sf_detach(entry->sfe_cookie, entry->sfe_socket);

				lck_rw_lock_exclusive(sock_filter_lock);
			}

			// Pull entry off the socket list -- if the socket still exists
			if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
				for (nextpp = &entry->sfe_socket->so_filt; *nextpp;
					nextpp = &(*nextpp)->sfe_next_onsocket) {
					if (*nextpp == entry) {
						*nextpp = entry->sfe_next_onsocket;
						break;
					}
				}
			}

			// Pull entry off the filter list
			for (nextpp = &entry->sfe_filter->sf_entry_head; *nextpp;
				nextpp = &(*nextpp)->sfe_next_onfilter) {
				if (*nextpp == entry) {
					*nextpp = entry->sfe_next_onfilter;
					break;
				}
			}

			// Release the filter -- may drop lock, but that's okay
			sflt_release_locked(entry->sfe_filter);
			entry->sfe_socket = NULL;
			entry->sfe_filter = NULL;
			FREE(entry, M_IFADDR);
		}

		// Drop the socket filter lock
		lck_rw_unlock_exclusive(sock_filter_lock);
	}
	// Not reached
}
250
/*
 * sflt_attach_locked
 *
 * Attach 'filter' to 'so': allocate an entry, link it on the filter and
 * socket lists, then call the filter's sf_attach callback.  Caller must
 * hold sock_filter_lock exclusively.  WARNING: the lock (and, when
 * 'socklocked' is set, the socket lock) is dropped around the sf_attach
 * callout; callers must tolerate list changes across this call.
 * Returns 0 on success or an errno (ENOENT/ENOMEM/sf_attach's error).
 */
static int
sflt_attach_locked(
	struct socket *so,
	struct socket_filter *filter,
	int socklocked)
{
	int error = 0;
	struct socket_filter_entry *entry = NULL;

	if (filter == NULL)
		error = ENOENT;

	if (error == 0) {
		/* allocate the socket filter entry */
		MALLOC(entry, struct socket_filter_entry *, sizeof(*entry), M_IFADDR, M_WAITOK);
		if (entry == NULL) {
			error = ENOMEM;
		}
	}

	if (error == 0) {
		/* Initialize the socket filter entry */
		entry->sfe_cookie = NULL;
		entry->sfe_flags = SFEF_ATTACHED;
		entry->sfe_refcount = 1; // corresponds to SFEF_ATTACHED flag set

		/* Put the entry in the filter list */
		sflt_retain_locked(filter);
		entry->sfe_filter = filter;
		entry->sfe_next_onfilter = filter->sf_entry_head;
		filter->sf_entry_head = entry;

		/* Put the entry on the socket filter list */
		entry->sfe_socket = so;
		entry->sfe_next_onsocket = so->so_filt;
		so->so_filt = entry;

		if (entry->sfe_filter->sf_filter.sf_attach) {
			// Retain the entry while we call attach
			sflt_entry_retain(entry);

			// Release the filter lock -- callers must be aware we will do this
			lck_rw_unlock_exclusive(sock_filter_lock);

			// Unlock the socket
			if (socklocked)
				socket_unlock(so, 0);

			// It's finally safe to call the filter function
			error = entry->sfe_filter->sf_filter.sf_attach(&entry->sfe_cookie, so);

			// Lock the socket again
			if (socklocked)
				socket_lock(so, 0);

			// Lock the filters again
			lck_rw_lock_exclusive(sock_filter_lock);

			// If the attach function returns an error, this filter must be detached
			if (error) {
				entry->sfe_flags |= SFEF_NODETACH; // don't call sf_detach
				sflt_detach_locked(entry);
			}

			// Release the retain we held through the attach call
			sflt_entry_release(entry);
		}
	}

	return error;
}
322
6d2010ae
A
323errno_t
324sflt_attach_internal(
325 socket_t socket,
326 sflt_handle handle)
91447636 327{
6d2010ae
A
328 if (socket == NULL || handle == 0)
329 return EINVAL;
330
331 int result = EINVAL;
332
333 lck_rw_lock_exclusive(sock_filter_lock);
91447636 334
6d2010ae
A
335 struct socket_filter *filter = NULL;
336 TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
337 if (filter->sf_filter.sf_handle == handle) break;
338 }
339
340 if (filter) {
341 result = sflt_attach_locked(socket, filter, 1);
91447636 342 }
6d2010ae
A
343
344 lck_rw_unlock_exclusive(sock_filter_lock);
345
346 return result;
91447636
A
347}
348
6d2010ae
A
349static void
350sflt_detach_locked(
351 struct socket_filter_entry *entry)
352{
353 if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
354 entry->sfe_flags &= ~SFEF_ATTACHED;
355 sflt_entry_release(entry);
356 }
357}
358
359#pragma mark -- Socket Layer Hooks --
360
/*
 * sflt_initsock
 *
 * Attach every SFLT_GLOBAL filter registered for the socket's protocol
 * to a newly created socket.  Takes the filter lock shared, promoting
 * to exclusive only when there are filters to attach.
 */
__private_extern__ void
sflt_initsock(
	struct socket *so)
{
	struct protosw *proto = so->so_proto;

	lck_rw_lock_shared(sock_filter_lock);
	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		// Promote lock to exclusive
		if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock))
			/* Promotion failed and dropped the lock; reacquire. */
			lck_rw_lock_exclusive(sock_filter_lock);

		// Warning: A filter unregistering will be pulled out of the list.
		// This could happen while we drop the lock in sftl_attach_locked
		// or sflt_release_locked. For this reason we retain a reference
		// on the filter (or next_filter) while calling this function
		//
		// This protects us from a panic, but it could result in a
		// socket being created without all of the global filters if
		// we're attaching a filter as it is removed, if that's possible.
		struct socket_filter *filter = TAILQ_FIRST(&proto->pr_filter_head);
		sflt_retain_locked(filter);

		while (filter)
		{
			struct socket_filter *filter_next;

			// Warning: sflt_attach_private_locked will drop the lock
			sflt_attach_locked(so, filter, 0);

			filter_next = TAILQ_NEXT(filter, sf_protosw_next);
			if (filter_next)
				sflt_retain_locked(filter_next);

			// Warning: filt_release_locked may remove the filter from the queue
			sflt_release_locked(filter);
			filter = filter_next;
		}
	}
	/* lck_rw_done releases the lock in whichever mode it is held. */
	lck_rw_done(sock_filter_lock);
}
402
6d2010ae
A
403/*
404 * sflt_termsock
405 *
406 * Detaches all filters from the socket.
407 */
408
/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.  Unlike the asynchronous
 * cleanup path, this guarantees each filter's sf_detach has been
 * called before returning.
 */
__private_extern__ void
sflt_termsock(
	struct socket *so)
{
	lck_rw_lock_exclusive(sock_filter_lock);

	struct socket_filter_entry *entry;

	while ((entry = so->so_filt) != NULL) {
		// Pull filter off the socket
		so->so_filt = entry->sfe_next_onsocket;
		/* Tell the cleanup thread not to touch the (dying) socket. */
		entry->sfe_flags |= SFEF_NOSOCKET;

		// Call detach
		sflt_detach_locked(entry);

		// On sflt_termsock, we can't return until the detach function has been called
		// Call the detach function - this is gross because the socket filter
		// entry could be freed when we drop the lock, so we make copies on
		// the stack and retain everything we need before dropping the lock
		if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
			entry->sfe_filter->sf_filter.sf_detach) {
			void *sfe_cookie = entry->sfe_cookie;
			struct socket_filter *sfe_filter = entry->sfe_filter;

			// Retain the socket filter
			sflt_retain_locked(sfe_filter);

			// Mark that we've called the detach function
			entry->sfe_flags |= SFEF_NODETACH;

			// Drop the lock around the call to the detach function
			lck_rw_unlock_exclusive(sock_filter_lock);
			sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
			lck_rw_lock_exclusive(sock_filter_lock);

			// Release the filter
			sflt_release_locked(sfe_filter);
		}
	}

	lck_rw_unlock_exclusive(sock_filter_lock);
}
452
/*
 * sflt_notify
 *
 * Deliver a socket event to every attached filter with an sf_notify
 * callback.  Each callout is made with neither the filter lock nor the
 * socket lock held; the entry is retained across the callout.
 */
__private_extern__ void
sflt_notify(
	struct socket *so,
	sflt_event_t event,
	void *param)
{
	if (so->so_filt == NULL) return;

	struct socket_filter_entry *entry;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
			&& entry->sfe_filter->sf_filter.sf_notify) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			// Finally call the filter
			entry->sfe_filter->sf_filter.
				sf_notify(entry->sfe_cookie, so, event, param);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked != 0) {
		socket_lock(so, 0);
	}
}
493
/*
 * sflt_ioctl
 *
 * Give each attached filter's sf_ioctl callback a chance to handle or
 * veto the ioctl.  Stops at the first filter returning non-zero and
 * returns that error.  Callouts run with both locks dropped.
 */
__private_extern__ int
sflt_ioctl(
	struct socket *so,
	u_long cmd,
	caddr_t data)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
		entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
			&& entry->sfe_filter->sf_filter.sf_ioctl) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
				sf_ioctl(entry->sfe_cookie, so, cmd, data);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}
538
/*
 * sflt_bind
 *
 * Run each attached filter's sf_bind callback before the socket is
 * bound to 'nam'.  Stops at the first non-zero (veto) result.
 * Callouts run with both locks dropped.
 */
__private_extern__ int
sflt_bind(
	struct socket *so,
	const struct sockaddr *nam)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
		entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
			&& entry->sfe_filter->sf_filter.sf_bind) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
				sf_bind(entry->sfe_cookie, so, nam);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}
582
583__private_extern__ int
584sflt_listen(
585 struct socket *so)
586{
587 if (so->so_filt == NULL) return 0;
91447636 588
6d2010ae
A
589 struct socket_filter_entry *entry;
590 int unlocked = 0;
591 int error = 0;
592
593 lck_rw_lock_shared(sock_filter_lock);
594 for (entry = so->so_filt; entry && error == 0;
595 entry = entry->sfe_next_onsocket) {
596 if ((entry->sfe_flags & SFEF_ATTACHED)
597 && entry->sfe_filter->sf_filter.sf_listen) {
598 // Retain the filter entry and release the socket filter lock
599 sflt_entry_retain(entry);
600 lck_rw_unlock_shared(sock_filter_lock);
601
602 // If the socket isn't already unlocked, unlock it
603 if (unlocked == 0) {
604 socket_unlock(so, 0);
605 unlocked = 1;
606 }
607
608 // Call the filter
609 error = entry->sfe_filter->sf_filter.
610 sf_listen(entry->sfe_cookie, so);
611
612 // Take the socket filter lock again and release the entry
613 lck_rw_lock_shared(sock_filter_lock);
614 sflt_entry_release(entry);
91447636
A
615 }
616 }
6d2010ae
A
617 lck_rw_unlock_shared(sock_filter_lock);
618
619 if (unlocked) {
620 socket_lock(so, 0);
621 }
91447636 622
6d2010ae
A
623 return error;
624}
625
/*
 * sflt_accept
 *
 * Run each of the new socket's attached filters' sf_accept callbacks
 * when a connection is accepted from listening socket 'head'.  Stops
 * at the first non-zero result.  Callouts run with both locks dropped.
 */
__private_extern__ int
sflt_accept(
	struct socket *head,
	struct socket *so,
	const struct sockaddr *local,
	const struct sockaddr *remote)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
		entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
			&& entry->sfe_filter->sf_filter.sf_accept) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
				sf_accept(entry->sfe_cookie, head, so, local, remote);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}
671
/*
 * sflt_getsockname
 *
 * Run each attached filter's sf_getsockname callback, letting filters
 * observe or override the local address returned to the caller.  Stops
 * at the first non-zero result.  Callouts run with both locks dropped.
 */
__private_extern__ int
sflt_getsockname(
	struct socket *so,
	struct sockaddr **local)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
		entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
			&& entry->sfe_filter->sf_filter.sf_getsockname) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
				sf_getsockname(entry->sfe_cookie, so, local);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}
715
6d2010ae
A
/*
 * sflt_getpeername
 *
 * Run each attached filter's sf_getpeername callback, letting filters
 * observe or override the remote address returned to the caller.
 * Stops at the first non-zero result.  Callouts run with both locks
 * dropped.
 */
__private_extern__ int
sflt_getpeername(
	struct socket *so,
	struct sockaddr **remote)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
		entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
			&& entry->sfe_filter->sf_filter.sf_getpeername) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
				sf_getpeername(entry->sfe_cookie, so, remote);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}
91447636 759
6d2010ae
A
/*
 * sflt_connectin
 *
 * Run each attached filter's sf_connect_in callback for an inbound
 * connection from 'remote'.  The first non-zero result vetoes the
 * connection.  Callouts run with both locks dropped.
 */
__private_extern__ int
sflt_connectin(
	struct socket *so,
	const struct sockaddr *remote)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
		entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
			&& entry->sfe_filter->sf_filter.sf_connect_in) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
				sf_connect_in(entry->sfe_cookie, so, remote);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}
803
/*
 * sflt_connectout
 *
 * Run each attached filter's sf_connect_out callback for an outbound
 * connection to 'nam'.  The first non-zero result vetoes the
 * connection.  Callouts run with both locks dropped.
 */
__private_extern__ int
sflt_connectout(
	struct socket *so,
	const struct sockaddr *nam)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
		entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
			&& entry->sfe_filter->sf_filter.sf_connect_out) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
				sf_connect_out(entry->sfe_cookie, so, nam);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}
91447636 847
6d2010ae
A
/*
 * sflt_setsockopt
 *
 * Run each attached filter's sf_setoption callback before the socket
 * option in 'sopt' is applied.  Stops at the first non-zero result.
 * Callouts run with both locks dropped.
 */
__private_extern__ int
sflt_setsockopt(
	struct socket *so,
	struct sockopt *sopt)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
		entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
			&& entry->sfe_filter->sf_filter.sf_setoption) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
				sf_setoption(entry->sfe_cookie, so, sopt);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}
891
/*
 * sflt_getsockopt
 *
 * Run each attached filter's sf_getoption callback before the socket
 * option in 'sopt' is fetched.  Stops at the first non-zero result.
 * Callouts run with both locks dropped.
 */
__private_extern__ int
sflt_getsockopt(
	struct socket *so,
	struct sockopt *sopt)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
		entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
			&& entry->sfe_filter->sf_filter.sf_getoption) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
				sf_getoption(entry->sfe_cookie, so, sopt);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}
935
/*
 * sflt_data_out
 *
 * Run each attached filter's sf_data_out callback on outbound data.
 * Stops at the first non-zero result.  Before the first callout this
 * also records the current thread in so_send_filt_thread (if unset),
 * which lets the send path recognize re-entry from a filter's own
 * injection; the marker is cleared on exit if we set it.
 */
__private_extern__ int
sflt_data_out(
	struct socket *so,
	const struct sockaddr *to,
	mbuf_t *data,
	mbuf_t *control,
	sflt_data_flag_t flags)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int setsendthread = 0;
	int error = 0;

	lck_rw_lock_shared(sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
		entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED)
			&& entry->sfe_filter->sf_filter.sf_data_out) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				if (so->so_send_filt_thread == NULL) {
					setsendthread = 1;
					so->so_send_filt_thread = current_thread();
				}
				socket_unlock(so, 0);
				unlocked = 1;
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.
				sf_data_out(entry->sfe_cookie, so, to, data, control, flags);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
		if (setsendthread) so->so_send_filt_thread = NULL;
	}

	return error;
}
3a60a9f5 988
6d2010ae
A
/*
 * sflt_data_in
 *
 * Run each attached filter's sf_data_in callback on inbound data.
 * Stops at the first non-zero result; filters may consume or swap the
 * mbuf chains via the *data/*control pointers.  Callouts run with
 * both locks dropped.
 */
__private_extern__ int
sflt_data_in(
	struct socket *so,
	const struct sockaddr *from,
	mbuf_t *data,
	mbuf_t *control,
	sflt_data_flag_t flags)
{
	if (so->so_filt == NULL) return 0;

	struct socket_filter_entry *entry;
	int error = 0;
	int unlocked = 0;

	lck_rw_lock_shared(sock_filter_lock);

	for (entry = so->so_filt; entry && (error == 0);
		entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
			entry->sfe_filter->sf_filter.sf_data_in) {
			// Retain the filter entry and release the socket filter lock
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(sock_filter_lock);

			// If the socket isn't already unlocked, unlock it
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			// Call the filter
			error = entry->sfe_filter->sf_filter.sf_data_in(
				entry->sfe_cookie, so, from, data, control, flags);

			// Take the socket filter lock again and release the entry
			lck_rw_lock_shared(sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}
1036
6d2010ae
A
1037#pragma mark -- KPI --
1038
91447636
A
1039errno_t
1040sflt_attach(
1041 socket_t socket,
1042 sflt_handle handle)
1043{
6d2010ae
A
1044 socket_lock(socket, 1);
1045 errno_t result = sflt_attach_internal(socket, handle);
1046 socket_unlock(socket, 1);
1047 return result;
91447636
A
1048}
1049
1050errno_t
1051sflt_detach(
1052 socket_t socket,
1053 sflt_handle handle)
1054{
6d2010ae 1055 struct socket_filter_entry *entry;
91447636
A
1056 errno_t result = 0;
1057
1058 if (socket == NULL || handle == 0)
1059 return EINVAL;
1060
6d2010ae
A
1061 lck_rw_lock_exclusive(sock_filter_lock);
1062 for (entry = socket->so_filt; entry;
1063 entry = entry->sfe_next_onsocket) {
1064 if (entry->sfe_filter->sf_filter.sf_handle == handle &&
1065 (entry->sfe_flags & SFEF_ATTACHED) != 0) {
91447636 1066 break;
6d2010ae 1067 }
91447636
A
1068 }
1069
6d2010ae
A
1070 if (entry != NULL) {
1071 sflt_detach_locked(entry);
91447636 1072 }
6d2010ae 1073 lck_rw_unlock_exclusive(sock_filter_lock);
91447636
A
1074
1075 return result;
1076}
1077
91447636
A
/*
 * sflt_register
 *
 * Public KPI: register a socket filter for the protocol identified by
 * (domain, type, protocol).  Copies the caller's sflt_filter (plus its
 * extended fields when SFLT_EXTENDED is set) into a new socket_filter,
 * adds it to the global list, and — for SFLT_GLOBAL filters — to the
 * protocol's filter list so new sockets pick it up in sflt_initsock().
 *
 * Returns 0 on success, ENOENT for an unknown protocol, EINVAL for a
 * filter missing required fields, ENOBUFS on allocation failure, or
 * EEXIST if a filter with the same handle is already registered.
 */
errno_t
sflt_register(
	const struct sflt_filter *filter,
	int domain,
	int type,
	int protocol)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr = pffindproto(domain, protocol, type);
	unsigned int len;

	if (pr == NULL)
		return ENOENT;

	/* sf_attach, sf_detach, a handle, and a name are mandatory. */
	if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
		filter->sf_handle == 0 || filter->sf_name == NULL)
		return EINVAL;

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter *, sizeof (*sock_filt),
		M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return ENOBUFS;
	}

	bzero(sock_filt, sizeof (*sock_filt));

	/* Legacy sflt_filter length; current structure minus extended */
	len = sizeof (*filter) - sizeof (struct sflt_filter_ext);
	/*
	 * Include extended fields if filter defines SFLT_EXTENDED.
	 * We've zeroed out our internal sflt_filter placeholder,
	 * so any unused portion would have been taken care of.
	 */
	if (filter->sf_flags & SFLT_EXTENDED) {
		unsigned int ext_len = filter->sf_len;

		/* Never copy more than we have room for. */
		if (ext_len > sizeof (struct sflt_filter_ext))
			ext_len = sizeof (struct sflt_filter_ext);

		len += ext_len;
	}
	bcopy(filter, &sock_filt->sf_filter, len);

	lck_rw_lock_exclusive(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle ==
			sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
				sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
		/* The lists' reference on the filter. */
		sflt_retain_locked(sock_filt);
	}
	lck_rw_unlock_exclusive(sock_filter_lock);

	if (match != NULL) {
		/* Duplicate handle: discard our copy. */
		FREE(sock_filt, M_IFADDR);
		return EEXIST;
	}

	return error;
}
1152
/*
 * sflt_unregister - remove a previously registered socket filter.
 *
 * Unlinks the filter from the global list (and from its protosw's list if
 * it was registered SFLT_GLOBAL), detaches it from every socket it is
 * currently attached to, and drops the registration reference taken in
 * sflt_register().  The filter memory itself is freed only when the last
 * reference is released, which may be deferred until in-flight callbacks
 * complete.
 *
 * Returns 0 on success, ENOENT if no filter with this handle is registered.
 */
errno_t
sflt_unregister(
	sflt_handle handle)
{
	struct socket_filter *filter;
	lck_rw_lock_exclusive(sock_filter_lock);

	/* Find the entry by the handle */
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		// Remove it from the global list
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

		// Remove it from the protosw list
		// (sf_proto is only valid for SFLT_GLOBAL filters; see
		// sflt_register, which sets it under the same flag.)
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head, filter, sf_protosw_next);
		}

		// Detach from any sockets
		struct socket_filter_entry *entry = NULL;

		for (entry = filter->sf_entry_head; entry; entry = entry->sfe_next_onfilter) {
			sflt_detach_locked(entry);
		}

		// Release the filter
		// (drops the reference taken at registration time)
		sflt_release_locked(filter);
	}

	lck_rw_unlock_exclusive(sock_filter_lock);

	if (filter == NULL)
		return ENOENT;

	return 0;
}
1193
1194errno_t
1195sock_inject_data_in(
1196 socket_t so,
1197 const struct sockaddr* from,
1198 mbuf_t data,
1199 mbuf_t control,
1200 sflt_data_flag_t flags)
1201{
1202 int error = 0;
1203 if (so == NULL || data == NULL) return EINVAL;
1204
1205 if (flags & sock_data_filt_flag_oob) {
1206 return ENOTSUP;
1207 }
1208
1209 socket_lock(so, 1);
1210
1211 if (from) {
b0d623f7 1212 if (sbappendaddr(&so->so_rcv, (struct sockaddr*)(uintptr_t)from, data,
91447636
A
1213 control, NULL))
1214 sorwakeup(so);
1215 goto done;
1216 }
1217
1218 if (control) {
1219 if (sbappendcontrol(&so->so_rcv, data, control, NULL))
1220 sorwakeup(so);
1221 goto done;
1222 }
1223
1224 if (flags & sock_data_filt_flag_record) {
1225 if (control || from) {
1226 error = EINVAL;
1227 goto done;
1228 }
1229 if (sbappendrecord(&so->so_rcv, (struct mbuf*)data))
1230 sorwakeup(so);
1231 goto done;
1232 }
1233
1234 if (sbappend(&so->so_rcv, data))
1235 sorwakeup(so);
1236done:
1237 socket_unlock(so, 1);
1238 return error;
1239}
1240
1241errno_t
1242sock_inject_data_out(
1243 socket_t so,
1244 const struct sockaddr* to,
1245 mbuf_t data,
1246 mbuf_t control,
1247 sflt_data_flag_t flags)
1248{
1249 int sosendflags = 0;
1250 if (flags & sock_data_filt_flag_oob) sosendflags = MSG_OOB;
b0d623f7 1251 return sosend(so, (struct sockaddr*)(uintptr_t)to, NULL,
91447636
A
1252 data, control, sosendflags);
1253}
1254
1255sockopt_dir
1256sockopt_direction(
1257 sockopt_t sopt)
1258{
1259 return (sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set;
1260}
1261
1262int
1263sockopt_level(
1264 sockopt_t sopt)
1265{
1266 return sopt->sopt_level;
1267}
1268
1269int
1270sockopt_name(
1271 sockopt_t sopt)
1272{
1273 return sopt->sopt_name;
1274}
1275
1276size_t
1277sockopt_valsize(
1278 sockopt_t sopt)
1279{
1280 return sopt->sopt_valsize;
1281}
1282
1283errno_t
1284sockopt_copyin(
1285 sockopt_t sopt,
1286 void *data,
1287 size_t len)
1288{
1289 return sooptcopyin(sopt, data, len, len);
1290}
1291
1292errno_t
1293sockopt_copyout(
1294 sockopt_t sopt,
1295 void *data,
1296 size_t len)
1297{
1298 return sooptcopyout(sopt, data, len);
1299}