/*
 * Copyright (c) 2003-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/proc.h>
#include <kern/locks.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <net/kext_net.h>
#include <net/if.h>
#include <net/net_api_stats.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <os/refcnt.h>

#include <stdbool.h>
#include <string.h>

#define SFEF_ATTACHED 0x1 /* SFE is on socket list */
#define SFEF_NODETACH 0x2 /* Detach should not be called */
#define SFEF_NOSOCKET 0x4 /* Socket is gone */

/*
 * If you need accounting for KM_IFADDR consider using
 * KALLOC_HEAP_DEFINE to define a view.
 */
#define KM_IFADDR KHEAP_DEFAULT

struct socket_filter_entry {
	struct socket_filter_entry *sfe_next_onsocket;
	struct socket_filter_entry *sfe_next_onfilter;
	struct socket_filter_entry *sfe_next_oncleanup;

	struct socket_filter *sfe_filter;
	struct socket *sfe_socket;
	void *sfe_cookie;

	uint32_t sfe_flags;
	int32_t sfe_refcount;
};

struct socket_filter {
	TAILQ_ENTRY(socket_filter) sf_protosw_next;
	TAILQ_ENTRY(socket_filter) sf_global_next;
	struct socket_filter_entry *sf_entry_head;

	struct protosw *sf_proto;
	struct sflt_filter sf_filter;
	struct os_refcnt sf_refcount;
};

TAILQ_HEAD(socket_filter_list, socket_filter);

static LCK_GRP_DECLARE(sock_filter_lock_grp, "socket filter lock");
static LCK_RW_DECLARE(sock_filter_lock, &sock_filter_lock_grp);
static LCK_MTX_DECLARE(sock_filter_cleanup_lock, &sock_filter_lock_grp);

static struct socket_filter_list sock_filter_head =
    TAILQ_HEAD_INITIALIZER(sock_filter_head);
static struct socket_filter_entry *sock_filter_cleanup_entries = NULL;
static thread_t sock_filter_cleanup_thread = NULL;

static void sflt_cleanup_thread(void *, wait_result_t);
static void sflt_detach_locked(struct socket_filter_entry *entry);

#undef sflt_register
static errno_t sflt_register_common(const struct sflt_filter *filter, int domain,
    int type, int protocol, bool is_internal);
errno_t sflt_register(const struct sflt_filter *filter, int domain,
    int type, int protocol);

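/*
 * Overview of the synchronization scheme used below (summary added for
 * clarity; the individual functions remain the authoritative reference):
 *
 * - sock_filter_lock (rwlock) protects the global filter list
 *   (sock_filter_head), each protosw's pr_filter_head, and the per-socket
 *   and per-filter lists of socket_filter_entry structures.
 * - struct socket_filter is refcounted via sf_refcount (os_refcnt); the
 *   last sflt_release_locked() invokes sf_unregistered and frees the filter.
 * - struct socket_filter_entry is refcounted via sfe_refcount; the last
 *   sflt_entry_release() queues the entry on sock_filter_cleanup_entries,
 *   and sflt_cleanup_thread() later calls sf_detach and frees it.
 * - Filter callbacks are never invoked with sock_filter_lock or the socket
 *   lock held; the code drops both around every callback and reacquires
 *   them afterwards.
 */
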
#pragma mark -- Internal State Management --

__private_extern__ int
sflt_permission_check(struct inpcb *inp)
{
	/* Only IPv4 or IPv6 sockets can bypass filters */
	if (!(inp->inp_vflag & INP_IPV4) &&
	    !(inp->inp_vflag & INP_IPV6)) {
		return 0;
	}
	/* Sockets that have this entitlement bypass socket filters. */
	if (INP_INTCOPROC_ALLOWED(inp)) {
		return 1;
	}
	/* Sockets bound to an intcoproc interface bypass socket filters. */
	if ((inp->inp_flags & INP_BOUND_IF) &&
	    IFNET_IS_INTCOPROC(inp->inp_boundifp)) {
		return 1;
	}
#if NECP
	/*
	 * Make sure that the NECP policy is populated.
	 * If result is not populated, the policy ID will be
	 * NECP_KERNEL_POLICY_ID_NONE. Note that if the result
	 * is populated, but there was no match, it will be
	 * NECP_KERNEL_POLICY_ID_NO_MATCH.
	 * Do not call inp_update_necp_policy() to avoid scoping
	 * a socket prior to calls to bind().
	 */
	if (inp->inp_policyresult.policy_id == NECP_KERNEL_POLICY_ID_NONE) {
		necp_socket_find_policy_match(inp, NULL, NULL, 0);
	}

	/* If the filter unit is marked to be "no filter", bypass filters */
	if (inp->inp_policyresult.results.filter_control_unit ==
	    NECP_FILTER_UNIT_NO_FILTER) {
		return 1;
	}
#endif /* NECP */
	return 0;
}

static void
sflt_retain_locked(struct socket_filter *filter)
{
	os_ref_retain_locked(&filter->sf_refcount);
}

static void
sflt_release_locked(struct socket_filter *filter)
{
	if (os_ref_release_locked(&filter->sf_refcount) == 0) {
		/* Call the unregistered function */
		if (filter->sf_filter.sf_unregistered) {
			lck_rw_unlock_exclusive(&sock_filter_lock);
			filter->sf_filter.sf_unregistered(
				filter->sf_filter.sf_handle);
			lck_rw_lock_exclusive(&sock_filter_lock);
		}

		/* Free the entry */
		kheap_free(KM_IFADDR, filter, sizeof(struct socket_filter));
	}
}

static void
sflt_entry_retain(struct socket_filter_entry *entry)
{
	if (OSIncrementAtomic(&entry->sfe_refcount) <= 0) {
		panic("sflt_entry_retain - sfe_refcount <= 0\n");
		/* NOTREACHED */
	}
}

static void
sflt_entry_release(struct socket_filter_entry *entry)
{
	SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);
	if (old == 1) {
		/* That was the last reference */

		/* Take the cleanup lock */
		lck_mtx_lock(&sock_filter_cleanup_lock);

		/* Put this item on the cleanup list */
		entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = entry;

		/* If the item is the first item in the list */
		if (entry->sfe_next_oncleanup == NULL) {
			if (sock_filter_cleanup_thread == NULL) {
				/* Create a thread */
				kernel_thread_start(sflt_cleanup_thread,
				    NULL, &sock_filter_cleanup_thread);
			} else {
				/* Wakeup the thread */
				wakeup(&sock_filter_cleanup_entries);
			}
		}

		/* Drop the cleanup lock */
		lck_mtx_unlock(&sock_filter_cleanup_lock);
	} else if (old <= 0) {
		panic("sflt_entry_release - sfe_refcount (%d) <= 0\n",
		    (int)old);
		/* NOTREACHED */
	}
}

__attribute__((noreturn))
static void
sflt_cleanup_thread(void *blah, wait_result_t blah2)
{
#pragma unused(blah, blah2)
	while (1) {
		lck_mtx_lock(&sock_filter_cleanup_lock);
		while (sock_filter_cleanup_entries == NULL) {
			/* Sleep until we've got something better to do */
			msleep(&sock_filter_cleanup_entries,
			    &sock_filter_cleanup_lock, PWAIT,
			    "sflt_cleanup", NULL);
		}

		/* Pull the current list of dead items */
		struct socket_filter_entry *dead = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = NULL;

		/* Drop the lock */
		lck_mtx_unlock(&sock_filter_cleanup_lock);

		/* Take the socket filter lock */
		lck_rw_lock_exclusive(&sock_filter_lock);

		/* Cleanup every dead item */
		struct socket_filter_entry *entry;
		for (entry = dead; entry; entry = dead) {
			struct socket_filter_entry **nextpp;

			dead = entry->sfe_next_oncleanup;

			/* Call detach function if necessary - drop the lock */
			if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
			    entry->sfe_filter->sf_filter.sf_detach) {
				entry->sfe_flags |= SFEF_NODETACH;
				lck_rw_unlock_exclusive(&sock_filter_lock);

				/*
				 * Warning - passing a potentially
				 * dead socket may be bad
				 */
				entry->sfe_filter->sf_filter.sf_detach(
					entry->sfe_cookie, entry->sfe_socket);

				lck_rw_lock_exclusive(&sock_filter_lock);
			}

			/*
			 * Pull entry off the socket list --
			 * if the socket still exists
			 */
			if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
				for (nextpp = &entry->sfe_socket->so_filt;
				    *nextpp;
				    nextpp = &(*nextpp)->sfe_next_onsocket) {
					if (*nextpp == entry) {
						*nextpp =
						    entry->sfe_next_onsocket;
						break;
					}
				}
			}

			/* Pull entry off the filter list */
			for (nextpp = &entry->sfe_filter->sf_entry_head;
			    *nextpp; nextpp = &(*nextpp)->sfe_next_onfilter) {
				if (*nextpp == entry) {
					*nextpp = entry->sfe_next_onfilter;
					break;
				}
			}

			/*
			 * Release the filter -- may drop lock, but that's okay
			 */
			sflt_release_locked(entry->sfe_filter);
			entry->sfe_socket = NULL;
			entry->sfe_filter = NULL;
			kheap_free(KM_IFADDR, entry, sizeof(struct socket_filter_entry));
		}

		/* Drop the socket filter lock */
		lck_rw_unlock_exclusive(&sock_filter_lock);
	}
	/* NOTREACHED */
}

static int
sflt_attach_locked(struct socket *so, struct socket_filter *filter,
    int socklocked)
{
	int error = 0;
	struct socket_filter_entry *entry = NULL;

	if (sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	if (filter == NULL) {
		return ENOENT;
	}

	for (entry = so->so_filt; entry; entry = entry->sfe_next_onfilter) {
		if (entry->sfe_filter->sf_filter.sf_handle ==
		    filter->sf_filter.sf_handle) {
			return EEXIST;
		}
	}
	/* allocate the socket filter entry */
	entry = kheap_alloc(KM_IFADDR, sizeof(struct socket_filter_entry),
	    Z_WAITOK);
	if (entry == NULL) {
		return ENOMEM;
	}

	/* Initialize the socket filter entry */
	entry->sfe_cookie = NULL;
	entry->sfe_flags = SFEF_ATTACHED;
	entry->sfe_refcount = 1; /* corresponds to SFEF_ATTACHED flag set */

	/* Put the entry in the filter list */
	sflt_retain_locked(filter);
	entry->sfe_filter = filter;
	entry->sfe_next_onfilter = filter->sf_entry_head;
	filter->sf_entry_head = entry;

	/* Put the entry on the socket filter list */
	entry->sfe_socket = so;
	entry->sfe_next_onsocket = so->so_filt;
	so->so_filt = entry;

	if (entry->sfe_filter->sf_filter.sf_attach) {
		/* Retain the entry while we call attach */
		sflt_entry_retain(entry);

		/*
		 * Release the filter lock --
		 * callers must be aware we will do this
		 */
		lck_rw_unlock_exclusive(&sock_filter_lock);

		/* Unlock the socket */
		if (socklocked) {
			socket_unlock(so, 0);
		}

		/* It's finally safe to call the filter function */
		error = entry->sfe_filter->sf_filter.sf_attach(
			&entry->sfe_cookie, so);

		/* Lock the socket again */
		if (socklocked) {
			socket_lock(so, 0);
		}

		/* Lock the filters again */
		lck_rw_lock_exclusive(&sock_filter_lock);

		/*
		 * If the attach function returns an error,
		 * this filter must be detached
		 */
		if (error) {
			/* don't call sf_detach */
			entry->sfe_flags |= SFEF_NODETACH;
			sflt_detach_locked(entry);
		}

		/* Release the retain we held through the attach call */
		sflt_entry_release(entry);
	}

	return error;
}

errno_t
sflt_attach_internal(socket_t socket, sflt_handle handle)
{
	if (socket == NULL || handle == 0) {
		return EINVAL;
	}

	int result = EINVAL;

	lck_rw_lock_exclusive(&sock_filter_lock);

	struct socket_filter *filter = NULL;
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle) {
			break;
		}
	}

	if (filter) {
		result = sflt_attach_locked(socket, filter, 1);
	}

	lck_rw_unlock_exclusive(&sock_filter_lock);

	return result;
}

static void
sflt_detach_locked(struct socket_filter_entry *entry)
{
	if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
		entry->sfe_flags &= ~SFEF_ATTACHED;
		sflt_entry_release(entry);
	}
}

#pragma mark -- Socket Layer Hooks --

__private_extern__ void
sflt_initsock(struct socket *so)
{
	/*
	 * Point to the real protosw, as so_proto might have been
	 * pointed to a modified version.
	 */
	struct protosw *proto = so->so_proto->pr_protosw;

	lck_rw_lock_shared(&sock_filter_lock);
	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		/* Promote lock to exclusive */
		if (!lck_rw_lock_shared_to_exclusive(&sock_filter_lock)) {
			lck_rw_lock_exclusive(&sock_filter_lock);
		}

		/*
		 * Warning: A filter unregistering will be pulled out of
		 * the list. This could happen while we drop the lock in
		 * sflt_attach_locked or sflt_release_locked. For this
		 * reason we retain a reference on the filter (or next_filter)
		 * while calling this function. This protects us from a panic,
		 * but it could result in a socket being created without all
		 * of the global filters if we're attaching a filter as it
		 * is removed, if that's possible.
		 */
		struct socket_filter *filter =
		    TAILQ_FIRST(&proto->pr_filter_head);

		sflt_retain_locked(filter);

		while (filter) {
			struct socket_filter *filter_next;
			/*
			 * Warning: sflt_attach_locked
			 * will drop the lock
			 */
			sflt_attach_locked(so, filter, 0);

			filter_next = TAILQ_NEXT(filter, sf_protosw_next);
			if (filter_next) {
				sflt_retain_locked(filter_next);
			}

			/*
			 * Warning: sflt_release_locked may remove
			 * the filter from the queue
			 */
			sflt_release_locked(filter);
			filter = filter_next;
		}
	}
	lck_rw_done(&sock_filter_lock);
}

/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.
 */
__private_extern__ void
sflt_termsock(struct socket *so)
{
	lck_rw_lock_exclusive(&sock_filter_lock);

	struct socket_filter_entry *entry;

	while ((entry = so->so_filt) != NULL) {
		/* Pull filter off the socket */
		so->so_filt = entry->sfe_next_onsocket;
		entry->sfe_flags |= SFEF_NOSOCKET;

		/* Call detach */
		sflt_detach_locked(entry);

		/*
		 * On sflt_termsock, we can't return until the detach function
		 * has been called. Call the detach function - this is gross
		 * because the socket filter entry could be freed when we drop
		 * the lock, so we make copies on the stack and retain
		 * everything we need before dropping the lock.
		 */
		if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
		    entry->sfe_filter->sf_filter.sf_detach) {
			void *sfe_cookie = entry->sfe_cookie;
			struct socket_filter *sfe_filter = entry->sfe_filter;

			/* Retain the socket filter */
			sflt_retain_locked(sfe_filter);

			/* Mark that we've called the detach function */
			entry->sfe_flags |= SFEF_NODETACH;

			/* Drop the lock before calling the detach function */
			lck_rw_unlock_exclusive(&sock_filter_lock);
			sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
			lck_rw_lock_exclusive(&sock_filter_lock);

			/* Release the filter */
			sflt_release_locked(sfe_filter);
		}
	}

	lck_rw_unlock_exclusive(&sock_filter_lock);
}


static void
sflt_notify_internal(struct socket *so, sflt_event_t event, void *param,
    sflt_handle handle)
{
	if (so->so_filt == NULL) {
		return;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_notify &&
		    ((handle && entry->sfe_filter->sf_filter.sf_handle !=
		    handle) || !handle)) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Finally call the filter */
			entry->sfe_filter->sf_filter.sf_notify(
				entry->sfe_cookie, so, event, param);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked != 0) {
		socket_lock(so, 0);
	}
}

__private_extern__ void
sflt_notify(struct socket *so, sflt_event_t event, void *param)
{
	sflt_notify_internal(so, event, param, 0);
}

static void
sflt_notify_after_register(struct socket *so, sflt_event_t event,
    sflt_handle handle)
{
	sflt_notify_internal(so, event, NULL, handle);
}

__private_extern__ int
sflt_ioctl(struct socket *so, u_long cmd, caddr_t data)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_ioctl) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_ioctl(
				entry->sfe_cookie, so, cmd, data);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_bind(struct socket *so, const struct sockaddr *nam)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_bind) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_bind(
				entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again and
			 * release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_listen(struct socket *so)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_listen) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_listen(
				entry->sfe_cookie, so);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_accept(struct socket *head, struct socket *so,
    const struct sockaddr *local, const struct sockaddr *remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_accept) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_accept(
				entry->sfe_cookie, head, so, local, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_getsockname(struct socket *so, struct sockaddr **local)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getsockname) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getsockname(
				entry->sfe_cookie, so, local);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_getpeername(struct socket *so, struct sockaddr **remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getpeername) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getpeername(
				entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_connectin(struct socket *so, const struct sockaddr *remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_in) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_in(
				entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

static int
sflt_connectout_common(struct socket *so, const struct sockaddr *nam)
{
	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_out) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_out(
				entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_connectout(struct socket *so, const struct sockaddr *nam)
{
	char buf[SOCK_MAXADDRLEN];
	struct sockaddr *sa;
	int error;

	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	/*
	 * Workaround for rdar://23362120
	 * Always pass a buffer that can hold an IPv6 socket address
	 */
	bzero(buf, sizeof(buf));
	bcopy(nam, buf, nam->sa_len);
	sa = (struct sockaddr *)buf;

	error = sflt_connectout_common(so, sa);
	if (error != 0) {
		return error;
	}

	/*
	 * If the address was modified, copy it back
	 */
	if (bcmp(sa, nam, nam->sa_len) != 0) {
		bcopy(sa, (struct sockaddr *)(uintptr_t)nam, nam->sa_len);
	}

	return 0;
}

__private_extern__ int
sflt_setsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_setoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_setoption(
				entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_getsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getoption(
				entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int setsendthread = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW) {
			continue;
		}
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_out) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				if (so->so_send_filt_thread == NULL) {
					setsendthread = 1;
					so->so_send_filt_thread =
					    current_thread();
				}
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_out(
				entry->sfe_cookie, so, to, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
		if (setsendthread) {
			so->so_send_filt_thread = NULL;
		}
	}

	return error;
}

__private_extern__ int
sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int error = 0;
	int unlocked = 0;

	lck_rw_lock_shared(&sock_filter_lock);

	for (entry = so->so_filt; entry && (error == 0);
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW) {
			continue;
		}
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_in) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_in(
				entry->sfe_cookie, so, from, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

#pragma mark -- KPI --

errno_t
sflt_attach(socket_t socket, sflt_handle handle)
{
	socket_lock(socket, 1);
	errno_t result = sflt_attach_internal(socket, handle);
	socket_unlock(socket, 1);
	return result;
}

errno_t
sflt_detach(socket_t socket, sflt_handle handle)
{
	struct socket_filter_entry *entry;
	errno_t result = 0;

	if (socket == NULL || handle == 0) {
		return EINVAL;
	}

	lck_rw_lock_exclusive(&sock_filter_lock);
	for (entry = socket->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if (entry->sfe_filter->sf_filter.sf_handle == handle &&
		    (entry->sfe_flags & SFEF_ATTACHED) != 0) {
			break;
		}
	}

	if (entry != NULL) {
		sflt_detach_locked(entry);
	}
	lck_rw_unlock_exclusive(&sock_filter_lock);

	return result;
}

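/*
 * Illustrative sketch (not part of this file): how a network kernel
 * extension typically drives the registration KPI implemented below.
 * The handle value and callback bodies are hypothetical; the KPI calls
 * (sflt_register/sflt_unregister) and the sflt_filter fields shown are
 * the ones declared in <sys/kpi_socketfilter.h>.
 *
 *	static errno_t
 *	example_attach(void **cookie, socket_t so)
 *	{
 *		*cookie = NULL;		// per-socket state could go here
 *		return 0;		// 0 = attach to this socket
 *	}
 *
 *	static void
 *	example_detach(void *cookie, socket_t so)
 *	{
 *		// free any per-socket state referenced by cookie
 *	}
 *
 *	static const struct sflt_filter example_filter = {
 *		.sf_handle = 0x45584D50,	// hypothetical unique handle
 *		.sf_flags  = SFLT_GLOBAL,	// attach to every new socket
 *		.sf_name   = "com.example.sfltexample",
 *		.sf_attach = example_attach,
 *		.sf_detach = example_detach,
 *	};
 *
 *	// Register for IPv4 TCP sockets; sflt_initsock() will then attach
 *	// the filter to each new socket of that protocol.
 *	errno_t err = sflt_register(&example_filter, PF_INET, SOCK_STREAM,
 *	    IPPROTO_TCP);
 *
 *	// Later, on unload:
 *	sflt_unregister(0x45584D50);
 */
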
struct solist {
	struct solist *next;
	struct socket *so;
};

static errno_t
sflt_register_common(const struct sflt_filter *filter, int domain, int type,
    int protocol, bool is_internal)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr;
	unsigned int len;
	struct socket *so;
	struct inpcb *inp;
	struct solist *solisthead = NULL, *solist = NULL;

	if ((domain != PF_INET) && (domain != PF_INET6)) {
		return ENOTSUP;
	}

	pr = pffindproto(domain, protocol, type);
	if (pr == NULL) {
		return ENOENT;
	}

	if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
	    filter->sf_handle == 0 || filter->sf_name == NULL) {
		return EINVAL;
	}

	/* Allocate the socket filter */
	sock_filt = kheap_alloc(KM_IFADDR,
	    sizeof(struct socket_filter), Z_WAITOK | Z_ZERO);
	if (sock_filt == NULL) {
		return ENOBUFS;
	}

	/* Legacy sflt_filter length; current structure minus extended */
	len = sizeof(*filter) - sizeof(struct sflt_filter_ext);

	/*
	 * Include extended fields if filter defines SFLT_EXTENDED.
	 * We've zeroed out our internal sflt_filter placeholder,
	 * so any unused portion would have been taken care of.
	 */
	if (filter->sf_flags & SFLT_EXTENDED) {
		unsigned int ext_len = filter->sf_len;

		if (ext_len > sizeof(struct sflt_filter_ext)) {
			ext_len = sizeof(struct sflt_filter_ext);
		}

		len += ext_len;
	}
	bcopy(filter, &sock_filt->sf_filter, len);

	lck_rw_lock_exclusive(&sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle ==
		    sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
			    sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
		os_ref_init(&sock_filt->sf_refcount, NULL);

		OSIncrementAtomic64(&net_api_stats.nas_sfltr_register_count);
		INC_ATOMIC_INT64_LIM(net_api_stats.nas_sfltr_register_total);
		if (is_internal) {
			INC_ATOMIC_INT64_LIM(net_api_stats.nas_sfltr_register_os_total);
		}
	}
	lck_rw_unlock_exclusive(&sock_filter_lock);

	if (match != NULL) {
		kheap_free(KM_IFADDR, sock_filt, sizeof(struct socket_filter));
		return EEXIST;
	}

	if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY)) {
		return error;
	}

	/*
	 * Setup the filter on the TCP and UDP sockets already created.
	 */
#define SOLIST_ADD(_so) do { \
	solist->next = solisthead; \
	sock_retain((_so)); \
	solist->so = (_so); \
	solisthead = solist; \
} while (0)
	if (protocol == IPPROTO_TCP) {
		lck_rw_lock_shared(tcbinfo.ipi_lock);
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type)) {
				continue;
			}
			solist = kheap_alloc(KHEAP_TEMP, sizeof(struct solist), Z_NOWAIT);
			if (!solist) {
				continue;
			}
			SOLIST_ADD(so);
		}
		lck_rw_done(tcbinfo.ipi_lock);
	} else if (protocol == IPPROTO_UDP) {
		lck_rw_lock_shared(udbinfo.ipi_lock);
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type)) {
				continue;
			}
			solist = kheap_alloc(KHEAP_TEMP, sizeof(struct solist), Z_NOWAIT);
			if (!solist) {
				continue;
			}
			SOLIST_ADD(so);
		}
		lck_rw_done(udbinfo.ipi_lock);
	}
	/* XXX it's possible to walk the raw socket list as well */
#undef SOLIST_ADD

	while (solisthead) {
		sflt_handle handle = filter->sf_handle;

		so = solisthead->so;
		socket_lock(so, 0);
		sflt_initsock(so);
		if (so->so_state & SS_ISCONNECTING) {
			sflt_notify_after_register(so, sock_evt_connecting,
			    handle);
		} else if (so->so_state & SS_ISCONNECTED) {
			sflt_notify_after_register(so, sock_evt_connected,
			    handle);
		} else if ((so->so_state &
		    (SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE)) ==
		    (SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE)) {
			sflt_notify_after_register(so, sock_evt_disconnecting,
			    handle);
		} else if ((so->so_state &
		    (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED)) ==
		    (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED)) {
			sflt_notify_after_register(so, sock_evt_disconnected,
			    handle);
		} else if (so->so_state & SS_CANTSENDMORE) {
			sflt_notify_after_register(so, sock_evt_cantsendmore,
			    handle);
		} else if (so->so_state & SS_CANTRCVMORE) {
			sflt_notify_after_register(so, sock_evt_cantrecvmore,
			    handle);
		}
		socket_unlock(so, 0);
		/* XXX no easy way to post the sock_evt_closing event */
		sock_release(so);
		solist = solisthead;
		solisthead = solisthead->next;
		kheap_free(KHEAP_TEMP, solist, sizeof(struct solist));
	}

	return error;
}

errno_t
sflt_register_internal(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	return sflt_register_common(filter, domain, type, protocol, true);
}

errno_t
sflt_register(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	return sflt_register_common(filter, domain, type, protocol, false);
}

errno_t
sflt_unregister(sflt_handle handle)
{
	struct socket_filter *filter;
	lck_rw_lock_exclusive(&sock_filter_lock);

	/* Find the entry by the handle */
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle) {
			break;
		}
	}

	if (filter) {
		VERIFY(OSDecrementAtomic64(&net_api_stats.nas_sfltr_register_count) > 0);

		/* Remove it from the global list */
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

		/* Remove it from the protosw list */
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head,
			    filter, sf_protosw_next);
		}

		/* Detach from any sockets */
		struct socket_filter_entry *entry = NULL;

		for (entry = filter->sf_entry_head; entry;
		    entry = entry->sfe_next_onfilter) {
			sflt_detach_locked(entry);
		}

		/* Release the filter */
		sflt_release_locked(filter);
	}

	lck_rw_unlock_exclusive(&sock_filter_lock);

	if (filter == NULL) {
		return ENOENT;
	}

	return 0;
}

errno_t
sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int error = 0;

	if (so == NULL || data == NULL) {
		return EINVAL;
	}

	if (flags & sock_data_filt_flag_oob) {
		return ENOTSUP;
	}

	socket_lock(so, 1);

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		error = ENOTSUP;
		goto done;
	}

	if (from) {
		if (sbappendaddr(&so->so_rcv,
		    (struct sockaddr *)(uintptr_t)from, data, control, NULL)) {
			sorwakeup(so);
		}
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL)) {
			sorwakeup(so);
		}
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf *)data)) {
			sorwakeup(so);
		}
		goto done;
	}

	if (sbappend(&so->so_rcv, data)) {
		sorwakeup(so);
	}
done:
	socket_unlock(so, 1);
	return error;
}

errno_t
sock_inject_data_out(socket_t so, const struct sockaddr *to, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int sosendflags = 0;

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		return ENOTSUP;
	}

	if (flags & sock_data_filt_flag_oob) {
		sosendflags = MSG_OOB;
	}
	return sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
	    data, control, sosendflags);
}

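/*
 * Illustrative sketch (not part of this file): sock_inject_data_in() above
 * is the re-injection half of the "swallow and defer" pattern. A filter's
 * sf_data_in callback can return EJUSTRETURN to take ownership of the
 * mbufs, hold them, and later hand them back to the stack:
 *
 *	static errno_t
 *	example_data_in(void *cookie, socket_t so, const struct sockaddr *from,
 *	    mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags)
 *	{
 *		// Hypothetical policy check; returning EJUSTRETURN tells the
 *		// stack that the filter has swallowed *data and *control.
 *		if (example_should_defer(cookie, *data)) {
 *			example_queue_for_later(cookie, so, *data, *control, flags);
 *			return EJUSTRETURN;
 *		}
 *		return 0;	// pass the data through unchanged
 *	}
 *
 *	// Later, once the deferred data has been approved, re-inject it so
 *	// it lands in the socket's receive buffer as usual:
 *	errno_t err = sock_inject_data_in(so, NULL, data, control, 0);
 *
 * example_should_defer() and example_queue_for_later() are placeholders for
 * whatever bookkeeping the filter performs.
 */
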
sockopt_dir
sockopt_direction(sockopt_t sopt)
{
	return (sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set;
}

int
sockopt_level(sockopt_t sopt)
{
	return sopt->sopt_level;
}

int
sockopt_name(sockopt_t sopt)
{
	return sopt->sopt_name;
}

size_t
sockopt_valsize(sockopt_t sopt)
{
	return sopt->sopt_valsize;
}

errno_t
sockopt_copyin(sockopt_t sopt, void *data, size_t len)
{
	return sooptcopyin(sopt, data, len, len);
}

errno_t
sockopt_copyout(sockopt_t sopt, void *data, size_t len)
{
	return sooptcopyout(sopt, data, len);
}
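
/*
 * Illustrative sketch (not part of this file): the sockopt accessors above
 * are what a filter's sf_setoption callback would use to inspect an option
 * without touching struct sockopt directly. The option matched here is
 * arbitrary and the callback body is hypothetical:
 *
 *	static errno_t
 *	example_setoption(void *cookie, socket_t so, sockopt_t opt)
 *	{
 *		if (sockopt_direction(opt) == sockopt_set &&
 *		    sockopt_level(opt) == SOL_SOCKET &&
 *		    sockopt_name(opt) == SO_NOSIGPIPE &&
 *		    sockopt_valsize(opt) == sizeof(int)) {
 *			int val;
 *			errno_t err = sockopt_copyin(opt, &val, sizeof(val));
 *			if (err != 0) {
 *				return err;
 *			}
 *			// act on val as needed
 *		}
 *		return 0;	// let setsockopt() proceed normally
 *	}
 */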