/*
 * Copyright (c) 2003-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/proc.h>
#include <kern/locks.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <net/kext_net.h>
#include <net/if.h>
#include <net/net_api_stats.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <os/refcnt.h>

#include <stdbool.h>
#include <string.h>

#define SFEF_ATTACHED   0x1     /* SFE is on socket list */
#define SFEF_NODETACH   0x2     /* Detach should not be called */
#define SFEF_NOSOCKET   0x4     /* Socket is gone */

/*
 * If you need accounting for KM_IFADDR consider using
 * KALLOC_HEAP_DEFINE to define a view.
 */
#define KM_IFADDR       KHEAP_DEFAULT
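
/*
 * A socket_filter_entry joins one socket to one registered filter.  Each
 * entry sits on two lists at once: the socket's so_filt chain (linked
 * through sfe_next_onsocket) and the owning filter's sf_entry_head chain
 * (linked through sfe_next_onfilter).  Entries are reference counted;
 * when the last reference drops, the entry is queued on a cleanup list
 * (sfe_next_oncleanup) and a dedicated thread runs the detach callback
 * and unlinks it, so no filter callback is invoked with the socket
 * filter lock held.
 */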
struct socket_filter_entry {
	struct socket_filter_entry      *sfe_next_onsocket;
	struct socket_filter_entry      *sfe_next_onfilter;
	struct socket_filter_entry      *sfe_next_oncleanup;

	struct socket_filter            *sfe_filter;
	struct socket                   *sfe_socket;
	void                            *sfe_cookie;

	uint32_t                        sfe_flags;
	int32_t                         sfe_refcount;
};

struct socket_filter {
	TAILQ_ENTRY(socket_filter)      sf_protosw_next;
	TAILQ_ENTRY(socket_filter)      sf_global_next;
	struct socket_filter_entry      *sf_entry_head;

	struct protosw                  *sf_proto;
	struct sflt_filter              sf_filter;
	struct os_refcnt                sf_refcount;
};

TAILQ_HEAD(socket_filter_list, socket_filter);

static LCK_GRP_DECLARE(sock_filter_lock_grp, "socket filter lock");
static LCK_RW_DECLARE(sock_filter_lock, &sock_filter_lock_grp);
static LCK_MTX_DECLARE(sock_filter_cleanup_lock, &sock_filter_lock_grp);

static struct socket_filter_list sock_filter_head =
    TAILQ_HEAD_INITIALIZER(sock_filter_head);
static struct socket_filter_entry *sock_filter_cleanup_entries = NULL;
static thread_t sock_filter_cleanup_thread = NULL;

static void sflt_cleanup_thread(void *, wait_result_t);
static void sflt_detach_locked(struct socket_filter_entry *entry);

#undef sflt_register
static errno_t sflt_register_common(const struct sflt_filter *filter, int domain,
    int type, int protocol, bool is_internal);
errno_t sflt_register(const struct sflt_filter *filter, int domain,
    int type, int protocol);


#pragma mark -- Internal State Management --

__private_extern__ int
sflt_permission_check(struct inpcb *inp)
{
	/* Only IPv4 or IPv6 sockets can bypass filters */
	if (!(inp->inp_vflag & INP_IPV4) &&
	    !(inp->inp_vflag & INP_IPV6)) {
		return 0;
	}
	/* Sockets that have this entitlement bypass socket filters. */
	if (INP_INTCOPROC_ALLOWED(inp)) {
		return 1;
	}
	/* Sockets bound to an intcoproc interface bypass socket filters. */
	if ((inp->inp_flags & INP_BOUND_IF) &&
	    IFNET_IS_INTCOPROC(inp->inp_boundifp)) {
		return 1;
	}
#if NECP
	/*
	 * Make sure that the NECP policy is populated.
	 * If result is not populated, the policy ID will be
	 * NECP_KERNEL_POLICY_ID_NONE. Note that if the result
	 * is populated, but there was no match, it will be
	 * NECP_KERNEL_POLICY_ID_NO_MATCH.
	 * Do not call inp_update_necp_policy() to avoid scoping
	 * a socket prior to calls to bind().
	 */
	if (inp->inp_policyresult.policy_id == NECP_KERNEL_POLICY_ID_NONE) {
		necp_socket_find_policy_match(inp, NULL, NULL, 0);
	}

	/* If the filter unit is marked to be "no filter", bypass filters */
	if (inp->inp_policyresult.results.filter_control_unit ==
	    NECP_FILTER_UNIT_NO_FILTER) {
		return 1;
	}
#endif /* NECP */
	return 0;
}

static void
sflt_retain_locked(struct socket_filter *filter)
{
	os_ref_retain_locked(&filter->sf_refcount);
}

static void
sflt_release_locked(struct socket_filter *filter)
{
	if (os_ref_release_locked(&filter->sf_refcount) == 0) {
		/* Call the unregistered function */
		if (filter->sf_filter.sf_unregistered) {
			lck_rw_unlock_exclusive(&sock_filter_lock);
			filter->sf_filter.sf_unregistered(
				filter->sf_filter.sf_handle);
			lck_rw_lock_exclusive(&sock_filter_lock);
		}

		/* Free the entry */
		kheap_free(KM_IFADDR, filter, sizeof(struct socket_filter));
	}
}

static void
sflt_entry_retain(struct socket_filter_entry *entry)
{
	if (OSIncrementAtomic(&entry->sfe_refcount) <= 0) {
		panic("sflt_entry_retain - sfe_refcount <= 0\n");
		/* NOTREACHED */
	}
}

static void
sflt_entry_release(struct socket_filter_entry *entry)
{
	SInt32 old = OSDecrementAtomic(&entry->sfe_refcount);
	if (old == 1) {
		/* That was the last reference */

		/* Take the cleanup lock */
		lck_mtx_lock(&sock_filter_cleanup_lock);

		/* Put this item on the cleanup list */
		entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = entry;

		/* If the item is the first item in the list */
		if (entry->sfe_next_oncleanup == NULL) {
			if (sock_filter_cleanup_thread == NULL) {
				/* Create a thread */
				kernel_thread_start(sflt_cleanup_thread,
				    NULL, &sock_filter_cleanup_thread);
			} else {
				/* Wakeup the thread */
				wakeup(&sock_filter_cleanup_entries);
			}
		}

		/* Drop the cleanup lock */
		lck_mtx_unlock(&sock_filter_cleanup_lock);
	} else if (old <= 0) {
		panic("sflt_entry_release - sfe_refcount (%d) <= 0\n",
		    (int)old);
		/* NOTREACHED */
	}
}
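
/*
 * Last-reference cleanup of a socket_filter_entry is deferred to the
 * thread below: sflt_entry_release() can run while the socket filter
 * lock is held (often shared, from one of the hook paths), where it is
 * not safe to call the filter's sf_detach or to unlink and free the
 * entry.  The entry is queued instead and torn down here under the
 * exclusive lock.
 */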
__attribute__((noreturn))
static void
sflt_cleanup_thread(void *blah, wait_result_t blah2)
{
#pragma unused(blah, blah2)
	while (1) {
		lck_mtx_lock(&sock_filter_cleanup_lock);
		while (sock_filter_cleanup_entries == NULL) {
			/* Sleep until we've got something better to do */
			msleep(&sock_filter_cleanup_entries,
			    &sock_filter_cleanup_lock, PWAIT,
			    "sflt_cleanup", NULL);
		}

		/* Pull the current list of dead items */
		struct socket_filter_entry *dead = sock_filter_cleanup_entries;
		sock_filter_cleanup_entries = NULL;

		/* Drop the lock */
		lck_mtx_unlock(&sock_filter_cleanup_lock);

		/* Take the socket filter lock */
		lck_rw_lock_exclusive(&sock_filter_lock);

		/* Cleanup every dead item */
		struct socket_filter_entry *entry;
		for (entry = dead; entry; entry = dead) {
			struct socket_filter_entry **nextpp;

			dead = entry->sfe_next_oncleanup;

			/* Call detach function if necessary - drop the lock */
			if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
			    entry->sfe_filter->sf_filter.sf_detach) {
				entry->sfe_flags |= SFEF_NODETACH;
				lck_rw_unlock_exclusive(&sock_filter_lock);

				/*
				 * Warning - passing a potentially
				 * dead socket may be bad
				 */
				entry->sfe_filter->sf_filter.sf_detach(
					entry->sfe_cookie, entry->sfe_socket);

				lck_rw_lock_exclusive(&sock_filter_lock);
			}

			/*
			 * Pull entry off the socket list --
			 * if the socket still exists
			 */
			if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) {
				for (nextpp = &entry->sfe_socket->so_filt;
				    *nextpp;
				    nextpp = &(*nextpp)->sfe_next_onsocket) {
					if (*nextpp == entry) {
						*nextpp =
						    entry->sfe_next_onsocket;
						break;
					}
				}
			}

			/* Pull entry off the filter list */
			for (nextpp = &entry->sfe_filter->sf_entry_head;
			    *nextpp; nextpp = &(*nextpp)->sfe_next_onfilter) {
				if (*nextpp == entry) {
					*nextpp = entry->sfe_next_onfilter;
					break;
				}
			}

			/*
			 * Release the filter -- may drop lock, but that's okay
			 */
			sflt_release_locked(entry->sfe_filter);
			entry->sfe_socket = NULL;
			entry->sfe_filter = NULL;
			kheap_free(KM_IFADDR, entry, sizeof(struct socket_filter_entry));
		}

		/* Drop the socket filter lock */
		lck_rw_unlock_exclusive(&sock_filter_lock);
	}
	/* NOTREACHED */
}

static int
sflt_attach_locked(struct socket *so, struct socket_filter *filter,
    int socklocked)
{
	int error = 0;
	struct socket_filter_entry *entry = NULL;

	if (sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	if (filter == NULL) {
		return ENOENT;
	}

	for (entry = so->so_filt; entry; entry = entry->sfe_next_onfilter) {
		if (entry->sfe_filter->sf_filter.sf_handle ==
		    filter->sf_filter.sf_handle) {
			return EEXIST;
		}
	}
	/* allocate the socket filter entry */
	entry = kheap_alloc(KM_IFADDR, sizeof(struct socket_filter_entry),
	    Z_WAITOK);
	if (entry == NULL) {
		return ENOMEM;
	}

	/* Initialize the socket filter entry */
	entry->sfe_cookie = NULL;
	entry->sfe_flags = SFEF_ATTACHED;
	entry->sfe_refcount = 1; /* corresponds to SFEF_ATTACHED flag set */

	/* Put the entry in the filter list */
	sflt_retain_locked(filter);
	entry->sfe_filter = filter;
	entry->sfe_next_onfilter = filter->sf_entry_head;
	filter->sf_entry_head = entry;

	/* Put the entry on the socket filter list */
	entry->sfe_socket = so;
	entry->sfe_next_onsocket = so->so_filt;
	so->so_filt = entry;

	if (entry->sfe_filter->sf_filter.sf_attach) {
		/* Retain the entry while we call attach */
		sflt_entry_retain(entry);

		/*
		 * Release the filter lock --
		 * callers must be aware we will do this
		 */
		lck_rw_unlock_exclusive(&sock_filter_lock);

		/* Unlock the socket */
		if (socklocked) {
			socket_unlock(so, 0);
		}

		/* It's finally safe to call the filter function */
		error = entry->sfe_filter->sf_filter.sf_attach(
			&entry->sfe_cookie, so);

		/* Lock the socket again */
		if (socklocked) {
			socket_lock(so, 0);
		}

		/* Lock the filters again */
		lck_rw_lock_exclusive(&sock_filter_lock);

		/*
		 * If the attach function returns an error,
		 * this filter must be detached
		 */
		if (error) {
			/* don't call sf_detach */
			entry->sfe_flags |= SFEF_NODETACH;
			sflt_detach_locked(entry);
		}

		/* Release the retain we held through the attach call */
		sflt_entry_release(entry);
	}

	return error;
}

errno_t
sflt_attach_internal(socket_t socket, sflt_handle handle)
{
	if (socket == NULL || handle == 0) {
		return EINVAL;
	}

	int result = EINVAL;

	lck_rw_lock_exclusive(&sock_filter_lock);

	struct socket_filter *filter = NULL;
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle) {
			break;
		}
	}

	if (filter) {
		result = sflt_attach_locked(socket, filter, 1);
	}

	lck_rw_unlock_exclusive(&sock_filter_lock);

	return result;
}
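
/*
 * Clears SFEF_ATTACHED and drops the reference that flag represents.
 * Nothing is freed here; if that was the last reference,
 * sflt_entry_release() queues the entry for the cleanup thread, which
 * performs the sf_detach callback and the actual unlinking.
 */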
static void
sflt_detach_locked(struct socket_filter_entry *entry)
{
	if ((entry->sfe_flags & SFEF_ATTACHED) != 0) {
		entry->sfe_flags &= ~SFEF_ATTACHED;
		sflt_entry_release(entry);
	}
}

#pragma mark -- Socket Layer Hooks --

__private_extern__ void
sflt_initsock(struct socket *so)
{
	/*
	 * Point to the real protosw, as so_proto might have been
	 * pointed to a modified version.
	 */
	struct protosw *proto = so->so_proto->pr_protosw;

	lck_rw_lock_shared(&sock_filter_lock);
	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		/* Promote lock to exclusive */
		if (!lck_rw_lock_shared_to_exclusive(&sock_filter_lock)) {
			lck_rw_lock_exclusive(&sock_filter_lock);
		}

		/*
		 * Warning: A filter unregistering will be pulled out of
		 * the list. This could happen while we drop the lock in
		 * sflt_attach_locked or sflt_release_locked. For this
		 * reason we retain a reference on the filter (or next_filter)
		 * while calling this function. This protects us from a panic,
		 * but it could result in a socket being created without all
		 * of the global filters if we're attaching a filter as it
		 * is removed, if that's possible.
		 */
		struct socket_filter *filter =
		    TAILQ_FIRST(&proto->pr_filter_head);

		sflt_retain_locked(filter);

		while (filter) {
			struct socket_filter *filter_next;
			/*
			 * Warning: sflt_attach_locked
			 * will drop the lock
			 */
			sflt_attach_locked(so, filter, 0);

			filter_next = TAILQ_NEXT(filter, sf_protosw_next);
			if (filter_next) {
				sflt_retain_locked(filter_next);
			}

			/*
			 * Warning: sflt_release_locked may remove
			 * the filter from the queue
			 */
			sflt_release_locked(filter);
			filter = filter_next;
		}
	}
	lck_rw_done(&sock_filter_lock);
}

/*
 * sflt_termsock
 *
 * Detaches all filters from the socket.
 */
__private_extern__ void
sflt_termsock(struct socket *so)
{
	lck_rw_lock_exclusive(&sock_filter_lock);

	struct socket_filter_entry *entry;

	while ((entry = so->so_filt) != NULL) {
		/* Pull filter off the socket */
		so->so_filt = entry->sfe_next_onsocket;
		entry->sfe_flags |= SFEF_NOSOCKET;

		/* Call detach */
		sflt_detach_locked(entry);

		/*
		 * On sflt_termsock, we can't return until the detach function
		 * has been called. Call the detach function - this is gross
		 * because the socket filter entry could be freed when we drop
		 * the lock, so we make copies on the stack and retain
		 * everything we need before dropping the lock.
		 */
		if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
		    entry->sfe_filter->sf_filter.sf_detach) {
			void *sfe_cookie = entry->sfe_cookie;
			struct socket_filter *sfe_filter = entry->sfe_filter;

			/* Retain the socket filter */
			sflt_retain_locked(sfe_filter);

			/* Mark that we've called the detach function */
			entry->sfe_flags |= SFEF_NODETACH;

			/* Drop the lock before calling the detach function */
			lck_rw_unlock_exclusive(&sock_filter_lock);
			sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
			lck_rw_lock_exclusive(&sock_filter_lock);

			/* Release the filter */
			sflt_release_locked(sfe_filter);
		}
	}

	lck_rw_unlock_exclusive(&sock_filter_lock);
}


static void
sflt_notify_internal(struct socket *so, sflt_event_t event, void *param,
    sflt_handle handle)
{
	if (so->so_filt == NULL) {
		return;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_notify &&
		    ((handle && entry->sfe_filter->sf_filter.sf_handle !=
		    handle) || !handle)) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Finally call the filter */
			entry->sfe_filter->sf_filter.sf_notify(
				entry->sfe_cookie, so, event, param);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked != 0) {
		socket_lock(so, 0);
	}
}

__private_extern__ void
sflt_notify(struct socket *so, sflt_event_t event, void *param)
{
	sflt_notify_internal(so, event, param, 0);
}

static void
sflt_notify_after_register(struct socket *so, sflt_event_t event,
    sflt_handle handle)
{
	sflt_notify_internal(so, event, NULL, handle);
}
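
/*
 * sflt_notify_internal() above and each of the hook functions below
 * follow the same dispatch pattern: walk the socket's filter list under
 * the shared filter lock, and for every attached entry that implements
 * the callback, retain the entry, drop the filter lock (and the socket
 * lock, the first time through), call the filter, then retake the lock
 * and release the entry.  The socket lock is reacquired once the walk
 * is finished.
 */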
__private_extern__ int
sflt_ioctl(struct socket *so, u_long cmd, caddr_t data)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_ioctl) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_ioctl(
				entry->sfe_cookie, so, cmd, data);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_bind(struct socket *so, const struct sockaddr *nam)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_bind) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_bind(
				entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again and
			 * release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_listen(struct socket *so)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_listen) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_listen(
				entry->sfe_cookie, so);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_accept(struct socket *head, struct socket *so,
    const struct sockaddr *local, const struct sockaddr *remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_accept) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_accept(
				entry->sfe_cookie, head, so, local, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_getsockname(struct socket *so, struct sockaddr **local)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getsockname) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getsockname(
				entry->sfe_cookie, so, local);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_getpeername(struct socket *so, struct sockaddr **remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getpeername) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getpeername(
				entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_connectin(struct socket *so, const struct sockaddr *remote)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_in) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_in(
				entry->sfe_cookie, so, remote);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

static int
sflt_connectout_common(struct socket *so, const struct sockaddr *nam)
{
	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_connect_out) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_connect_out(
				entry->sfe_cookie, so, nam);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_connectout(struct socket *so, const struct sockaddr *nam)
{
	char buf[SOCK_MAXADDRLEN];
	struct sockaddr *sa;
	int error;

	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	/*
	 * Workaround for rdar://23362120
	 * Always pass a buffer that can hold an IPv6 socket address
	 */
	bzero(buf, sizeof(buf));
	bcopy(nam, buf, nam->sa_len);
	sa = (struct sockaddr *)buf;

	error = sflt_connectout_common(so, sa);
	if (error != 0) {
		return error;
	}

	/*
	 * If the address was modified, copy it back
	 */
	if (bcmp(sa, nam, nam->sa_len) != 0) {
		bcopy(sa, (struct sockaddr *)(uintptr_t)nam, nam->sa_len);
	}

	return 0;
}

__private_extern__ int
sflt_setsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_setoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_setoption(
				entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_getsockopt(struct socket *so, struct sockopt *sopt)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_getoption) {
			/*
			 * Retain the filter entry and release
			 * the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_getoption(
				entry->sfe_cookie, so, sopt);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

__private_extern__ int
sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int unlocked = 0;
	int setsendthread = 0;
	int error = 0;

	lck_rw_lock_shared(&sock_filter_lock);
	for (entry = so->so_filt; entry && error == 0;
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW) {
			continue;
		}
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_out) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				if (so->so_send_filt_thread == NULL) {
					setsendthread = 1;
					so->so_send_filt_thread =
					    current_thread();
				}
				socket_unlock(so, 0);
				unlocked = 1;
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_out(
				entry->sfe_cookie, so, to, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
		if (setsendthread) {
			so->so_send_filt_thread = NULL;
		}
	}

	return error;
}

__private_extern__ int
sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data,
    mbuf_t *control, sflt_data_flag_t flags)
{
	if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) {
		return 0;
	}

	struct socket_filter_entry *entry;
	int error = 0;
	int unlocked = 0;

	lck_rw_lock_shared(&sock_filter_lock);

	for (entry = so->so_filt; entry && (error == 0);
	    entry = entry->sfe_next_onsocket) {
		/* skip if this is a subflow socket */
		if (so->so_flags & SOF_MP_SUBFLOW) {
			continue;
		}
		if ((entry->sfe_flags & SFEF_ATTACHED) &&
		    entry->sfe_filter->sf_filter.sf_data_in) {
			/*
			 * Retain the filter entry and
			 * release the socket filter lock
			 */
			sflt_entry_retain(entry);
			lck_rw_unlock_shared(&sock_filter_lock);

			/* If the socket isn't already unlocked, unlock it */
			if (unlocked == 0) {
				unlocked = 1;
				socket_unlock(so, 0);
			}

			/* Call the filter */
			error = entry->sfe_filter->sf_filter.sf_data_in(
				entry->sfe_cookie, so, from, data, control, flags);

			/*
			 * Take the socket filter lock again
			 * and release the entry
			 */
			lck_rw_lock_shared(&sock_filter_lock);
			sflt_entry_release(entry);
		}
	}
	lck_rw_unlock_shared(&sock_filter_lock);

	if (unlocked) {
		socket_lock(so, 0);
	}

	return error;
}

#pragma mark -- KPI --

errno_t
sflt_attach(socket_t socket, sflt_handle handle)
{
	socket_lock(socket, 1);
	errno_t result = sflt_attach_internal(socket, handle);
	socket_unlock(socket, 1);
	return result;
}

errno_t
sflt_detach(socket_t socket, sflt_handle handle)
{
	struct socket_filter_entry *entry;
	errno_t result = 0;

	if (socket == NULL || handle == 0) {
		return EINVAL;
	}

	lck_rw_lock_exclusive(&sock_filter_lock);
	for (entry = socket->so_filt; entry; entry = entry->sfe_next_onsocket) {
		if (entry->sfe_filter->sf_filter.sf_handle == handle &&
		    (entry->sfe_flags & SFEF_ATTACHED) != 0) {
			break;
		}
	}

	if (entry != NULL) {
		sflt_detach_locked(entry);
	}
	lck_rw_unlock_exclusive(&sock_filter_lock);

	return result;
}

struct solist {
	struct solist *next;
	struct socket *so;
};

static errno_t
sflt_register_common(const struct sflt_filter *filter, int domain, int type,
    int protocol, bool is_internal)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr;
	unsigned int len;
	struct socket *so;
	struct inpcb *inp;
	struct solist *solisthead = NULL, *solist = NULL;

	if ((domain != PF_INET) && (domain != PF_INET6)) {
		return ENOTSUP;
	}

	pr = pffindproto(domain, protocol, type);
	if (pr == NULL) {
		return ENOENT;
	}

	if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
	    filter->sf_handle == 0 || filter->sf_name == NULL) {
		return EINVAL;
	}

	/* Allocate the socket filter */
	sock_filt = kheap_alloc(KM_IFADDR,
	    sizeof(struct socket_filter), Z_WAITOK | Z_ZERO);
	if (sock_filt == NULL) {
		return ENOBUFS;
	}

	/* Legacy sflt_filter length; current structure minus extended */
	len = sizeof(*filter) - sizeof(struct sflt_filter_ext);
	/*
	 * Include extended fields if filter defines SFLT_EXTENDED.
	 * We've zeroed out our internal sflt_filter placeholder,
	 * so any unused portion would have been taken care of.
	 */
	if (filter->sf_flags & SFLT_EXTENDED) {
		unsigned int ext_len = filter->sf_len;

		if (ext_len > sizeof(struct sflt_filter_ext)) {
			ext_len = sizeof(struct sflt_filter_ext);
		}

		len += ext_len;
	}
	bcopy(filter, &sock_filt->sf_filter, len);

	lck_rw_lock_exclusive(&sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle ==
		    sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
			    sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
		os_ref_init(&sock_filt->sf_refcount, NULL);

		OSIncrementAtomic64(&net_api_stats.nas_sfltr_register_count);
		INC_ATOMIC_INT64_LIM(net_api_stats.nas_sfltr_register_total);
		if (is_internal) {
			INC_ATOMIC_INT64_LIM(net_api_stats.nas_sfltr_register_os_total);
		}
	}
	lck_rw_unlock_exclusive(&sock_filter_lock);

	if (match != NULL) {
		kheap_free(KM_IFADDR, sock_filt, sizeof(struct socket_filter));
		return EEXIST;
	}

	if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY)) {
		return error;
	}

	/*
	 * Setup the filter on the TCP and UDP sockets already created.
	 */
#define SOLIST_ADD(_so) do {						\
	solist->next = solisthead;					\
	sock_retain((_so));						\
	solist->so = (_so);						\
	solisthead = solist;						\
} while (0)
	if (protocol == IPPROTO_TCP) {
		lck_rw_lock_shared(tcbinfo.ipi_lock);
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type)) {
				continue;
			}
			solist = kheap_alloc(KHEAP_TEMP, sizeof(struct solist), Z_NOWAIT);
			if (!solist) {
				continue;
			}
			SOLIST_ADD(so);
		}
		lck_rw_done(tcbinfo.ipi_lock);
	} else if (protocol == IPPROTO_UDP) {
		lck_rw_lock_shared(udbinfo.ipi_lock);
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) {
			so = inp->inp_socket;
			if (so == NULL || (so->so_state & SS_DEFUNCT) ||
			    (!(so->so_flags & SOF_MP_SUBFLOW) &&
			    (so->so_state & SS_NOFDREF)) ||
			    !SOCK_CHECK_DOM(so, domain) ||
			    !SOCK_CHECK_TYPE(so, type)) {
				continue;
			}
			solist = kheap_alloc(KHEAP_TEMP, sizeof(struct solist), Z_NOWAIT);
			if (!solist) {
				continue;
			}
			SOLIST_ADD(so);
		}
		lck_rw_done(udbinfo.ipi_lock);
	}
	/* XXX it's possible to walk the raw socket list as well */
#undef SOLIST_ADD

	while (solisthead) {
		sflt_handle handle = filter->sf_handle;

		so = solisthead->so;
		socket_lock(so, 0);
		sflt_initsock(so);
		if (so->so_state & SS_ISCONNECTING) {
			sflt_notify_after_register(so, sock_evt_connecting,
			    handle);
		} else if (so->so_state & SS_ISCONNECTED) {
			sflt_notify_after_register(so, sock_evt_connected,
			    handle);
		} else if ((so->so_state &
		    (SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE)) ==
		    (SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE)) {
			sflt_notify_after_register(so, sock_evt_disconnecting,
			    handle);
		} else if ((so->so_state &
		    (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED)) ==
		    (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED)) {
			sflt_notify_after_register(so, sock_evt_disconnected,
			    handle);
		} else if (so->so_state & SS_CANTSENDMORE) {
			sflt_notify_after_register(so, sock_evt_cantsendmore,
			    handle);
		} else if (so->so_state & SS_CANTRCVMORE) {
			sflt_notify_after_register(so, sock_evt_cantrecvmore,
			    handle);
		}
		socket_unlock(so, 0);
		/* XXX no easy way to post the sock_evt_closing event */
		sock_release(so);
		solist = solisthead;
		solisthead = solisthead->next;
		kheap_free(KHEAP_TEMP, solist, sizeof(struct solist));
	}

	return error;
}

errno_t
sflt_register_internal(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	return sflt_register_common(filter, domain, type, protocol, true);
}

errno_t
sflt_register(const struct sflt_filter *filter, int domain, int type,
    int protocol)
{
	return sflt_register_common(filter, domain, type, protocol, false);
}
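
/*
 * Usage sketch (illustrative only; the handle and name below are made
 * up): a kernel extension fills out a struct sflt_filter from
 * <sys/kpi_socketfilter.h> and registers it for the address family,
 * socket type, and protocol it wants to observe.
 *
 *	static errno_t
 *	example_attach(void **cookie, socket_t so)
 *	{
 *		*cookie = NULL;		// no per-socket state
 *		return 0;		// 0 = attach to this socket
 *	}
 *
 *	static void
 *	example_detach(void *cookie, socket_t so)
 *	{
 *		// free per-socket state here, if any
 *	}
 *
 *	static struct sflt_filter example_filter = {
 *		.sf_handle = 0x454d5846,	// hypothetical unique handle
 *		.sf_flags = SFLT_GLOBAL,	// attach to all new sockets
 *		.sf_name = "com.example.sfilter",
 *		.sf_attach = example_attach,
 *		.sf_detach = example_detach,
 *	};
 *
 *	// in the kext start routine:
 *	errno_t err = sflt_register(&example_filter, PF_INET, SOCK_STREAM,
 *	    IPPROTO_TCP);
 *
 *	// in the kext stop routine:
 *	sflt_unregister(0x454d5846);
 */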
errno_t
sflt_unregister(sflt_handle handle)
{
	struct socket_filter *filter;
	lck_rw_lock_exclusive(&sock_filter_lock);

	/* Find the entry by the handle */
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle) {
			break;
		}
	}

	if (filter) {
		VERIFY(OSDecrementAtomic64(&net_api_stats.nas_sfltr_register_count) > 0);

		/* Remove it from the global list */
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);

		/* Remove it from the protosw list */
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head,
			    filter, sf_protosw_next);
		}

		/* Detach from any sockets */
		struct socket_filter_entry *entry = NULL;

		for (entry = filter->sf_entry_head; entry;
		    entry = entry->sfe_next_onfilter) {
			sflt_detach_locked(entry);
		}

		/* Release the filter */
		sflt_release_locked(filter);
	}

	lck_rw_unlock_exclusive(&sock_filter_lock);

	if (filter == NULL) {
		return ENOENT;
	}

	return 0;
}
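
/*
 * Reinjection entry points.  A data filter that defers traffic (taking
 * ownership of the mbufs and returning EJUSTRETURN from sf_data_in or
 * sf_data_out) hands the data back through these calls.  Note the
 * asymmetry: sock_inject_data_in() appends directly to the receive
 * buffer, while sock_inject_data_out() goes back down through sosend()
 * and so may traverse the outbound filter chain again; the
 * so_send_filt_thread bookkeeping in sflt_data_out() above appears to
 * exist so that a filter reinjecting from its own send-side thread is
 * not asked to filter the same data twice.
 */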
errno_t
sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int error = 0;

	if (so == NULL || data == NULL) {
		return EINVAL;
	}

	if (flags & sock_data_filt_flag_oob) {
		return ENOTSUP;
	}

	socket_lock(so, 1);

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		error = ENOTSUP;
		goto done;
	}

	if (from) {
		if (sbappendaddr(&so->so_rcv,
		    (struct sockaddr *)(uintptr_t)from, data, control, NULL)) {
			sorwakeup(so);
		}
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL)) {
			sorwakeup(so);
		}
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf *)data)) {
			sorwakeup(so);
		}
		goto done;
	}

	if (sbappend(&so->so_rcv, data)) {
		sorwakeup(so);
	}
done:
	socket_unlock(so, 1);
	return error;
}

errno_t
sock_inject_data_out(socket_t so, const struct sockaddr *to, mbuf_t data,
    mbuf_t control, sflt_data_flag_t flags)
{
	int sosendflags = 0;

	/* reject if this is a subflow socket */
	if (so->so_flags & SOF_MP_SUBFLOW) {
		return ENOTSUP;
	}

	if (flags & sock_data_filt_flag_oob) {
		sosendflags = MSG_OOB;
	}
	return sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
	    data, control, sosendflags);
}

sockopt_dir
sockopt_direction(sockopt_t sopt)
{
	return (sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set;
}

int
sockopt_level(sockopt_t sopt)
{
	return sopt->sopt_level;
}

int
sockopt_name(sockopt_t sopt)
{
	return sopt->sopt_name;
}

size_t
sockopt_valsize(sockopt_t sopt)
{
	return sopt->sopt_valsize;
}

errno_t
sockopt_copyin(sockopt_t sopt, void *data, size_t len)
{
	return sooptcopyin(sopt, data, len, len);
}

errno_t
sockopt_copyout(sockopt_t sopt, void *data, size_t len)
{
	return sooptcopyout(sopt, data, len);
}
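
/*
 * Illustrative sketch (the callback shape is assumed from
 * <sys/kpi_socketfilter.h>): a filter's sf_setoption callback uses the
 * accessors above to inspect an option before letting it through.
 *
 *	static errno_t
 *	example_setoption(void *cookie, socket_t so, sockopt_t opt)
 *	{
 *		int v;
 *
 *		if (sockopt_direction(opt) == sockopt_set &&
 *		    sockopt_level(opt) == SOL_SOCKET &&
 *		    sockopt_name(opt) == SO_KEEPALIVE &&
 *		    sockopt_valsize(opt) == sizeof(v) &&
 *		    sockopt_copyin(opt, &v, sizeof(v)) == 0) {
 *			// observe or validate v here
 *		}
 *		return 0;	// 0 lets the option proceed normally
 *	}
 */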