/*
 * bsd/kern/kpi_socketfilter.c — Apple XNU (xnu-1504.15.3)
 */
1 /*
2 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/kpi_socketfilter.h>
30
31 #include <sys/socket.h>
32 #include <sys/param.h>
33 #include <sys/errno.h>
34 #include <sys/malloc.h>
35 #include <sys/protosw.h>
36 #include <kern/locks.h>
37 #include <net/kext_net.h>
38
39 #include <libkern/libkern.h>
40
41 #include <string.h>
42
43 static struct socket_filter_list sock_filter_head;
44 static lck_mtx_t *sock_filter_lock = 0;
45
46 static void sflt_detach_private(struct socket_filter_entry *entry, int unregistering);
47
48 __private_extern__ void
49 sflt_init(void)
50 {
51 lck_grp_attr_t *grp_attrib = 0;
52 lck_attr_t *lck_attrib = 0;
53 lck_grp_t *lck_group = 0;
54
55 TAILQ_INIT(&sock_filter_head);
56
57 /* Allocate a spin lock */
58 grp_attrib = lck_grp_attr_alloc_init();
59 lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
60 lck_grp_attr_free(grp_attrib);
61 lck_attrib = lck_attr_alloc_init();
62 sock_filter_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
63 lck_grp_free(lck_group);
64 lck_attr_free(lck_attrib);
65 }
66
67 __private_extern__ void
68 sflt_initsock(
69 struct socket *so)
70 {
71 struct protosw *proto = so->so_proto;
72 struct socket_filter *filter;
73
74 if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
75 lck_mtx_lock(sock_filter_lock);
76 TAILQ_FOREACH(filter, &proto->pr_filter_head, sf_protosw_next) {
77 sflt_attach_private(so, filter, 0, 0);
78 }
79 lck_mtx_unlock(sock_filter_lock);
80 }
81 }
82
83 __private_extern__ void
84 sflt_termsock(
85 struct socket *so)
86 {
87 struct socket_filter_entry *filter;
88 struct socket_filter_entry *filter_next;
89
90 for (filter = so->so_filt; filter; filter = filter_next) {
91 filter_next = filter->sfe_next_onsocket;
92 sflt_detach_private(filter, 0);
93 }
94 so->so_filt = NULL;
95 }
96
97 __private_extern__ void
98 sflt_use(
99 struct socket *so)
100 {
101 so->so_filteruse++;
102 }
103
104 __private_extern__ void
105 sflt_unuse(
106 struct socket *so)
107 {
108 so->so_filteruse--;
109 if (so->so_filteruse == 0) {
110 struct socket_filter_entry *filter;
111 struct socket_filter_entry *next_filter;
112 // search for detaching filters
113 for (filter = so->so_filt; filter; filter = next_filter) {
114 next_filter = filter->sfe_next_onsocket;
115
116 if (filter->sfe_flags & SFEF_DETACHUSEZERO) {
117 sflt_detach_private(filter, 0);
118 }
119 }
120 }
121 }
122
123 __private_extern__ void
124 sflt_notify(
125 struct socket *so,
126 sflt_event_t event,
127 void *param)
128 {
129 struct socket_filter_entry *filter;
130 int filtered = 0;
131
132 for (filter = so->so_filt; filter;
133 filter = filter->sfe_next_onsocket) {
134 if (filter->sfe_filter->sf_filter.sf_notify) {
135 if (filtered == 0) {
136 filtered = 1;
137 sflt_use(so);
138 socket_unlock(so, 0);
139 }
140 filter->sfe_filter->sf_filter.sf_notify(
141 filter->sfe_cookie, so, event, param);
142 }
143 }
144
145 if (filtered != 0) {
146 socket_lock(so, 0);
147 sflt_unuse(so);
148 }
149 }
150
151 __private_extern__ int
152 sflt_data_in(
153 struct socket *so,
154 const struct sockaddr *from,
155 mbuf_t *data,
156 mbuf_t *control,
157 sflt_data_flag_t flags,
158 int *filtered)
159 {
160 struct socket_filter_entry *filter;
161 int error = 0;
162 int filtered_storage;
163
164 if (filtered == NULL)
165 filtered = &filtered_storage;
166 *filtered = 0;
167
168 for (filter = so->so_filt; filter && (error == 0);
169 filter = filter->sfe_next_onsocket) {
170 if (filter->sfe_filter->sf_filter.sf_data_in) {
171 if (*filtered == 0) {
172 *filtered = 1;
173 sflt_use(so);
174 socket_unlock(so, 0);
175 }
176 error = filter->sfe_filter->sf_filter.sf_data_in(
177 filter->sfe_cookie, so, from, data, control, flags);
178 }
179 }
180
181 if (*filtered != 0) {
182 socket_lock(so, 0);
183 sflt_unuse(so);
184 }
185
186 return error;
187 }
188
/* sflt_attach_private
 *
 * Creates a socket_filter_entry linking `so` to `filter`, runs the
 * filter's sf_attach callout (if any), and links the entry onto both the
 * socket's and the filter's lists.  Used by sflt_initsock() (filter
 * supplied) and the public sflt_attach() KPI (lookup by handle).
 *
 * Returns 0 on success, ENOENT if no filter matches `handle`, ENOMEM on
 * allocation failure, or the error returned by sf_attach.
 *
 * Assumptions: If filter is not NULL, socket_filter_lock is held.
 */

__private_extern__ int
sflt_attach_private(
	struct socket *so,
	struct socket_filter *filter,
	sflt_handle handle,
	int sock_locked)
{
	struct socket_filter_entry *entry = NULL;
	int didlock = 0;
	int error = 0;

	if (filter == NULL) {
		/* Find the filter by the handle */
		lck_mtx_lock(sock_filter_lock);
		didlock = 1;

		TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
			if (filter->sf_filter.sf_handle == handle)
				break;
		}
	}

	if (filter == NULL)
		error = ENOENT;

	if (error == 0) {
		/* allocate the socket filter entry */
		MALLOC(entry, struct socket_filter_entry *, sizeof(*entry), M_IFADDR, M_WAITOK);
		if (entry == NULL) {
			error = ENOMEM;
		}
	}

	if (error == 0) {
		/* Initialize the socket filter entry and call the attach function */
		entry->sfe_filter = filter;
		entry->sfe_socket = so;
		entry->sfe_cookie = NULL;
		entry->sfe_flags = 0;
		if (entry->sfe_filter->sf_filter.sf_attach) {
			/*
			 * Hold a filter usecount across the callout so the
			 * filter can't be freed while sf_attach runs with
			 * the socket lock dropped.
			 */
			filter->sf_usecount++;

			/* sf_attach is always called without the socket lock held */
			if (sock_locked)
				socket_unlock(so, 0);
			error = entry->sfe_filter->sf_filter.sf_attach(&entry->sfe_cookie, so);
			if (sock_locked)
				socket_lock(so, 0);

			filter->sf_usecount--;

			/* If the attach function returns an error, this filter is not attached */
			if (error) {
				FREE(entry, M_IFADDR);
				entry = NULL;
			}
		}
	}

	if (error == 0) {
		/* Put the entry in the socket list */
		entry->sfe_next_onsocket = so->so_filt;
		so->so_filt = entry;

		/* Put the entry in the filter list */
		entry->sfe_next_onfilter = filter->sf_entry_head;
		filter->sf_entry_head = entry;

		/* Increment the parent filter's usecount */
		filter->sf_usecount++;
	}

	if (didlock) {
		lck_mtx_unlock(sock_filter_lock);
	}

	return error;
}
271
272
/* sflt_detach_private
 *
 * Detaches `entry` from its filter and socket, invoking the filter's
 * sf_detach callout and — when this was the last entry of a filter
 * flagged SFF_DETACHING — sf_unregistered, before freeing the entry.
 * Races against concurrent detach/unregister are resolved under
 * sock_filter_lock via the SFEF_* flags.
 *
 * Assumptions: if you pass 0 in for the second parameter, you are holding the
 * socket lock for the socket the entry is attached to. If you pass 1 in for
 * the second parameter, it is assumed that the entry is not on the filter's
 * list and the socket lock is not held.
 */

static void
sflt_detach_private(
	struct socket_filter_entry *entry,
	int unregistering)
{
	struct socket_filter_entry **next_ptr;
	int detached = 0;
	int found = 0;

	if (unregistering) {
		socket_lock(entry->sfe_socket, 0);
	}

	/*
	 * Attempt to find the entry on the filter's list and
	 * remove it. This prevents a filter detaching at the
	 * same time from attempting to remove the same entry.
	 */
	lck_mtx_lock(sock_filter_lock);
	if (!unregistering) {
		if ((entry->sfe_flags & SFEF_UNREGISTERING) != 0) {
			/*
			 * Another thread is unregistering the filter, we
			 * need to avoid detaching the filter here so the
			 * socket won't go away. Bump up the socket's
			 * usecount so that it won't be freed until after
			 * the filter unregistration has been completed;
			 * at this point the caller has already held the
			 * socket's lock, so we can directly modify the
			 * usecount.
			 */
			if (!(entry->sfe_flags & SFEF_DETACHXREF)) {
				entry->sfe_socket->so_usecount++;
				entry->sfe_flags |= SFEF_DETACHXREF;
			}
			lck_mtx_unlock(sock_filter_lock);
			return;
		}
		/* Unlink the entry from its filter's list */
		for (next_ptr = &entry->sfe_filter->sf_entry_head; *next_ptr;
			next_ptr = &((*next_ptr)->sfe_next_onfilter)) {
			if (*next_ptr == entry) {
				found = 1;
				*next_ptr = entry->sfe_next_onfilter;
				break;
			}
		}

		/*
		 * Not on the filter list and no deferred detach pending:
		 * another thread already detached this entry, so there is
		 * nothing left for us to do.
		 */
		if (!found && (entry->sfe_flags & SFEF_DETACHUSEZERO) == 0) {
			lck_mtx_unlock(sock_filter_lock);
			return;
		}
	} else {
		/*
		 * Clear the removing flag. We will perform the detach here or
		 * request a delayed detach. Since we do an extra ref release
		 * below, bump up the usecount if we haven't done so.
		 */
		entry->sfe_flags &= ~SFEF_UNREGISTERING;
		if (!(entry->sfe_flags & SFEF_DETACHXREF)) {
			entry->sfe_socket->so_usecount++;
			entry->sfe_flags |= SFEF_DETACHXREF;
		}
	}

	if (entry->sfe_socket->so_filteruse != 0) {
		/*
		 * Filter callouts are in progress on this socket; mark the
		 * entry so sflt_unuse() performs the detach once the use
		 * count drops to zero.
		 */
		entry->sfe_flags |= SFEF_DETACHUSEZERO;
		lck_mtx_unlock(sock_filter_lock);

		if (unregistering) {
#if DEBUG
			printf("sflt_detach_private unregistering SFEF_DETACHUSEZERO "
				"so%p so_filteruse %u so_usecount %d\n",
				entry->sfe_socket, entry->sfe_socket->so_filteruse,
				entry->sfe_socket->so_usecount);
#endif
			socket_unlock(entry->sfe_socket, 0);
		}
		return;
	} else {
		/*
		 * Check if we are removing the last attached filter and
		 * the parent filter is being unregistered.
		 */
		entry->sfe_filter->sf_usecount--;
		if ((entry->sfe_filter->sf_usecount == 0) &&
			(entry->sfe_filter->sf_flags & SFF_DETACHING) != 0)
			detached = 1;
	}
	lck_mtx_unlock(sock_filter_lock);

	/* Remove from the socket list */
	for (next_ptr = &entry->sfe_socket->so_filt; *next_ptr;
		next_ptr = &((*next_ptr)->sfe_next_onsocket)) {
		if (*next_ptr == entry) {
			*next_ptr = entry->sfe_next_onsocket;
			break;
		}
	}

	/* Tell the filter this attachment is going away */
	if (entry->sfe_filter->sf_filter.sf_detach)
		entry->sfe_filter->sf_filter.sf_detach(entry->sfe_cookie, entry->sfe_socket);

	/* Last entry of an unregistering filter: complete unregistration */
	if (detached && entry->sfe_filter->sf_filter.sf_unregistered) {
		entry->sfe_filter->sf_filter.sf_unregistered(entry->sfe_filter->sf_filter.sf_handle);
		FREE(entry->sfe_filter, M_IFADDR);
	}

	/* The "1" releases the extra socket reference taken under SFEF_DETACHXREF */
	if (unregistering)
		socket_unlock(entry->sfe_socket, 1);

	FREE(entry, M_IFADDR);
}
393
394 errno_t
395 sflt_attach(
396 socket_t socket,
397 sflt_handle handle)
398 {
399 if (socket == NULL || handle == 0)
400 return EINVAL;
401
402 return sflt_attach_private(socket, NULL, handle, 0);
403 }
404
405 errno_t
406 sflt_detach(
407 socket_t socket,
408 sflt_handle handle)
409 {
410 struct socket_filter_entry *filter;
411 errno_t result = 0;
412
413 if (socket == NULL || handle == 0)
414 return EINVAL;
415
416 socket_lock(socket, 1);
417
418 for (filter = socket->so_filt; filter;
419 filter = filter->sfe_next_onsocket) {
420 if (filter->sfe_filter->sf_filter.sf_handle == handle)
421 break;
422 }
423
424 if (filter != NULL) {
425 sflt_detach_private(filter, 0);
426 }
427 else {
428 socket->so_filt = NULL;
429 result = ENOENT;
430 }
431
432 socket_unlock(socket, 1);
433
434 return result;
435 }
436
437
438 errno_t
439 sflt_register(
440 const struct sflt_filter *filter,
441 int domain,
442 int type,
443 int protocol)
444 {
445 struct socket_filter *sock_filt = NULL;
446 struct socket_filter *match = NULL;
447 int error = 0;
448 struct protosw *pr = pffindproto(domain, protocol, type);
449 unsigned int len;
450
451 if (pr == NULL)
452 return ENOENT;
453
454 if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
455 filter->sf_handle == 0 || filter->sf_name == NULL)
456 return EINVAL;
457
458 /* Allocate the socket filter */
459 MALLOC(sock_filt, struct socket_filter *, sizeof (*sock_filt),
460 M_IFADDR, M_WAITOK);
461 if (sock_filt == NULL) {
462 return ENOBUFS;
463 }
464
465 bzero(sock_filt, sizeof (*sock_filt));
466
467 /* Legacy sflt_filter length; current structure minus extended */
468 len = sizeof (*filter) - sizeof (struct sflt_filter_ext);
469 /*
470 * Include extended fields if filter defines SFLT_EXTENDED.
471 * We've zeroed out our internal sflt_filter placeholder,
472 * so any unused portion would have been taken care of.
473 */
474 if (filter->sf_flags & SFLT_EXTENDED) {
475 unsigned int ext_len = filter->sf_len;
476
477 if (ext_len > sizeof (struct sflt_filter_ext))
478 ext_len = sizeof (struct sflt_filter_ext);
479
480 len += ext_len;
481 }
482 bcopy(filter, &sock_filt->sf_filter, len);
483
484 lck_mtx_lock(sock_filter_lock);
485 /* Look for an existing entry */
486 TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
487 if (match->sf_filter.sf_handle ==
488 sock_filt->sf_filter.sf_handle) {
489 break;
490 }
491 }
492
493 /* Add the entry only if there was no existing entry */
494 if (match == NULL) {
495 TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
496 if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
497 TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
498 sf_protosw_next);
499 sock_filt->sf_proto = pr;
500 }
501 }
502 lck_mtx_unlock(sock_filter_lock);
503
504 if (match != NULL) {
505 FREE(sock_filt, M_IFADDR);
506 return EEXIST;
507 }
508
509 return error;
510 }
511
/*
 * Public KPI: unregister the filter identified by `handle`.  Removes it
 * from the global (and, for SFLT_GLOBAL, the protosw) list, then detaches
 * it from every socket it is attached to.  Returns ENOENT if no filter
 * has that handle.
 */
errno_t
sflt_unregister(
	sflt_handle handle)
{
	struct socket_filter *filter;
	struct socket_filter_entry *entry_head = NULL;
	struct socket_filter_entry *next_entry = NULL;

	/* Find the entry and remove it from the global and protosw lists */
	lck_mtx_lock(sock_filter_lock);
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head, filter, sf_protosw_next);
		}
		/* Take ownership of the attached-entry list under the lock */
		entry_head = filter->sf_entry_head;
		filter->sf_entry_head = NULL;
		filter->sf_flags |= SFF_DETACHING;

		for (next_entry = entry_head; next_entry;
			next_entry = next_entry->sfe_next_onfilter) {
			/*
			 * Mark this as "unregistering"; upon dropping the
			 * lock, another thread may win the race and attempt
			 * to detach a socket from it (e.g. as part of close)
			 * before we get a chance to detach. Setting this
			 * flag practically tells the other thread to go away.
			 * If the other thread wins, this causes an extra
			 * reference hold on the socket so that it won't be
			 * deallocated until after we finish with the detach
			 * for it below. If we win the race, the extra
			 * reference hold is also taken to compensate for the
			 * extra reference release when detach is called
			 * with a "1" for its second parameter.
			 */
			next_entry->sfe_flags |= SFEF_UNREGISTERING;
		}
	}

	lck_mtx_unlock(sock_filter_lock);

	if (filter == NULL)
		return ENOENT;

	/* We need to detach the filter from any sockets it's attached to */
	if (entry_head == 0) {
		/* No attachments: report unregistration completion right away */
		if (filter->sf_filter.sf_unregistered)
			filter->sf_filter.sf_unregistered(filter->sf_filter.sf_handle);
	} else {
		while (entry_head) {
			/* capture next first — detach may free the entry */
			next_entry = entry_head->sfe_next_onfilter;
			sflt_detach_private(entry_head, 1);
			entry_head = next_entry;
		}
	}

	return 0;
}
575
/*
 * Public KPI: inject `data` (with optional `control` mbufs and source
 * address `from`) into the socket's receive buffer as though it arrived
 * from the network, waking any readers.  OOB injection is not supported.
 * Returns EINVAL for missing socket/data, ENOTSUP for the OOB flag.
 */
errno_t
sock_inject_data_in(
	socket_t so,
	const struct sockaddr* from,
	mbuf_t data,
	mbuf_t control,
	sflt_data_flag_t flags)
{
	int error = 0;
	if (so == NULL || data == NULL) return EINVAL;

	if (flags & sock_data_filt_flag_oob) {
		return ENOTSUP;
	}

	socket_lock(so, 1);

	/* A source address makes this an addressed record append */
	if (from) {
		if (sbappendaddr(&so->so_rcv, (struct sockaddr*)(uintptr_t)from, data,
			control, NULL))
			sorwakeup(so);
		goto done;
	}

	/* Control mbufs without an address */
	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		/*
		 * NOTE(review): this EINVAL branch is unreachable — any
		 * non-NULL `control` or `from` already took a goto above.
		 */
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf*)data))
			sorwakeup(so);
		goto done;
	}

	/* Plain data: append to the receive stream */
	if (sbappend(&so->so_rcv, data))
		sorwakeup(so);
done:
	socket_unlock(so, 1);
	return error;
}
622
623 errno_t
624 sock_inject_data_out(
625 socket_t so,
626 const struct sockaddr* to,
627 mbuf_t data,
628 mbuf_t control,
629 sflt_data_flag_t flags)
630 {
631 int sosendflags = 0;
632 if (flags & sock_data_filt_flag_oob) sosendflags = MSG_OOB;
633 return sosend(so, (struct sockaddr*)(uintptr_t)to, NULL,
634 data, control, sosendflags);
635 }
636
637 sockopt_dir
638 sockopt_direction(
639 sockopt_t sopt)
640 {
641 return (sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set;
642 }
643
644 int
645 sockopt_level(
646 sockopt_t sopt)
647 {
648 return sopt->sopt_level;
649 }
650
651 int
652 sockopt_name(
653 sockopt_t sopt)
654 {
655 return sopt->sopt_name;
656 }
657
658 size_t
659 sockopt_valsize(
660 sockopt_t sopt)
661 {
662 return sopt->sopt_valsize;
663 }
664
665 errno_t
666 sockopt_copyin(
667 sockopt_t sopt,
668 void *data,
669 size_t len)
670 {
671 return sooptcopyin(sopt, data, len, len);
672 }
673
674 errno_t
675 sockopt_copyout(
676 sockopt_t sopt,
677 void *data,
678 size_t len)
679 {
680 return sooptcopyout(sopt, data, len);
681 }