]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kpi_socketfilter.c
xnu-1228.5.18.tar.gz
[apple/xnu.git] / bsd / kern / kpi_socketfilter.c
1 /*
2 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/kpi_socketfilter.h>
30
31 #include <sys/socket.h>
32 #include <sys/param.h>
33 #include <sys/errno.h>
34 #include <sys/malloc.h>
35 #include <sys/protosw.h>
36 #include <kern/locks.h>
37 #include <net/kext_net.h>
38
39 #include <string.h>
40
/* Global list of every registered socket filter (head of sf_global_next). */
static struct socket_filter_list sock_filter_head;
/* Mutex guarding sock_filter_head, each filter's sf_entry_head list, and
 * the SFEF_* entry flags; allocated once in sflt_init(). */
static lck_mtx_t *sock_filter_lock = 0;

/* Forward declaration; see the definition below for locking assumptions. */
static void sflt_detach_private(struct socket_filter_entry *entry, int unregistering);
45
46 __private_extern__ void
47 sflt_init(void)
48 {
49 lck_grp_attr_t *grp_attrib = 0;
50 lck_attr_t *lck_attrib = 0;
51 lck_grp_t *lck_group = 0;
52
53 TAILQ_INIT(&sock_filter_head);
54
55 /* Allocate a spin lock */
56 grp_attrib = lck_grp_attr_alloc_init();
57 lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
58 lck_grp_attr_free(grp_attrib);
59 lck_attrib = lck_attr_alloc_init();
60 sock_filter_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
61 lck_grp_free(lck_group);
62 lck_attr_free(lck_attrib);
63 }
64
65 __private_extern__ void
66 sflt_initsock(
67 struct socket *so)
68 {
69 struct protosw *proto = so->so_proto;
70 struct socket_filter *filter;
71
72 if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
73 lck_mtx_lock(sock_filter_lock);
74 TAILQ_FOREACH(filter, &proto->pr_filter_head, sf_protosw_next) {
75 sflt_attach_private(so, filter, 0, 0);
76 }
77 lck_mtx_unlock(sock_filter_lock);
78 }
79 }
80
81 __private_extern__ void
82 sflt_termsock(
83 struct socket *so)
84 {
85 struct socket_filter_entry *filter;
86 struct socket_filter_entry *filter_next;
87
88 for (filter = so->so_filt; filter; filter = filter_next) {
89 filter_next = filter->sfe_next_onsocket;
90 sflt_detach_private(filter, 0);
91 }
92 so->so_filt = NULL;
93 }
94
95 __private_extern__ void
96 sflt_use(
97 struct socket *so)
98 {
99 so->so_filteruse++;
100 }
101
102 __private_extern__ void
103 sflt_unuse(
104 struct socket *so)
105 {
106 so->so_filteruse--;
107 if (so->so_filteruse == 0) {
108 struct socket_filter_entry *filter;
109 struct socket_filter_entry *next_filter;
110 // search for detaching filters
111 for (filter = so->so_filt; filter; filter = next_filter) {
112 next_filter = filter->sfe_next_onsocket;
113
114 if (filter->sfe_flags & SFEF_DETACHUSEZERO) {
115 sflt_detach_private(filter, 0);
116 }
117 }
118 }
119 }
120
121 __private_extern__ void
122 sflt_notify(
123 struct socket *so,
124 sflt_event_t event,
125 void *param)
126 {
127 struct socket_filter_entry *filter;
128 int filtered = 0;
129
130 for (filter = so->so_filt; filter;
131 filter = filter->sfe_next_onsocket) {
132 if (filter->sfe_filter->sf_filter.sf_notify) {
133 if (filtered == 0) {
134 filtered = 1;
135 sflt_use(so);
136 socket_unlock(so, 0);
137 }
138 filter->sfe_filter->sf_filter.sf_notify(
139 filter->sfe_cookie, so, event, param);
140 }
141 }
142
143 if (filtered != 0) {
144 socket_lock(so, 0);
145 sflt_unuse(so);
146 }
147 }
148
149 __private_extern__ int
150 sflt_data_in(
151 struct socket *so,
152 const struct sockaddr *from,
153 mbuf_t *data,
154 mbuf_t *control,
155 sflt_data_flag_t flags,
156 int *filtered)
157 {
158 struct socket_filter_entry *filter;
159 int error = 0;
160 int filtered_storage;
161
162 if (filtered == NULL)
163 filtered = &filtered_storage;
164 *filtered = 0;
165
166 for (filter = so->so_filt; filter && (error == 0);
167 filter = filter->sfe_next_onsocket) {
168 if (filter->sfe_filter->sf_filter.sf_data_in) {
169 if (*filtered == 0) {
170 *filtered = 1;
171 sflt_use(so);
172 socket_unlock(so, 0);
173 }
174 error = filter->sfe_filter->sf_filter.sf_data_in(
175 filter->sfe_cookie, so, from, data, control, flags);
176 }
177 }
178
179 if (*filtered != 0) {
180 socket_lock(so, 0);
181 sflt_unuse(so);
182 }
183
184 return error;
185 }
186
/* sflt_attach_private
 *
 * Create a socket_filter_entry binding `filter' to `so', invoke the
 * filter's attach callback, and link the entry onto both the socket's
 * and the filter's entry lists.  Called from sflt_attach() with
 * filter == NULL (looked up here by handle) and from sflt_initsock()
 * with the filter supplied directly.
 *
 * Returns ENOENT if no filter matches `handle', ENOMEM on allocation
 * failure, or the attach callback's error (entry freed in that case).
 *
 * Assumptions: If filter is not NULL, socket_filter_lock is held.
 */

__private_extern__ int
sflt_attach_private(
	struct socket *so,
	struct socket_filter *filter,
	sflt_handle handle,
	int sock_locked)
{
	struct socket_filter_entry *entry = NULL;
	int didlock = 0;	/* did we take sock_filter_lock ourselves? */
	int error = 0;

	if (filter == NULL) {
		/* Find the filter by the handle */
		lck_mtx_lock(sock_filter_lock);
		didlock = 1;

		TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
			if (filter->sf_filter.sf_handle == handle)
				break;
		}
	}

	if (filter == NULL)
		error = ENOENT;

	if (error == 0) {
		/* allocate the socket filter entry */
		MALLOC(entry, struct socket_filter_entry *, sizeof(*entry), M_IFADDR, M_WAITOK);
		if (entry == NULL) {
			error = ENOMEM;
		}
	}

	if (error == 0) {
		/* Initialize the socket filter entry and call the attach function */
		entry->sfe_filter = filter;
		entry->sfe_socket = so;
		entry->sfe_cookie = NULL;
		entry->sfe_flags = 0;
		if (entry->sfe_filter->sf_filter.sf_attach) {
			/*
			 * Hold a usecount across the callback so the filter
			 * cannot disappear while the socket lock is dropped.
			 */
			filter->sf_usecount++;

			/* The attach callback runs without the socket lock. */
			if (sock_locked)
				socket_unlock(so, 0);
			error = entry->sfe_filter->sf_filter.sf_attach(&entry->sfe_cookie, so);
			if (sock_locked)
				socket_lock(so, 0);

			filter->sf_usecount--;

			/* If the attach function returns an error, this filter is not attached */
			if (error) {
				FREE(entry, M_IFADDR);
				entry = NULL;
			}
		}
	}

	if (error == 0) {
		/* Put the entry in the socket list */
		entry->sfe_next_onsocket = so->so_filt;
		so->so_filt = entry;

		/* Put the entry in the filter list */
		entry->sfe_next_onfilter = filter->sf_entry_head;
		filter->sf_entry_head = entry;

		/* Increment the parent filter's usecount */
		filter->sf_usecount++;
	}

	if (didlock) {
		lck_mtx_unlock(sock_filter_lock);
	}

	return error;
}
269
270
/* sflt_detach_private
 *
 * Detach one filter entry from its socket: unlink it from the filter's
 * and the socket's lists, invoke the filter's detach callback, and free
 * the entry.  If a filter callback is currently running on the socket
 * (so_filteruse != 0) the detach is deferred via SFEF_DETACHUSEZERO and
 * completed later by sflt_unuse().
 *
 * Assumptions: if you pass 0 in for the second parameter, you are holding the
 * socket lock for the socket the entry is attached to. If you pass 1 in for
 * the second parameter, it is assumed that the entry is not on the filter's
 * list and the socket lock is not held.
 */

static void
sflt_detach_private(
	struct socket_filter_entry *entry,
	int unregistering)
{
	struct socket_filter_entry **next_ptr;
	int detached = 0;	/* set when the parent filter must be torn down */
	int found = 0;

	if (unregistering) {
		socket_lock(entry->sfe_socket, 0);
	}

	/*
	 * Attempt to find the entry on the filter's list and
	 * remove it. This prevents a filter detaching at the
	 * same time from attempting to remove the same entry.
	 */
	lck_mtx_lock(sock_filter_lock);
	if (!unregistering) {
		if ((entry->sfe_flags & SFEF_UNREGISTERING) != 0) {
			/*
			 * Another thread is unregistering the filter, we
			 * need to avoid detaching the filter here so the
			 * socket won't go away. Bump up the socket's
			 * usecount so that it won't be freed until after
			 * the filter unregistration has been completed;
			 * at this point the caller has already held the
			 * socket's lock, so we can directly modify the
			 * usecount.
			 */
			if (!(entry->sfe_flags & SFEF_DETACHXREF)) {
				entry->sfe_socket->so_usecount++;
				entry->sfe_flags |= SFEF_DETACHXREF;
			}
			lck_mtx_unlock(sock_filter_lock);
			return;
		}
		/* Unlink the entry from the filter's entry list. */
		for (next_ptr = &entry->sfe_filter->sf_entry_head; *next_ptr;
		     next_ptr = &((*next_ptr)->sfe_next_onfilter)) {
			if (*next_ptr == entry) {
				found = 1;
				*next_ptr = entry->sfe_next_onfilter;
				break;
			}
		}

		/*
		 * Not on the filter's list and no deferred detach pending:
		 * another thread already detached this entry, nothing to do.
		 */
		if (!found && (entry->sfe_flags & SFEF_DETACHUSEZERO) == 0) {
			lck_mtx_unlock(sock_filter_lock);
			return;
		}
	}
	else {
		/*
		 * Clear the removing flag. We will perform the detach here or
		 * request a delayed detach. Since we do an extra ref release
		 * below, bump up the usecount if we haven't done so.
		 */
		entry->sfe_flags &= ~SFEF_UNREGISTERING;
		if (!(entry->sfe_flags & SFEF_DETACHXREF)) {
			entry->sfe_socket->so_usecount++;
			entry->sfe_flags |= SFEF_DETACHXREF;
		}
	}

	if (entry->sfe_socket->so_filteruse != 0) {
		/*
		 * A filter callback is in flight on this socket; defer the
		 * actual detach until sflt_unuse() drops the count to zero.
		 */
		entry->sfe_flags |= SFEF_DETACHUSEZERO;
		lck_mtx_unlock(sock_filter_lock);
		return;
	}
	else {
		/*
		 * Check if we are removing the last attached filter and
		 * the parent filter is being unregistered.
		 */
		entry->sfe_filter->sf_usecount--;
		if ((entry->sfe_filter->sf_usecount == 0) &&
			(entry->sfe_filter->sf_flags & SFF_DETACHING) != 0)
			detached = 1;
	}
	lck_mtx_unlock(sock_filter_lock);

	/* Remove from the socket list */
	for (next_ptr = &entry->sfe_socket->so_filt; *next_ptr;
	     next_ptr = &((*next_ptr)->sfe_next_onsocket)) {
		if (*next_ptr == entry) {
			*next_ptr = entry->sfe_next_onsocket;
			break;
		}
	}

	/* Tell the filter it is no longer attached to this socket. */
	if (entry->sfe_filter->sf_filter.sf_detach)
		entry->sfe_filter->sf_filter.sf_detach(entry->sfe_cookie, entry->sfe_socket);

	/*
	 * Last entry of a filter being unregistered: deliver the final
	 * unregistered callback and free the filter structure itself.
	 * NOTE(review): if sf_unregistered is NULL the filter structure is
	 * never freed on this path — looks like a leak; confirm upstream.
	 */
	if (detached && entry->sfe_filter->sf_filter.sf_unregistered) {
		entry->sfe_filter->sf_filter.sf_unregistered(entry->sfe_filter->sf_filter.sf_handle);
		FREE(entry->sfe_filter, M_IFADDR);
	}

	/*
	 * socket_unlock(..., 1) also releases the extra socket reference
	 * taken above under SFEF_DETACHXREF.
	 */
	if (unregistering)
		socket_unlock(entry->sfe_socket, 1);

	FREE(entry, M_IFADDR);
}
383
384 errno_t
385 sflt_attach(
386 socket_t socket,
387 sflt_handle handle)
388 {
389 if (socket == NULL || handle == 0)
390 return EINVAL;
391
392 return sflt_attach_private(socket, NULL, handle, 0);
393 }
394
395 errno_t
396 sflt_detach(
397 socket_t socket,
398 sflt_handle handle)
399 {
400 struct socket_filter_entry *filter;
401 errno_t result = 0;
402
403 if (socket == NULL || handle == 0)
404 return EINVAL;
405
406 socket_lock(socket, 1);
407
408 for (filter = socket->so_filt; filter;
409 filter = filter->sfe_next_onsocket) {
410 if (filter->sfe_filter->sf_filter.sf_handle == handle)
411 break;
412 }
413
414 if (filter != NULL) {
415 sflt_detach_private(filter, 0);
416 }
417 else {
418 socket->so_filt = NULL;
419 result = ENOENT;
420 }
421
422 socket_unlock(socket, 1);
423
424 return result;
425 }
426
427
/*
 * sflt_register
 *
 * Register a socket filter for the given domain/type/protocol.  The
 * caller's sflt_filter description is copied into a freshly allocated
 * socket_filter and placed on the global list; SFLT_GLOBAL filters are
 * also placed on the protosw's list so sflt_initsock() attaches them
 * automatically to new sockets.
 *
 * Returns ENOENT if the protocol doesn't exist, EINVAL for a malformed
 * description, ENOBUFS on allocation failure, and EEXIST when a filter
 * with the same handle is already registered.
 */
errno_t
sflt_register(
	const struct sflt_filter *filter,
	int domain,
	int type,
	int protocol)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr = pffindproto(domain, protocol, type);
	unsigned int len;

	if (pr == NULL)
		return ENOENT;

	/* Attach, detach, a non-zero handle, and a name are mandatory. */
	if (filter->sf_attach == NULL || filter->sf_detach == NULL ||
	    filter->sf_handle == 0 || filter->sf_name == NULL)
		return EINVAL;

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter *, sizeof (*sock_filt),
	    M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return ENOBUFS;
	}

	bzero(sock_filt, sizeof (*sock_filt));

	/* Legacy sflt_filter length; current structure minus extended */
	len = sizeof (*filter) - sizeof (struct sflt_filter_ext);
	/*
	 * Include extended fields if filter defines SFLT_EXTENDED.
	 * We've zeroed out our internal sflt_filter placeholder,
	 * so any unused portion would have been taken care of.
	 */
	if (filter->sf_flags & SFLT_EXTENDED) {
		unsigned int ext_len = filter->sf_len;

		/* Clamp: never copy more than our structure can hold. */
		if (ext_len > sizeof (struct sflt_filter_ext))
			ext_len = sizeof (struct sflt_filter_ext);

		len += ext_len;
	}
	bcopy(filter, &sock_filt->sf_filter, len);

	lck_mtx_lock(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle ==
		    sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt,
			    sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
	}
	lck_mtx_unlock(sock_filter_lock);

	/* Duplicate handle: discard our copy and report the conflict. */
	if (match != NULL) {
		FREE(sock_filt, M_IFADDR);
		return EEXIST;
	}

	return error;
}
501
/*
 * sflt_unregister
 *
 * Remove a registered filter from the global (and, for SFLT_GLOBAL
 * filters, the protosw) lists, then detach it from every socket it is
 * still attached to.  The filter structure itself is freed by the last
 * sflt_detach_private() once all of its entries are gone.
 *
 * Returns ENOENT if no filter with the given handle is registered.
 */
errno_t
sflt_unregister(
	sflt_handle handle)
{
	struct socket_filter *filter;
	struct socket_filter_entry *entry_head = NULL;
	struct socket_filter_entry *next_entry = NULL;

	/* Find the entry and remove it from the global and protosw lists */
	lck_mtx_lock(sock_filter_lock);
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head, filter, sf_protosw_next);
		}
		/*
		 * Take ownership of the entry list; SFF_DETACHING tells
		 * sflt_detach_private() this filter is going away.
		 */
		entry_head = filter->sf_entry_head;
		filter->sf_entry_head = NULL;
		filter->sf_flags |= SFF_DETACHING;

		for (next_entry = entry_head; next_entry;
		     next_entry = next_entry->sfe_next_onfilter) {
			/*
			 * Mark this as "unregistering"; upon dropping the
			 * lock, another thread may win the race and attempt
			 * to detach a socket from it (e.g. as part of close)
			 * before we get a chance to detach. Setting this
			 * flag practically tells the other thread to go away.
			 * If the other thread wins, this causes an extra
			 * reference hold on the socket so that it won't be
			 * deallocated until after we finish with the detach
			 * for it below. If we win the race, the extra
			 * reference hold is also taken to compensate for the
			 * extra reference release when detach is called
			 * with a "1" for its second parameter.
			 */
			next_entry->sfe_flags |= SFEF_UNREGISTERING;
		}
	}

	lck_mtx_unlock(sock_filter_lock);

	if (filter == NULL)
		return ENOENT;

	/* We need to detach the filter from any sockets it's attached to */
	if (entry_head == 0) {
		/*
		 * No sockets attached: deliver the final callback now.
		 * NOTE(review): the filter structure itself is not freed on
		 * this path — looks like a leak; confirm against later xnu.
		 */
		if (filter->sf_filter.sf_unregistered)
			filter->sf_filter.sf_unregistered(filter->sf_filter.sf_handle);
	} else {
		while (entry_head) {
			/* Detach frees the entry, so save the link first. */
			next_entry = entry_head->sfe_next_onfilter;
			sflt_detach_private(entry_head, 1);
			entry_head = next_entry;
		}
	}

	return 0;
}
565
566 errno_t
567 sock_inject_data_in(
568 socket_t so,
569 const struct sockaddr* from,
570 mbuf_t data,
571 mbuf_t control,
572 sflt_data_flag_t flags)
573 {
574 int error = 0;
575 if (so == NULL || data == NULL) return EINVAL;
576
577 if (flags & sock_data_filt_flag_oob) {
578 return ENOTSUP;
579 }
580
581 socket_lock(so, 1);
582
583 if (from) {
584 if (sbappendaddr(&so->so_rcv, (struct sockaddr*)from, data,
585 control, NULL))
586 sorwakeup(so);
587 goto done;
588 }
589
590 if (control) {
591 if (sbappendcontrol(&so->so_rcv, data, control, NULL))
592 sorwakeup(so);
593 goto done;
594 }
595
596 if (flags & sock_data_filt_flag_record) {
597 if (control || from) {
598 error = EINVAL;
599 goto done;
600 }
601 if (sbappendrecord(&so->so_rcv, (struct mbuf*)data))
602 sorwakeup(so);
603 goto done;
604 }
605
606 if (sbappend(&so->so_rcv, data))
607 sorwakeup(so);
608 done:
609 socket_unlock(so, 1);
610 return error;
611 }
612
613 errno_t
614 sock_inject_data_out(
615 socket_t so,
616 const struct sockaddr* to,
617 mbuf_t data,
618 mbuf_t control,
619 sflt_data_flag_t flags)
620 {
621 int sosendflags = 0;
622 if (flags & sock_data_filt_flag_oob) sosendflags = MSG_OOB;
623 return sosend(so, (struct sockaddr*)to, NULL,
624 data, control, sosendflags);
625 }
626
627 sockopt_dir
628 sockopt_direction(
629 sockopt_t sopt)
630 {
631 return (sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set;
632 }
633
634 int
635 sockopt_level(
636 sockopt_t sopt)
637 {
638 return sopt->sopt_level;
639 }
640
641 int
642 sockopt_name(
643 sockopt_t sopt)
644 {
645 return sopt->sopt_name;
646 }
647
648 size_t
649 sockopt_valsize(
650 sockopt_t sopt)
651 {
652 return sopt->sopt_valsize;
653 }
654
655 errno_t
656 sockopt_copyin(
657 sockopt_t sopt,
658 void *data,
659 size_t len)
660 {
661 return sooptcopyin(sopt, data, len, len);
662 }
663
664 errno_t
665 sockopt_copyout(
666 sockopt_t sopt,
667 void *data,
668 size_t len)
669 {
670 return sooptcopyout(sopt, data, len);
671 }