[apple/xnu.git] bsd/kern/kpi_socketfilter.c
/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <kern/locks.h>
#include <net/kext_net.h>

static struct socket_filter_list sock_filter_head;
static lck_mtx_t *sock_filter_lock = 0;

static void sflt_detach_private(struct socket_filter_entry *entry, int unregistering);

__private_extern__ void
sflt_init(void)
{
	lck_grp_attr_t *grp_attrib = 0;
	lck_attr_t *lck_attrib = 0;
	lck_grp_t *lck_group = 0;

	TAILQ_INIT(&sock_filter_head);

	/* Allocate the global filter lock (a mutex) */
	grp_attrib = lck_grp_attr_alloc_init();
	lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
	lck_grp_attr_free(grp_attrib);
	lck_attrib = lck_attr_alloc_init();
	sock_filter_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
	lck_grp_free(lck_group);
	lck_attr_free(lck_attrib);
}

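/*
 * sflt_initsock
 *
 * Attach every filter registered globally (SFLT_GLOBAL) for this
 * socket's protocol to the socket.
 */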
__private_extern__ void
sflt_initsock(
	struct socket *so)
{
	struct protosw *proto = so->so_proto;
	struct socket_filter *filter;

	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		lck_mtx_lock(sock_filter_lock);
		TAILQ_FOREACH(filter, &proto->pr_filter_head, sf_protosw_next) {
			sflt_attach_private(so, filter, 0, 0);
		}
		lck_mtx_unlock(sock_filter_lock);
	}
}

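/*
 * sflt_termsock
 *
 * Detach every remaining filter entry from a socket that is being
 * terminated and clear its filter list.
 */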
__private_extern__ void
sflt_termsock(
	struct socket *so)
{
	struct socket_filter_entry *filter;
	struct socket_filter_entry *filter_next;

	for (filter = so->so_filt; filter; filter = filter_next) {
		filter_next = filter->sfe_next_onsocket;
		sflt_detach_private(filter, 0);
	}
	so->so_filt = NULL;
}

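/*
 * sflt_use / sflt_unuse
 *
 * so_filteruse counts callers that are currently running filter
 * callbacks with the socket unlocked.  When the count drops back to
 * zero, any detaches deferred in the meantime (SFEF_DETACHUSEZERO)
 * are carried out.
 */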
__private_extern__ void
sflt_use(
	struct socket *so)
{
	so->so_filteruse++;
}

__private_extern__ void
sflt_unuse(
	struct socket *so)
{
	so->so_filteruse--;
	if (so->so_filteruse == 0) {
		struct socket_filter_entry *filter;
		struct socket_filter_entry *next_filter;
		// search for detaching filters
		for (filter = so->so_filt; filter; filter = next_filter) {
			next_filter = filter->sfe_next_onsocket;

			if (filter->sfe_flags & SFEF_DETACHUSEZERO) {
				sflt_detach_private(filter, 0);
			}
		}
	}
}

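/*
 * sflt_notify
 *
 * Deliver an event to every attached filter that implements sf_notify.
 * The socket lock is dropped around the callbacks and the socket is
 * marked in use so that deferred detaches are handled safely.
 */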
__private_extern__ void
sflt_notify(
	struct socket *so,
	sflt_event_t event,
	void *param)
{
	struct socket_filter_entry *filter;
	int filtered = 0;

	for (filter = so->so_filt; filter;
	     filter = filter->sfe_next_onsocket) {
		if (filter->sfe_filter->sf_filter.sf_notify) {
			if (filtered == 0) {
				filtered = 1;
				sflt_use(so);
				socket_unlock(so, 0);
			}
			filter->sfe_filter->sf_filter.sf_notify(
				filter->sfe_cookie, so, event, param);
		}
	}

	if (filtered != 0) {
		socket_lock(so, 0);
		sflt_unuse(so);
	}
}

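/*
 * sflt_data_in
 *
 * Pass incoming data to each attached filter's sf_data_in callback,
 * stopping at the first error.  On return, *filtered (if supplied)
 * reports whether any callback ran, which also means the socket lock
 * was temporarily dropped.
 */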
__private_extern__ int
sflt_data_in(
	struct socket *so,
	const struct sockaddr *from,
	mbuf_t *data,
	mbuf_t *control,
	sflt_data_flag_t flags,
	int *filtered)
{
	struct socket_filter_entry *filter;
	int error = 0;
	int filtered_storage;

	if (filtered == NULL)
		filtered = &filtered_storage;
	*filtered = 0;

	for (filter = so->so_filt; filter && (error == 0);
	     filter = filter->sfe_next_onsocket) {
		if (filter->sfe_filter->sf_filter.sf_data_in) {
			if (*filtered == 0) {
				*filtered = 1;
				sflt_use(so);
				socket_unlock(so, 0);
			}
			error = filter->sfe_filter->sf_filter.sf_data_in(
				filter->sfe_cookie, so, from, data, control, flags);
		}
	}

	if (*filtered != 0) {
		socket_lock(so, 0);
		sflt_unuse(so);
	}

	return error;
}

/* sflt_attach_private
 *
 * Assumptions: If filter is not NULL, socket_filter_lock is held.
 */

__private_extern__ int
sflt_attach_private(
	struct socket *so,
	struct socket_filter *filter,
	sflt_handle handle,
	int sock_locked)
{
	struct socket_filter_entry *entry = NULL;
	int didlock = 0;
	int error = 0;

	if (filter == NULL) {
		/* Find the filter by the handle */
		lck_mtx_lock(sock_filter_lock);
		didlock = 1;

		TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
			if (filter->sf_filter.sf_handle == handle)
				break;
		}
	}

	if (filter == NULL)
		error = ENOENT;

	if (error == 0) {
		/* allocate the socket filter entry */
		MALLOC(entry, struct socket_filter_entry *, sizeof(*entry), M_IFADDR, M_WAITOK);
		if (entry == NULL) {
			error = ENOMEM;
		}
	}

	if (error == 0) {
		/* Initialize the socket filter entry and call the attach function */
		entry->sfe_filter = filter;
		entry->sfe_socket = so;
		entry->sfe_cookie = NULL;
		entry->sfe_flags = 0;
		if (entry->sfe_filter->sf_filter.sf_attach) {
			filter->sf_usecount++;

			if (sock_locked)
				socket_unlock(so, 0);
			error = entry->sfe_filter->sf_filter.sf_attach(&entry->sfe_cookie, so);
			if (sock_locked)
				socket_lock(so, 0);

			filter->sf_usecount--;

			/* If the attach function returns an error, this filter is not attached */
			if (error) {
				FREE(entry, M_IFADDR);
				entry = NULL;
			}
		}
	}

	if (error == 0) {
		/* Put the entry in the socket list */
		entry->sfe_next_onsocket = so->so_filt;
		so->so_filt = entry;

		/* Put the entry in the filter list */
		entry->sfe_next_onfilter = filter->sf_entry_head;
		filter->sf_entry_head = entry;

		/* Increment the parent filter's use count */
		filter->sf_usecount++;
	}

	if (didlock) {
		lck_mtx_unlock(sock_filter_lock);
	}

	return error;
}


/* sflt_detach_private
 *
 * Assumptions: if 'unregistering' is 0, the caller holds the socket lock for
 * the socket the entry is attached to. If 'unregistering' is 1, the entry has
 * already been removed from the filter's list and the socket lock is not held.
 */

static void
sflt_detach_private(
	struct socket_filter_entry *entry,
	int unregistering)
{
	struct socket *so = entry->sfe_socket;
	struct socket_filter_entry **next_ptr;
	int detached = 0;
	int found = 0;

	if (unregistering) {
		socket_lock(entry->sfe_socket, 0);
	}

	/*
	 * Attempt to find the entry on the filter's list and
	 * remove it. This prevents a filter detaching at the
	 * same time from attempting to remove the same entry.
	 */
	lck_mtx_lock(sock_filter_lock);
	if (!unregistering) {
		if ((entry->sfe_flags & SFEF_UNREGISTERING) != 0) {
			/*
			 * Another thread is unregistering the filter, we need to
			 * avoid detaching the filter here so the socket won't go
			 * away.
			 */
			lck_mtx_unlock(sock_filter_lock);
			return;
		}
		for (next_ptr = &entry->sfe_filter->sf_entry_head; *next_ptr;
		     next_ptr = &((*next_ptr)->sfe_next_onfilter)) {
			if (*next_ptr == entry) {
				found = 1;
				*next_ptr = entry->sfe_next_onfilter;
				break;
			}
		}

		if (!found && (entry->sfe_flags & SFEF_DETACHUSEZERO) == 0) {
			lck_mtx_unlock(sock_filter_lock);
			return;
		}
	}
	else {
		/*
		 * Clear the removing flag. We will perform the detach here or
		 * request a delayed detach.
		 */
		entry->sfe_flags &= ~SFEF_UNREGISTERING;
	}

	if (entry->sfe_socket->so_filteruse != 0) {
		entry->sfe_flags |= SFEF_DETACHUSEZERO;
		lck_mtx_unlock(sock_filter_lock);
		return;
	}
	else {
		/*
		 * Check if we are removing the last attached filter and
		 * the parent filter is being unregistered.
		 */
		entry->sfe_filter->sf_usecount--;
		if ((entry->sfe_filter->sf_usecount == 0) &&
		    (entry->sfe_filter->sf_flags & SFF_DETACHING) != 0)
			detached = 1;
	}
	lck_mtx_unlock(sock_filter_lock);

	/* Remove from the socket list */
	for (next_ptr = &entry->sfe_socket->so_filt; *next_ptr;
	     next_ptr = &((*next_ptr)->sfe_next_onsocket)) {
		if (*next_ptr == entry) {
			*next_ptr = entry->sfe_next_onsocket;
			break;
		}
	}

	if (entry->sfe_filter->sf_filter.sf_detach)
		entry->sfe_filter->sf_filter.sf_detach(entry->sfe_cookie, entry->sfe_socket);

	if (detached && entry->sfe_filter->sf_filter.sf_unregistered) {
		entry->sfe_filter->sf_filter.sf_unregistered(entry->sfe_filter->sf_filter.sf_handle);
		FREE(entry->sfe_filter, M_IFADDR);
	}

	if (unregistering)
		socket_unlock(entry->sfe_socket, 1);

	FREE(entry, M_IFADDR);
}

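/*
 * sflt_attach / sflt_detach
 *
 * Exported KPI: attach the filter registered under 'handle' to a specific
 * socket, or detach it again.
 */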
errno_t
sflt_attach(
	socket_t socket,
	sflt_handle handle)
{
	if (socket == NULL || handle == 0)
		return EINVAL;

	return sflt_attach_private(socket, NULL, handle, 0);
}

errno_t
sflt_detach(
	socket_t socket,
	sflt_handle handle)
{
	struct socket_filter_entry *filter;
	errno_t result = 0;

	if (socket == NULL || handle == 0)
		return EINVAL;

	socket_lock(socket, 1);

	for (filter = socket->so_filt; filter;
	     filter = filter->sfe_next_onsocket) {
		if (filter->sfe_filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter != NULL) {
		sflt_detach_private(filter, 0);
	}
	else {
		socket->so_filt = NULL;
		result = ENOENT;
	}

	socket_unlock(socket, 1);

	return result;
}


errno_t
sflt_register(
	const struct sflt_filter *filter,
	int domain,
	int type,
	int protocol)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr = pffindproto(domain, protocol, type);

	if (pr == NULL) return ENOENT;

	if (filter->sf_attach == NULL || filter->sf_detach == NULL) return EINVAL;
	if (filter->sf_handle == 0) return EINVAL;
	if (filter->sf_name == NULL) return EINVAL;

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter*, sizeof(*sock_filt), M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return ENOBUFS;
	}

	bzero(sock_filt, sizeof(*sock_filt));
	sock_filt->sf_filter = *filter;

	lck_mtx_lock(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle == sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt, sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
	}
	lck_mtx_unlock(sock_filter_lock);

	if (match != NULL) {
		FREE(sock_filt, M_IFADDR);
		return EEXIST;
	}

	return error;
}

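/*
 * Illustrative sketch (not part of this file): a kext would typically
 * register a global TCP filter roughly as below.  The handle, name and
 * callback bodies are hypothetical; only sflt_register/sflt_unregister and
 * the sflt_filter fields used here come from this KPI.
 *
 *	static errno_t
 *	example_attach(void **cookie, socket_t so)
 *	{
 *		*cookie = NULL;		// per-socket state could be allocated here
 *		return 0;
 *	}
 *
 *	static void
 *	example_detach(void *cookie, socket_t so)
 *	{
 *		// free any per-socket state stored in the cookie
 *	}
 *
 *	static struct sflt_filter example_filter = {
 *		.sf_handle	= 0x1234ABCD,		// hypothetical, must be non-zero
 *		.sf_flags	= SFLT_GLOBAL,		// attach to every new matching socket
 *		.sf_name	= "com.example.sfltdemo",
 *		.sf_attach	= example_attach,
 *		.sf_detach	= example_detach,
 *	};
 *
 *	// in the kext's start routine:
 *	//	sflt_register(&example_filter, PF_INET, SOCK_STREAM, IPPROTO_TCP);
 *	// in its stop routine:
 *	//	sflt_unregister(0x1234ABCD);
 */
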
errno_t
sflt_unregister(
	sflt_handle handle)
{
	struct socket_filter *filter;
	struct socket_filter_entry *entry_head = NULL;
	struct socket_filter_entry *next_entry = NULL;

	/* Find the entry and remove it from the global and protosw lists */
	lck_mtx_lock(sock_filter_lock);
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head, filter, sf_protosw_next);
		}
		entry_head = filter->sf_entry_head;
		filter->sf_entry_head = NULL;
		filter->sf_flags |= SFF_DETACHING;

		for (next_entry = entry_head; next_entry;
		     next_entry = next_entry->sfe_next_onfilter) {
			socket_lock(next_entry->sfe_socket, 1);
			next_entry->sfe_flags |= SFEF_UNREGISTERING;
			socket_unlock(next_entry->sfe_socket, 0); /* Radar 4201550: prevents the socket from being deleted while being unregistered */
		}
	}

	lck_mtx_unlock(sock_filter_lock);

	if (filter == NULL)
		return ENOENT;

	/*
	 * Detach the filter from every socket it is still attached to;
	 * if it was attached to none, just notify it that it has been unregistered.
	 */
	if (entry_head == 0) {
		if (filter->sf_filter.sf_unregistered)
			filter->sf_filter.sf_unregistered(filter->sf_filter.sf_handle);
	} else {
		while (entry_head) {
			next_entry = entry_head->sfe_next_onfilter;
			sflt_detach_private(entry_head, 1);
			entry_head = next_entry;
		}
	}

	return 0;
}

errno_t
sock_inject_data_in(
	socket_t so,
	const struct sockaddr* from,
	mbuf_t data,
	mbuf_t control,
	sflt_data_flag_t flags)
{
	int error = 0;
	if (so == NULL || data == NULL) return EINVAL;

	if (flags & sock_data_filt_flag_oob) {
		return ENOTSUP;
	}

	socket_lock(so, 1);

	if (from) {
		if (sbappendaddr(&so->so_rcv, (struct sockaddr*)from, data,
			control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf*)data))
			sorwakeup(so);
		goto done;
	}

	if (sbappend(&so->so_rcv, data))
		sorwakeup(so);
done:
	socket_unlock(so, 1);
	return error;
}

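/*
 * Illustrative sketch (not part of this file): a filter that held on to an
 * inbound mbuf chain from its sf_data_in callback could later hand it back
 * to the socket like this; 'so' and 'held_data' are hypothetical.
 *
 *	if (sock_inject_data_in(so, NULL, held_data, NULL, 0) != 0) {
 *		// assumption: on error the chain was not consumed, so free it
 *		mbuf_freem(held_data);
 *	}
 */
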
errno_t
sock_inject_data_out(
	socket_t so,
	const struct sockaddr* to,
	mbuf_t data,
	mbuf_t control,
	sflt_data_flag_t flags)
{
	int sosendflags = 0;
	if (flags & sock_data_filt_flag_oob) sosendflags = MSG_OOB;
	return sosend(so, (const struct sockaddr*)to, NULL,
		data, control, sosendflags);
}

sockopt_dir
sockopt_direction(
	sockopt_t sopt)
{
	return (sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set;
}

int
sockopt_level(
	sockopt_t sopt)
{
	return sopt->sopt_level;
}

int
sockopt_name(
	sockopt_t sopt)
{
	return sopt->sopt_name;
}

size_t
sockopt_valsize(
	sockopt_t sopt)
{
	return sopt->sopt_valsize;
}

errno_t
sockopt_copyin(
	sockopt_t sopt,
	void *data,
	size_t len)
{
	return sooptcopyin(sopt, data, len, len);
}

errno_t
sockopt_copyout(
	sockopt_t sopt,
	void *data,
	size_t len)
{
	return sooptcopyout(sopt, data, len);
}
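
/*
 * Illustrative sketch (not part of this file): a filter's sf_setoption
 * callback could use the accessors above to inspect an option.  The callback
 * name and the option chosen are hypothetical, and returning 0 is assumed to
 * let normal option processing continue.
 *
 *	static errno_t
 *	example_setoption(void *cookie, socket_t so, sockopt_t opt)
 *	{
 *		int val;
 *
 *		if (sockopt_direction(opt) == sockopt_set &&
 *		    sockopt_level(opt) == SOL_SOCKET &&
 *		    sockopt_name(opt) == SO_KEEPALIVE &&
 *		    sockopt_valsize(opt) >= sizeof(val) &&
 *		    sockopt_copyin(opt, &val, sizeof(val)) == 0) {
 *			// inspect or log 'val' here
 *		}
 *		return 0;
 *	}
 */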