[apple/xnu.git] bsd/kern/kpi_socketfilter.c
/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <kern/locks.h>
#include <net/kext_net.h>

static struct socket_filter_list	sock_filter_head;
static lck_mtx_t			*sock_filter_lock = 0;

static void	sflt_detach_private(struct socket_filter_entry *entry, int unregistering);

__private_extern__ void
sflt_init(void)
{
	lck_grp_attr_t	*grp_attrib = 0;
	lck_attr_t	*lck_attrib = 0;
	lck_grp_t	*lck_group = 0;

	TAILQ_INIT(&sock_filter_head);

	/* Allocate the mutex that protects the filter lists */
	grp_attrib = lck_grp_attr_alloc_init();
	lck_grp_attr_setdefault(grp_attrib);
	lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
	lck_grp_attr_free(grp_attrib);
	lck_attrib = lck_attr_alloc_init();
	lck_attr_setdefault(lck_attrib);
	lck_attr_setdebug(lck_attrib);
	sock_filter_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
	lck_grp_free(lck_group);
	lck_attr_free(lck_attrib);
}

__private_extern__ void
sflt_initsock(
	struct socket *so)
{
	struct protosw *proto = so->so_proto;
	struct socket_filter *filter;

	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		lck_mtx_lock(sock_filter_lock);
		TAILQ_FOREACH(filter, &proto->pr_filter_head, sf_protosw_next) {
			sflt_attach_private(so, filter, 0, 0);
		}
		lck_mtx_unlock(sock_filter_lock);
	}
}

__private_extern__ void
sflt_termsock(
	struct socket *so)
{
	struct socket_filter_entry *filter;
	struct socket_filter_entry *filter_next;

	for (filter = so->so_filt; filter; filter = filter_next) {
		filter_next = filter->sfe_next_onsocket;
		sflt_detach_private(filter, 0);
	}
	so->so_filt = NULL;
}

__private_extern__ void
sflt_use(
	struct socket *so)
{
	so->so_filteruse++;
}

__private_extern__ void
sflt_unuse(
	struct socket *so)
{
	so->so_filteruse--;
	if (so->so_filteruse == 0) {
		struct socket_filter_entry *filter;
		struct socket_filter_entry *next_filter;
		// Detach any filters that deferred their detach until the use count dropped to zero
		for (filter = so->so_filt; filter; filter = next_filter) {
			next_filter = filter->sfe_next_onsocket;

			if (filter->sfe_flags & SFEF_DETACHUSEZERO) {
				sflt_detach_private(filter, 0);
			}
		}
	}
}

__private_extern__ void
sflt_notify(
	struct socket *so,
	sflt_event_t event,
	void *param)
{
	struct socket_filter_entry *filter;
	int filtered = 0;

	for (filter = so->so_filt; filter;
	    filter = filter->sfe_next_onsocket) {
		if (filter->sfe_filter->sf_filter.sf_notify) {
			if (filtered == 0) {
				filtered = 1;
				sflt_use(so);
				socket_unlock(so, 0);
			}
			filter->sfe_filter->sf_filter.sf_notify(
			    filter->sfe_cookie, so, event, param);
		}
	}

	if (filtered != 0) {
		socket_lock(so, 0);
		sflt_unuse(so);
	}
}

__private_extern__ int
sflt_data_in(
	struct socket *so,
	const struct sockaddr *from,
	mbuf_t *data,
	mbuf_t *control,
	sflt_data_flag_t flags,
	int *filtered)
{
	struct socket_filter_entry *filter;
	int error = 0;
	int filtered_storage;

	if (filtered == NULL)
		filtered = &filtered_storage;
	*filtered = 0;

	for (filter = so->so_filt; filter && (error == 0);
	    filter = filter->sfe_next_onsocket) {
		if (filter->sfe_filter->sf_filter.sf_data_in) {
			if (*filtered == 0) {
				*filtered = 1;
				sflt_use(so);
				socket_unlock(so, 0);
			}
			error = filter->sfe_filter->sf_filter.sf_data_in(
			    filter->sfe_cookie, so, from, data, control, flags);
		}
	}

	if (*filtered != 0) {
		socket_lock(so, 0);
		sflt_unuse(so);
	}

	return error;
}

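#if 0	/* Illustrative sketch, not compiled */
/*
 * A minimal sketch of an sf_data_in callback of the kind the loop in
 * sflt_data_in() above dispatches to.  The function name is hypothetical;
 * the signature follows sf_data_in_func in <sys/kpi_socketfilter.h>, and
 * mbuf_len()/mbuf_next() are the chain accessors from <sys/kpi_mbuf.h>.
 * The callback just measures the inbound chain and passes it through.
 */
static errno_t
example_data_in(void *cookie, socket_t so, const struct sockaddr *from,
    mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags)
{
	size_t	bytes = 0;
	mbuf_t	m;

	/* Total the bytes in the inbound mbuf chain */
	for (m = *data; m != NULL; m = mbuf_next(m))
		bytes += mbuf_len(m);

	printf("example filter: %lu inbound bytes on socket %p\n",
	    (unsigned long)bytes, so);

	/* Returning 0 lets the caller continue normal processing */
	return 0;
}
#endif
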
/* sflt_attach_private
 *
 * Assumptions: If filter is not NULL, socket_filter_lock is held.
 */

__private_extern__ int
sflt_attach_private(
	struct socket *so,
	struct socket_filter *filter,
	sflt_handle handle,
	int sock_locked)
{
	struct socket_filter_entry *entry = NULL;
	int didlock = 0;
	int error = 0;

	if (filter == NULL) {
		/* Find the filter by the handle */
		lck_mtx_lock(sock_filter_lock);
		didlock = 1;

		TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
			if (filter->sf_filter.sf_handle == handle)
				break;
		}
	}

	if (filter == NULL)
		error = ENOENT;

	if (error == 0) {
		/* Allocate the socket filter entry */
		MALLOC(entry, struct socket_filter_entry *, sizeof(*entry), M_IFADDR, M_WAITOK);
		if (entry == NULL) {
			error = ENOMEM;
		}
	}

	if (error == 0) {
		/* Initialize the socket filter entry and call the attach function */
		entry->sfe_filter = filter;
		entry->sfe_socket = so;
		entry->sfe_cookie = NULL;
		entry->sfe_flags = 0;
		if (entry->sfe_filter->sf_filter.sf_attach) {
			filter->sf_usecount++;

			if (sock_locked)
				socket_unlock(so, 0);
			error = entry->sfe_filter->sf_filter.sf_attach(&entry->sfe_cookie, so);
			if (sock_locked)
				socket_lock(so, 0);

			filter->sf_usecount--;

			/* If the attach function returns an error, this filter is not attached */
			if (error) {
				FREE(entry, M_IFADDR);
				entry = NULL;
			}
		}
	}

	if (error == 0) {
		/* Put the entry in the socket list */
		entry->sfe_next_onsocket = so->so_filt;
		so->so_filt = entry;

		/* Put the entry in the filter list */
		entry->sfe_next_onfilter = filter->sf_entry_head;
		filter->sf_entry_head = entry;

		/* Increment the parent filter's usecount */
		filter->sf_usecount++;
	}

	if (didlock) {
		lck_mtx_unlock(sock_filter_lock);
	}

	return error;
}

/* sflt_detach_private
 *
 * Assumptions: If unregistering is 0, the caller holds the socket lock for
 * the socket the entry is attached to. If unregistering is 1, the entry has
 * already been removed from the filter's list and the socket lock is not
 * held.
 */

static void
sflt_detach_private(
	struct socket_filter_entry *entry,
	int unregistering)
{
	struct socket *so = entry->sfe_socket;
	struct socket_filter_entry **next_ptr;
	int detached = 0;
	int found = 0;

	if (unregistering) {
		socket_lock(entry->sfe_socket, 0);
	}

	/*
	 * Attempt to find the entry on the filter's list and
	 * remove it. This prevents a filter detaching at the
	 * same time from attempting to remove the same entry.
	 */
	lck_mtx_lock(sock_filter_lock);
	if (!unregistering) {
		if ((entry->sfe_flags & SFEF_UNREGISTERING) != 0) {
			/*
			 * Another thread is unregistering the filter; avoid
			 * detaching the filter here so the socket won't go
			 * away.
			 */
			lck_mtx_unlock(sock_filter_lock);
			return;
		}
		for (next_ptr = &entry->sfe_filter->sf_entry_head; *next_ptr;
		    next_ptr = &((*next_ptr)->sfe_next_onfilter)) {
			if (*next_ptr == entry) {
				found = 1;
				*next_ptr = entry->sfe_next_onfilter;
				break;
			}
		}

		if (!found && (entry->sfe_flags & SFEF_DETACHUSEZERO) == 0) {
			lck_mtx_unlock(sock_filter_lock);
			return;
		}
	}
	else {
		/*
		 * Clear the removing flag. We will perform the detach here or
		 * request a delayed detach.
		 */
		entry->sfe_flags &= ~SFEF_UNREGISTERING;
	}

	if (entry->sfe_socket->so_filteruse != 0) {
		entry->sfe_flags |= SFEF_DETACHUSEZERO;
		lck_mtx_unlock(sock_filter_lock);
		return;
	}
	else {
		/*
		 * Check if we are removing the last attached filter and
		 * the parent filter is being unregistered.
		 */
		entry->sfe_filter->sf_usecount--;
		if ((entry->sfe_filter->sf_usecount == 0) &&
		    (entry->sfe_filter->sf_flags & SFF_DETACHING) != 0)
			detached = 1;
	}
	lck_mtx_unlock(sock_filter_lock);

	/* Remove from the socket list */
	for (next_ptr = &entry->sfe_socket->so_filt; *next_ptr;
	    next_ptr = &((*next_ptr)->sfe_next_onsocket)) {
		if (*next_ptr == entry) {
			*next_ptr = entry->sfe_next_onsocket;
			break;
		}
	}

	if (entry->sfe_filter->sf_filter.sf_detach)
		entry->sfe_filter->sf_filter.sf_detach(entry->sfe_cookie, entry->sfe_socket);

	if (detached && entry->sfe_filter->sf_filter.sf_unregistered) {
		entry->sfe_filter->sf_filter.sf_unregistered(entry->sfe_filter->sf_filter.sf_handle);
		FREE(entry->sfe_filter, M_IFADDR);
	}

	if (unregistering)
		socket_unlock(entry->sfe_socket, 1);

	FREE(entry, M_IFADDR);
}

errno_t
sflt_attach(
	socket_t socket,
	sflt_handle handle)
{
	if (socket == NULL || handle == 0)
		return EINVAL;

	return sflt_attach_private(socket, NULL, handle, 0);
}

errno_t
sflt_detach(
	socket_t socket,
	sflt_handle handle)
{
	struct socket_filter_entry *filter;
	errno_t result = 0;

	if (socket == NULL || handle == 0)
		return EINVAL;

	socket_lock(socket, 1);

	for (filter = socket->so_filt; filter;
	    filter = filter->sfe_next_onsocket) {
		if (filter->sfe_filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter != NULL) {
		sflt_detach_private(filter, 0);
	}
	else {
		socket->so_filt = NULL;
		result = ENOENT;
	}

	socket_unlock(socket, 1);

	return result;
}


errno_t
sflt_register(
	const struct sflt_filter *filter,
	int domain,
	int type,
	int protocol)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr = pffindproto(domain, protocol, type);

	if (pr == NULL) return ENOENT;

	if (filter->sf_attach == NULL || filter->sf_detach == NULL) return EINVAL;
	if (filter->sf_handle == 0) return EINVAL;
	if (filter->sf_name == NULL) return EINVAL;

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter*, sizeof(*sock_filt), M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return ENOBUFS;
	}

	bzero(sock_filt, sizeof(*sock_filt));
	sock_filt->sf_filter = *filter;

	lck_mtx_lock(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle == sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt, sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
	}
	lck_mtx_unlock(sock_filter_lock);

	if (match != NULL) {
		FREE(sock_filt, M_IFADDR);
		return EEXIST;
	}

	return error;
}

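#if 0	/* Illustrative sketch, not compiled */
/*
 * A minimal sketch of how a kext might use sflt_register() above.  The
 * handle value, callback names and filter name string are hypothetical.
 * sflt_register() insists on sf_handle, sf_name, sf_attach and sf_detach
 * being set, and SFLT_GLOBAL places the filter on the protosw list so
 * sflt_initsock() attaches it to each new matching socket.  AF_INET,
 * SOCK_STREAM and IPPROTO_TCP come from <sys/socket.h> and <netinet/in.h>.
 */
#define EXAMPLE_SFLT_HANDLE	0x45584d50	/* hypothetical; must be unique */

static errno_t
example_attach(void **cookie, socket_t so)
{
	*cookie = NULL;		/* no per-socket state in this sketch */
	return 0;		/* a non-zero return rejects the attach */
}

static void
example_detach(void *cookie, socket_t so)
{
	/* release any per-socket state allocated in example_attach() */
}

static struct sflt_filter example_filter = {
	.sf_handle	= EXAMPLE_SFLT_HANDLE,
	.sf_flags	= SFLT_GLOBAL,
	.sf_name	= "com.example.sfilter",
	.sf_attach	= example_attach,
	.sf_detach	= example_detach,
};

static errno_t
example_register(void)
{
	/* Returns EEXIST if the handle is already registered */
	return sflt_register(&example_filter, AF_INET, SOCK_STREAM, IPPROTO_TCP);
}
#endif
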
errno_t
sflt_unregister(
	sflt_handle handle)
{
	struct socket_filter *filter;
	struct socket_filter_entry *entry_head = NULL;
	struct socket_filter_entry *next_entry = NULL;

	/* Find the entry and remove it from the global and protosw lists */
	lck_mtx_lock(sock_filter_lock);
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head, filter, sf_protosw_next);
		}
		entry_head = filter->sf_entry_head;
		filter->sf_entry_head = NULL;
		filter->sf_flags |= SFF_DETACHING;

		for (next_entry = entry_head; next_entry;
		    next_entry = next_entry->sfe_next_onfilter) {
			socket_lock(next_entry->sfe_socket, 1);
			next_entry->sfe_flags |= SFEF_UNREGISTERING;
			socket_unlock(next_entry->sfe_socket, 0); /* Radar 4201550: prevents the socket from being deleted while being unregistered */
		}
	}

	lck_mtx_unlock(sock_filter_lock);

	if (filter == NULL)
		return ENOENT;

	/* We need to detach the filter from any sockets it's attached to */
	if (entry_head == 0) {
		if (filter->sf_filter.sf_unregistered)
			filter->sf_filter.sf_unregistered(filter->sf_filter.sf_handle);
	} else {
		while (entry_head) {
			next_entry = entry_head->sfe_next_onfilter;
			sflt_detach_private(entry_head, 1);
			entry_head = next_entry;
		}
	}

	return 0;
}

errno_t
sock_inject_data_in(
	socket_t so,
	const struct sockaddr* from,
	mbuf_t data,
	mbuf_t control,
	sflt_data_flag_t flags)
{
	int error = 0;
	if (so == NULL || data == NULL) return EINVAL;

	if (flags & sock_data_filt_flag_oob) {
		return ENOTSUP;
	}

	socket_lock(so, 1);

	if (from) {
		if (sbappendaddr(&so->so_rcv, (struct sockaddr*)from, data,
		    control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf*)data))
			sorwakeup(so);
		goto done;
	}

	if (sbappend(&so->so_rcv, data))
		sorwakeup(so);
done:
	socket_unlock(so, 1);
	return error;
}

errno_t
sock_inject_data_out(
	socket_t so,
	const struct sockaddr* to,
	mbuf_t data,
	mbuf_t control,
	sflt_data_flag_t flags)
{
	int sosendflags = 0;
	if (flags & sock_data_filt_flag_oob) sosendflags = MSG_OOB;
	return sosend(so, (const struct sockaddr*)to, NULL,
	    data, control, sosendflags);
}

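#if 0	/* Illustrative sketch, not compiled */
/*
 * A minimal sketch of re-injecting held data with sock_inject_data_in()
 * above.  The function name is hypothetical.  Note that the routine takes
 * the socket lock itself and rejects sock_data_filt_flag_oob, and that on
 * the error returns above the mbuf has not been consumed, so the caller is
 * still responsible for it; mbuf_freem() is the release from
 * <sys/kpi_mbuf.h>.
 */
static errno_t
example_reinject(socket_t so, mbuf_t held_data)
{
	errno_t error;

	/* Deliver to the receive buffer as ordinary in-band data */
	error = sock_inject_data_in(so, NULL, held_data, NULL, 0);
	if (error != 0)
		mbuf_freem(held_data);

	return error;
}
#endif
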
sockopt_dir
sockopt_direction(
	sockopt_t sopt)
{
	return (sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set;
}

int
sockopt_level(
	sockopt_t sopt)
{
	return sopt->sopt_level;
}

int
sockopt_name(
	sockopt_t sopt)
{
	return sopt->sopt_name;
}

size_t
sockopt_valsize(
	sockopt_t sopt)
{
	return sopt->sopt_valsize;
}

errno_t
sockopt_copyin(
	sockopt_t sopt,
	void *data,
	size_t len)
{
	return sooptcopyin(sopt, data, len, len);
}

errno_t
sockopt_copyout(
	sockopt_t sopt,
	void *data,
	size_t len)
{
	return sooptcopyout(sopt, data, len);
}
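
#if 0	/* Illustrative sketch, not compiled */
/*
 * A minimal sketch of an sf_setoption handler built on the sockopt
 * accessors above.  The function name is hypothetical.  The accessors
 * simply expose sopt_dir, sopt_level, sopt_name and sopt_valsize, and
 * sockopt_copyin()/sockopt_copyout() wrap sooptcopyin()/sooptcopyout().
 * This handler only peeks at SO_LINGER requests and then returns 0 so the
 * option is still processed normally.
 */
static errno_t
example_setoption(void *cookie, socket_t so, sockopt_t opt)
{
	struct linger	l;
	errno_t		error;

	if (sockopt_direction(opt) != sockopt_set ||
	    sockopt_level(opt) != SOL_SOCKET ||
	    sockopt_name(opt) != SO_LINGER)
		return 0;

	if (sockopt_valsize(opt) < sizeof(l))
		return 0;

	/* Copy the caller's struct linger without consuming the request */
	error = sockopt_copyin(opt, &l, sizeof(l));
	if (error == 0)
		printf("example filter: SO_LINGER onoff=%d linger=%d\n",
		    l.l_onoff, l.l_linger);

	return 0;	/* let the option continue through normal processing */
}
#endif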