/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/kpi_socketfilter.h>

#include <sys/socket.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <kern/locks.h>
#include <net/kext_net.h>

static struct socket_filter_list sock_filter_head;
static lck_mtx_t *sock_filter_lock = 0;

static void sflt_detach_private(struct socket_filter_entry *entry, int unregistering);

__private_extern__ void
sflt_init(void)
{
	lck_grp_attr_t *grp_attrib = 0;
	lck_attr_t *lck_attrib = 0;
	lck_grp_t *lck_group = 0;

	TAILQ_INIT(&sock_filter_head);

	/* Allocate a mutex to protect the filter lists */
	grp_attrib = lck_grp_attr_alloc_init();
	lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
	lck_grp_attr_free(grp_attrib);
	lck_attrib = lck_attr_alloc_init();
	sock_filter_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
	lck_grp_free(lck_group);
	lck_attr_free(lck_attrib);
}

__private_extern__ void
sflt_initsock(
	struct socket *so)
{
	struct protosw *proto = so->so_proto;
	struct socket_filter *filter;

	if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
		lck_mtx_lock(sock_filter_lock);
		TAILQ_FOREACH(filter, &proto->pr_filter_head, sf_protosw_next) {
			sflt_attach_private(so, filter, 0, 0);
		}
		lck_mtx_unlock(sock_filter_lock);
	}
}

__private_extern__ void
sflt_termsock(
	struct socket *so)
{
	struct socket_filter_entry *filter;
	struct socket_filter_entry *filter_next;

	for (filter = so->so_filt; filter; filter = filter_next) {
		filter_next = filter->sfe_next_onsocket;
		sflt_detach_private(filter, 0);
	}
	so->so_filt = NULL;
}

/*
 * sflt_use/sflt_unuse track how many threads are currently calling out to
 * the filters attached to a socket. Entries flagged SFEF_DETACHUSEZERO are
 * detached once that use count drops back to zero.
 */
__private_extern__ void
sflt_use(
	struct socket *so)
{
	so->so_filteruse++;
}

__private_extern__ void
sflt_unuse(
	struct socket *so)
{
	so->so_filteruse--;
	if (so->so_filteruse == 0) {
		struct socket_filter_entry *filter;
		struct socket_filter_entry *next_filter;
		// Search for filters that requested a delayed detach
		for (filter = so->so_filt; filter; filter = next_filter) {
			next_filter = filter->sfe_next_onsocket;

			if (filter->sfe_flags & SFEF_DETACHUSEZERO) {
				sflt_detach_private(filter, 0);
			}
		}
	}
}

__private_extern__ void
sflt_notify(
	struct socket *so,
	sflt_event_t event,
	void *param)
{
	struct socket_filter_entry *filter;
	int filtered = 0;

	for (filter = so->so_filt; filter;
	     filter = filter->sfe_next_onsocket) {
		if (filter->sfe_filter->sf_filter.sf_notify) {
			if (filtered == 0) {
				filtered = 1;
				sflt_use(so);
				socket_unlock(so, 0);
			}
			filter->sfe_filter->sf_filter.sf_notify(
				filter->sfe_cookie, so, event, param);
		}
	}

	if (filtered != 0) {
		socket_lock(so, 0);
		sflt_unuse(so);
	}
}
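
/*
 * Illustrative sketch (not part of the original file): a filter's sf_notify
 * callback as it would be driven by sflt_notify() above.  The callback
 * signature matches the call site above; the event constants are assumed to
 * be the sock_evt_* values from <sys/kpi_socketfilter.h>, and
 * example_notify_state is a hypothetical per-socket cookie type.
 */
#if 0	/* example only */
struct example_notify_state {
	int	connected;
};

static void
example_sf_notify(void *cookie, socket_t so, sflt_event_t event, void *param)
{
#pragma unused(so, param)
	struct example_notify_state *state = cookie;

	switch (event) {
	case sock_evt_connected:
		state->connected = 1;
		break;
	case sock_evt_disconnected:
		state->connected = 0;
		break;
	default:
		break;
	}
}
#endif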

__private_extern__ int
sflt_data_in(
	struct socket *so,
	const struct sockaddr *from,
	mbuf_t *data,
	mbuf_t *control,
	sflt_data_flag_t flags,
	int *filtered)
{
	struct socket_filter_entry *filter;
	int error = 0;
	int filtered_storage;

	if (filtered == NULL)
		filtered = &filtered_storage;
	*filtered = 0;

	for (filter = so->so_filt; filter && (error == 0);
	     filter = filter->sfe_next_onsocket) {
		if (filter->sfe_filter->sf_filter.sf_data_in) {
			if (*filtered == 0) {
				*filtered = 1;
				sflt_use(so);
				socket_unlock(so, 0);
			}
			error = filter->sfe_filter->sf_filter.sf_data_in(
				filter->sfe_cookie, so, from, data, control, flags);
		}
	}

	if (*filtered != 0) {
		socket_lock(so, 0);
		sflt_unuse(so);
	}

	return error;
}

/* sflt_attach_private
 *
 * Assumptions: If filter is not NULL, socket_filter_lock is held.
 */

__private_extern__ int
sflt_attach_private(
	struct socket *so,
	struct socket_filter *filter,
	sflt_handle handle,
	int sock_locked)
{
	struct socket_filter_entry *entry = NULL;
	int didlock = 0;
	int error = 0;

	if (filter == NULL) {
		/* Find the filter by the handle */
		lck_mtx_lock(sock_filter_lock);
		didlock = 1;

		TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
			if (filter->sf_filter.sf_handle == handle)
				break;
		}
	}

	if (filter == NULL)
		error = ENOENT;

	if (error == 0) {
		/* allocate the socket filter entry */
		MALLOC(entry, struct socket_filter_entry *, sizeof(*entry), M_IFADDR, M_WAITOK);
		if (entry == NULL) {
			error = ENOMEM;
		}
	}

	if (error == 0) {
		/* Initialize the socket filter entry and call the attach function */
		entry->sfe_filter = filter;
		entry->sfe_socket = so;
		entry->sfe_cookie = NULL;
		entry->sfe_flags = 0;
		if (entry->sfe_filter->sf_filter.sf_attach) {
			filter->sf_usecount++;

			if (sock_locked)
				socket_unlock(so, 0);
			error = entry->sfe_filter->sf_filter.sf_attach(&entry->sfe_cookie, so);
			if (sock_locked)
				socket_lock(so, 0);

			filter->sf_usecount--;

			/* If the attach function returns an error, this filter is not attached */
			if (error) {
				FREE(entry, M_IFADDR);
				entry = NULL;
			}
		}
	}

	if (error == 0) {
		/* Put the entry in the socket list */
		entry->sfe_next_onsocket = so->so_filt;
		so->so_filt = entry;

		/* Put the entry in the filter list */
		entry->sfe_next_onfilter = filter->sf_entry_head;
		filter->sf_entry_head = entry;

		/* Increment the parent filter's use count */
		filter->sf_usecount++;
	}

	if (didlock) {
		lck_mtx_unlock(sock_filter_lock);
	}

	return error;
}


/* sflt_detach_private
 *
 * Assumptions: if 'unregistering' is 0, the caller holds the socket lock for
 * the socket the entry is attached to. If 'unregistering' is 1, the entry is
 * not on the filter's list and the socket lock is not held.
 */

static void
sflt_detach_private(
	struct socket_filter_entry *entry,
	int unregistering)
{
	struct socket *so = entry->sfe_socket;
	struct socket_filter_entry **next_ptr;
	int detached = 0;
	int found = 0;

	if (unregistering) {
		socket_lock(entry->sfe_socket, 0);
	}

	/*
	 * Attempt to find the entry on the filter's list and
	 * remove it. Doing this first prevents a concurrent
	 * detach from trying to remove the same entry.
	 */
	lck_mtx_lock(sock_filter_lock);
	if (!unregistering) {
		if ((entry->sfe_flags & SFEF_UNREGISTERING) != 0) {
			/*
			 * Another thread is unregistering the filter; avoid
			 * detaching the filter here so the socket won't go
			 * away.
			 */
			lck_mtx_unlock(sock_filter_lock);
			return;
		}
		for (next_ptr = &entry->sfe_filter->sf_entry_head; *next_ptr;
		     next_ptr = &((*next_ptr)->sfe_next_onfilter)) {
			if (*next_ptr == entry) {
				found = 1;
				*next_ptr = entry->sfe_next_onfilter;
				break;
			}
		}

		if (!found && (entry->sfe_flags & SFEF_DETACHUSEZERO) == 0) {
			lck_mtx_unlock(sock_filter_lock);
			return;
		}
	}
	else {
		/*
		 * Clear the removing flag. We will perform the detach here or
		 * request a delayed detach.
		 */
		entry->sfe_flags &= ~SFEF_UNREGISTERING;
	}

	if (entry->sfe_socket->so_filteruse != 0) {
		entry->sfe_flags |= SFEF_DETACHUSEZERO;
		lck_mtx_unlock(sock_filter_lock);
		return;
	}
	else {
		/*
		 * Check if we are removing the last attached filter and
		 * the parent filter is being unregistered.
		 */
		entry->sfe_filter->sf_usecount--;
		if ((entry->sfe_filter->sf_usecount == 0) &&
		    (entry->sfe_filter->sf_flags & SFF_DETACHING) != 0)
			detached = 1;
	}
	lck_mtx_unlock(sock_filter_lock);

	/* Remove from the socket list */
	for (next_ptr = &entry->sfe_socket->so_filt; *next_ptr;
	     next_ptr = &((*next_ptr)->sfe_next_onsocket)) {
		if (*next_ptr == entry) {
			*next_ptr = entry->sfe_next_onsocket;
			break;
		}
	}

	if (entry->sfe_filter->sf_filter.sf_detach)
		entry->sfe_filter->sf_filter.sf_detach(entry->sfe_cookie, entry->sfe_socket);

	if (detached && entry->sfe_filter->sf_filter.sf_unregistered) {
		entry->sfe_filter->sf_filter.sf_unregistered(entry->sfe_filter->sf_filter.sf_handle);
		FREE(entry->sfe_filter, M_IFADDR);
	}

	if (unregistering)
		socket_unlock(entry->sfe_socket, 1);

	FREE(entry, M_IFADDR);
}

errno_t
sflt_attach(
	socket_t socket,
	sflt_handle handle)
{
	if (socket == NULL || handle == 0)
		return EINVAL;

	return sflt_attach_private(socket, NULL, handle, 0);
}

errno_t
sflt_detach(
	socket_t socket,
	sflt_handle handle)
{
	struct socket_filter_entry *filter;
	errno_t result = 0;

	if (socket == NULL || handle == 0)
		return EINVAL;

	socket_lock(socket, 1);

	for (filter = socket->so_filt; filter;
	     filter = filter->sfe_next_onsocket) {
		if (filter->sfe_filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter != NULL) {
		sflt_detach_private(filter, 0);
	}
	else {
		socket->so_filt = NULL;
		result = ENOENT;
	}

	socket_unlock(socket, 1);

	return result;
}
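
/*
 * Illustrative sketch (not part of the original file): attaching and later
 * detaching a previously registered filter on one specific socket through
 * the sflt_attach()/sflt_detach() KPI above.  EXAMPLE_FILTER_HANDLE is a
 * hypothetical sf_handle value that is assumed to have been registered with
 * sflt_register() beforehand.
 */
#if 0	/* example only */
#define EXAMPLE_FILTER_HANDLE	0x454d4678	/* hypothetical handle */

static errno_t
example_filter_this_socket(socket_t so)
{
	errno_t error;

	/* Attach the registered filter, identified by its handle, to this socket */
	error = sflt_attach(so, EXAMPLE_FILTER_HANDLE);
	if (error != 0)
		return error;

	/* ... the filter's callbacks now see this socket's traffic ... */

	/* Remove the filter from this socket again */
	return sflt_detach(so, EXAMPLE_FILTER_HANDLE);
}
#endif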


errno_t
sflt_register(
	const struct sflt_filter *filter,
	int domain,
	int type,
	int protocol)
{
	struct socket_filter *sock_filt = NULL;
	struct socket_filter *match = NULL;
	int error = 0;
	struct protosw *pr = pffindproto(domain, protocol, type);

	if (pr == NULL) return ENOENT;

	if (filter->sf_attach == NULL || filter->sf_detach == NULL) return EINVAL;
	if (filter->sf_handle == 0) return EINVAL;
	if (filter->sf_name == NULL) return EINVAL;

	/* Allocate the socket filter */
	MALLOC(sock_filt, struct socket_filter*, sizeof(*sock_filt), M_IFADDR, M_WAITOK);
	if (sock_filt == NULL) {
		return ENOBUFS;
	}

	bzero(sock_filt, sizeof(*sock_filt));
	sock_filt->sf_filter = *filter;

	lck_mtx_lock(sock_filter_lock);
	/* Look for an existing entry */
	TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
		if (match->sf_filter.sf_handle == sock_filt->sf_filter.sf_handle) {
			break;
		}
	}

	/* Add the entry only if there was no existing entry */
	if (match == NULL) {
		TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next);
		if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt, sf_protosw_next);
			sock_filt->sf_proto = pr;
		}
	}
	lck_mtx_unlock(sock_filter_lock);

	if (match != NULL) {
		FREE(sock_filt, M_IFADDR);
		return EEXIST;
	}

	return error;
}

errno_t
sflt_unregister(
	sflt_handle handle)
{
	struct socket_filter *filter;
	struct socket_filter_entry *entry_head = NULL;
	struct socket_filter_entry *next_entry = NULL;

	/* Find the entry and remove it from the global and protosw lists */
	lck_mtx_lock(sock_filter_lock);
	TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
		if (filter->sf_filter.sf_handle == handle)
			break;
	}

	if (filter) {
		TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next);
		if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) {
			TAILQ_REMOVE(&filter->sf_proto->pr_filter_head, filter, sf_protosw_next);
		}
		entry_head = filter->sf_entry_head;
		filter->sf_entry_head = NULL;
		filter->sf_flags |= SFF_DETACHING;

		for (next_entry = entry_head; next_entry;
		     next_entry = next_entry->sfe_next_onfilter) {
			socket_lock(next_entry->sfe_socket, 1);
			next_entry->sfe_flags |= SFEF_UNREGISTERING;
			socket_unlock(next_entry->sfe_socket, 0); /* Radar 4201550: prevents the socket from being deleted while being unregistered */
		}
	}

	lck_mtx_unlock(sock_filter_lock);

	if (filter == NULL)
		return ENOENT;

	/* We need to detach the filter from any sockets it's attached to */
	if (entry_head == 0) {
		if (filter->sf_filter.sf_unregistered)
			filter->sf_filter.sf_unregistered(filter->sf_filter.sf_handle);
	} else {
		while (entry_head) {
			next_entry = entry_head->sfe_next_onfilter;
			sflt_detach_private(entry_head, 1);
			entry_head = next_entry;
		}
	}

	return 0;
}
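
/*
 * Illustrative sketch (not part of the original file): the typical
 * register/unregister lifecycle for a global TCP/IPv4 filter driving the
 * sflt_register()/sflt_unregister() KPI above.  The sflt_filter field names
 * and the sf_attach/sf_detach signatures are assumed to follow
 * <sys/kpi_socketfilter.h>; the handle value, the name string, and the
 * example_* callbacks are hypothetical (IPPROTO_TCP comes from
 * <netinet/in.h>).
 */
#if 0	/* example only */
#define EXAMPLE_FILTER_HANDLE	0x454d4678	/* hypothetical, must be unique */

static struct sflt_filter example_filter;

static errno_t
example_attach(void **cookie, socket_t so)
{
#pragma unused(so)
	*cookie = NULL;			/* no per-socket state in this sketch */
	return 0;
}

static void
example_detach(void *cookie, socket_t so)
{
#pragma unused(cookie, so)
}

static errno_t
example_start(void)
{
	bzero(&example_filter, sizeof(example_filter));
	example_filter.sf_handle = EXAMPLE_FILTER_HANDLE;
	example_filter.sf_flags = SFLT_GLOBAL;	/* attach to every new socket */
	example_filter.sf_name = (char *)"com.example.filter";
	example_filter.sf_attach = example_attach;
	example_filter.sf_detach = example_detach;
	/* other callbacks (sf_notify, sf_data_in, ...) are left NULL */

	return sflt_register(&example_filter, PF_INET, SOCK_STREAM, IPPROTO_TCP);
}

static errno_t
example_stop(void)
{
	return sflt_unregister(EXAMPLE_FILTER_HANDLE);
}
#endif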

errno_t
sock_inject_data_in(
	socket_t so,
	const struct sockaddr* from,
	mbuf_t data,
	mbuf_t control,
	sflt_data_flag_t flags)
{
	int error = 0;
	if (so == NULL || data == NULL) return EINVAL;

	if (flags & sock_data_filt_flag_oob) {
		return ENOTSUP;
	}

	socket_lock(so, 1);

	if (from) {
		if (sbappendaddr(&so->so_rcv, (struct sockaddr*)from, data,
		    control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (control) {
		if (sbappendcontrol(&so->so_rcv, data, control, NULL))
			sorwakeup(so);
		goto done;
	}

	if (flags & sock_data_filt_flag_record) {
		if (control || from) {
			error = EINVAL;
			goto done;
		}
		if (sbappendrecord(&so->so_rcv, (struct mbuf*)data))
			sorwakeup(so);
		goto done;
	}

	if (sbappend(&so->so_rcv, data))
		sorwakeup(so);
done:
	socket_unlock(so, 1);
	return error;
}
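
/*
 * Illustrative sketch (not part of the original file): how a filter's
 * sf_data_in callback can take ownership of inbound data and hand it back
 * later through sock_inject_data_in() above.  The callback signature matches
 * the sf_data_in call in sflt_data_in() above; the EJUSTRETURN convention is
 * assumed to follow the sf_data_in documentation in <sys/kpi_socketfilter.h>,
 * and example_queue_for_later() stands in for whatever deferred-processing
 * mechanism a real filter would use.
 */
#if 0	/* example only */
/* Hypothetical helper: takes ownership of the mbufs, returns non-zero on success */
static int example_queue_for_later(socket_t so, const struct sockaddr *from,
    mbuf_t data, mbuf_t control);

static errno_t
example_sf_data_in(void *cookie, socket_t so, const struct sockaddr *from,
    mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags)
{
#pragma unused(cookie, flags)
	if (example_queue_for_later(so, from, *data, *control)) {
		/*
		 * The filter now owns the mbufs; EJUSTRETURN tells the stack
		 * to stop processing them without freeing them.  A later pass
		 * would call sock_inject_data_in(so, from, data, control, 0)
		 * to append them to the socket's receive buffer.
		 */
		return EJUSTRETURN;
	}

	/* Let the data through unmodified */
	return 0;
}
#endif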

errno_t
sock_inject_data_out(
	socket_t so,
	const struct sockaddr* to,
	mbuf_t data,
	mbuf_t control,
	sflt_data_flag_t flags)
{
	int sosendflags = 0;
	if (flags & sock_data_filt_flag_oob) sosendflags = MSG_OOB;
	return sosend(so, (const struct sockaddr*)to, NULL,
	    data, control, sosendflags);
}

sockopt_dir
sockopt_direction(
	sockopt_t sopt)
{
	return (sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set;
}

int
sockopt_level(
	sockopt_t sopt)
{
	return sopt->sopt_level;
}

int
sockopt_name(
	sockopt_t sopt)
{
	return sopt->sopt_name;
}

size_t
sockopt_valsize(
	sockopt_t sopt)
{
	return sopt->sopt_valsize;
}

errno_t
sockopt_copyin(
	sockopt_t sopt,
	void *data,
	size_t len)
{
	return sooptcopyin(sopt, data, len, len);
}

errno_t
sockopt_copyout(
	sockopt_t sopt,
	void *data,
	size_t len)
{
	return sooptcopyout(sopt, data, len);
}
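
/*
 * Illustrative sketch (not part of the original file): a filter's
 * sf_getoption callback using the sockopt accessors defined above.  The
 * callback signature and the 0/EJUSTRETURN return convention are assumed to
 * follow the sf_getoption documentation in <sys/kpi_socketfilter.h>;
 * EXAMPLE_SO_PKTCOUNT and the example_cookie layout are hypothetical.
 */
#if 0	/* example only */
#define EXAMPLE_SO_PKTCOUNT	0x10001		/* hypothetical private option */

struct example_cookie {
	u_int32_t	pkt_count;
};

static errno_t
example_sf_getoption(void *cookie, socket_t so, sockopt_t opt)
{
#pragma unused(so)
	struct example_cookie *state = cookie;
	errno_t error;

	/* Only intercept the private option at the SOL_SOCKET level */
	if (sockopt_level(opt) != SOL_SOCKET ||
	    sockopt_name(opt) != EXAMPLE_SO_PKTCOUNT)
		return 0;	/* let the stack handle every other option */

	if (sockopt_valsize(opt) < sizeof(state->pkt_count))
		return EINVAL;

	/* Copy the value out to the caller and stop further processing */
	error = sockopt_copyout(opt, &state->pkt_count, sizeof(state->pkt_count));
	return (error != 0) ? error : EJUSTRETURN;
}
#endif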