/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
23 #include <sys/kpi_socketfilter.h>
25 #include <sys/socket.h>
26 #include <sys/param.h>
27 #include <sys/errno.h>
28 #include <sys/malloc.h>
29 #include <sys/protosw.h>
30 #include <kern/locks.h>
31 #include <net/kext_net.h>
33 static struct socket_filter_list sock_filter_head
;
34 static lck_mtx_t
*sock_filter_lock
= 0;
36 static void sflt_detach_private(struct socket_filter_entry
*entry
, int unregistering
);
38 __private_extern__
void
41 lck_grp_attr_t
*grp_attrib
= 0;
42 lck_attr_t
*lck_attrib
= 0;
43 lck_grp_t
*lck_group
= 0;
45 TAILQ_INIT(&sock_filter_head
);
47 /* Allocate a spin lock */
48 grp_attrib
= lck_grp_attr_alloc_init();
49 lck_group
= lck_grp_alloc_init("socket filter lock", grp_attrib
);
50 lck_grp_attr_free(grp_attrib
);
51 lck_attrib
= lck_attr_alloc_init();
52 sock_filter_lock
= lck_mtx_alloc_init(lck_group
, lck_attrib
);
53 lck_grp_free(lck_group
);
54 lck_attr_free(lck_attrib
);
57 __private_extern__
void
61 struct protosw
*proto
= so
->so_proto
;
62 struct socket_filter
*filter
;
64 if (TAILQ_FIRST(&proto
->pr_filter_head
) != NULL
) {
65 lck_mtx_lock(sock_filter_lock
);
66 TAILQ_FOREACH(filter
, &proto
->pr_filter_head
, sf_protosw_next
) {
67 sflt_attach_private(so
, filter
, 0, 0);
69 lck_mtx_unlock(sock_filter_lock
);
73 __private_extern__
void
77 struct socket_filter_entry
*filter
;
78 struct socket_filter_entry
*filter_next
;
80 for (filter
= so
->so_filt
; filter
; filter
= filter_next
) {
81 filter_next
= filter
->sfe_next_onsocket
;
82 sflt_detach_private(filter
, 0);
87 __private_extern__
void
94 __private_extern__
void
99 if (so
->so_filteruse
== 0) {
100 struct socket_filter_entry
*filter
;
101 struct socket_filter_entry
*next_filter
;
102 // search for detaching filters
103 for (filter
= so
->so_filt
; filter
; filter
= next_filter
) {
104 next_filter
= filter
->sfe_next_onsocket
;
106 if (filter
->sfe_flags
& SFEF_DETACHUSEZERO
) {
107 sflt_detach_private(filter
, 0);
113 __private_extern__
void
119 struct socket_filter_entry
*filter
;
122 for (filter
= so
->so_filt
; filter
;
123 filter
= filter
->sfe_next_onsocket
) {
124 if (filter
->sfe_filter
->sf_filter
.sf_notify
) {
128 socket_unlock(so
, 0);
130 filter
->sfe_filter
->sf_filter
.sf_notify(
131 filter
->sfe_cookie
, so
, event
, param
);
141 __private_extern__
int
144 const struct sockaddr
*from
,
147 sflt_data_flag_t flags
,
150 struct socket_filter_entry
*filter
;
152 int filtered_storage
;
154 if (filtered
== NULL
)
155 filtered
= &filtered_storage
;
158 for (filter
= so
->so_filt
; filter
&& (error
== 0);
159 filter
= filter
->sfe_next_onsocket
) {
160 if (filter
->sfe_filter
->sf_filter
.sf_data_in
) {
161 if (*filtered
== 0) {
164 socket_unlock(so
, 0);
166 error
= filter
->sfe_filter
->sf_filter
.sf_data_in(
167 filter
->sfe_cookie
, so
, from
, data
, control
, flags
);
171 if (*filtered
!= 0) {
179 /* sflt_attach_private
181 * Assumptions: If filter is not NULL, socket_filter_lock is held.
184 __private_extern__
int
187 struct socket_filter
*filter
,
191 struct socket_filter_entry
*entry
= NULL
;
195 if (filter
== NULL
) {
196 /* Find the filter by the handle */
197 lck_mtx_lock(sock_filter_lock
);
200 TAILQ_FOREACH(filter
, &sock_filter_head
, sf_global_next
) {
201 if (filter
->sf_filter
.sf_handle
== handle
)
210 /* allocate the socket filter entry */
211 MALLOC(entry
, struct socket_filter_entry
*, sizeof(*entry
), M_IFADDR
, M_WAITOK
);
218 /* Initialize the socket filter entry and call the attach function */
219 entry
->sfe_filter
= filter
;
220 entry
->sfe_socket
= so
;
221 entry
->sfe_cookie
= NULL
;
222 entry
->sfe_flags
= 0;
223 if (entry
->sfe_filter
->sf_filter
.sf_attach
) {
224 filter
->sf_usecount
++;
227 socket_unlock(so
, 0);
228 error
= entry
->sfe_filter
->sf_filter
.sf_attach(&entry
->sfe_cookie
, so
);
232 filter
->sf_usecount
--;
234 /* If the attach function returns an error, this filter is not attached */
236 FREE(entry
, M_IFADDR
);
243 /* Put the entry in the socket list */
244 entry
->sfe_next_onsocket
= so
->so_filt
;
247 /* Put the entry in the filter list */
248 entry
->sfe_next_onfilter
= filter
->sf_entry_head
;
249 filter
->sf_entry_head
= entry
;
251 /* Incremenet the parent filter's usecount */
252 filter
->sf_usecount
++;
256 lck_mtx_unlock(sock_filter_lock
);
263 /* sflt_detach_private
265 * Assumptions: if you pass 0 in for the second parameter, you are holding the
266 * socket lock for the socket the entry is attached to. If you pass 1 in for
267 * the second parameter, it is assumed that the entry is not on the filter's
268 * list and the socket lock is not held.
273 struct socket_filter_entry
*entry
,
276 struct socket
*so
= entry
->sfe_socket
;
277 struct socket_filter_entry
**next_ptr
;
282 socket_lock(entry
->sfe_socket
, 0);
286 * Attempt to find the entry on the filter's list and
287 * remove it. This prevents a filter detaching at the
288 * same time from attempting to remove the same entry.
290 lck_mtx_lock(sock_filter_lock
);
291 if (!unregistering
) {
292 if ((entry
->sfe_flags
& SFEF_UNREGISTERING
) != 0) {
294 * Another thread is unregistering the filter, we need to
295 * avoid detaching the filter here so the socket won't go
298 lck_mtx_unlock(sock_filter_lock
);
301 for (next_ptr
= &entry
->sfe_filter
->sf_entry_head
; *next_ptr
;
302 next_ptr
= &((*next_ptr
)->sfe_next_onfilter
)) {
303 if (*next_ptr
== entry
) {
305 *next_ptr
= entry
->sfe_next_onfilter
;
310 if (!found
&& (entry
->sfe_flags
& SFEF_DETACHUSEZERO
) == 0) {
311 lck_mtx_unlock(sock_filter_lock
);
317 * Clear the removing flag. We will perform the detach here or
318 * request a delayed deatch.
320 entry
->sfe_flags
&= ~SFEF_UNREGISTERING
;
323 if (entry
->sfe_socket
->so_filteruse
!= 0) {
324 entry
->sfe_flags
|= SFEF_DETACHUSEZERO
;
325 lck_mtx_unlock(sock_filter_lock
);
330 * Check if we are removing the last attached filter and
331 * the parent filter is being unregistered.
333 entry
->sfe_filter
->sf_usecount
--;
334 if ((entry
->sfe_filter
->sf_usecount
== 0) &&
335 (entry
->sfe_filter
->sf_flags
& SFF_DETACHING
) != 0)
338 lck_mtx_unlock(sock_filter_lock
);
340 /* Remove from the socket list */
341 for (next_ptr
= &entry
->sfe_socket
->so_filt
; *next_ptr
;
342 next_ptr
= &((*next_ptr
)->sfe_next_onsocket
)) {
343 if (*next_ptr
== entry
) {
344 *next_ptr
= entry
->sfe_next_onsocket
;
349 if (entry
->sfe_filter
->sf_filter
.sf_detach
)
350 entry
->sfe_filter
->sf_filter
.sf_detach(entry
->sfe_cookie
, entry
->sfe_socket
);
352 if (detached
&& entry
->sfe_filter
->sf_filter
.sf_unregistered
) {
353 entry
->sfe_filter
->sf_filter
.sf_unregistered(entry
->sfe_filter
->sf_filter
.sf_handle
);
354 FREE(entry
->sfe_filter
, M_IFADDR
);
358 socket_unlock(entry
->sfe_socket
, 1);
360 FREE(entry
, M_IFADDR
);
368 if (socket
== NULL
|| handle
== 0)
371 return sflt_attach_private(socket
, NULL
, handle
, 0);
379 struct socket_filter_entry
*filter
;
382 if (socket
== NULL
|| handle
== 0)
385 socket_lock(socket
, 1);
387 for (filter
= socket
->so_filt
; filter
;
388 filter
= filter
->sfe_next_onsocket
) {
389 if (filter
->sfe_filter
->sf_filter
.sf_handle
== handle
)
393 if (filter
!= NULL
) {
394 sflt_detach_private(filter
, 0);
397 socket
->so_filt
= NULL
;
401 socket_unlock(socket
, 1);
409 const struct sflt_filter
*filter
,
414 struct socket_filter
*sock_filt
= NULL
;
415 struct socket_filter
*match
= NULL
;
417 struct protosw
*pr
= pffindproto(domain
, protocol
, type
);
419 if (pr
== NULL
) return ENOENT
;
421 if (filter
->sf_attach
== NULL
|| filter
->sf_detach
== NULL
) return EINVAL
;
422 if (filter
->sf_handle
== 0) return EINVAL
;
423 if (filter
->sf_name
== NULL
) return EINVAL
;
425 /* Allocate the socket filter */
426 MALLOC(sock_filt
, struct socket_filter
*, sizeof(*sock_filt
), M_IFADDR
, M_WAITOK
);
427 if (sock_filt
== NULL
) {
431 bzero(sock_filt
, sizeof(*sock_filt
));
432 sock_filt
->sf_filter
= *filter
;
434 lck_mtx_lock(sock_filter_lock
);
435 /* Look for an existing entry */
436 TAILQ_FOREACH(match
, &sock_filter_head
, sf_global_next
) {
437 if (match
->sf_filter
.sf_handle
== sock_filt
->sf_filter
.sf_handle
) {
442 /* Add the entry only if there was no existing entry */
444 TAILQ_INSERT_TAIL(&sock_filter_head
, sock_filt
, sf_global_next
);
445 if ((sock_filt
->sf_filter
.sf_flags
& SFLT_GLOBAL
) != 0) {
446 TAILQ_INSERT_TAIL(&pr
->pr_filter_head
, sock_filt
, sf_protosw_next
);
447 sock_filt
->sf_proto
= pr
;
450 lck_mtx_unlock(sock_filter_lock
);
453 FREE(sock_filt
, M_IFADDR
);
464 struct socket_filter
*filter
;
465 struct socket_filter_entry
*entry_head
= NULL
;
466 struct socket_filter_entry
*next_entry
= NULL
;
468 /* Find the entry and remove it from the global and protosw lists */
469 lck_mtx_lock(sock_filter_lock
);
470 TAILQ_FOREACH(filter
, &sock_filter_head
, sf_global_next
) {
471 if (filter
->sf_filter
.sf_handle
== handle
)
476 TAILQ_REMOVE(&sock_filter_head
, filter
, sf_global_next
);
477 if ((filter
->sf_filter
.sf_flags
& SFLT_GLOBAL
) != 0) {
478 TAILQ_REMOVE(&filter
->sf_proto
->pr_filter_head
, filter
, sf_protosw_next
);
480 entry_head
= filter
->sf_entry_head
;
481 filter
->sf_entry_head
= NULL
;
482 filter
->sf_flags
|= SFF_DETACHING
;
484 for (next_entry
= entry_head
; next_entry
;
485 next_entry
= next_entry
->sfe_next_onfilter
) {
486 socket_lock(next_entry
->sfe_socket
, 1);
487 next_entry
->sfe_flags
|= SFEF_UNREGISTERING
;
488 socket_unlock(next_entry
->sfe_socket
, 0); /* Radar 4201550: prevents the socket from being deleted while being unregistered */
492 lck_mtx_unlock(sock_filter_lock
);
497 /* We need to detach the filter from any sockets it's attached to */
498 if (entry_head
== 0) {
499 if (filter
->sf_filter
.sf_unregistered
)
500 filter
->sf_filter
.sf_unregistered(filter
->sf_filter
.sf_handle
);
503 next_entry
= entry_head
->sfe_next_onfilter
;
504 sflt_detach_private(entry_head
, 1);
505 entry_head
= next_entry
;
515 const struct sockaddr
* from
,
518 sflt_data_flag_t flags
)
521 if (so
== NULL
|| data
== NULL
) return EINVAL
;
523 if (flags
& sock_data_filt_flag_oob
) {
530 if (sbappendaddr(&so
->so_rcv
, (struct sockaddr
*)from
, data
,
537 if (sbappendcontrol(&so
->so_rcv
, data
, control
, NULL
))
542 if (flags
& sock_data_filt_flag_record
) {
543 if (control
|| from
) {
547 if (sbappendrecord(&so
->so_rcv
, (struct mbuf
*)data
))
552 if (sbappend(&so
->so_rcv
, data
))
555 socket_unlock(so
, 1);
560 sock_inject_data_out(
562 const struct sockaddr
* to
,
565 sflt_data_flag_t flags
)
568 if (flags
& sock_data_filt_flag_oob
) sosendflags
= MSG_OOB
;
569 return sosend(so
, (const struct sockaddr
*)to
, NULL
,
570 data
, control
, sosendflags
);
577 return (sopt
->sopt_dir
== SOPT_GET
) ? sockopt_get
: sockopt_set
;
584 return sopt
->sopt_level
;
591 return sopt
->sopt_name
;
598 return sopt
->sopt_valsize
;
607 return sooptcopyin(sopt
, data
, len
, len
);
616 return sooptcopyout(sopt
, data
, len
);