#define SFEF_NODETACH 0x2 /* Detach should not be called */
#define SFEF_NOSOCKET 0x4 /* Socket is gone */
+/*
+ * If you need accounting for KM_IFADDR, consider using
+ * KALLOC_HEAP_DEFINE to define a view.
+ */
+#define KM_IFADDR KHEAP_DEFAULT
+
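/*
 * Illustrative sketch, not part of the change above: the comment on
 * KM_IFADDR suggests KALLOC_HEAP_DEFINE for an accounted view.  Assuming
 * that macro from <kern/kalloc.h>, a dedicated view could look roughly
 * like this (KHEAP_SOCKET_FILTER and its label are placeholders):
 *
 *	KALLOC_HEAP_DEFINE(KHEAP_SOCKET_FILTER, "socket filters",
 *	    KHEAP_ID_DEFAULT);
 *	#define KM_IFADDR	KHEAP_SOCKET_FILTER
 *
 * The kheap_alloc()/kheap_free() calls below would then be accounted to
 * that view rather than folded into the shared KHEAP_DEFAULT heap.
 */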
struct socket_filter_entry {
struct socket_filter_entry *sfe_next_onsocket;
struct socket_filter_entry *sfe_next_onfilter;
TAILQ_HEAD(socket_filter_list, socket_filter);
-static struct socket_filter_list sock_filter_head;
-static lck_rw_t *sock_filter_lock = NULL;
-static lck_mtx_t *sock_filter_cleanup_lock = NULL;
+static LCK_GRP_DECLARE(sock_filter_lock_grp, "socket filter lock");
+static LCK_RW_DECLARE(sock_filter_lock, &sock_filter_lock_grp);
+static LCK_MTX_DECLARE(sock_filter_cleanup_lock, &sock_filter_lock_grp);
+
+static struct socket_filter_list sock_filter_head =
+ TAILQ_HEAD_INITIALIZER(sock_filter_head);
static struct socket_filter_entry *sock_filter_cleanup_entries = NULL;
static thread_t sock_filter_cleanup_thread = NULL;
return 0;
}
-__private_extern__ void
-sflt_init(void)
-{
- lck_grp_attr_t *grp_attrib = NULL;
- lck_attr_t *lck_attrib = NULL;
- lck_grp_t *lck_group = NULL;
-
- TAILQ_INIT(&sock_filter_head);
-
- /* Allocate a rw lock */
- grp_attrib = lck_grp_attr_alloc_init();
- lck_group = lck_grp_alloc_init("socket filter lock", grp_attrib);
- lck_grp_attr_free(grp_attrib);
- lck_attrib = lck_attr_alloc_init();
- sock_filter_lock = lck_rw_alloc_init(lck_group, lck_attrib);
- sock_filter_cleanup_lock = lck_mtx_alloc_init(lck_group, lck_attrib);
- lck_grp_free(lck_group);
- lck_attr_free(lck_attrib);
-}
-
static void
sflt_retain_locked(struct socket_filter *filter)
{
if (os_ref_release_locked(&filter->sf_refcount) == 0) {
		/* Call the filter's sf_unregistered callback */
if (filter->sf_filter.sf_unregistered) {
- lck_rw_unlock_exclusive(sock_filter_lock);
+ lck_rw_unlock_exclusive(&sock_filter_lock);
filter->sf_filter.sf_unregistered(
filter->sf_filter.sf_handle);
- lck_rw_lock_exclusive(sock_filter_lock);
+ lck_rw_lock_exclusive(&sock_filter_lock);
}
/* Free the entry */
- FREE(filter, M_IFADDR);
+ kheap_free(KM_IFADDR, filter, sizeof(struct socket_filter));
}
}
/* That was the last reference */
/* Take the cleanup lock */
- lck_mtx_lock(sock_filter_cleanup_lock);
+ lck_mtx_lock(&sock_filter_cleanup_lock);
/* Put this item on the cleanup list */
entry->sfe_next_oncleanup = sock_filter_cleanup_entries;
}
/* Drop the cleanup lock */
- lck_mtx_unlock(sock_filter_cleanup_lock);
+ lck_mtx_unlock(&sock_filter_cleanup_lock);
} else if (old <= 0) {
panic("sflt_entry_release - sfe_refcount (%d) <= 0\n",
(int)old);
{
#pragma unused(blah, blah2)
while (1) {
- lck_mtx_lock(sock_filter_cleanup_lock);
+ lck_mtx_lock(&sock_filter_cleanup_lock);
while (sock_filter_cleanup_entries == NULL) {
/* Sleep until we've got something better to do */
msleep(&sock_filter_cleanup_entries,
- sock_filter_cleanup_lock, PWAIT,
+ &sock_filter_cleanup_lock, PWAIT,
"sflt_cleanup", NULL);
}
sock_filter_cleanup_entries = NULL;
/* Drop the lock */
- lck_mtx_unlock(sock_filter_cleanup_lock);
+ lck_mtx_unlock(&sock_filter_cleanup_lock);
/* Take the socket filter lock */
- lck_rw_lock_exclusive(sock_filter_lock);
+ lck_rw_lock_exclusive(&sock_filter_lock);
		/* Clean up every dead item */
struct socket_filter_entry *entry;
if ((entry->sfe_flags & SFEF_NODETACH) == 0 &&
entry->sfe_filter->sf_filter.sf_detach) {
entry->sfe_flags |= SFEF_NODETACH;
- lck_rw_unlock_exclusive(sock_filter_lock);
+ lck_rw_unlock_exclusive(&sock_filter_lock);
/*
* Warning - passing a potentially
entry->sfe_filter->sf_filter.sf_detach(
entry->sfe_cookie, entry->sfe_socket);
- lck_rw_lock_exclusive(sock_filter_lock);
+ lck_rw_lock_exclusive(&sock_filter_lock);
}
/*
sflt_release_locked(entry->sfe_filter);
entry->sfe_socket = NULL;
entry->sfe_filter = NULL;
- FREE(entry, M_IFADDR);
+ kheap_free(KM_IFADDR, entry, sizeof(struct socket_filter_entry));
}
/* Drop the socket filter lock */
- lck_rw_unlock_exclusive(sock_filter_lock);
+ lck_rw_unlock_exclusive(&sock_filter_lock);
}
/* NOTREACHED */
}
}
}
/* allocate the socket filter entry */
- MALLOC(entry, struct socket_filter_entry *, sizeof(*entry), M_IFADDR,
- M_WAITOK);
+ entry = kheap_alloc(KM_IFADDR, sizeof(struct socket_filter_entry),
+ Z_WAITOK);
if (entry == NULL) {
return ENOMEM;
}
* Release the filter lock --
* callers must be aware we will do this
*/
- lck_rw_unlock_exclusive(sock_filter_lock);
+ lck_rw_unlock_exclusive(&sock_filter_lock);
/* Unlock the socket */
if (socklocked) {
}
/* Lock the filters again */
- lck_rw_lock_exclusive(sock_filter_lock);
+ lck_rw_lock_exclusive(&sock_filter_lock);
/*
* If the attach function returns an error,
int result = EINVAL;
- lck_rw_lock_exclusive(sock_filter_lock);
+ lck_rw_lock_exclusive(&sock_filter_lock);
struct socket_filter *filter = NULL;
TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
result = sflt_attach_locked(socket, filter, 1);
}
- lck_rw_unlock_exclusive(sock_filter_lock);
+ lck_rw_unlock_exclusive(&sock_filter_lock);
return result;
}
*/
struct protosw *proto = so->so_proto->pr_protosw;
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) {
/* Promote lock to exclusive */
- if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock)) {
- lck_rw_lock_exclusive(sock_filter_lock);
+ if (!lck_rw_lock_shared_to_exclusive(&sock_filter_lock)) {
+ lck_rw_lock_exclusive(&sock_filter_lock);
}
/*
filter = filter_next;
}
}
- lck_rw_done(sock_filter_lock);
+ lck_rw_done(&sock_filter_lock);
}
/*
__private_extern__ void
sflt_termsock(struct socket *so)
{
- lck_rw_lock_exclusive(sock_filter_lock);
+ lck_rw_lock_exclusive(&sock_filter_lock);
struct socket_filter_entry *entry;
entry->sfe_flags |= SFEF_NODETACH;
/* Drop the lock before calling the detach function */
- lck_rw_unlock_exclusive(sock_filter_lock);
+ lck_rw_unlock_exclusive(&sock_filter_lock);
sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
- lck_rw_lock_exclusive(sock_filter_lock);
+ lck_rw_lock_exclusive(&sock_filter_lock);
/* Release the filter */
sflt_release_locked(sfe_filter);
}
}
- lck_rw_unlock_exclusive(sock_filter_lock);
+ lck_rw_unlock_exclusive(&sock_filter_lock);
}
struct socket_filter_entry *entry;
int unlocked = 0;
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) {
if ((entry->sfe_flags & SFEF_ATTACHED) &&
entry->sfe_filter->sf_filter.sf_notify &&
* the socket filter lock
*/
sflt_entry_retain(entry);
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
/* If the socket isn't already unlocked, unlock it */
if (unlocked == 0) {
* Take the socket filter lock again
* and release the entry
*/
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
sflt_entry_release(entry);
}
}
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
if (unlocked != 0) {
socket_lock(so, 0);
int unlocked = 0;
int error = 0;
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
for (entry = so->so_filt; entry && error == 0;
entry = entry->sfe_next_onsocket) {
if ((entry->sfe_flags & SFEF_ATTACHED) &&
* the socket filter lock
*/
sflt_entry_retain(entry);
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
/* If the socket isn't already unlocked, unlock it */
if (unlocked == 0) {
* Take the socket filter lock again
* and release the entry
*/
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
sflt_entry_release(entry);
}
}
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
if (unlocked) {
socket_lock(so, 0);
int unlocked = 0;
int error = 0;
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
for (entry = so->so_filt; entry && error == 0;
entry = entry->sfe_next_onsocket) {
if ((entry->sfe_flags & SFEF_ATTACHED) &&
* release the socket filter lock
*/
sflt_entry_retain(entry);
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
/* If the socket isn't already unlocked, unlock it */
if (unlocked == 0) {
* Take the socket filter lock again and
* release the entry
*/
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
sflt_entry_release(entry);
}
}
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
if (unlocked) {
socket_lock(so, 0);
int unlocked = 0;
int error = 0;
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
for (entry = so->so_filt; entry && error == 0;
entry = entry->sfe_next_onsocket) {
if ((entry->sfe_flags & SFEF_ATTACHED) &&
* the socket filter lock
*/
sflt_entry_retain(entry);
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
/* If the socket isn't already unlocked, unlock it */
if (unlocked == 0) {
* Take the socket filter lock again
* and release the entry
*/
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
sflt_entry_release(entry);
}
}
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
if (unlocked) {
socket_lock(so, 0);
int unlocked = 0;
int error = 0;
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
for (entry = so->so_filt; entry && error == 0;
entry = entry->sfe_next_onsocket) {
if ((entry->sfe_flags & SFEF_ATTACHED) &&
* release the socket filter lock
*/
sflt_entry_retain(entry);
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
/* If the socket isn't already unlocked, unlock it */
if (unlocked == 0) {
* Take the socket filter lock again
* and release the entry
*/
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
sflt_entry_release(entry);
}
}
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
if (unlocked) {
socket_lock(so, 0);
int unlocked = 0;
int error = 0;
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
for (entry = so->so_filt; entry && error == 0;
entry = entry->sfe_next_onsocket) {
if ((entry->sfe_flags & SFEF_ATTACHED) &&
* release the socket filter lock
*/
sflt_entry_retain(entry);
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
/* If the socket isn't already unlocked, unlock it */
if (unlocked == 0) {
* Take the socket filter lock again
* and release the entry
*/
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
sflt_entry_release(entry);
}
}
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
if (unlocked) {
socket_lock(so, 0);
int unlocked = 0;
int error = 0;
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
for (entry = so->so_filt; entry && error == 0;
entry = entry->sfe_next_onsocket) {
if ((entry->sfe_flags & SFEF_ATTACHED) &&
* the socket filter lock
*/
sflt_entry_retain(entry);
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
/* If the socket isn't already unlocked, unlock it */
if (unlocked == 0) {
* Take the socket filter lock again
* and release the entry
*/
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
sflt_entry_release(entry);
}
}
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
if (unlocked) {
socket_lock(so, 0);
int unlocked = 0;
int error = 0;
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
for (entry = so->so_filt; entry && error == 0;
entry = entry->sfe_next_onsocket) {
if ((entry->sfe_flags & SFEF_ATTACHED) &&
* the socket filter lock
*/
sflt_entry_retain(entry);
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
/* If the socket isn't already unlocked, unlock it */
if (unlocked == 0) {
* Take the socket filter lock again
* and release the entry
*/
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
sflt_entry_release(entry);
}
}
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
if (unlocked) {
socket_lock(so, 0);
int unlocked = 0;
int error = 0;
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
for (entry = so->so_filt; entry && error == 0;
entry = entry->sfe_next_onsocket) {
if ((entry->sfe_flags & SFEF_ATTACHED) &&
* the socket filter lock
*/
sflt_entry_retain(entry);
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
/* If the socket isn't already unlocked, unlock it */
if (unlocked == 0) {
* Take the socket filter lock again
* and release the entry
*/
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
sflt_entry_release(entry);
}
}
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
if (unlocked) {
socket_lock(so, 0);
int unlocked = 0;
int error = 0;
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
for (entry = so->so_filt; entry && error == 0;
entry = entry->sfe_next_onsocket) {
if ((entry->sfe_flags & SFEF_ATTACHED) &&
* the socket filter lock
*/
sflt_entry_retain(entry);
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
/* If the socket isn't already unlocked, unlock it */
if (unlocked == 0) {
* Take the socket filter lock again
* and release the entry
*/
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
sflt_entry_release(entry);
}
}
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
if (unlocked) {
socket_lock(so, 0);
int unlocked = 0;
int error = 0;
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
for (entry = so->so_filt; entry && error == 0;
entry = entry->sfe_next_onsocket) {
if ((entry->sfe_flags & SFEF_ATTACHED) &&
* the socket filter lock
*/
sflt_entry_retain(entry);
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
/* If the socket isn't already unlocked, unlock it */
if (unlocked == 0) {
* Take the socket filter lock again
* and release the entry
*/
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
sflt_entry_release(entry);
}
}
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
if (unlocked) {
socket_lock(so, 0);
int setsendthread = 0;
int error = 0;
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
for (entry = so->so_filt; entry && error == 0;
entry = entry->sfe_next_onsocket) {
/* skip if this is a subflow socket */
* release the socket filter lock
*/
sflt_entry_retain(entry);
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
/* If the socket isn't already unlocked, unlock it */
if (unlocked == 0) {
* Take the socket filter lock again
* and release the entry
*/
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
sflt_entry_release(entry);
}
}
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
if (unlocked) {
socket_lock(so, 0);
int error = 0;
int unlocked = 0;
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
for (entry = so->so_filt; entry && (error == 0);
entry = entry->sfe_next_onsocket) {
* release the socket filter lock
*/
sflt_entry_retain(entry);
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
/* If the socket isn't already unlocked, unlock it */
if (unlocked == 0) {
* Take the socket filter lock again
* and release the entry
*/
- lck_rw_lock_shared(sock_filter_lock);
+ lck_rw_lock_shared(&sock_filter_lock);
sflt_entry_release(entry);
}
}
- lck_rw_unlock_shared(sock_filter_lock);
+ lck_rw_unlock_shared(&sock_filter_lock);
if (unlocked) {
socket_lock(so, 0);
return EINVAL;
}
- lck_rw_lock_exclusive(sock_filter_lock);
+ lck_rw_lock_exclusive(&sock_filter_lock);
for (entry = socket->so_filt; entry; entry = entry->sfe_next_onsocket) {
if (entry->sfe_filter->sf_filter.sf_handle == handle &&
(entry->sfe_flags & SFEF_ATTACHED) != 0) {
if (entry != NULL) {
sflt_detach_locked(entry);
}
- lck_rw_unlock_exclusive(sock_filter_lock);
+ lck_rw_unlock_exclusive(&sock_filter_lock);
return result;
}
}
/* Allocate the socket filter */
- MALLOC(sock_filt, struct socket_filter *, sizeof(*sock_filt),
- M_IFADDR, M_WAITOK);
+ sock_filt = kheap_alloc(KM_IFADDR,
+ sizeof(struct socket_filter), Z_WAITOK | Z_ZERO);
if (sock_filt == NULL) {
return ENOBUFS;
}
- bzero(sock_filt, sizeof(*sock_filt));
-
/* Legacy sflt_filter length; current structure minus extended */
len = sizeof(*filter) - sizeof(struct sflt_filter_ext);
/*
}
bcopy(filter, &sock_filt->sf_filter, len);
- lck_rw_lock_exclusive(sock_filter_lock);
+ lck_rw_lock_exclusive(&sock_filter_lock);
/* Look for an existing entry */
TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) {
if (match->sf_filter.sf_handle ==
INC_ATOMIC_INT64_LIM(net_api_stats.nas_sfltr_register_os_total);
}
}
- lck_rw_unlock_exclusive(sock_filter_lock);
+ lck_rw_unlock_exclusive(&sock_filter_lock);
if (match != NULL) {
- FREE(sock_filt, M_IFADDR);
+ kheap_free(KM_IFADDR, sock_filt, sizeof(struct socket_filter));
return EEXIST;
}
!SOCK_CHECK_TYPE(so, type)) {
continue;
}
- MALLOC(solist, struct solist *, sizeof(*solist),
- M_IFADDR, M_NOWAIT);
+ solist = kheap_alloc(KHEAP_TEMP, sizeof(struct solist), Z_NOWAIT);
if (!solist) {
continue;
}
!SOCK_CHECK_TYPE(so, type)) {
continue;
}
- MALLOC(solist, struct solist *, sizeof(*solist),
- M_IFADDR, M_NOWAIT);
+ solist = kheap_alloc(KHEAP_TEMP, sizeof(struct solist), Z_NOWAIT);
if (!solist) {
continue;
}
sock_release(so);
solist = solisthead;
solisthead = solisthead->next;
- FREE(solist, M_IFADDR);
+ kheap_free(KHEAP_TEMP, solist, sizeof(struct solist));
}
return error;
sflt_unregister(sflt_handle handle)
{
struct socket_filter *filter;
- lck_rw_lock_exclusive(sock_filter_lock);
+ lck_rw_lock_exclusive(&sock_filter_lock);
/* Find the entry by the handle */
TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) {
sflt_release_locked(filter);
}
- lck_rw_unlock_exclusive(sock_filter_lock);
+ lck_rw_unlock_exclusive(&sock_filter_lock);
if (filter == NULL) {
return ENOENT;