2 * Copyright (c) 2016-2019 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
30 * All rights reserved.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
54 #include <sys/cdefs.h>
55 #include <sys/param.h>
56 #include <sys/kernel.h>
57 #include <kern/queue.h>
58 #include <kern/locks.h>
59 #include <sys/malloc.h>
61 #include <sys/systm.h>
62 #include <sys/mcache.h>
63 #include <sys/eventhandler.h>
64 #include <sys/sysctl.h>
68 MALLOC_DEFINE(M_EVENTHANDLER
, "eventhandler", "Event handler records");
70 SYSCTL_NODE(_kern
, OID_AUTO
, eventhandler
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
72 SYSCTL_INT(_kern_eventhandler
, OID_AUTO
, debug
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
73 &evh_debug
, 0, "Eventhandler debug mode");
/* All-zero argument record for registrants with no from/resume UUIDs to pass. */
struct eventhandler_entry_arg eventhandler_entry_dummy_arg = { .ee_fm_uuid = { 0 }, .ee_fr_uuid = { 0 } };
/* List of 'slow' lists */
static struct eventhandler_lists_ctxt evthdlr_lists_ctxt_glb;  /* global (default) context */

/* Lock group/attributes for each context's eventhandler_mutex. */
static lck_grp_attr_t *eventhandler_mutex_grp_attr;
static lck_grp_t *eventhandler_mutex_grp;
static lck_attr_t *eventhandler_mutex_attr;

static unsigned int eg_size;            /* size of eventhandler_entry_generic */
static struct mcache *eg_cache;         /* mcache for eventhandler_entry_generic */

static unsigned int el_size;            /* size of eventhandler_list */
static struct mcache *el_cache;         /* mcache for eventhandler_list */

/*
 * Lock group/attributes for per-list el_lock; el_lock_grp/el_lock_attr are
 * non-static — presumably consumed by the EHL_LOCK_* macros in the header
 * (TODO confirm against sys/eventhandler.h).
 */
static lck_grp_attr_t *el_lock_grp_attr;
lck_grp_t *el_lock_grp;
lck_attr_t *el_lock_attr;
93 struct eventhandler_entry_generic
{
94 struct eventhandler_entry ee
;
/* Forward declaration: name lookup helper; caller holds eventhandler_mutex. */
static struct eventhandler_list *_eventhandler_find_list(
	struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, const char *name);
102 eventhandler_lists_ctxt_init(struct eventhandler_lists_ctxt
*evthdlr_lists_ctxt
)
104 VERIFY(evthdlr_lists_ctxt
!= NULL
);
106 TAILQ_INIT(&evthdlr_lists_ctxt
->eventhandler_lists
);
107 evthdlr_lists_ctxt
->eventhandler_lists_initted
= 1;
108 lck_mtx_init(&evthdlr_lists_ctxt
->eventhandler_mutex
,
109 eventhandler_mutex_grp
, eventhandler_mutex_attr
);
113 * Initialize the eventhandler mutex and list.
116 eventhandler_init(void)
118 eventhandler_mutex_grp_attr
= lck_grp_attr_alloc_init();
119 eventhandler_mutex_grp
= lck_grp_alloc_init("eventhandler",
120 eventhandler_mutex_grp_attr
);
121 eventhandler_mutex_attr
= lck_attr_alloc_init();
123 el_lock_grp_attr
= lck_grp_attr_alloc_init();
124 el_lock_grp
= lck_grp_alloc_init("eventhandler list",
126 el_lock_attr
= lck_attr_alloc_init();
128 eventhandler_lists_ctxt_init(&evthdlr_lists_ctxt_glb
);
130 eg_size
= sizeof(struct eventhandler_entry_generic
);
131 eg_cache
= mcache_create("eventhdlr_generic", eg_size
,
132 sizeof(uint64_t), 0, MCR_SLEEP
);
134 el_size
= sizeof(struct eventhandler_list
);
135 el_cache
= mcache_create("eventhdlr_list", el_size
,
136 sizeof(uint64_t), 0, MCR_SLEEP
);
140 eventhandler_reap_caches(boolean_t purge
)
142 mcache_reap_now(eg_cache
, purge
);
143 mcache_reap_now(el_cache
, purge
);
147 * Insertion is O(n) due to the priority scan, but optimises to O(1)
148 * if all priorities are identical.
150 static eventhandler_tag
151 eventhandler_register_internal(
152 struct eventhandler_lists_ctxt
*evthdlr_lists_ctxt
,
153 struct eventhandler_list
*list
,
154 const char *name
, eventhandler_tag epn
)
156 struct eventhandler_list
*new_list
;
157 struct eventhandler_entry
*ep
;
159 VERIFY(strlen(name
) <= (sizeof(new_list
->el_name
) - 1));
161 if (evthdlr_lists_ctxt
== NULL
) {
162 evthdlr_lists_ctxt
= &evthdlr_lists_ctxt_glb
;
165 VERIFY(evthdlr_lists_ctxt
->eventhandler_lists_initted
); /* eventhandler registered too early */
166 VERIFY(epn
!= NULL
); /* cannot register NULL event */
168 /* lock the eventhandler lists */
169 lck_mtx_lock_spin(&evthdlr_lists_ctxt
->eventhandler_mutex
);
171 /* Do we need to find/create the (slow) list? */
173 /* look for a matching, existing list */
174 list
= _eventhandler_find_list(evthdlr_lists_ctxt
, name
);
176 /* Do we need to create the list? */
178 lck_mtx_convert_spin(&evthdlr_lists_ctxt
->eventhandler_mutex
);
179 new_list
= mcache_alloc(el_cache
, MCR_SLEEP
);
180 if (new_list
== NULL
) {
181 evhlog((LOG_DEBUG
, "%s: Can't allocate list \"%s\"", __func__
, name
));
182 lck_mtx_unlock(&evthdlr_lists_ctxt
->eventhandler_mutex
);
185 bzero(new_list
, el_size
);
186 evhlog((LOG_DEBUG
, "%s: creating list \"%s\"", __func__
, name
));
189 list
->el_runcount
= 0;
190 bzero(&list
->el_lock
, sizeof(list
->el_lock
));
191 (void) snprintf(list
->el_name
, sizeof(list
->el_name
), "%s", name
);
192 TAILQ_INSERT_HEAD(&evthdlr_lists_ctxt
->eventhandler_lists
, list
, el_link
);
195 if (!(list
->el_flags
& EHL_INITTED
)) {
196 TAILQ_INIT(&list
->el_entries
);
198 list
->el_flags
|= EHL_INITTED
;
200 lck_mtx_unlock(&evthdlr_lists_ctxt
->eventhandler_mutex
);
202 KASSERT(epn
->ee_priority
!= EHE_DEAD_PRIORITY
,
203 ("%s: handler for %s registered with dead priority", __func__
, name
));
205 /* sort it into the list */
206 evhlog((LOG_DEBUG
, "%s: adding item %p (function %p to \"%s\"", __func__
, VM_KERNEL_ADDRPERM(epn
),
207 VM_KERNEL_UNSLIDE(((struct eventhandler_entry_generic
*)epn
)->func
), name
));
209 TAILQ_FOREACH(ep
, &list
->el_entries
, ee_link
) {
210 if (ep
->ee_priority
!= EHE_DEAD_PRIORITY
&&
211 epn
->ee_priority
< ep
->ee_priority
) {
212 TAILQ_INSERT_BEFORE(ep
, epn
, ee_link
);
217 TAILQ_INSERT_TAIL(&list
->el_entries
, epn
, ee_link
);
224 eventhandler_register(struct eventhandler_lists_ctxt
*evthdlr_lists_ctxt
,
225 struct eventhandler_list
*list
, const char *name
,
226 void *func
, struct eventhandler_entry_arg arg
, int priority
)
228 struct eventhandler_entry_generic
*eg
;
230 /* allocate an entry for this handler, populate it */
231 eg
= mcache_alloc(eg_cache
, MCR_SLEEP
);
233 evhlog((LOG_DEBUG
, "%s: Can't allocate entry to register for event list "
234 "\"%s\"", __func__
, name
));
240 eg
->ee
.ee_priority
= priority
;
242 return eventhandler_register_internal(evthdlr_lists_ctxt
, list
, name
, &eg
->ee
);
246 eventhandler_deregister(struct eventhandler_list
*list
, eventhandler_tag tag
)
248 struct eventhandler_entry
*ep
= tag
;
250 EHL_LOCK_ASSERT(list
, LCK_MTX_ASSERT_OWNED
);
252 /* remove just this entry */
253 if (list
->el_runcount
== 0) {
254 evhlog((LOG_DEBUG
, "%s: removing item %p from \"%s\"", __func__
, VM_KERNEL_ADDRPERM(ep
),
257 * We may have purged the list because of certain events.
258 * Make sure that is not the case when a specific entry
261 if (!TAILQ_EMPTY(&list
->el_entries
)) {
262 TAILQ_REMOVE(&list
->el_entries
, ep
, ee_link
);
264 EHL_LOCK_CONVERT(list
);
265 mcache_free(eg_cache
, ep
);
267 evhlog((LOG_DEBUG
, "%s: marking item %p from \"%s\" as dead", __func__
,
268 VM_KERNEL_ADDRPERM(ep
), list
->el_name
));
269 ep
->ee_priority
= EHE_DEAD_PRIORITY
;
272 /* remove entire list */
273 if (list
->el_runcount
== 0) {
274 evhlog((LOG_DEBUG
, "%s: removing all items from \"%s\"", __func__
,
276 EHL_LOCK_CONVERT(list
);
277 while (!TAILQ_EMPTY(&list
->el_entries
)) {
278 ep
= TAILQ_FIRST(&list
->el_entries
);
279 TAILQ_REMOVE(&list
->el_entries
, ep
, ee_link
);
280 mcache_free(eg_cache
, ep
);
283 evhlog((LOG_DEBUG
, "%s: marking all items from \"%s\" as dead",
284 __func__
, list
->el_name
));
285 TAILQ_FOREACH(ep
, &list
->el_entries
, ee_link
)
286 ep
->ee_priority
= EHE_DEAD_PRIORITY
;
289 while (list
->el_runcount
> 0) {
290 msleep((caddr_t
)list
, &list
->el_lock
, PSPIN
, "evhrm", 0);
296 * Internal version for use when eventhandler list is already locked.
298 static struct eventhandler_list
*
299 _eventhandler_find_list(struct eventhandler_lists_ctxt
*evthdlr_lists_ctxt
,
302 struct eventhandler_list
*list
;
304 VERIFY(evthdlr_lists_ctxt
!= NULL
);
306 LCK_MTX_ASSERT(&evthdlr_lists_ctxt
->eventhandler_mutex
, LCK_MTX_ASSERT_OWNED
);
307 TAILQ_FOREACH(list
, &evthdlr_lists_ctxt
->eventhandler_lists
, el_link
) {
308 if (!strcmp(name
, list
->el_name
)) {
316 * Lookup a "slow" list by name. Returns with the list locked.
318 struct eventhandler_list
*
319 eventhandler_find_list(struct eventhandler_lists_ctxt
*evthdlr_lists_ctxt
,
322 struct eventhandler_list
*list
;
324 if (evthdlr_lists_ctxt
== NULL
) {
325 evthdlr_lists_ctxt
= &evthdlr_lists_ctxt_glb
;
328 if (!evthdlr_lists_ctxt
->eventhandler_lists_initted
) {
332 /* scan looking for the requested list */
333 lck_mtx_lock_spin(&evthdlr_lists_ctxt
->eventhandler_mutex
);
334 list
= _eventhandler_find_list(evthdlr_lists_ctxt
, name
);
336 lck_mtx_convert_spin(&evthdlr_lists_ctxt
->eventhandler_mutex
);
339 lck_mtx_unlock(&evthdlr_lists_ctxt
->eventhandler_mutex
);
345 * Prune "dead" entries from an eventhandler list.
348 eventhandler_prune_list(struct eventhandler_list
*list
)
350 struct eventhandler_entry
*ep
, *en
;
353 evhlog((LOG_DEBUG
, "%s: pruning list \"%s\"", __func__
, list
->el_name
));
354 EHL_LOCK_ASSERT(list
, LCK_MTX_ASSERT_OWNED
);
355 TAILQ_FOREACH_SAFE(ep
, &list
->el_entries
, ee_link
, en
) {
356 if (ep
->ee_priority
== EHE_DEAD_PRIORITY
) {
357 TAILQ_REMOVE(&list
->el_entries
, ep
, ee_link
);
358 mcache_free(eg_cache
, ep
);
368 * This should be called when last reference to an object
370 * The individual event type lists must be purged when the object
374 eventhandler_lists_ctxt_destroy(struct eventhandler_lists_ctxt
*evthdlr_lists_ctxt
)
376 struct eventhandler_list
*list
= NULL
;
377 struct eventhandler_list
*list_next
= NULL
;
379 lck_mtx_lock(&evthdlr_lists_ctxt
->eventhandler_mutex
);
380 TAILQ_FOREACH_SAFE(list
, &evthdlr_lists_ctxt
->eventhandler_lists
,
381 el_link
, list_next
) {
382 VERIFY(TAILQ_EMPTY(&list
->el_entries
));
383 EHL_LOCK_DESTROY(list
);
384 mcache_free(el_cache
, list
);
386 lck_mtx_unlock(&evthdlr_lists_ctxt
->eventhandler_mutex
);
387 lck_mtx_destroy(&evthdlr_lists_ctxt
->eventhandler_mutex
,
388 eventhandler_mutex_grp
);