/*
 * Copyright (c) 2016-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
54 #include <sys/cdefs.h>
55 #include <sys/param.h>
56 #include <sys/kernel.h>
57 #include <kern/queue.h>
58 #include <kern/locks.h>
59 #include <sys/malloc.h>
61 #include <sys/systm.h>
62 #include <sys/mcache.h>
63 #include <sys/eventhandler.h>
64 #include <sys/sysctl.h>
68 SYSCTL_NODE(_kern
, OID_AUTO
, eventhandler
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
70 SYSCTL_INT(_kern_eventhandler
, OID_AUTO
, debug
, CTLFLAG_RW
| CTLFLAG_LOCKED
,
71 &evh_debug
, 0, "Eventhandler debug mode");
73 struct eventhandler_entry_arg eventhandler_entry_dummy_arg
= { .ee_fm_uuid
= { 0 }, .ee_fr_uuid
= { 0 } };
75 /* List of 'slow' lists */
76 static struct eventhandler_lists_ctxt evthdlr_lists_ctxt_glb
;
77 static LCK_GRP_DECLARE(eventhandler_mutex_grp
, "eventhandler");
79 static unsigned int eg_size
; /* size of eventhandler_entry_generic */
80 static struct mcache
*eg_cache
; /* mcache for eventhandler_entry_generic */
82 static unsigned int el_size
; /* size of eventhandler_list */
83 static struct mcache
*el_cache
; /* mcache for eventhandler_list */
85 LCK_GRP_DECLARE(el_lock_grp
, "eventhandler list");
86 LCK_ATTR_DECLARE(el_lock_attr
, 0, 0);
88 struct eventhandler_entry_generic
{
89 struct eventhandler_entry ee
;
/* Forward declaration: name lookup; caller must hold eventhandler_mutex. */
static struct eventhandler_list *_eventhandler_find_list(
	struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, const char *name);
97 eventhandler_lists_ctxt_init(struct eventhandler_lists_ctxt
*evthdlr_lists_ctxt
)
99 VERIFY(evthdlr_lists_ctxt
!= NULL
);
101 TAILQ_INIT(&evthdlr_lists_ctxt
->eventhandler_lists
);
102 evthdlr_lists_ctxt
->eventhandler_lists_initted
= 1;
103 lck_mtx_init(&evthdlr_lists_ctxt
->eventhandler_mutex
,
104 &eventhandler_mutex_grp
, LCK_ATTR_NULL
);
108 * Initialize the eventhandler mutex and list.
111 eventhandler_init(void)
113 eventhandler_lists_ctxt_init(&evthdlr_lists_ctxt_glb
);
115 eg_size
= sizeof(struct eventhandler_entry_generic
);
116 eg_cache
= mcache_create("eventhdlr_generic", eg_size
,
117 sizeof(uint64_t), 0, MCR_SLEEP
);
119 el_size
= sizeof(struct eventhandler_list
);
120 el_cache
= mcache_create("eventhdlr_list", el_size
,
121 sizeof(uint64_t), 0, MCR_SLEEP
);
125 eventhandler_reap_caches(boolean_t purge
)
127 mcache_reap_now(eg_cache
, purge
);
128 mcache_reap_now(el_cache
, purge
);
132 * Insertion is O(n) due to the priority scan, but optimises to O(1)
133 * if all priorities are identical.
135 static eventhandler_tag
136 eventhandler_register_internal(
137 struct eventhandler_lists_ctxt
*evthdlr_lists_ctxt
,
138 struct eventhandler_list
*list
,
139 const char *name
, eventhandler_tag epn
)
141 struct eventhandler_list
*new_list
;
142 struct eventhandler_entry
*ep
;
144 VERIFY(strlen(name
) <= (sizeof(new_list
->el_name
) - 1));
146 if (evthdlr_lists_ctxt
== NULL
) {
147 evthdlr_lists_ctxt
= &evthdlr_lists_ctxt_glb
;
150 VERIFY(evthdlr_lists_ctxt
->eventhandler_lists_initted
); /* eventhandler registered too early */
151 VERIFY(epn
!= NULL
); /* cannot register NULL event */
153 /* lock the eventhandler lists */
154 lck_mtx_lock_spin(&evthdlr_lists_ctxt
->eventhandler_mutex
);
156 /* Do we need to find/create the (slow) list? */
158 /* look for a matching, existing list */
159 list
= _eventhandler_find_list(evthdlr_lists_ctxt
, name
);
161 /* Do we need to create the list? */
163 lck_mtx_convert_spin(&evthdlr_lists_ctxt
->eventhandler_mutex
);
164 new_list
= mcache_alloc(el_cache
, MCR_SLEEP
);
165 if (new_list
== NULL
) {
166 evhlog((LOG_DEBUG
, "%s: Can't allocate list \"%s\"", __func__
, name
));
167 lck_mtx_unlock(&evthdlr_lists_ctxt
->eventhandler_mutex
);
170 bzero(new_list
, el_size
);
171 evhlog((LOG_DEBUG
, "%s: creating list \"%s\"", __func__
, name
));
174 list
->el_runcount
= 0;
175 bzero(&list
->el_lock
, sizeof(list
->el_lock
));
176 (void) snprintf(list
->el_name
, sizeof(list
->el_name
), "%s", name
);
177 TAILQ_INSERT_HEAD(&evthdlr_lists_ctxt
->eventhandler_lists
, list
, el_link
);
180 if (!(list
->el_flags
& EHL_INITTED
)) {
181 TAILQ_INIT(&list
->el_entries
);
183 list
->el_flags
|= EHL_INITTED
;
185 lck_mtx_unlock(&evthdlr_lists_ctxt
->eventhandler_mutex
);
187 KASSERT(epn
->ee_priority
!= EHE_DEAD_PRIORITY
,
188 ("%s: handler for %s registered with dead priority", __func__
, name
));
190 /* sort it into the list */
191 evhlog((LOG_DEBUG
, "%s: adding item %p (function %p to \"%s\"", __func__
, (void *)VM_KERNEL_ADDRPERM(epn
),
192 (void *)VM_KERNEL_UNSLIDE(((struct eventhandler_entry_generic
*)epn
)->func
), name
));
194 TAILQ_FOREACH(ep
, &list
->el_entries
, ee_link
) {
195 if (ep
->ee_priority
!= EHE_DEAD_PRIORITY
&&
196 epn
->ee_priority
< ep
->ee_priority
) {
197 TAILQ_INSERT_BEFORE(ep
, epn
, ee_link
);
202 TAILQ_INSERT_TAIL(&list
->el_entries
, epn
, ee_link
);
209 eventhandler_register(struct eventhandler_lists_ctxt
*evthdlr_lists_ctxt
,
210 struct eventhandler_list
*list
, const char *name
,
211 void *func
, struct eventhandler_entry_arg arg
, int priority
)
213 struct eventhandler_entry_generic
*eg
;
215 /* allocate an entry for this handler, populate it */
216 eg
= mcache_alloc(eg_cache
, MCR_SLEEP
);
218 evhlog((LOG_DEBUG
, "%s: Can't allocate entry to register for event list "
219 "\"%s\"", __func__
, name
));
225 eg
->ee
.ee_priority
= priority
;
227 return eventhandler_register_internal(evthdlr_lists_ctxt
, list
, name
, &eg
->ee
);
231 eventhandler_deregister(struct eventhandler_list
*list
, eventhandler_tag tag
)
233 struct eventhandler_entry
*ep
= tag
;
235 EHL_LOCK_ASSERT(list
, LCK_MTX_ASSERT_OWNED
);
237 /* remove just this entry */
238 if (list
->el_runcount
== 0) {
239 evhlog((LOG_DEBUG
, "%s: removing item %p from \"%s\"", __func__
, (void *)VM_KERNEL_ADDRPERM(ep
),
242 * We may have purged the list because of certain events.
243 * Make sure that is not the case when a specific entry
246 if (!TAILQ_EMPTY(&list
->el_entries
)) {
247 TAILQ_REMOVE(&list
->el_entries
, ep
, ee_link
);
249 EHL_LOCK_CONVERT(list
);
250 mcache_free(eg_cache
, ep
);
252 evhlog((LOG_DEBUG
, "%s: marking item %p from \"%s\" as dead", __func__
,
253 (void *)VM_KERNEL_ADDRPERM(ep
), list
->el_name
));
254 ep
->ee_priority
= EHE_DEAD_PRIORITY
;
257 /* remove entire list */
258 if (list
->el_runcount
== 0) {
259 evhlog((LOG_DEBUG
, "%s: removing all items from \"%s\"", __func__
,
261 EHL_LOCK_CONVERT(list
);
262 while (!TAILQ_EMPTY(&list
->el_entries
)) {
263 ep
= TAILQ_FIRST(&list
->el_entries
);
264 TAILQ_REMOVE(&list
->el_entries
, ep
, ee_link
);
265 mcache_free(eg_cache
, ep
);
268 evhlog((LOG_DEBUG
, "%s: marking all items from \"%s\" as dead",
269 __func__
, list
->el_name
));
270 TAILQ_FOREACH(ep
, &list
->el_entries
, ee_link
)
271 ep
->ee_priority
= EHE_DEAD_PRIORITY
;
274 while (list
->el_runcount
> 0) {
275 msleep((caddr_t
)list
, &list
->el_lock
, PSPIN
, "evhrm", 0);
281 * Internal version for use when eventhandler list is already locked.
283 static struct eventhandler_list
*
284 _eventhandler_find_list(struct eventhandler_lists_ctxt
*evthdlr_lists_ctxt
,
287 struct eventhandler_list
*list
;
289 VERIFY(evthdlr_lists_ctxt
!= NULL
);
291 LCK_MTX_ASSERT(&evthdlr_lists_ctxt
->eventhandler_mutex
, LCK_MTX_ASSERT_OWNED
);
292 TAILQ_FOREACH(list
, &evthdlr_lists_ctxt
->eventhandler_lists
, el_link
) {
293 if (!strcmp(name
, list
->el_name
)) {
301 * Lookup a "slow" list by name. Returns with the list locked.
303 struct eventhandler_list
*
304 eventhandler_find_list(struct eventhandler_lists_ctxt
*evthdlr_lists_ctxt
,
307 struct eventhandler_list
*list
;
309 if (evthdlr_lists_ctxt
== NULL
) {
310 evthdlr_lists_ctxt
= &evthdlr_lists_ctxt_glb
;
313 if (!evthdlr_lists_ctxt
->eventhandler_lists_initted
) {
317 /* scan looking for the requested list */
318 lck_mtx_lock_spin(&evthdlr_lists_ctxt
->eventhandler_mutex
);
319 list
= _eventhandler_find_list(evthdlr_lists_ctxt
, name
);
321 lck_mtx_convert_spin(&evthdlr_lists_ctxt
->eventhandler_mutex
);
324 lck_mtx_unlock(&evthdlr_lists_ctxt
->eventhandler_mutex
);
330 * Prune "dead" entries from an eventhandler list.
333 eventhandler_prune_list(struct eventhandler_list
*list
)
335 struct eventhandler_entry
*ep
, *en
;
338 evhlog((LOG_DEBUG
, "%s: pruning list \"%s\"", __func__
, list
->el_name
));
339 EHL_LOCK_ASSERT(list
, LCK_MTX_ASSERT_OWNED
);
340 TAILQ_FOREACH_SAFE(ep
, &list
->el_entries
, ee_link
, en
) {
341 if (ep
->ee_priority
== EHE_DEAD_PRIORITY
) {
342 TAILQ_REMOVE(&list
->el_entries
, ep
, ee_link
);
343 mcache_free(eg_cache
, ep
);
353 * This should be called when last reference to an object
355 * The individual event type lists must be purged when the object
359 eventhandler_lists_ctxt_destroy(struct eventhandler_lists_ctxt
*evthdlr_lists_ctxt
)
361 struct eventhandler_list
*list
= NULL
;
362 struct eventhandler_list
*list_next
= NULL
;
364 lck_mtx_lock(&evthdlr_lists_ctxt
->eventhandler_mutex
);
365 TAILQ_FOREACH_SAFE(list
, &evthdlr_lists_ctxt
->eventhandler_lists
,
366 el_link
, list_next
) {
367 VERIFY(TAILQ_EMPTY(&list
->el_entries
));
368 EHL_LOCK_DESTROY(list
);
369 mcache_free(el_cache
, list
);
371 lck_mtx_unlock(&evthdlr_lists_ctxt
->eventhandler_mutex
);
372 lck_mtx_destroy(&evthdlr_lists_ctxt
->eventhandler_mutex
,
373 &eventhandler_mutex_grp
);