]> git.saurik.com Git - apple/xnu.git/blame - bsd/kern/subr_eventhandler.c
xnu-7195.81.3.tar.gz
[apple/xnu.git] / bsd / kern / subr_eventhandler.c
CommitLineData
5ba3f43e 1/*
cb323159 2 * Copyright (c) 2016-2019 Apple Inc. All rights reserved.
5ba3f43e
A
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*-
29 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
30 * All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE.
52 */
53
54#include <sys/cdefs.h>
55#include <sys/param.h>
56#include <sys/kernel.h>
57#include <kern/queue.h>
58#include <kern/locks.h>
59#include <sys/malloc.h>
60#include <sys/proc.h>
61#include <sys/systm.h>
62#include <sys/mcache.h>
63#include <sys/eventhandler.h>
64#include <sys/sysctl.h>
65
int evh_debug = 0;      /* toggled via the kern.eventhandler.debug sysctl below */

MALLOC_DEFINE(M_EVENTHANDLER, "eventhandler", "Event handler records");

SYSCTL_NODE(_kern, OID_AUTO, eventhandler, CTLFLAG_RW | CTLFLAG_LOCKED,
    0, "Eventhandler");
SYSCTL_INT(_kern_eventhandler, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &evh_debug, 0, "Eventhandler debug mode");

/* Zero-filled argument for registrations that carry no payload. */
struct eventhandler_entry_arg eventhandler_entry_dummy_arg = { .ee_fm_uuid = { 0 }, .ee_fr_uuid = { 0 } };

/* List of 'slow' lists */
static struct eventhandler_lists_ctxt evthdlr_lists_ctxt_glb;
static lck_grp_attr_t *eventhandler_mutex_grp_attr;
static lck_grp_t *eventhandler_mutex_grp;
static lck_attr_t *eventhandler_mutex_attr;

static unsigned int eg_size;            /* size of eventhandler_entry_generic */
static struct mcache *eg_cache;         /* mcache for eventhandler_entry_generic */

static unsigned int el_size;            /* size of eventhandler_list */
static struct mcache *el_cache;         /* mcache for eventhandler_list */

/* Lock group/attributes shared by every per-list lock (EHL_LOCK_INIT). */
static lck_grp_attr_t *el_lock_grp_attr;
lck_grp_t *el_lock_grp;
lck_attr_t *el_lock_attr;

/*
 * Generic registration record: the common entry header followed by the
 * caller-supplied handler function pointer.
 */
struct eventhandler_entry_generic {
	struct eventhandler_entry ee;
	void *func;
};

static struct eventhandler_list *_eventhandler_find_list(
    struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, const char *name);
/*
 * Prepare a per-context set of eventhandler lists: empty head of the
 * list-of-lists, the "initted" flag checked at registration time, and
 * the mutex guarding the list of lists.
 */
void
eventhandler_lists_ctxt_init(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt)
{
	VERIFY(evthdlr_lists_ctxt != NULL);

	TAILQ_INIT(&evthdlr_lists_ctxt->eventhandler_lists);
	evthdlr_lists_ctxt->eventhandler_lists_initted = 1;
	lck_mtx_init(&evthdlr_lists_ctxt->eventhandler_mutex,
	    eventhandler_mutex_grp, eventhandler_mutex_attr);
}
111
/*
 * Initialize the eventhandler mutex and list.
 *
 * Sets up the lock groups, the global lists context, and the two
 * mcaches (entry records and list heads).  Must run before any
 * registration — eventhandler_register_internal() VERIFYs that the
 * context was initialized.
 */
void
eventhandler_init(void)
{
	eventhandler_mutex_grp_attr = lck_grp_attr_alloc_init();
	eventhandler_mutex_grp = lck_grp_alloc_init("eventhandler",
	    eventhandler_mutex_grp_attr);
	eventhandler_mutex_attr = lck_attr_alloc_init();

	el_lock_grp_attr = lck_grp_attr_alloc_init();
	el_lock_grp = lck_grp_alloc_init("eventhandler list",
	    el_lock_grp_attr);
	el_lock_attr = lck_attr_alloc_init();

	eventhandler_lists_ctxt_init(&evthdlr_lists_ctxt_glb);

	/* caches are 8-byte aligned (sizeof(uint64_t)) */
	eg_size = sizeof(struct eventhandler_entry_generic);
	eg_cache = mcache_create("eventhdlr_generic", eg_size,
	    sizeof(uint64_t), 0, MCR_SLEEP);

	el_size = sizeof(struct eventhandler_list);
	el_cache = mcache_create("eventhdlr_list", el_size,
	    sizeof(uint64_t), 0, MCR_SLEEP);
}
138
/*
 * Return cached elements to the system; 'purge' forces a full drain.
 * Hooked into the memory-pressure / cache-reaping machinery.
 */
void
eventhandler_reap_caches(boolean_t purge)
{
	mcache_reap_now(eg_cache, purge);
	mcache_reap_now(el_cache, purge);
}
145
146/*
147 * Insertion is O(n) due to the priority scan, but optimises to O(1)
148 * if all priorities are identical.
149 */
150static eventhandler_tag
151eventhandler_register_internal(
0a7de745
A
152 struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
153 struct eventhandler_list *list,
154 const char *name, eventhandler_tag epn)
5ba3f43e 155{
0a7de745
A
156 struct eventhandler_list *new_list;
157 struct eventhandler_entry *ep;
5ba3f43e 158
0a7de745 159 VERIFY(strlen(name) <= (sizeof(new_list->el_name) - 1));
a39ff7e2 160
0a7de745 161 if (evthdlr_lists_ctxt == NULL) {
5ba3f43e 162 evthdlr_lists_ctxt = &evthdlr_lists_ctxt_glb;
0a7de745 163 }
5ba3f43e
A
164
165 VERIFY(evthdlr_lists_ctxt->eventhandler_lists_initted); /* eventhandler registered too early */
166 VERIFY(epn != NULL); /* cannot register NULL event */
167
168 /* lock the eventhandler lists */
a39ff7e2 169 lck_mtx_lock_spin(&evthdlr_lists_ctxt->eventhandler_mutex);
5ba3f43e
A
170
171 /* Do we need to find/create the (slow) list? */
172 if (list == NULL) {
173 /* look for a matching, existing list */
174 list = _eventhandler_find_list(evthdlr_lists_ctxt, name);
175
176 /* Do we need to create the list? */
177 if (list == NULL) {
a39ff7e2
A
178 lck_mtx_convert_spin(&evthdlr_lists_ctxt->eventhandler_mutex);
179 new_list = mcache_alloc(el_cache, MCR_SLEEP);
cb323159
A
180 if (new_list == NULL) {
181 evhlog((LOG_DEBUG, "%s: Can't allocate list \"%s\"", __func__, name));
182 lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex);
183 return NULL;
184 }
a39ff7e2
A
185 bzero(new_list, el_size);
186 evhlog((LOG_DEBUG, "%s: creating list \"%s\"", __func__, name));
187 list = new_list;
188 list->el_flags = 0;
189 list->el_runcount = 0;
190 bzero(&list->el_lock, sizeof(list->el_lock));
0a7de745 191 (void) snprintf(list->el_name, sizeof(list->el_name), "%s", name);
a39ff7e2 192 TAILQ_INSERT_HEAD(&evthdlr_lists_ctxt->eventhandler_lists, list, el_link);
5ba3f43e
A
193 }
194 }
195 if (!(list->el_flags & EHL_INITTED)) {
196 TAILQ_INIT(&list->el_entries);
197 EHL_LOCK_INIT(list);
198 list->el_flags |= EHL_INITTED;
199 }
200 lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex);
201
202 KASSERT(epn->ee_priority != EHE_DEAD_PRIORITY,
203 ("%s: handler for %s registered with dead priority", __func__, name));
204
205 /* sort it into the list */
f427ee49
A
206 evhlog((LOG_DEBUG, "%s: adding item %p (function %p to \"%s\"", __func__, (void *)VM_KERNEL_ADDRPERM(epn),
207 (void *)VM_KERNEL_UNSLIDE(((struct eventhandler_entry_generic *)epn)->func), name));
5ba3f43e
A
208 EHL_LOCK(list);
209 TAILQ_FOREACH(ep, &list->el_entries, ee_link) {
210 if (ep->ee_priority != EHE_DEAD_PRIORITY &&
211 epn->ee_priority < ep->ee_priority) {
212 TAILQ_INSERT_BEFORE(ep, epn, ee_link);
213 break;
214 }
215 }
0a7de745 216 if (ep == NULL) {
5ba3f43e 217 TAILQ_INSERT_TAIL(&list->el_entries, epn, ee_link);
0a7de745 218 }
5ba3f43e 219 EHL_UNLOCK(list);
0a7de745 220 return epn;
5ba3f43e
A
221}
222
223eventhandler_tag
224eventhandler_register(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
225 struct eventhandler_list *list, const char *name,
226 void *func, struct eventhandler_entry_arg arg, int priority)
227{
0a7de745 228 struct eventhandler_entry_generic *eg;
5ba3f43e
A
229
230 /* allocate an entry for this handler, populate it */
a39ff7e2 231 eg = mcache_alloc(eg_cache, MCR_SLEEP);
cb323159
A
232 if (eg == NULL) {
233 evhlog((LOG_DEBUG, "%s: Can't allocate entry to register for event list "
234 "\"%s\"", __func__, name));
235 return NULL;
236 }
a39ff7e2 237 bzero(eg, eg_size);
5ba3f43e
A
238 eg->func = func;
239 eg->ee.ee_arg = arg;
240 eg->ee.ee_priority = priority;
241
0a7de745 242 return eventhandler_register_internal(evthdlr_lists_ctxt, list, name, &eg->ee);
5ba3f43e
A
243}
244
/*
 * Deregister handlers from a list.  The caller must hold the list lock
 * (EHL_LOCK); it is released before returning.
 *
 * tag != NULL: remove only that entry.
 * tag == NULL: remove every entry on the list.
 *
 * If the list is mid-invocation (el_runcount > 0) entries cannot be
 * unlinked safely, so they are marked EHE_DEAD_PRIORITY instead and
 * reclaimed later by eventhandler_prune_list().  In all cases we sleep
 * until in-flight invocations drain before dropping the lock.
 */
void
eventhandler_deregister(struct eventhandler_list *list, eventhandler_tag tag)
{
	struct eventhandler_entry *ep = tag;

	EHL_LOCK_ASSERT(list, LCK_MTX_ASSERT_OWNED);
	if (ep != NULL) {
		/* remove just this entry */
		if (list->el_runcount == 0) {
			evhlog((LOG_DEBUG, "%s: removing item %p from \"%s\"", __func__, (void *)VM_KERNEL_ADDRPERM(ep),
			    list->el_name));
			/*
			 * We may have purged the list because of certain events.
			 * Make sure that is not the case when a specific entry
			 * is being removed.
			 */
			if (!TAILQ_EMPTY(&list->el_entries)) {
				TAILQ_REMOVE(&list->el_entries, ep, ee_link);
			}
			/* about to free: convert the spin lock to a full mutex */
			EHL_LOCK_CONVERT(list);
			mcache_free(eg_cache, ep);
		} else {
			evhlog((LOG_DEBUG, "%s: marking item %p from \"%s\" as dead", __func__,
			    (void *)VM_KERNEL_ADDRPERM(ep), list->el_name));
			ep->ee_priority = EHE_DEAD_PRIORITY;
		}
	} else {
		/* remove entire list */
		if (list->el_runcount == 0) {
			evhlog((LOG_DEBUG, "%s: removing all items from \"%s\"", __func__,
			    list->el_name));
			EHL_LOCK_CONVERT(list);
			while (!TAILQ_EMPTY(&list->el_entries)) {
				ep = TAILQ_FIRST(&list->el_entries);
				TAILQ_REMOVE(&list->el_entries, ep, ee_link);
				mcache_free(eg_cache, ep);
			}
		} else {
			evhlog((LOG_DEBUG, "%s: marking all items from \"%s\" as dead",
			    __func__, list->el_name));
			TAILQ_FOREACH(ep, &list->el_entries, ee_link)
			ep->ee_priority = EHE_DEAD_PRIORITY;
		}
	}
	/* wait for any running invocations to finish; pruning wakes us */
	while (list->el_runcount > 0) {
		msleep((caddr_t)list, &list->el_lock, PSPIN, "evhrm", 0);
	}
	EHL_UNLOCK(list);
}
294
295/*
296 * Internal version for use when eventhandler list is already locked.
297 */
298static struct eventhandler_list *
299_eventhandler_find_list(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
300 const char *name)
301{
0a7de745 302 struct eventhandler_list *list;
5ba3f43e
A
303
304 VERIFY(evthdlr_lists_ctxt != NULL);
305
306 LCK_MTX_ASSERT(&evthdlr_lists_ctxt->eventhandler_mutex, LCK_MTX_ASSERT_OWNED);
307 TAILQ_FOREACH(list, &evthdlr_lists_ctxt->eventhandler_lists, el_link) {
0a7de745 308 if (!strcmp(name, list->el_name)) {
5ba3f43e 309 break;
0a7de745 310 }
5ba3f43e 311 }
0a7de745 312 return list;
5ba3f43e
A
313}
314
/*
 * Lookup a "slow" list by name.  Returns with the list locked
 * (EHL_LOCK_SPIN); the caller must unlock it.  Returns NULL if the
 * context was never initialized or no list by that name exists.
 */
struct eventhandler_list *
eventhandler_find_list(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
    const char *name)
{
	struct eventhandler_list *list;

	/* NULL context selects the global context */
	if (evthdlr_lists_ctxt == NULL) {
		evthdlr_lists_ctxt = &evthdlr_lists_ctxt_glb;
	}

	if (!evthdlr_lists_ctxt->eventhandler_lists_initted) {
		return NULL;
	}

	/* scan looking for the requested list */
	lck_mtx_lock_spin(&evthdlr_lists_ctxt->eventhandler_mutex);
	list = _eventhandler_find_list(evthdlr_lists_ctxt, name);
	if (list != NULL) {
		/* taking a second lock while spinning is unsafe: convert first */
		lck_mtx_convert_spin(&evthdlr_lists_ctxt->eventhandler_mutex);
		EHL_LOCK_SPIN(list);
	}
	lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex);

	return list;
}
343
344/*
345 * Prune "dead" entries from an eventhandler list.
346 */
347void
348eventhandler_prune_list(struct eventhandler_list *list)
349{
350 struct eventhandler_entry *ep, *en;
351 int pruned = 0;
352
353 evhlog((LOG_DEBUG, "%s: pruning list \"%s\"", __func__, list->el_name));
354 EHL_LOCK_ASSERT(list, LCK_MTX_ASSERT_OWNED);
355 TAILQ_FOREACH_SAFE(ep, &list->el_entries, ee_link, en) {
356 if (ep->ee_priority == EHE_DEAD_PRIORITY) {
357 TAILQ_REMOVE(&list->el_entries, ep, ee_link);
a39ff7e2 358 mcache_free(eg_cache, ep);
5ba3f43e
A
359 pruned++;
360 }
361 }
0a7de745 362 if (pruned > 0) {
5ba3f43e 363 wakeup(list);
0a7de745 364 }
5ba3f43e
A
365}
366
/*
 * This should be called when last reference to an object
 * is being released.
 * The individual event type lists must be purged when the object
 * becomes defunct.
 *
 * Frees every (already-empty) list in the context along with its lock,
 * then destroys the context mutex itself.
 */
void
eventhandler_lists_ctxt_destroy(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt)
{
	struct eventhandler_list *list = NULL;
	struct eventhandler_list *list_next = NULL;

	lck_mtx_lock(&evthdlr_lists_ctxt->eventhandler_mutex);
	TAILQ_FOREACH_SAFE(list, &evthdlr_lists_ctxt->eventhandler_lists,
	    el_link, list_next) {
		/* lists must have been purged before teardown */
		VERIFY(TAILQ_EMPTY(&list->el_entries));
		EHL_LOCK_DESTROY(list);
		mcache_free(el_cache, list);
	}
	lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex);
	lck_mtx_destroy(&evthdlr_lists_ctxt->eventhandler_mutex,
	    eventhandler_mutex_grp);
	return;
}