/*
 * Copyright (c) 2016-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/mcache.h>
#include <sys/eventhandler.h>
#include <sys/sysctl.h>

int evh_debug = 0;

SYSCTL_NODE(_kern, OID_AUTO, eventhandler, CTLFLAG_RW | CTLFLAG_LOCKED,
    0, "Eventhandler");
SYSCTL_INT(_kern_eventhandler, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &evh_debug, 0, "Eventhandler debug mode");
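
/*
 * evh_debug gates the evhlog() traces used throughout this file; it is
 * tunable at run time through the kern.eventhandler.debug sysctl
 * declared above.
 */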

struct eventhandler_entry_arg eventhandler_entry_dummy_arg = { .ee_fm_uuid = { 0 }, .ee_fr_uuid = { 0 } };

/* List of 'slow' lists */
static struct eventhandler_lists_ctxt evthdlr_lists_ctxt_glb;
static LCK_GRP_DECLARE(eventhandler_mutex_grp, "eventhandler");

static unsigned int eg_size;            /* size of eventhandler_entry_generic */
static struct mcache *eg_cache;         /* mcache for eventhandler_entry_generic */

static unsigned int el_size;            /* size of eventhandler_list */
static struct mcache *el_cache;         /* mcache for eventhandler_list */

LCK_GRP_DECLARE(el_lock_grp, "eventhandler list");
LCK_ATTR_DECLARE(el_lock_attr, 0, 0);

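/*
 * Generic handler entry: pairs the common eventhandler_entry header
 * with the untyped callback pointer saved by eventhandler_register();
 * the typed invoke macros in <sys/eventhandler.h> cast "func" back to
 * the event's declared function signature before calling it.
 */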
struct eventhandler_entry_generic {
        struct eventhandler_entry       ee;
        void                            *func;
};

static struct eventhandler_list *_eventhandler_find_list(
    struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, const char *name);

void
eventhandler_lists_ctxt_init(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt)
{
        VERIFY(evthdlr_lists_ctxt != NULL);

        TAILQ_INIT(&evthdlr_lists_ctxt->eventhandler_lists);
        evthdlr_lists_ctxt->eventhandler_lists_initted = 1;
        lck_mtx_init(&evthdlr_lists_ctxt->eventhandler_mutex,
            &eventhandler_mutex_grp, LCK_ATTR_NULL);
}

/*
 * Initialize the eventhandler mutex and list.
 */
void
eventhandler_init(void)
{
        eventhandler_lists_ctxt_init(&evthdlr_lists_ctxt_glb);

        eg_size = sizeof(struct eventhandler_entry_generic);
        eg_cache = mcache_create("eventhdlr_generic", eg_size,
            sizeof(uint64_t), 0, MCR_SLEEP);

        el_size = sizeof(struct eventhandler_list);
        el_cache = mcache_create("eventhdlr_list", el_size,
            sizeof(uint64_t), 0, MCR_SLEEP);
}

void
eventhandler_reap_caches(boolean_t purge)
{
        mcache_reap_now(eg_cache, purge);
        mcache_reap_now(el_cache, purge);
}

/*
 * Insertion is O(n) due to the priority scan, but optimises to O(1)
 * if all priorities are identical.
 */
static eventhandler_tag
eventhandler_register_internal(
        struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
        struct eventhandler_list *list,
        const char *name, eventhandler_tag epn)
{
        struct eventhandler_list *new_list;
        struct eventhandler_entry *ep;

        VERIFY(strlen(name) <= (sizeof(new_list->el_name) - 1));

        if (evthdlr_lists_ctxt == NULL) {
                evthdlr_lists_ctxt = &evthdlr_lists_ctxt_glb;
        }

        VERIFY(evthdlr_lists_ctxt->eventhandler_lists_initted); /* eventhandler registered too early */
        VERIFY(epn != NULL); /* cannot register NULL event */

        /* lock the eventhandler lists */
        lck_mtx_lock_spin(&evthdlr_lists_ctxt->eventhandler_mutex);

        /* Do we need to find/create the (slow) list? */
        if (list == NULL) {
                /* look for a matching, existing list */
                list = _eventhandler_find_list(evthdlr_lists_ctxt, name);

                /* Do we need to create the list? */
                if (list == NULL) {
                        lck_mtx_convert_spin(&evthdlr_lists_ctxt->eventhandler_mutex);
                        new_list = mcache_alloc(el_cache, MCR_SLEEP);
                        if (new_list == NULL) {
                                evhlog((LOG_DEBUG, "%s: Can't allocate list \"%s\"", __func__, name));
                                lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex);
                                return NULL;
                        }
                        bzero(new_list, el_size);
                        evhlog((LOG_DEBUG, "%s: creating list \"%s\"", __func__, name));
                        list = new_list;
                        list->el_flags = 0;
                        list->el_runcount = 0;
                        bzero(&list->el_lock, sizeof(list->el_lock));
                        (void) snprintf(list->el_name, sizeof(list->el_name), "%s", name);
                        TAILQ_INSERT_HEAD(&evthdlr_lists_ctxt->eventhandler_lists, list, el_link);
                }
        }
        if (!(list->el_flags & EHL_INITTED)) {
                TAILQ_INIT(&list->el_entries);
                EHL_LOCK_INIT(list);
                list->el_flags |= EHL_INITTED;
        }
        lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex);

        KASSERT(epn->ee_priority != EHE_DEAD_PRIORITY,
            ("%s: handler for %s registered with dead priority", __func__, name));

        /* sort it into the list */
        evhlog((LOG_DEBUG, "%s: adding item %p (function %p) to \"%s\"", __func__, (void *)VM_KERNEL_ADDRPERM(epn),
            (void *)VM_KERNEL_UNSLIDE(((struct eventhandler_entry_generic *)epn)->func), name));
        EHL_LOCK(list);
        TAILQ_FOREACH(ep, &list->el_entries, ee_link) {
                if (ep->ee_priority != EHE_DEAD_PRIORITY &&
                    epn->ee_priority < ep->ee_priority) {
                        TAILQ_INSERT_BEFORE(ep, epn, ee_link);
                        break;
                }
        }
        if (ep == NULL) {
                TAILQ_INSERT_TAIL(&list->el_entries, epn, ee_link);
        }
        EHL_UNLOCK(list);
        return epn;
}

eventhandler_tag
eventhandler_register(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
    struct eventhandler_list *list, const char *name,
    void *func, struct eventhandler_entry_arg arg, int priority)
{
        struct eventhandler_entry_generic *eg;

        /* allocate an entry for this handler, populate it */
        eg = mcache_alloc(eg_cache, MCR_SLEEP);
        if (eg == NULL) {
                evhlog((LOG_DEBUG, "%s: Can't allocate entry to register for event list "
                    "\"%s\"", __func__, name));
                return NULL;
        }
        bzero(eg, eg_size);
        eg->func = func;
        eg->ee.ee_arg = arg;
        eg->ee.ee_priority = priority;

        return eventhandler_register_internal(evthdlr_lists_ctxt, list, name, &eg->ee);
}
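
/*
 * Illustrative sketch only: callers normally go through the
 * EVENTHANDLER_REGISTER()/EVENTHANDLER_DEREGISTER() macros from
 * <sys/eventhandler.h> rather than calling these functions directly.
 * Assuming a hypothetical event "my_event" declared with
 * EVENTHANDLER_DECLARE() and a matching handler "my_handler", usage
 * would look roughly like:
 *
 *      eventhandler_tag tag;
 *
 *      tag = EVENTHANDLER_REGISTER(&my_ctxt, my_event, my_handler,
 *          eventhandler_entry_dummy_arg, EVENTHANDLER_PRI_ANY);
 *      ...
 *      EVENTHANDLER_DEREGISTER(&my_ctxt, my_event, tag);
 */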

void
eventhandler_deregister(struct eventhandler_list *list, eventhandler_tag tag)
{
        struct eventhandler_entry *ep = tag;

        EHL_LOCK_ASSERT(list, LCK_MTX_ASSERT_OWNED);
        if (ep != NULL) {
                /* remove just this entry */
                if (list->el_runcount == 0) {
                        evhlog((LOG_DEBUG, "%s: removing item %p from \"%s\"", __func__, (void *)VM_KERNEL_ADDRPERM(ep),
                            list->el_name));
                        /*
                         * The list may already have been purged in
                         * response to certain events; make sure the
                         * entry is still present before removing it.
                         */
                        if (!TAILQ_EMPTY(&list->el_entries)) {
                                TAILQ_REMOVE(&list->el_entries, ep, ee_link);
                        }
                        EHL_LOCK_CONVERT(list);
                        mcache_free(eg_cache, ep);
                } else {
                        evhlog((LOG_DEBUG, "%s: marking item %p from \"%s\" as dead", __func__,
                            (void *)VM_KERNEL_ADDRPERM(ep), list->el_name));
                        ep->ee_priority = EHE_DEAD_PRIORITY;
                }
        } else {
                /* remove entire list */
                if (list->el_runcount == 0) {
                        evhlog((LOG_DEBUG, "%s: removing all items from \"%s\"", __func__,
                            list->el_name));
                        EHL_LOCK_CONVERT(list);
                        while (!TAILQ_EMPTY(&list->el_entries)) {
                                ep = TAILQ_FIRST(&list->el_entries);
                                TAILQ_REMOVE(&list->el_entries, ep, ee_link);
                                mcache_free(eg_cache, ep);
                        }
                } else {
                        evhlog((LOG_DEBUG, "%s: marking all items from \"%s\" as dead",
                            __func__, list->el_name));
                        TAILQ_FOREACH(ep, &list->el_entries, ee_link) {
                                ep->ee_priority = EHE_DEAD_PRIORITY;
                        }
                }
        }
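        /*
         * If handlers are currently running, wait for the run count to
         * drain; the matching wakeup() comes from
         * eventhandler_prune_list() once the dead entries are reclaimed.
         */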
        while (list->el_runcount > 0) {
                msleep((caddr_t)list, &list->el_lock, PSPIN, "evhrm", 0);
        }
        EHL_UNLOCK(list);
}

/*
 * Internal version for use when the eventhandler list is already locked.
 */
static struct eventhandler_list *
_eventhandler_find_list(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
    const char *name)
{
        struct eventhandler_list *list;

        VERIFY(evthdlr_lists_ctxt != NULL);

        LCK_MTX_ASSERT(&evthdlr_lists_ctxt->eventhandler_mutex, LCK_MTX_ASSERT_OWNED);
        TAILQ_FOREACH(list, &evthdlr_lists_ctxt->eventhandler_lists, el_link) {
                if (!strcmp(name, list->el_name)) {
                        break;
                }
        }
        return list;
}

/*
 * Lookup a "slow" list by name.  Returns with the list locked.
 */
struct eventhandler_list *
eventhandler_find_list(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
    const char *name)
{
        struct eventhandler_list *list;

        if (evthdlr_lists_ctxt == NULL) {
                evthdlr_lists_ctxt = &evthdlr_lists_ctxt_glb;
        }

        if (!evthdlr_lists_ctxt->eventhandler_lists_initted) {
                return NULL;
        }

        /* scan looking for the requested list */
        lck_mtx_lock_spin(&evthdlr_lists_ctxt->eventhandler_mutex);
        list = _eventhandler_find_list(evthdlr_lists_ctxt, name);
        if (list != NULL) {
                lck_mtx_convert_spin(&evthdlr_lists_ctxt->eventhandler_mutex);
                EHL_LOCK_SPIN(list);
        }
        lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex);

        return list;
}
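
/*
 * Illustrative note: event delivery pairs this lookup with the invoke
 * machinery in <sys/eventhandler.h>: the list is found (and returned
 * locked), el_runcount is raised while each non-dead entry's function
 * is called, and dead entries are pruned afterwards.  "my_event" below
 * is a hypothetical name used only for illustration:
 *
 *      EVENTHANDLER_INVOKE(&my_ctxt, my_event, arg0);
 */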

/*
 * Prune "dead" entries from an eventhandler list.
 */
void
eventhandler_prune_list(struct eventhandler_list *list)
{
        struct eventhandler_entry *ep, *en;
        int pruned = 0;

        evhlog((LOG_DEBUG, "%s: pruning list \"%s\"", __func__, list->el_name));
        EHL_LOCK_ASSERT(list, LCK_MTX_ASSERT_OWNED);
        TAILQ_FOREACH_SAFE(ep, &list->el_entries, ee_link, en) {
                if (ep->ee_priority == EHE_DEAD_PRIORITY) {
                        TAILQ_REMOVE(&list->el_entries, ep, ee_link);
                        mcache_free(eg_cache, ep);
                        pruned++;
                }
        }
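        /* Wake any thread sleeping in eventhandler_deregister(). */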
        if (pruned > 0) {
                wakeup(list);
        }
}

/*
 * This should be called when the last reference to an object is being
 * released.  The individual event type lists must be purged when the
 * object becomes defunct.
 */
void
eventhandler_lists_ctxt_destroy(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt)
{
        struct eventhandler_list *list = NULL;
        struct eventhandler_list *list_next = NULL;

        lck_mtx_lock(&evthdlr_lists_ctxt->eventhandler_mutex);
        TAILQ_FOREACH_SAFE(list, &evthdlr_lists_ctxt->eventhandler_lists,
            el_link, list_next) {
                VERIFY(TAILQ_EMPTY(&list->el_entries));
                EHL_LOCK_DESTROY(list);
                mcache_free(el_cache, list);
        }
        lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex);
        lck_mtx_destroy(&evthdlr_lists_ctxt->eventhandler_mutex,
            &eventhandler_mutex_grp);
        return;
}