/*
 * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define PTHREAD_INTERNAL 1

#include <stdatomic.h>
#include <kern/debug.h>
#include <kern/mach_param.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/affinity.h>
#include <kern/zalloc.h>
#include <kern/policy_internal.h>

#include <machine/machine_routines.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <sys/param.h>
#include <sys/eventvar.h>
#include <sys/pthread_shims.h>
#include <pthread/workqueue_internal.h>
#include <sys/cdefs.h>
#include <sys/proc_info.h>
#include <sys/proc_internal.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#include <kern/kcdata.h>

/* version number of the in-kernel shims given to pthread.kext */
#define PTHREAD_SHIMS_VERSION 1

/* on arm, the callbacks structure has two #ifdef arm pointers */
#if defined(__arm__)
#define PTHREAD_CALLBACK_MEMBER __unused_was_map_is_1gb
#else
#define PTHREAD_CALLBACK_MEMBER __unused_was_ml_get_max_cpus
#endif

/* compile time asserts to check the length of structures in pthread_shims.h */
static_assert((sizeof(struct pthread_functions_s) - offsetof(struct pthread_functions_s, psynch_rw_yieldwrlock) - sizeof(void*)) == (sizeof(void*) * 100));
static_assert((sizeof(struct pthread_callbacks_s) - offsetof(struct pthread_callbacks_s, PTHREAD_CALLBACK_MEMBER) - sizeof(void*)) == (sizeof(void*) * 100));

/* old pthread code had definitions for these as they don't exist in headers */
extern kern_return_t mach_port_deallocate(ipc_space_t, mach_port_name_t);
extern kern_return_t semaphore_signal_internal_trap(mach_port_name_t);
extern void thread_deallocate_safe(thread_t thread);

#define PTHREAD_STRUCT_ACCESSOR(get, set, rettype, structtype, member) \
	static rettype \
	get(structtype x) { \
		return (x)->member; \
	} \
	static void \
	set(structtype x, rettype y) { \
		(x)->member = y; \
	}
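
/*
 * For illustration, expanding the macro for the first accessor pair below is
 * equivalent to (a sketch, not generated code):
 *
 *	static user_addr_t
 *	proc_get_threadstart(struct proc *x) {
 *		return (x)->p_threadstart;
 *	}
 *	static void
 *	proc_set_threadstart(struct proc *x, user_addr_t y) {
 *		(x)->p_threadstart = y;
 *	}
 */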

PTHREAD_STRUCT_ACCESSOR(proc_get_threadstart, proc_set_threadstart, user_addr_t, struct proc*, p_threadstart);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthsize, proc_set_pthsize, int, struct proc*, p_pthsize);
PTHREAD_STRUCT_ACCESSOR(proc_get_wqthread, proc_set_wqthread, user_addr_t, struct proc*, p_wqthread);
PTHREAD_STRUCT_ACCESSOR(proc_get_stack_addr_hint, proc_set_stack_addr_hint, user_addr_t, struct proc *, p_stack_addr_hint);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthread_tsd_offset, proc_set_pthread_tsd_offset, uint32_t, struct proc *, p_pth_tsd_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_mach_thread_self_tsd_offset, proc_set_mach_thread_self_tsd_offset, uint64_t, struct proc *, p_mach_thread_self_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthhash, proc_set_pthhash, void*, struct proc*, p_pthhash);

#define WQPTR_IS_INITING_VALUE ((void *)~(uintptr_t)0)

static void
proc_set_dispatchqueue_offset(struct proc *p, uint64_t offset)
{
	p->p_dispatchqueue_offset = offset;
}

static void
proc_set_return_to_kernel_offset(struct proc *p, uint64_t offset)
{
	p->p_return_to_kernel_offset = offset;
}

static user_addr_t
proc_get_user_stack(struct proc *p)
{
	return p->user_stack;
}

static void
uthread_set_returnval(struct uthread *uth, int retval)
{
	uth->uu_rval[0] = retval;
}

__attribute__((noreturn))
static void
pthread_returning_to_userspace(void)
{
	thread_exception_return();
}

__attribute__((noreturn))
static void
pthread_bootstrap_return(void)
{
	thread_bootstrap_return();
}

static uint32_t
get_task_threadmax(void)
{
	return task_threadmax;
}

static uint64_t
proc_get_register(struct proc *p)
{
	return p->p_lflag & P_LREGISTER;
}

static void
proc_set_register(struct proc *p)
{
	proc_setregister(p);
}

static void*
uthread_get_uukwe(struct uthread *t)
{
	return &t->uu_save.uus_kwe;
}

static int
uthread_is_cancelled(struct uthread *t)
{
	return (t->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL;
}

static vm_map_t
_current_map(void)
{
	return current_map();
}

static boolean_t
qos_main_thread_active(void)
{
	return TRUE;
}

static int
proc_usynch_get_requested_thread_qos(struct uthread *uth)
{
	thread_t thread = uth ? uth->uu_thread : current_thread();
	int requested_qos;

	requested_qos = proc_get_thread_policy(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS);

	/*
	 * For the purposes of userspace synchronization, it doesn't make sense to
	 * place an override of UNSPECIFIED on another thread if the current thread
	 * doesn't have any QoS set. In these cases, upgrade to
	 * THREAD_QOS_USER_INTERACTIVE.
	 */
	if (requested_qos == THREAD_QOS_UNSPECIFIED) {
		requested_qos = THREAD_QOS_USER_INTERACTIVE;
	}

	return requested_qos;
}

static boolean_t
proc_usynch_thread_qos_add_override_for_resource(task_t task, struct uthread *uth,
    uint64_t tid, int override_qos, boolean_t first_override_for_resource,
    user_addr_t resource, int resource_type)
{
	thread_t thread = uth ? uth->uu_thread : THREAD_NULL;

	return proc_thread_qos_add_override(task, thread, tid, override_qos,
	    first_override_for_resource, resource, resource_type) == 0;
}

static boolean_t
proc_usynch_thread_qos_remove_override_for_resource(task_t task,
    struct uthread *uth, uint64_t tid, user_addr_t resource, int resource_type)
{
	thread_t thread = uth ? uth->uu_thread : THREAD_NULL;

	return proc_thread_qos_remove_override(task, thread, tid, resource,
	    resource_type) == 0;
}

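/*
 * Wait/wakeup helpers handed to pthread.kext for the psynch kernel wait
 * queues (kwq). When a turnstile storage pointer is supplied, the wait is
 * routed through a TURNSTILE_PTHREAD_MUTEX turnstile with the owning thread
 * set as inheritor, so the owner can be pushed on behalf of the waiter;
 * otherwise a plain assert_wait on the kwq address is used.
 */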
static wait_result_t
psynch_wait_prepare(uintptr_t kwq, struct turnstile **tstore,
    thread_t owner, block_hint_t block_hint, uint64_t deadline)
{
	struct turnstile *ts;
	wait_result_t wr;

	if (tstore) {
		ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL,
		    TURNSTILE_PTHREAD_MUTEX);

		turnstile_update_inheritor(ts, owner,
		    (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

		thread_set_pending_block_hint(current_thread(), block_hint);

		wr = waitq_assert_wait64_leeway(&ts->ts_waitq, (event64_t)kwq,
		    THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	} else {
		thread_set_pending_block_hint(current_thread(), block_hint);

		wr = assert_wait_deadline_with_leeway((event_t)kwq, THREAD_ABORTSAFE,
		    TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	}

	return wr;
}

static void
psynch_wait_update_complete(struct turnstile *ts)
{
	assert(ts);
	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
}

static void
psynch_wait_complete(uintptr_t kwq, struct turnstile **tstore)
{
	assert(tstore);
	turnstile_complete(kwq, tstore, NULL);
}

static void
psynch_wait_update_owner(uintptr_t kwq, thread_t owner,
    struct turnstile **tstore)
{
	struct turnstile *ts;

	ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL,
	    TURNSTILE_PTHREAD_MUTEX);

	turnstile_update_inheritor(ts, owner,
	    (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD));
	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
	turnstile_complete(kwq, tstore, NULL);
}

static void
psynch_wait_cleanup(void)
{
	turnstile_cleanup();
}

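/*
 * Wake a specific waiter. The ksyn_waitq_element is embedded in the waiter's
 * uthread (uu_save.uus_kwe), so __container_of() recovers the uthread and
 * therefore the thread to wake; on the turnstile path the inheritor is
 * updated to the woken thread before the wakeup is issued.
 */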
static kern_return_t
psynch_wait_wakeup(uintptr_t kwq, struct ksyn_waitq_element *kwe,
    struct turnstile **tstore)
{
	struct uthread *uth;
	struct turnstile *ts;
	kern_return_t kr;

	uth = __container_of(kwe, struct uthread, uu_save.uus_kwe);
	assert(uth);

	if (tstore) {
		ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL,
		    TURNSTILE_PTHREAD_MUTEX);
		turnstile_update_inheritor(ts, uth->uu_thread,
		    (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD));

		kr = waitq_wakeup64_thread(&ts->ts_waitq, (event64_t)kwq,
		    uth->uu_thread, THREAD_AWAKENED);

		turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
		turnstile_complete(kwq, tstore, NULL);
	} else {
		kr = thread_wakeup_thread((event_t)kwq, uth->uu_thread);
	}

	return kr;
}

/* kernel (core) to kext shims */

void
pthread_init(void)
{
	if (!pthread_functions) {
		panic("pthread kernel extension not loaded (function table is NULL).");
	}
	pthread_functions->pthread_init();
}

void
pth_proc_hashinit(proc_t p)
{
	pthread_functions->pth_proc_hashinit(p);
}

void
pth_proc_hashdelete(proc_t p)
{
	pthread_functions->pth_proc_hashdelete(p);
}

/* syscall shims */
int
bsdthread_create(struct proc *p, struct bsdthread_create_args *uap, user_addr_t *retval)
{
	return pthread_functions->bsdthread_create(p, uap->func, uap->func_arg, uap->stack, uap->pthread, uap->flags, retval);
}

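/*
 * bsdthread_register() converts the two user entry points (threadstart and
 * wqthread, which the static_assert checks are laid out consecutively in the
 * args structure) into their in-kernel representation, then dispatches to
 * bsdthread_register2() when the kext advertises shims version >= 1 so the
 * additional tsd_offset argument can be passed through; older kexts get the
 * legacy entry point.
 */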
int
bsdthread_register(struct proc *p, struct bsdthread_register_args *uap, __unused int32_t *retval)
{
	kern_return_t kr;
	static_assert(offsetof(struct bsdthread_register_args, threadstart) + sizeof(user_addr_t) ==
	    offsetof(struct bsdthread_register_args, wqthread));
	kr = machine_thread_function_pointers_convert_from_user(current_thread(), &uap->threadstart, 2);
	assert(kr == KERN_SUCCESS);

	if (pthread_functions->version >= 1) {
		return pthread_functions->bsdthread_register2(p, uap->threadstart,
		    uap->wqthread, uap->flags, uap->stack_addr_hint,
		    uap->targetconc_ptr, uap->dispatchqueue_offset,
		    uap->tsd_offset, retval);
	} else {
		return pthread_functions->bsdthread_register(p, uap->threadstart,
		    uap->wqthread, uap->flags, uap->stack_addr_hint,
		    uap->targetconc_ptr, uap->dispatchqueue_offset,
		    retval);
	}
}

int
bsdthread_terminate(struct proc *p, struct bsdthread_terminate_args *uap, int32_t *retval)
{
	thread_t th = current_thread();
	if (thread_get_tag(th) & THREAD_TAG_WORKQUEUE) {
		workq_thread_terminate(p, get_bsdthread_info(th));
	}
	return pthread_functions->bsdthread_terminate(p, uap->stackaddr, uap->freesize, uap->port, uap->sem, retval);
}

int
thread_selfid(struct proc *p, __unused struct thread_selfid_args *uap, uint64_t *retval)
{
	return pthread_functions->thread_selfid(p, retval);
}

/* pthread synchroniser syscalls */

int
psynch_mutexwait(proc_t p, struct psynch_mutexwait_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_mutexwait(p, uap->mutex, uap->mgen, uap->ugen, uap->tid, uap->flags, retval);
}

int
psynch_mutexdrop(proc_t p, struct psynch_mutexdrop_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_mutexdrop(p, uap->mutex, uap->mgen, uap->ugen, uap->tid, uap->flags, retval);
}

int
psynch_cvbroad(proc_t p, struct psynch_cvbroad_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_cvbroad(p, uap->cv, uap->cvlsgen, uap->cvudgen, uap->flags, uap->mutex, uap->mugen, uap->tid, retval);
}

int
psynch_cvsignal(proc_t p, struct psynch_cvsignal_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_cvsignal(p, uap->cv, uap->cvlsgen, uap->cvugen, uap->thread_port, uap->mutex, uap->mugen, uap->tid, uap->flags, retval);
}

int
psynch_cvwait(proc_t p, struct psynch_cvwait_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_cvwait(p, uap->cv, uap->cvlsgen, uap->cvugen, uap->mutex, uap->mugen, uap->flags, uap->sec, uap->nsec, retval);
}

int
psynch_cvclrprepost(proc_t p, struct psynch_cvclrprepost_args *uap, int *retval)
{
	return pthread_functions->psynch_cvclrprepost(p, uap->cv, uap->cvgen, uap->cvugen, uap->cvsgen, uap->prepocnt, uap->preposeq, uap->flags, retval);
}

int
psynch_rw_longrdlock(proc_t p, struct psynch_rw_longrdlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_longrdlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

int
psynch_rw_rdlock(proc_t p, struct psynch_rw_rdlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_rdlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

int
psynch_rw_unlock(proc_t p, struct psynch_rw_unlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_unlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

int
psynch_rw_unlock2(__unused proc_t p, __unused struct psynch_rw_unlock2_args *uap, __unused uint32_t *retval)
{
	return ENOTSUP;
}

int
psynch_rw_wrlock(proc_t p, struct psynch_rw_wrlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_wrlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

int
psynch_rw_yieldwrlock(proc_t p, struct psynch_rw_yieldwrlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_yieldwrlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

int
psynch_rw_upgrade(__unused proc_t p, __unused struct psynch_rw_upgrade_args *uap, __unused uint32_t *retval)
{
	return 0;
}

int
psynch_rw_downgrade(__unused proc_t p, __unused struct psynch_rw_downgrade_args *uap, __unused int *retval)
{
	return 0;
}

void
kdp_pthread_find_owner(thread_t thread, struct stackshot_thread_waitinfo *waitinfo)
{
	if (pthread_functions->pthread_find_owner) {
		pthread_functions->pthread_find_owner(thread, waitinfo);
	}
}

void *
kdp_pthread_get_thread_kwq(thread_t thread)
{
	if (pthread_functions->pthread_get_thread_kwq) {
		return pthread_functions->pthread_get_thread_kwq(thread);
	}

	return NULL;
}

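/*
 * Called on a workqueue thread's way out (park or terminate); if the thread
 * still owns kqueue workloops at this point, that is an ownership leak and is
 * handled before the thread goes away.
 */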
void
thread_will_park_or_terminate(thread_t thread)
{
	if (thread_owned_workloops_count(thread)) {
		(void)kevent_exit_on_workloop_ownership_leak(thread);
	}
}

/*
 * The callbacks structure (defined in pthread_shims.h) contains a collection
 * of kernel functions that were not deemed sensible to expose as a KPI to all
 * kernel extensions, so the pthread kext is instead handed them as a structure
 * of function pointers. (An illustrative usage sketch follows the table.)
 */
static const struct pthread_callbacks_s pthread_callbacks = {
	.version = PTHREAD_SHIMS_VERSION,
	.config_thread_max = CONFIG_THREAD_MAX,
	.get_task_threadmax = get_task_threadmax,

	.proc_get_threadstart = proc_get_threadstart,
	.proc_set_threadstart = proc_set_threadstart,
	.proc_get_pthsize = proc_get_pthsize,
	.proc_set_pthsize = proc_set_pthsize,
	.proc_get_wqthread = proc_get_wqthread,
	.proc_set_wqthread = proc_set_wqthread,
	.proc_set_dispatchqueue_offset = proc_set_dispatchqueue_offset,
	.proc_get_pthhash = proc_get_pthhash,
	.proc_set_pthhash = proc_set_pthhash,
	.proc_get_register = proc_get_register,
	.proc_set_register = proc_set_register,

	/* kernel IPI interfaces */
	.ipc_port_copyout_send = ipc_port_copyout_send,
	.task_get_ipcspace = get_task_ipcspace,
	.vm_map_page_info = vm_map_page_info,
	.thread_set_wq_state32 = thread_set_wq_state32,
#if !defined(__arm__)
	.thread_set_wq_state64 = thread_set_wq_state64,
#endif

	.uthread_get_uukwe = uthread_get_uukwe,
	.uthread_set_returnval = uthread_set_returnval,
	.uthread_is_cancelled = uthread_is_cancelled,

	.thread_exception_return = pthread_returning_to_userspace,
	.thread_bootstrap_return = pthread_bootstrap_return,
	.unix_syscall_return = unix_syscall_return,

	.get_bsdthread_info = (void*)get_bsdthread_info,
	.thread_policy_set_internal = thread_policy_set_internal,
	.thread_policy_get = thread_policy_get,

	.__pthread_testcancel = __pthread_testcancel,

	.mach_port_deallocate = mach_port_deallocate,
	.semaphore_signal_internal_trap = semaphore_signal_internal_trap,
	.current_map = _current_map,
	.thread_create = thread_create,
	.thread_resume = thread_resume,

	.convert_thread_to_port = convert_thread_to_port,

	.proc_get_stack_addr_hint = proc_get_stack_addr_hint,
	.proc_set_stack_addr_hint = proc_set_stack_addr_hint,
	.proc_get_pthread_tsd_offset = proc_get_pthread_tsd_offset,
	.proc_set_pthread_tsd_offset = proc_set_pthread_tsd_offset,
	.proc_get_mach_thread_self_tsd_offset = proc_get_mach_thread_self_tsd_offset,
	.proc_set_mach_thread_self_tsd_offset = proc_set_mach_thread_self_tsd_offset,

	.thread_set_tsd_base = thread_set_tsd_base,

	.proc_usynch_get_requested_thread_qos = proc_usynch_get_requested_thread_qos,

	.qos_main_thread_active = qos_main_thread_active,
	.thread_set_voucher_name = thread_set_voucher_name,

	.proc_usynch_thread_qos_add_override_for_resource = proc_usynch_thread_qos_add_override_for_resource,
	.proc_usynch_thread_qos_remove_override_for_resource = proc_usynch_thread_qos_remove_override_for_resource,

	.thread_set_tag = thread_set_tag,
	.thread_get_tag = thread_get_tag,

	.proc_set_return_to_kernel_offset = proc_set_return_to_kernel_offset,
	.thread_will_park_or_terminate = thread_will_park_or_terminate,

	.proc_get_user_stack = proc_get_user_stack,
	.task_findtid = task_findtid,
	.thread_deallocate_safe = thread_deallocate_safe,

	.psynch_wait_prepare = psynch_wait_prepare,
	.psynch_wait_update_complete = psynch_wait_update_complete,
	.psynch_wait_complete = psynch_wait_complete,
	.psynch_wait_cleanup = psynch_wait_cleanup,
	.psynch_wait_wakeup = psynch_wait_wakeup,
	.psynch_wait_update_owner = psynch_wait_update_owner,
};
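
/*
 * Illustrative only (a sketch, not code from pthread.kext): on the kext side,
 * calls back into the kernel go through the pointer the kext receives via
 * pthread_kext_register(), e.g.
 *
 *	pthread_callbacks_t kern_cb;    // filled in by pthread_kext_register()
 *
 *	kern_cb->proc_set_dispatchqueue_offset(p, dq_offset);
 *	if (kern_cb->uthread_is_cancelled(uth)) {
 *		// handle a pending cancellation
 *	}
 *
 * where kern_cb, p, dq_offset and uth are the kext's own names and state.
 */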

pthread_callbacks_t pthread_kern = &pthread_callbacks;
pthread_functions_t pthread_functions = NULL;

/*
 * pthread_kext_register() is called by pthread.kext upon load; it has to
 * provide us with a function pointer table of pthread internal calls. In
 * return, this file provides it with the table of function pointers it needs
 * (see the illustrative sketch after the function below).
 */

void
pthread_kext_register(pthread_functions_t fns, pthread_callbacks_t *callbacks)
{
	if (pthread_functions != NULL) {
		panic("Re-initialisation of pthread kext callbacks.");
	}

	if (callbacks != NULL) {
		*callbacks = &pthread_callbacks;
	} else {
		panic("pthread_kext_register called without callbacks pointer.");
	}

	if (fns) {
		pthread_functions = fns;
	}
}
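
/*
 * For illustration, a registration from the kext side would look roughly like
 * this (a sketch under assumed names; the kext's actual table and variables
 * are not defined in this file):
 *
 *	static struct pthread_functions_s pthread_internal_functions = {
 *		.pthread_init = _pthread_init,
 *		.psynch_mutexwait = _psynch_mutexwait,
 *		// ... remaining entry points ...
 *	};
 *
 *	pthread_callbacks_t pthread_kern;
 *
 *	void
 *	pthread_kext_start(void)
 *	{
 *		pthread_kext_register(&pthread_internal_functions, &pthread_kern);
 *	}
 */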