/*
 * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define PTHREAD_INTERNAL 1

#include <kern/debug.h>
#include <kern/mach_param.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/affinity.h>
#include <kern/zalloc.h>
#include <kern/policy_internal.h>

#include <machine/machine_routines.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <sys/param.h>
#include <sys/pthread_shims.h>
#include <sys/proc_internal.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#include <kern/kcdata.h>

/* version number of the in-kernel shims given to pthread.kext */
#define PTHREAD_SHIMS_VERSION 1

/* on arm, the callbacks structure has two #ifdef arm pointers */
#define PTHREAD_CALLBACK_MEMBER ml_get_max_cpus

/* compile-time asserts to check the length of structures in pthread_shims.h */
static_assert((sizeof(struct pthread_functions_s) - offsetof(struct pthread_functions_s, psynch_rw_yieldwrlock) - sizeof(void*)) == (sizeof(void*) * 100));
static_assert((sizeof(struct pthread_callbacks_s) - offsetof(struct pthread_callbacks_s, PTHREAD_CALLBACK_MEMBER) - sizeof(void*)) == (sizeof(void*) * 100));
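
/*
 * In other words, each assert requires that exactly 100 pointer-sized slots
 * follow the named (pointer-sized) member:
 *
 *	sizeof(struct X) == offsetof(struct X, member) + sizeof(void*) + 100 * sizeof(void*)
 *
 * so the kernel and pthread.kext cannot silently disagree on the size of the
 * shared function/callback tables when members are appended.
 */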

/* the old pthread code had its own definitions for these, as they are not exposed in headers */
extern kern_return_t mach_port_deallocate(ipc_space_t, mach_port_name_t);
extern kern_return_t semaphore_signal_internal_trap(mach_port_name_t);

/* Used for stackshot introspection */
extern void kdp_pthread_find_owner(thread_t thread, struct stackshot_thread_waitinfo *waitinfo);
extern void* kdp_pthread_get_thread_kwq(thread_t thread);

#define PTHREAD_STRUCT_ACCESSOR(get, set, rettype, structtype, member) \
	static rettype \
	get(structtype x) { \
		return (x)->member; \
	} \
	static void \
	set(structtype x, rettype y) { \
		(x)->member = y; \
	}

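/*
 * For illustration, the first instantiation below,
 * PTHREAD_STRUCT_ACCESSOR(proc_get_threadstart, proc_set_threadstart,
 * user_addr_t, struct proc*, p_threadstart), expands to:
 *
 *	static user_addr_t
 *	proc_get_threadstart(struct proc *x) {
 *		return (x)->p_threadstart;
 *	}
 *	static void
 *	proc_set_threadstart(struct proc *x, user_addr_t y) {
 *		(x)->p_threadstart = y;
 *	}
 */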
PTHREAD_STRUCT_ACCESSOR(proc_get_threadstart, proc_set_threadstart, user_addr_t, struct proc*, p_threadstart);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthsize, proc_set_pthsize, int, struct proc*, p_pthsize);
PTHREAD_STRUCT_ACCESSOR(proc_get_wqthread, proc_set_wqthread, user_addr_t, struct proc*, p_wqthread);
PTHREAD_STRUCT_ACCESSOR(proc_get_stack_addr_hint, proc_set_stack_addr_hint, user_addr_t, struct proc *, p_stack_addr_hint);
PTHREAD_STRUCT_ACCESSOR(proc_get_dispatchqueue_offset, proc_set_dispatchqueue_offset, uint64_t, struct proc*, p_dispatchqueue_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_dispatchqueue_serialno_offset, proc_set_dispatchqueue_serialno_offset, uint64_t, struct proc*, p_dispatchqueue_serialno_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthread_tsd_offset, proc_set_pthread_tsd_offset, uint32_t, struct proc *, p_pth_tsd_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthhash, proc_set_pthhash, void*, struct proc*, p_pthhash);

PTHREAD_STRUCT_ACCESSOR(uthread_get_threadlist, uthread_set_threadlist, void*, struct uthread*, uu_threadlist);
PTHREAD_STRUCT_ACCESSOR(uthread_get_sigmask, uthread_set_sigmask, sigset_t, struct uthread*, uu_sigmask);
PTHREAD_STRUCT_ACCESSOR(uthread_get_returnval, uthread_set_returnval, int, struct uthread*, uu_rval[0]);

#define WQPTR_IS_INITING_VALUE ((void *)~(uintptr_t)0)

static void *
proc_get_wqptr(struct proc *p) {
	void *wqptr = p->p_wqptr;
	return (wqptr == WQPTR_IS_INITING_VALUE) ? NULL : wqptr;
}
static void
proc_set_wqptr(struct proc *p, void *y) {
	proc_lock(p);

	assert(y == NULL || p->p_wqptr == WQPTR_IS_INITING_VALUE);

	p->p_wqptr = y;

	if (y != NULL){
		wakeup(&p->p_wqptr);
	}

	proc_unlock(p);
}
static boolean_t
proc_init_wqptr_or_wait(struct proc *p) {
	proc_lock(p);

	if (p->p_wqptr == NULL){
		p->p_wqptr = WQPTR_IS_INITING_VALUE;
		proc_unlock(p);

		return TRUE;
	} else if (p->p_wqptr == WQPTR_IS_INITING_VALUE){
		assert_wait(&p->p_wqptr, THREAD_UNINT);
		proc_unlock(p);
		thread_block(THREAD_CONTINUE_NULL);

		return FALSE;
	} else {
		proc_unlock(p);

		return FALSE;
	}
}
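
/*
 * Hypothetical caller sketch (not taken from this file): the workqueue setup
 * path can loop until it either wins the right to initialize the pointer or
 * observes that another thread has already published it.
 *
 *	while (proc_get_wqptr(p) == NULL) {
 *		if (proc_init_wqptr_or_wait(p)) {
 *			// TRUE: this thread owns initialization; build the
 *			// workqueue, then publish it (which wakes any waiters)
 *			proc_set_wqptr(p, wq);
 *			break;
 *		}
 *		// FALSE: another thread was (or has finished) initializing;
 *		// re-check the pointer and retry
 *	}
 */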

__attribute__((noreturn))
static void
pthread_returning_to_userspace(void)
{
	thread_exception_return();
}

static uint32_t
get_task_threadmax(void) {
	return task_threadmax;
}

static task_t
proc_get_task(struct proc *p) {
	return p->task;
}

static uint64_t
proc_get_register(struct proc *p) {
	return (p->p_lflag & P_LREGISTER);
}

static void
proc_set_register(struct proc *p) {
	proc_setregister(p);
}

static void*
uthread_get_uukwe(struct uthread *t)
{
	return &t->uu_kevent.uu_kwe;
}

static int
uthread_is_cancelled(struct uthread *t)
{
	return (t->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL;
}

static vm_map_t
_current_map(void)
{
	return current_map();
}

static boolean_t
qos_main_thread_active(void)
{
	return TRUE;
}


static int proc_usynch_get_requested_thread_qos(struct uthread *uth)
{
	thread_t thread = uth ? uth->uu_thread : current_thread();
	int requested_qos;

	requested_qos = proc_get_thread_policy(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS);

	/*
	 * For the purposes of userspace synchronization, it doesn't make sense to
	 * place an override of UNSPECIFIED on another thread if the current thread
	 * doesn't have any QoS set. In these cases, upgrade to
	 * THREAD_QOS_USER_INTERACTIVE.
	 */
	if (requested_qos == THREAD_QOS_UNSPECIFIED) {
		requested_qos = THREAD_QOS_USER_INTERACTIVE;
	}

	return requested_qos;
}

static int
proc_usynch_thread_qos_add_override_for_resource_check_owner(thread_t thread,
	int override_qos, boolean_t first_override_for_resource,
	user_addr_t resource, int resource_type,
	user_addr_t user_lock_addr, mach_port_name_t user_lock_owner)
{
	return proc_thread_qos_add_override_check_owner(thread, override_qos,
		first_override_for_resource, resource, resource_type,
		user_lock_addr, user_lock_owner);
}

static boolean_t
proc_usynch_thread_qos_add_override_for_resource(task_t task, struct uthread *uth,
	uint64_t tid, int override_qos, boolean_t first_override_for_resource,
	user_addr_t resource, int resource_type)
{
	thread_t thread = uth ? uth->uu_thread : THREAD_NULL;

	return proc_thread_qos_add_override(task, thread, tid, override_qos,
		first_override_for_resource, resource, resource_type);
}

static boolean_t
proc_usynch_thread_qos_remove_override_for_resource(task_t task,
	struct uthread *uth, uint64_t tid, user_addr_t resource, int resource_type)
{
	thread_t thread = uth ? uth->uu_thread : THREAD_NULL;

	return proc_thread_qos_remove_override(task, thread, tid, resource, resource_type);
}

static boolean_t
proc_usynch_thread_qos_reset_override_for_resource(task_t task,
	struct uthread *uth, uint64_t tid, user_addr_t resource, int resource_type)
{
	thread_t thread = uth ? uth->uu_thread : THREAD_NULL;

	return proc_thread_qos_reset_override(task, thread, tid, resource, resource_type);
}

static boolean_t
proc_usynch_thread_qos_squash_override_for_resource(thread_t thread,
	user_addr_t resource, int resource_type)
{
	return proc_thread_qos_squash_override(thread, resource, resource_type);
}

/* kernel (core) to kext shims */

void
pthread_init(void)
{
	if (!pthread_functions) {
		panic("pthread kernel extension not loaded (function table is NULL).");
	}
	pthread_functions->pthread_init();
}

int
fill_procworkqueue(proc_t p, struct proc_workqueueinfo * pwqinfo)
{
	return pthread_functions->fill_procworkqueue(p, pwqinfo);
}

/*
 * Returns true if the workqueue flags are available, and will fill
 * in exceeded_total and exceeded_constrained.
 */
boolean_t
workqueue_get_pwq_exceeded(void *v, boolean_t *exceeded_total,
	boolean_t *exceeded_constrained)
{
	proc_t p = v;
	struct proc_workqueueinfo pwqinfo;
	int err;

	assert(p != NULL);
	assert(exceeded_total != NULL);
	assert(exceeded_constrained != NULL);

	err = fill_procworkqueue(p, &pwqinfo);
	if (err) {
		return FALSE;
	}
	if (!(pwqinfo.pwq_state & WQ_FLAGS_AVAILABLE)) {
		return FALSE;
	}

	*exceeded_total = (pwqinfo.pwq_state & WQ_EXCEEDED_TOTAL_THREAD_LIMIT);
	*exceeded_constrained = (pwqinfo.pwq_state & WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT);

	return TRUE;
}
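
/*
 * Hypothetical caller sketch (not from this file): a diagnostic or memory
 * pressure path could annotate a process along these lines.
 *
 *	boolean_t exceeded_total = FALSE, exceeded_constrained = FALSE;
 *
 *	if (workqueue_get_pwq_exceeded(p, &exceeded_total, &exceeded_constrained)) {
 *		if (exceeded_total) {
 *			// record that the process hit the total thread limit
 *		}
 *		if (exceeded_constrained) {
 *			// record that it hit the constrained thread limit
 *		}
 *	}
 */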

uint32_t
workqueue_get_pwq_state_kdp(void * v)
{
	static_assert((WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT << 17) == kTaskWqExceededConstrainedThreadLimit);
	static_assert((WQ_EXCEEDED_TOTAL_THREAD_LIMIT << 17) == kTaskWqExceededTotalThreadLimit);
	static_assert((WQ_FLAGS_AVAILABLE << 17) == kTaskWqFlagsAvailable);
	static_assert((WQ_FLAGS_AVAILABLE | WQ_EXCEEDED_TOTAL_THREAD_LIMIT | WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT) == 0x7);
	proc_t p = v;
	if (pthread_functions == NULL || pthread_functions->get_pwq_state_kdp == NULL)
		return 0;
	else
		return pthread_functions->get_pwq_state_kdp(p);
}

void
workqueue_exit(struct proc *p)
{
	pthread_functions->workqueue_exit(p);
}

void
workqueue_mark_exiting(struct proc *p)
{
	pthread_functions->workqueue_mark_exiting(p);
}

void
workqueue_thread_yielded(void)
{
	pthread_functions->workqueue_thread_yielded();
}

sched_call_t
workqueue_get_sched_callback(void)
{
	if (pthread_functions->workqueue_get_sched_callback) {
		return pthread_functions->workqueue_get_sched_callback();
	}
	return NULL;
}

void
pth_proc_hashinit(proc_t p)
{
	pthread_functions->pth_proc_hashinit(p);
}

void
pth_proc_hashdelete(proc_t p)
{
	pthread_functions->pth_proc_hashdelete(p);
}

/* syscall shims */
int
bsdthread_create(struct proc *p, struct bsdthread_create_args *uap, user_addr_t *retval)
{
	return pthread_functions->bsdthread_create(p, uap->func, uap->func_arg, uap->stack, uap->pthread, uap->flags, retval);
}

int
bsdthread_register(struct proc *p, struct bsdthread_register_args *uap, __unused int32_t *retval)
{
	if (pthread_functions->version >= 1) {
		return pthread_functions->bsdthread_register2(p, uap->threadstart, uap->wqthread,
				uap->flags, uap->stack_addr_hint,
				uap->targetconc_ptr, uap->dispatchqueue_offset,
				uap->tsd_offset, retval);
	} else {
		return pthread_functions->bsdthread_register(p, uap->threadstart, uap->wqthread,
				uap->flags, uap->stack_addr_hint,
				uap->targetconc_ptr, uap->dispatchqueue_offset,
				retval);
	}
}

int
bsdthread_terminate(struct proc *p, struct bsdthread_terminate_args *uap, int32_t *retval)
{
	return pthread_functions->bsdthread_terminate(p, uap->stackaddr, uap->freesize, uap->port, uap->sem, retval);
}

int
bsdthread_ctl(struct proc *p, struct bsdthread_ctl_args *uap, int *retval)
{
	return pthread_functions->bsdthread_ctl(p, uap->cmd, uap->arg1, uap->arg2, uap->arg3, retval);
}


int
thread_selfid(struct proc *p, __unused struct thread_selfid_args *uap, uint64_t *retval)
{
	return pthread_functions->thread_selfid(p, retval);
}

int
workq_kernreturn(struct proc *p, struct workq_kernreturn_args *uap, int32_t *retval)
{
	return pthread_functions->workq_kernreturn(p, uap->options, uap->item, uap->affinity, uap->prio, retval);
}

int
workq_open(struct proc *p, __unused struct workq_open_args *uap, int32_t *retval)
{
	return pthread_functions->workq_open(p, retval);
}

/* pthread synchroniser syscalls */

int
psynch_mutexwait(proc_t p, struct psynch_mutexwait_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_mutexwait(p, uap->mutex, uap->mgen, uap->ugen, uap->tid, uap->flags, retval);
}

int
psynch_mutexdrop(proc_t p, struct psynch_mutexdrop_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_mutexdrop(p, uap->mutex, uap->mgen, uap->ugen, uap->tid, uap->flags, retval);
}

int
psynch_cvbroad(proc_t p, struct psynch_cvbroad_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_cvbroad(p, uap->cv, uap->cvlsgen, uap->cvudgen, uap->flags, uap->mutex, uap->mugen, uap->tid, retval);
}

int
psynch_cvsignal(proc_t p, struct psynch_cvsignal_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_cvsignal(p, uap->cv, uap->cvlsgen, uap->cvugen, uap->thread_port, uap->mutex, uap->mugen, uap->tid, uap->flags, retval);
}

int
psynch_cvwait(proc_t p, struct psynch_cvwait_args * uap, uint32_t * retval)
{
	return pthread_functions->psynch_cvwait(p, uap->cv, uap->cvlsgen, uap->cvugen, uap->mutex, uap->mugen, uap->flags, uap->sec, uap->nsec, retval);
}

int
psynch_cvclrprepost(proc_t p, struct psynch_cvclrprepost_args * uap, int *retval)
{
	return pthread_functions->psynch_cvclrprepost(p, uap->cv, uap->cvgen, uap->cvugen, uap->cvsgen, uap->prepocnt, uap->preposeq, uap->flags, retval);
}

int
psynch_rw_longrdlock(proc_t p, struct psynch_rw_longrdlock_args * uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_longrdlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

int
psynch_rw_rdlock(proc_t p, struct psynch_rw_rdlock_args * uap, uint32_t * retval)
{
	return pthread_functions->psynch_rw_rdlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

int
psynch_rw_unlock(proc_t p, struct psynch_rw_unlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_unlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

int
psynch_rw_unlock2(__unused proc_t p, __unused struct psynch_rw_unlock2_args *uap, __unused uint32_t *retval)
{
	return ENOTSUP;
}

int
psynch_rw_wrlock(proc_t p, struct psynch_rw_wrlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_wrlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

int
psynch_rw_yieldwrlock(proc_t p, struct psynch_rw_yieldwrlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_yieldwrlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

int
psynch_rw_upgrade(__unused proc_t p, __unused struct psynch_rw_upgrade_args * uap, __unused uint32_t *retval)
{
	return 0;
}

int
psynch_rw_downgrade(__unused proc_t p, __unused struct psynch_rw_downgrade_args * uap, __unused int *retval)
{
	return 0;
}

int
thread_qos_from_pthread_priority(unsigned long priority, unsigned long *flags)
{
	return pthread_functions->thread_qos_from_pthread_priority(priority, flags);
}

unsigned long
pthread_priority_canonicalize(unsigned long priority, boolean_t propagation)
{
	if (pthread_functions->pthread_priority_canonicalize2) {
		return pthread_functions->pthread_priority_canonicalize2(priority, propagation);
	} else {
		return pthread_functions->pthread_priority_canonicalize(priority);
	}
}

void
kdp_pthread_find_owner(thread_t thread, struct stackshot_thread_waitinfo *waitinfo)
{
	if (pthread_functions->pthread_find_owner)
		pthread_functions->pthread_find_owner(thread, waitinfo);
}

void *
kdp_pthread_get_thread_kwq(thread_t thread)
{
	if (pthread_functions->pthread_get_thread_kwq)
		return pthread_functions->pthread_get_thread_kwq(thread);

	return NULL;
}

/*
 * The callbacks structure (defined in pthread_shims.h) contains a collection
 * of kernel functions that were not deemed sensible to expose as a KPI to all
 * kernel extensions, so they are handed to pthread.kext as a structure of
 * function pointers instead.
 */
static const struct pthread_callbacks_s pthread_callbacks = {
	.version = PTHREAD_SHIMS_VERSION,
	.config_thread_max = CONFIG_THREAD_MAX,
	.get_task_threadmax = get_task_threadmax,

	.proc_get_threadstart = proc_get_threadstart,
	.proc_set_threadstart = proc_set_threadstart,
	.proc_get_pthsize = proc_get_pthsize,
	.proc_set_pthsize = proc_set_pthsize,
	.proc_get_wqthread = proc_get_wqthread,
	.proc_set_wqthread = proc_set_wqthread,
	.proc_get_dispatchqueue_offset = proc_get_dispatchqueue_offset,
	.proc_set_dispatchqueue_offset = proc_set_dispatchqueue_offset,
	.proc_get_wqptr = proc_get_wqptr,
	.proc_set_wqptr = proc_set_wqptr,
	.proc_get_pthhash = proc_get_pthhash,
	.proc_set_pthhash = proc_set_pthhash,
	.proc_get_task = proc_get_task,
	.proc_lock = proc_lock,
	.proc_unlock = proc_unlock,
	.proc_get_register = proc_get_register,
	.proc_set_register = proc_set_register,

	/* kernel IPI interfaces */
	.ipc_port_copyout_send = ipc_port_copyout_send,
	.task_get_ipcspace = get_task_ipcspace,
	.vm_map_page_info = vm_map_page_info,
	.vm_map_switch = vm_map_switch,
	.thread_set_wq_state32 = thread_set_wq_state32,
	.thread_set_wq_state64 = thread_set_wq_state64,

	.uthread_get_threadlist = uthread_get_threadlist,
	.uthread_set_threadlist = uthread_set_threadlist,
	.uthread_get_sigmask = uthread_get_sigmask,
	.uthread_set_sigmask = uthread_set_sigmask,
	.uthread_get_uukwe = uthread_get_uukwe,
	.uthread_get_returnval = uthread_get_returnval,
	.uthread_set_returnval = uthread_set_returnval,
	.uthread_is_cancelled = uthread_is_cancelled,

	.thread_exception_return = pthread_returning_to_userspace,
	.thread_bootstrap_return = thread_bootstrap_return,
	.unix_syscall_return = unix_syscall_return,

	.absolutetime_to_microtime = absolutetime_to_microtime,

	.thread_set_workq_pri = thread_set_workq_pri,
	.thread_set_workq_qos = thread_set_workq_qos,

	.get_bsdthread_info = (void*)get_bsdthread_info,
	.thread_sched_call = thread_sched_call,
	.thread_static_param = thread_static_param,
	.thread_create_workq = thread_create_workq,
	.thread_policy_set_internal = thread_policy_set_internal,
	.thread_policy_get = thread_policy_get,
	.thread_set_voucher_name = thread_set_voucher_name,

	.thread_affinity_set = thread_affinity_set,

	.zalloc = zalloc,
	.zfree = zfree,
	.zinit = zinit,

	.__pthread_testcancel = __pthread_testcancel,

	.mach_port_deallocate = mach_port_deallocate,
	.semaphore_signal_internal_trap = semaphore_signal_internal_trap,
	.current_map = _current_map,
	.thread_create = thread_create,
	.thread_resume = thread_resume,

	.convert_thread_to_port = convert_thread_to_port,
	.ml_get_max_cpus = (void*)ml_get_max_cpus,

	.proc_get_dispatchqueue_serialno_offset = proc_get_dispatchqueue_serialno_offset,
	.proc_set_dispatchqueue_serialno_offset = proc_set_dispatchqueue_serialno_offset,

	.proc_get_stack_addr_hint = proc_get_stack_addr_hint,
	.proc_set_stack_addr_hint = proc_set_stack_addr_hint,
	.proc_get_pthread_tsd_offset = proc_get_pthread_tsd_offset,
	.proc_set_pthread_tsd_offset = proc_set_pthread_tsd_offset,

	.thread_set_tsd_base = thread_set_tsd_base,

	.proc_usynch_get_requested_thread_qos = proc_usynch_get_requested_thread_qos,

	.qos_main_thread_active = qos_main_thread_active,

	.proc_usynch_thread_qos_add_override_for_resource_check_owner = proc_usynch_thread_qos_add_override_for_resource_check_owner,
	.proc_usynch_thread_qos_add_override_for_resource = proc_usynch_thread_qos_add_override_for_resource,
	.proc_usynch_thread_qos_remove_override_for_resource = proc_usynch_thread_qos_remove_override_for_resource,
	.proc_usynch_thread_qos_reset_override_for_resource = proc_usynch_thread_qos_reset_override_for_resource,

	.proc_init_wqptr_or_wait = proc_init_wqptr_or_wait,

	.thread_set_tag = thread_set_tag,
	.thread_get_tag = thread_get_tag,

	.proc_usynch_thread_qos_squash_override_for_resource = proc_usynch_thread_qos_squash_override_for_resource,
	.task_get_default_manager_qos = task_get_default_manager_qos,
	.thread_create_workq_waiting = thread_create_workq_waiting,
};

pthread_callbacks_t pthread_kern = &pthread_callbacks;
pthread_functions_t pthread_functions = NULL;

/*
 * pthread_kext_register is called by pthread.kext upon load; it must provide
 * us with a table of function pointers for the pthread-internal calls. In
 * return, this file provides the kext with the table of kernel function
 * pointers it needs.
 */

void
pthread_kext_register(pthread_functions_t fns, pthread_callbacks_t *callbacks)
{
	if (pthread_functions != NULL) {
		panic("Re-initialisation of pthread kext callbacks.");
	}

	if (callbacks != NULL) {
		*callbacks = &pthread_callbacks;
	} else {
		panic("pthread_kext_register called without callbacks pointer.");
	}

	if (fns) {
		pthread_functions = fns;
	}
}
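
/*
 * Hypothetical kext-side registration sketch (illustrative names only; the
 * real pthread.kext start routine is not part of this file):
 *
 *	static struct pthread_functions_s pthread_internal_functions = {
 *		.pthread_init = _pthread_init,
 *		// ... remaining entry points implemented by the kext ...
 *	};
 *
 *	pthread_callbacks_t pthread_kern;
 *
 *	void
 *	pthread_kext_start(void)
 *	{
 *		pthread_kext_register(&pthread_internal_functions, &pthread_kern);
 *	}
 */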