/*
 * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #define PTHREAD_INTERNAL 1
31 #include <stdatomic.h>
32 #include <kern/debug.h>
33 #include <kern/mach_param.h>
34 #include <kern/sched_prim.h>
35 #include <kern/task.h>
36 #include <kern/thread.h>
37 #include <kern/affinity.h>
38 #include <kern/zalloc.h>
39 #include <kern/policy_internal.h>
41 #include <machine/machine_routines.h>
42 #include <mach/task.h>
43 #include <mach/thread_act.h>
44 #include <sys/param.h>
45 #include <sys/eventvar.h>
46 #include <sys/pthread_shims.h>
47 #include <sys/proc_info.h>
48 #include <sys/proc_internal.h>
49 #include <sys/sysproto.h>
50 #include <sys/systm.h>
51 #include <vm/vm_map.h>
52 #include <vm/vm_protos.h>
53 #include <kern/kcdata.h>
/* version number of the in-kernel shims given to pthread.kext */
#define PTHREAD_SHIMS_VERSION 1

/* on arm, the callbacks structure has two #ifdef arm pointers */
/*
 * NOTE(review): both PTHREAD_CALLBACK_MEMBER definitions were unconditional
 * in the extracted text, which would be a macro redefinition.  The comment
 * above says the member differs on arm, so the arch guard has been
 * reconstructed — verify against the original file.
 */
#if defined(__arm__)
#define PTHREAD_CALLBACK_MEMBER map_is_1gb
#else
#define PTHREAD_CALLBACK_MEMBER ml_get_max_cpus
#endif
65 /* compile time asserts to check the length of structures in pthread_shims.h */
66 static_assert((sizeof(struct pthread_functions_s
) - offsetof(struct pthread_functions_s
, psynch_rw_yieldwrlock
) - sizeof(void*)) == (sizeof(void*) * 100));
67 static_assert((sizeof(struct pthread_callbacks_s
) - offsetof(struct pthread_callbacks_s
, PTHREAD_CALLBACK_MEMBER
) - sizeof(void*)) == (sizeof(void*) * 100));
69 /* old pthread code had definitions for these as they don't exist in headers */
70 extern kern_return_t
mach_port_deallocate(ipc_space_t
, mach_port_name_t
);
71 extern kern_return_t
semaphore_signal_internal_trap(mach_port_name_t
);
/*
 * Expands to a static getter/setter pair for `member` of `structtype`,
 * named `get` and `set`.  Used to hand struct proc / struct uthread field
 * access to pthread.kext without exposing the structure layouts.
 *
 * NOTE(review): the getter half of this macro was dropped by the extraction;
 * it has been reconstructed from the get/set pairs invoked below — verify
 * against the original file.
 */
#define PTHREAD_STRUCT_ACCESSOR(get, set, rettype, structtype, member) \
	static rettype \
	get(structtype x) { \
		return (x)->member; \
	} \
	static void \
	set(structtype x, rettype y) { \
		(x)->member = y; \
	}
83 PTHREAD_STRUCT_ACCESSOR(proc_get_threadstart
, proc_set_threadstart
, user_addr_t
, struct proc
*, p_threadstart
);
84 PTHREAD_STRUCT_ACCESSOR(proc_get_pthsize
, proc_set_pthsize
, int, struct proc
*, p_pthsize
);
85 PTHREAD_STRUCT_ACCESSOR(proc_get_wqthread
, proc_set_wqthread
, user_addr_t
, struct proc
*, p_wqthread
);
86 PTHREAD_STRUCT_ACCESSOR(proc_get_stack_addr_hint
, proc_set_stack_addr_hint
, user_addr_t
, struct proc
*, p_stack_addr_hint
);
87 PTHREAD_STRUCT_ACCESSOR(proc_get_dispatchqueue_offset
, proc_set_dispatchqueue_offset
, uint64_t, struct proc
*, p_dispatchqueue_offset
);
88 PTHREAD_STRUCT_ACCESSOR(proc_get_dispatchqueue_serialno_offset
, proc_set_dispatchqueue_serialno_offset
, uint64_t, struct proc
*, p_dispatchqueue_serialno_offset
);
89 PTHREAD_STRUCT_ACCESSOR(proc_get_pthread_tsd_offset
, proc_set_pthread_tsd_offset
, uint32_t, struct proc
*, p_pth_tsd_offset
);
90 PTHREAD_STRUCT_ACCESSOR(proc_get_mach_thread_self_tsd_offset
, proc_set_mach_thread_self_tsd_offset
, uint64_t, struct proc
*, p_mach_thread_self_offset
);
91 PTHREAD_STRUCT_ACCESSOR(proc_get_pthhash
, proc_set_pthhash
, void*, struct proc
*, p_pthhash
);
92 PTHREAD_STRUCT_ACCESSOR(proc_get_return_to_kernel_offset
, proc_set_return_to_kernel_offset
, uint64_t, struct proc
*, p_return_to_kernel_offset
);
93 PTHREAD_STRUCT_ACCESSOR(proc_get_user_stack
, proc_set_user_stack
, user_addr_t
, struct proc
*, user_stack
);
95 PTHREAD_STRUCT_ACCESSOR(uthread_get_threadlist
, uthread_set_threadlist
, void*, struct uthread
*, uu_threadlist
);
96 PTHREAD_STRUCT_ACCESSOR(uthread_get_sigmask
, uthread_set_sigmask
, sigset_t
, struct uthread
*, uu_sigmask
);
97 PTHREAD_STRUCT_ACCESSOR(uthread_get_returnval
, uthread_set_returnval
, int, struct uthread
*, uu_rval
[0]);
99 #define WQPTR_IS_INITING_VALUE ((void *)~(uintptr_t)0)
102 proc_get_wqptr(struct proc
*p
) {
103 void *wqptr
= p
->p_wqptr
;
104 return (wqptr
== WQPTR_IS_INITING_VALUE
) ? NULL
: wqptr
;
107 proc_set_wqptr(struct proc
*p
, void *y
) {
110 assert(y
== NULL
|| p
->p_wqptr
== WQPTR_IS_INITING_VALUE
);
121 proc_init_wqptr_or_wait(struct proc
*p
) {
124 if (p
->p_wqptr
== NULL
){
125 p
->p_wqptr
= WQPTR_IS_INITING_VALUE
;
129 } else if (p
->p_wqptr
== WQPTR_IS_INITING_VALUE
){
130 assert_wait(&p
->p_wqptr
, THREAD_UNINT
);
132 thread_block(THREAD_CONTINUE_NULL
);
/*
 * Non-returning trampolines handed to pthread.kext: the attribute lets it
 * rely on these never coming back.
 */
__attribute__((noreturn))
static void
pthread_returning_to_userspace(void)
{
	thread_exception_return();
}

__attribute__((noreturn))
static void
pthread_bootstrap_return(void)
{
	thread_bootstrap_return();
}
157 get_task_threadmax(void) {
158 return task_threadmax
;
162 proc_get_task(struct proc
*p
) {
167 proc_get_register(struct proc
*p
) {
168 return (p
->p_lflag
& P_LREGISTER
);
/*
 * Mark the process as registered with the pthread subsystem.
 * NOTE(review): the body was dropped by the extraction; reconstructed as a
 * call to the proc_setregister() helper (the setter counterpart of the
 * P_LREGISTER read above) — verify against the original file.
 */
static void
proc_set_register(struct proc *p)
{
	proc_setregister(p);
}
177 uthread_get_uukwe(struct uthread
*t
)
179 return &t
->uu_kevent
.uu_kwe
;
183 uthread_is_cancelled(struct uthread
*t
)
185 return (t
->uu_flag
& (UT_CANCELDISABLE
| UT_CANCEL
| UT_CANCELED
)) == UT_CANCEL
;
191 return current_map();
195 qos_main_thread_active(void)
/*
 * NOTE(review): per the "#ifdef arm" comment next to PTHREAD_CALLBACK_MEMBER
 * above, this helper is arm-only; the guard and return type were dropped by
 * the extraction and have been reconstructed — verify against the original.
 */
#if defined(__arm__)
/* On iOS, the stack placement depends on the address space size */
static boolean_t
map_is_1gb(vm_map_t map)
{
	return ((!vm_map_is_64bit(map)) &&
	    (get_map_max(map) == ml_get_max_offset(FALSE, MACHINE_MAX_OFFSET_MIN)));
}
#endif
209 static int proc_usynch_get_requested_thread_qos(struct uthread
*uth
)
211 thread_t thread
= uth
? uth
->uu_thread
: current_thread();
214 requested_qos
= proc_get_thread_policy(thread
, TASK_POLICY_ATTRIBUTE
, TASK_POLICY_QOS
);
217 * For the purposes of userspace synchronization, it doesn't make sense to
218 * place an override of UNSPECIFIED on another thread, if the current thread
219 * doesn't have any QoS set. In these cases, upgrade to
220 * THREAD_QOS_USER_INTERACTIVE.
222 if (requested_qos
== THREAD_QOS_UNSPECIFIED
) {
223 requested_qos
= THREAD_QOS_USER_INTERACTIVE
;
226 return requested_qos
;
230 proc_usynch_thread_qos_add_override_for_resource_check_owner(thread_t thread
,
231 int override_qos
, boolean_t first_override_for_resource
,
232 user_addr_t resource
, int resource_type
,
233 user_addr_t user_lock_addr
, mach_port_name_t user_lock_owner
)
235 return proc_thread_qos_add_override_check_owner(thread
, override_qos
,
236 first_override_for_resource
, resource
, resource_type
,
237 user_lock_addr
, user_lock_owner
);
241 proc_usynch_thread_qos_add_override_for_resource(task_t task
, struct uthread
*uth
,
242 uint64_t tid
, int override_qos
, boolean_t first_override_for_resource
,
243 user_addr_t resource
, int resource_type
)
245 thread_t thread
= uth
? uth
->uu_thread
: THREAD_NULL
;
247 return proc_thread_qos_add_override(task
, thread
, tid
, override_qos
,
248 first_override_for_resource
, resource
, resource_type
);
252 proc_usynch_thread_qos_remove_override_for_resource(task_t task
,
253 struct uthread
*uth
, uint64_t tid
, user_addr_t resource
, int resource_type
)
255 thread_t thread
= uth
? uth
->uu_thread
: THREAD_NULL
;
257 return proc_thread_qos_remove_override(task
, thread
, tid
, resource
, resource_type
);
261 proc_usynch_thread_qos_reset_override_for_resource(task_t task
,
262 struct uthread
*uth
, uint64_t tid
, user_addr_t resource
, int resource_type
)
264 thread_t thread
= uth
? uth
->uu_thread
: THREAD_NULL
;
266 return proc_thread_qos_reset_override(task
, thread
, tid
, resource
, resource_type
);
270 proc_usynch_thread_qos_squash_override_for_resource(thread_t thread
,
271 user_addr_t resource
, int resource_type
)
273 return proc_thread_qos_squash_override(thread
, resource
, resource_type
);
276 /* kernel (core) to kext shims */
281 if (!pthread_functions
) {
282 panic("pthread kernel extension not loaded (function table is NULL).");
284 pthread_functions
->pthread_init();
288 fill_procworkqueue(proc_t p
, struct proc_workqueueinfo
* pwqinfo
)
290 return pthread_functions
->fill_procworkqueue(p
, pwqinfo
);
294 * Returns true if the workqueue flags are available, and will fill
295 * in exceeded_total and exceeded_constrained.
298 workqueue_get_pwq_exceeded(void *v
, boolean_t
*exceeded_total
,
299 boolean_t
*exceeded_constrained
)
302 struct proc_workqueueinfo pwqinfo
;
306 assert(exceeded_total
!= NULL
);
307 assert(exceeded_constrained
!= NULL
);
309 err
= fill_procworkqueue(p
, &pwqinfo
);
313 if (!(pwqinfo
.pwq_state
& WQ_FLAGS_AVAILABLE
)) {
317 *exceeded_total
= (pwqinfo
.pwq_state
& WQ_EXCEEDED_TOTAL_THREAD_LIMIT
);
318 *exceeded_constrained
= (pwqinfo
.pwq_state
& WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT
);
324 workqueue_get_pwq_state_kdp(void * v
)
326 static_assert((WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT
<< 17) == kTaskWqExceededConstrainedThreadLimit
);
327 static_assert((WQ_EXCEEDED_TOTAL_THREAD_LIMIT
<< 17) == kTaskWqExceededTotalThreadLimit
);
328 static_assert((WQ_FLAGS_AVAILABLE
<< 17) == kTaskWqFlagsAvailable
);
329 static_assert((WQ_FLAGS_AVAILABLE
| WQ_EXCEEDED_TOTAL_THREAD_LIMIT
| WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT
) == 0x7);
331 if (pthread_functions
== NULL
|| pthread_functions
->get_pwq_state_kdp
== NULL
)
334 return pthread_functions
->get_pwq_state_kdp(p
);
338 workqueue_exit(struct proc
*p
)
340 pthread_functions
->workqueue_exit(p
);
344 workqueue_mark_exiting(struct proc
*p
)
346 pthread_functions
->workqueue_mark_exiting(p
);
350 workqueue_thread_yielded(void)
352 pthread_functions
->workqueue_thread_yielded();
356 workqueue_get_sched_callback(void)
358 if (pthread_functions
->workqueue_get_sched_callback
) {
359 return pthread_functions
->workqueue_get_sched_callback();
365 pth_proc_hashinit(proc_t p
)
367 pthread_functions
->pth_proc_hashinit(p
);
371 pth_proc_hashdelete(proc_t p
)
373 pthread_functions
->pth_proc_hashdelete(p
);
378 bsdthread_create(struct proc
*p
, struct bsdthread_create_args
*uap
, user_addr_t
*retval
)
380 return pthread_functions
->bsdthread_create(p
, uap
->func
, uap
->func_arg
, uap
->stack
, uap
->pthread
, uap
->flags
, retval
);
384 bsdthread_register(struct proc
*p
, struct bsdthread_register_args
*uap
, __unused
int32_t *retval
)
386 if (pthread_functions
->version
>= 1) {
387 return pthread_functions
->bsdthread_register2(p
, uap
->threadstart
, uap
->wqthread
,
388 uap
->flags
, uap
->stack_addr_hint
,
389 uap
->targetconc_ptr
, uap
->dispatchqueue_offset
,
390 uap
->tsd_offset
, retval
);
392 return pthread_functions
->bsdthread_register(p
, uap
->threadstart
, uap
->wqthread
,
393 uap
->flags
, uap
->stack_addr_hint
,
394 uap
->targetconc_ptr
, uap
->dispatchqueue_offset
,
400 bsdthread_terminate(struct proc
*p
, struct bsdthread_terminate_args
*uap
, int32_t *retval
)
402 return pthread_functions
->bsdthread_terminate(p
, uap
->stackaddr
, uap
->freesize
, uap
->port
, uap
->sem
, retval
);
406 bsdthread_ctl(struct proc
*p
, struct bsdthread_ctl_args
*uap
, int *retval
)
408 return pthread_functions
->bsdthread_ctl(p
, uap
->cmd
, uap
->arg1
, uap
->arg2
, uap
->arg3
, retval
);
413 thread_selfid(struct proc
*p
, __unused
struct thread_selfid_args
*uap
, uint64_t *retval
)
415 return pthread_functions
->thread_selfid(p
, retval
);
419 workq_kernreturn(struct proc
*p
, struct workq_kernreturn_args
*uap
, int32_t *retval
)
421 return pthread_functions
->workq_kernreturn(p
, uap
->options
, uap
->item
, uap
->affinity
, uap
->prio
, retval
);
425 workq_open(struct proc
*p
, __unused
struct workq_open_args
*uap
, int32_t *retval
)
427 return pthread_functions
->workq_open(p
, retval
);
430 /* pthread synchroniser syscalls */
433 psynch_mutexwait(proc_t p
, struct psynch_mutexwait_args
*uap
, uint32_t *retval
)
435 return pthread_functions
->psynch_mutexwait(p
, uap
->mutex
, uap
->mgen
, uap
->ugen
, uap
->tid
, uap
->flags
, retval
);
439 psynch_mutexdrop(proc_t p
, struct psynch_mutexdrop_args
*uap
, uint32_t *retval
)
441 return pthread_functions
->psynch_mutexdrop(p
, uap
->mutex
, uap
->mgen
, uap
->ugen
, uap
->tid
, uap
->flags
, retval
);
445 psynch_cvbroad(proc_t p
, struct psynch_cvbroad_args
*uap
, uint32_t *retval
)
447 return pthread_functions
->psynch_cvbroad(p
, uap
->cv
, uap
->cvlsgen
, uap
->cvudgen
, uap
->flags
, uap
->mutex
, uap
->mugen
, uap
->tid
, retval
);
451 psynch_cvsignal(proc_t p
, struct psynch_cvsignal_args
*uap
, uint32_t *retval
)
453 return pthread_functions
->psynch_cvsignal(p
, uap
->cv
, uap
->cvlsgen
, uap
->cvugen
, uap
->thread_port
, uap
->mutex
, uap
->mugen
, uap
->tid
, uap
->flags
, retval
);
457 psynch_cvwait(proc_t p
, struct psynch_cvwait_args
* uap
, uint32_t * retval
)
459 return pthread_functions
->psynch_cvwait(p
, uap
->cv
, uap
->cvlsgen
, uap
->cvugen
, uap
->mutex
, uap
->mugen
, uap
->flags
, uap
->sec
, uap
->nsec
, retval
);
463 psynch_cvclrprepost(proc_t p
, struct psynch_cvclrprepost_args
* uap
, int *retval
)
465 return pthread_functions
->psynch_cvclrprepost(p
, uap
->cv
, uap
->cvgen
, uap
->cvugen
, uap
->cvsgen
, uap
->prepocnt
, uap
->preposeq
, uap
->flags
, retval
);
469 psynch_rw_longrdlock(proc_t p
, struct psynch_rw_longrdlock_args
* uap
, uint32_t *retval
)
471 return pthread_functions
->psynch_rw_longrdlock(p
, uap
->rwlock
, uap
->lgenval
, uap
->ugenval
, uap
->rw_wc
, uap
->flags
, retval
);
475 psynch_rw_rdlock(proc_t p
, struct psynch_rw_rdlock_args
* uap
, uint32_t * retval
)
477 return pthread_functions
->psynch_rw_rdlock(p
, uap
->rwlock
, uap
->lgenval
, uap
->ugenval
, uap
->rw_wc
, uap
->flags
, retval
);
481 psynch_rw_unlock(proc_t p
, struct psynch_rw_unlock_args
*uap
, uint32_t *retval
)
483 return pthread_functions
->psynch_rw_unlock(p
, uap
->rwlock
, uap
->lgenval
, uap
->ugenval
, uap
->rw_wc
, uap
->flags
, retval
);
487 psynch_rw_unlock2(__unused proc_t p
, __unused
struct psynch_rw_unlock2_args
*uap
, __unused
uint32_t *retval
)
493 psynch_rw_wrlock(proc_t p
, struct psynch_rw_wrlock_args
*uap
, uint32_t *retval
)
495 return pthread_functions
->psynch_rw_wrlock(p
, uap
->rwlock
, uap
->lgenval
, uap
->ugenval
, uap
->rw_wc
, uap
->flags
, retval
);
499 psynch_rw_yieldwrlock(proc_t p
, struct psynch_rw_yieldwrlock_args
*uap
, uint32_t *retval
)
501 return pthread_functions
->psynch_rw_yieldwrlock(p
, uap
->rwlock
, uap
->lgenval
, uap
->ugenval
, uap
->rw_wc
, uap
->flags
, retval
);
505 psynch_rw_upgrade(__unused proc_t p
, __unused
struct psynch_rw_upgrade_args
* uap
, __unused
uint32_t *retval
)
511 psynch_rw_downgrade(__unused proc_t p
, __unused
struct psynch_rw_downgrade_args
* uap
, __unused
int *retval
)
517 thread_qos_from_pthread_priority(unsigned long priority
, unsigned long *flags
)
519 return pthread_functions
->thread_qos_from_pthread_priority(priority
, flags
);
523 pthread_priority_canonicalize(unsigned long priority
, boolean_t propagation
)
525 return pthread_functions
->pthread_priority_canonicalize2(priority
, propagation
);
529 workq_thread_has_been_unbound(thread_t th
, int qos_class
)
531 if (pthread_functions
->workq_thread_has_been_unbound
) {
532 return pthread_functions
->workq_thread_has_been_unbound(th
, qos_class
);
534 panic("pthread kext does not support workq_thread_has_been_unbound");
540 kdp_pthread_find_owner(thread_t thread
, struct stackshot_thread_waitinfo
*waitinfo
)
542 if (pthread_functions
->pthread_find_owner
)
543 pthread_functions
->pthread_find_owner(thread
, waitinfo
);
547 kdp_pthread_get_thread_kwq(thread_t thread
)
549 if (pthread_functions
->pthread_get_thread_kwq
)
550 return pthread_functions
->pthread_get_thread_kwq(thread
);
556 thread_will_park_or_terminate(thread_t thread
)
558 if (thread_owned_workloops_count(thread
)) {
559 (void)kevent_exit_on_workloop_ownership_leak(thread
);
#if defined(__arm64__)
/* Relaxed 128-bit atomic helpers handed to pthread.kext (arm64 only). */
static unsigned __int128
atomic_fetch_add_128_relaxed(_Atomic unsigned __int128 *ptr,
    unsigned __int128 value)
{
	return atomic_fetch_add_explicit(ptr, value, memory_order_relaxed);
}

static unsigned __int128
atomic_load_128_relaxed(_Atomic unsigned __int128 *ptr)
{
	return atomic_load_explicit(ptr, memory_order_relaxed);
}
#endif
578 * The callbacks structure (defined in pthread_shims.h) contains a collection
579 * of kernel functions that were not deemed sensible to expose as a KPI to all
580 * kernel extensions. So the kext is given them in the form of a structure of
583 static const struct pthread_callbacks_s pthread_callbacks
= {
584 .version
= PTHREAD_SHIMS_VERSION
,
585 .config_thread_max
= CONFIG_THREAD_MAX
,
586 .get_task_threadmax
= get_task_threadmax
,
588 .proc_get_threadstart
= proc_get_threadstart
,
589 .proc_set_threadstart
= proc_set_threadstart
,
590 .proc_get_pthsize
= proc_get_pthsize
,
591 .proc_set_pthsize
= proc_set_pthsize
,
592 .proc_get_wqthread
= proc_get_wqthread
,
593 .proc_set_wqthread
= proc_set_wqthread
,
594 .proc_get_dispatchqueue_offset
= proc_get_dispatchqueue_offset
,
595 .proc_set_dispatchqueue_offset
= proc_set_dispatchqueue_offset
,
596 .proc_get_wqptr
= proc_get_wqptr
,
597 .proc_set_wqptr
= proc_set_wqptr
,
598 .proc_get_pthhash
= proc_get_pthhash
,
599 .proc_set_pthhash
= proc_set_pthhash
,
600 .proc_get_task
= proc_get_task
,
601 .proc_lock
= proc_lock
,
602 .proc_unlock
= proc_unlock
,
603 .proc_get_register
= proc_get_register
,
604 .proc_set_register
= proc_set_register
,
606 /* kernel IPI interfaces */
607 .ipc_port_copyout_send
= ipc_port_copyout_send
,
608 .task_get_ipcspace
= get_task_ipcspace
,
609 .vm_map_page_info
= vm_map_page_info
,
610 .vm_map_switch
= vm_map_switch
,
611 .thread_set_wq_state32
= thread_set_wq_state32
,
612 #if !defined(__arm__)
613 .thread_set_wq_state64
= thread_set_wq_state64
,
616 .uthread_get_threadlist
= uthread_get_threadlist
,
617 .uthread_set_threadlist
= uthread_set_threadlist
,
618 .uthread_get_sigmask
= uthread_get_sigmask
,
619 .uthread_set_sigmask
= uthread_set_sigmask
,
620 .uthread_get_uukwe
= uthread_get_uukwe
,
621 .uthread_get_returnval
= uthread_get_returnval
,
622 .uthread_set_returnval
= uthread_set_returnval
,
623 .uthread_is_cancelled
= uthread_is_cancelled
,
625 .thread_exception_return
= pthread_returning_to_userspace
,
626 .thread_bootstrap_return
= pthread_bootstrap_return
,
627 .unix_syscall_return
= unix_syscall_return
,
629 .absolutetime_to_microtime
= absolutetime_to_microtime
,
631 .thread_set_workq_pri
= thread_set_workq_pri
,
632 .thread_set_workq_qos
= thread_set_workq_qos
,
634 .get_bsdthread_info
= (void*)get_bsdthread_info
,
635 .thread_sched_call
= thread_sched_call
,
636 .thread_static_param
= thread_static_param
,
637 .thread_create_workq
= thread_create_workq
,
638 .thread_policy_set_internal
= thread_policy_set_internal
,
639 .thread_policy_get
= thread_policy_get
,
640 .thread_set_voucher_name
= thread_set_voucher_name
,
642 .thread_affinity_set
= thread_affinity_set
,
648 .workloop_fulfill_threadreq
= workloop_fulfill_threadreq
,
650 .__pthread_testcancel
= __pthread_testcancel
,
652 .mach_port_deallocate
= mach_port_deallocate
,
653 .semaphore_signal_internal_trap
= semaphore_signal_internal_trap
,
654 .current_map
= _current_map
,
655 .thread_create
= thread_create
,
656 .thread_resume
= thread_resume
,
658 .convert_thread_to_port
= convert_thread_to_port
,
659 .ml_get_max_cpus
= (void*)ml_get_max_cpus
,
662 .map_is_1gb
= map_is_1gb
,
664 #if defined(__arm64__)
665 .atomic_fetch_add_128_relaxed
= atomic_fetch_add_128_relaxed
,
666 .atomic_load_128_relaxed
= atomic_load_128_relaxed
,
669 .proc_get_dispatchqueue_serialno_offset
= proc_get_dispatchqueue_serialno_offset
,
670 .proc_set_dispatchqueue_serialno_offset
= proc_set_dispatchqueue_serialno_offset
,
672 .proc_get_stack_addr_hint
= proc_get_stack_addr_hint
,
673 .proc_set_stack_addr_hint
= proc_set_stack_addr_hint
,
674 .proc_get_pthread_tsd_offset
= proc_get_pthread_tsd_offset
,
675 .proc_set_pthread_tsd_offset
= proc_set_pthread_tsd_offset
,
676 .proc_get_mach_thread_self_tsd_offset
= proc_get_mach_thread_self_tsd_offset
,
677 .proc_set_mach_thread_self_tsd_offset
= proc_set_mach_thread_self_tsd_offset
,
679 .thread_set_tsd_base
= thread_set_tsd_base
,
681 .proc_usynch_get_requested_thread_qos
= proc_usynch_get_requested_thread_qos
,
683 .qos_main_thread_active
= qos_main_thread_active
,
685 .proc_usynch_thread_qos_add_override_for_resource_check_owner
= proc_usynch_thread_qos_add_override_for_resource_check_owner
,
686 .proc_usynch_thread_qos_add_override_for_resource
= proc_usynch_thread_qos_add_override_for_resource
,
687 .proc_usynch_thread_qos_remove_override_for_resource
= proc_usynch_thread_qos_remove_override_for_resource
,
688 .proc_usynch_thread_qos_reset_override_for_resource
= proc_usynch_thread_qos_reset_override_for_resource
,
690 .proc_init_wqptr_or_wait
= proc_init_wqptr_or_wait
,
692 .thread_set_tag
= thread_set_tag
,
693 .thread_get_tag
= thread_get_tag
,
695 .proc_usynch_thread_qos_squash_override_for_resource
= proc_usynch_thread_qos_squash_override_for_resource
,
696 .task_get_default_manager_qos
= task_get_default_manager_qos
,
697 .thread_create_workq_waiting
= thread_create_workq_waiting
,
699 .proc_get_return_to_kernel_offset
= proc_get_return_to_kernel_offset
,
700 .proc_set_return_to_kernel_offset
= proc_set_return_to_kernel_offset
,
701 .thread_will_park_or_terminate
= thread_will_park_or_terminate
,
703 .qos_max_parallelism
= qos_max_parallelism
,
705 .proc_get_user_stack
= proc_get_user_stack
,
706 .proc_set_user_stack
= proc_set_user_stack
,
709 pthread_callbacks_t pthread_kern
= &pthread_callbacks
;
710 pthread_functions_t pthread_functions
= NULL
;
713 * pthread_kext_register is called by pthread.kext upon load, it has to provide
714 * us with a function pointer table of pthread internal calls. In return, this
715 * file provides it with a table of function pointers it needs.
719 pthread_kext_register(pthread_functions_t fns
, pthread_callbacks_t
*callbacks
)
721 if (pthread_functions
!= NULL
) {
722 panic("Re-initialisation of pthread kext callbacks.");
725 if (callbacks
!= NULL
) {
726 *callbacks
= &pthread_callbacks
;
728 panic("pthread_kext_register called without callbacks pointer.");
732 pthread_functions
= fns
;