]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/pthread_shims.c
xnu-4570.61.1.tar.gz
[apple/xnu.git] / bsd / kern / pthread_shims.c
1 /*
2 * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #define PTHREAD_INTERNAL 1
30
31 #include <stdatomic.h>
32 #include <kern/debug.h>
33 #include <kern/mach_param.h>
34 #include <kern/sched_prim.h>
35 #include <kern/task.h>
36 #include <kern/thread.h>
37 #include <kern/affinity.h>
38 #include <kern/zalloc.h>
39 #include <kern/policy_internal.h>
40
41 #include <machine/machine_routines.h>
42 #include <mach/task.h>
43 #include <mach/thread_act.h>
44 #include <sys/param.h>
45 #include <sys/eventvar.h>
46 #include <sys/pthread_shims.h>
47 #include <sys/proc_info.h>
48 #include <sys/proc_internal.h>
49 #include <sys/sysproto.h>
50 #include <sys/systm.h>
51 #include <vm/vm_map.h>
52 #include <vm/vm_protos.h>
53 #include <kern/kcdata.h>
54
55 /* version number of the in-kernel shims given to pthread.kext */
56 #define PTHREAD_SHIMS_VERSION 1
57
/* on arm, the callbacks structure ends with a different #ifdef __arm__ pointer member */
59 #if defined(__arm__)
60 #define PTHREAD_CALLBACK_MEMBER map_is_1gb
61 #else
62 #define PTHREAD_CALLBACK_MEMBER ml_get_max_cpus
63 #endif
64
/* compile time asserts to check the length of structures in pthread_shims.h */
/*
 * These guard the xnu <-> pthread.kext ABI: each structure must end with a
 * fixed number of pointer-sized slots after the named member, so any change
 * to the shared structs that forgets to adjust the padding (and bump
 * PTHREAD_SHIMS_VERSION) fails at compile time rather than at runtime.
 */
static_assert((sizeof(struct pthread_functions_s) - offsetof(struct pthread_functions_s, psynch_rw_yieldwrlock) - sizeof(void*)) == (sizeof(void*) * 100));
static_assert((sizeof(struct pthread_callbacks_s) - offsetof(struct pthread_callbacks_s, PTHREAD_CALLBACK_MEMBER) - sizeof(void*)) == (sizeof(void*) * 100));
68
69 /* old pthread code had definitions for these as they don't exist in headers */
70 extern kern_return_t mach_port_deallocate(ipc_space_t, mach_port_name_t);
71 extern kern_return_t semaphore_signal_internal_trap(mach_port_name_t);
72
/*
 * Generates a trivial getter/setter pair for a single struct member, e.g.
 * PTHREAD_STRUCT_ACCESSOR(get_x, set_x, int, struct foo*, x) defines
 * `static int get_x(struct foo *)` and `static void set_x(struct foo *, int)`.
 * Used below to hand field access to pthread.kext without exposing layouts.
 */
#define PTHREAD_STRUCT_ACCESSOR(get, set, rettype, structtype, member) \
	static rettype \
	get(structtype x) { \
		return (x)->member; \
	} \
	static void \
	set(structtype x, rettype y) { \
		(x)->member = y; \
	}
82
/*
 * Accessors for struct proc and struct uthread fields consumed by
 * pthread.kext via the callbacks table at the bottom of this file.
 */
PTHREAD_STRUCT_ACCESSOR(proc_get_threadstart, proc_set_threadstart, user_addr_t, struct proc*, p_threadstart);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthsize, proc_set_pthsize, int, struct proc*, p_pthsize);
PTHREAD_STRUCT_ACCESSOR(proc_get_wqthread, proc_set_wqthread, user_addr_t, struct proc*, p_wqthread);
PTHREAD_STRUCT_ACCESSOR(proc_get_stack_addr_hint, proc_set_stack_addr_hint, user_addr_t, struct proc *, p_stack_addr_hint);
PTHREAD_STRUCT_ACCESSOR(proc_get_dispatchqueue_offset, proc_set_dispatchqueue_offset, uint64_t, struct proc*, p_dispatchqueue_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_dispatchqueue_serialno_offset, proc_set_dispatchqueue_serialno_offset, uint64_t, struct proc*, p_dispatchqueue_serialno_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthread_tsd_offset, proc_set_pthread_tsd_offset, uint32_t, struct proc *, p_pth_tsd_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_mach_thread_self_tsd_offset, proc_set_mach_thread_self_tsd_offset, uint64_t, struct proc *, p_mach_thread_self_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthhash, proc_set_pthhash, void*, struct proc*, p_pthhash);
PTHREAD_STRUCT_ACCESSOR(proc_get_return_to_kernel_offset, proc_set_return_to_kernel_offset, uint64_t, struct proc*, p_return_to_kernel_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_user_stack, proc_set_user_stack, user_addr_t, struct proc*, user_stack);

PTHREAD_STRUCT_ACCESSOR(uthread_get_threadlist, uthread_set_threadlist, void*, struct uthread*, uu_threadlist);
PTHREAD_STRUCT_ACCESSOR(uthread_get_sigmask, uthread_set_sigmask, sigset_t, struct uthread*, uu_sigmask);
PTHREAD_STRUCT_ACCESSOR(uthread_get_returnval, uthread_set_returnval, int, struct uthread*, uu_rval[0]);
98
99 #define WQPTR_IS_INITING_VALUE ((void *)~(uintptr_t)0)
100
101 static void *
102 proc_get_wqptr(struct proc *p) {
103 void *wqptr = p->p_wqptr;
104 return (wqptr == WQPTR_IS_INITING_VALUE) ? NULL : wqptr;
105 }
/*
 * Publish the per-process workqueue pointer under the proc lock.
 * Legal transitions are WQPTR_IS_INITING_VALUE -> real pointer, or
 * anything -> NULL (teardown); the assert enforces this.  Installing a
 * real pointer wakes any threads blocked in proc_init_wqptr_or_wait().
 */
static void
proc_set_wqptr(struct proc *p, void *y) {
	proc_lock(p);

	assert(y == NULL || p->p_wqptr == WQPTR_IS_INITING_VALUE);

	p->p_wqptr = y;

	if (y != NULL){
		wakeup(&p->p_wqptr);
	}

	proc_unlock(p);
}
/*
 * Claim the right to initialize the process workqueue.
 *
 * Returns TRUE when the caller installed the initing sentinel and must now
 * perform the setup (completing it via proc_set_wqptr()).  Returns FALSE
 * when another thread already finished the initialization, or — if it is
 * still in progress — after sleeping (THREAD_UNINT) until proc_set_wqptr()
 * issues the wakeup.  Callers receiving FALSE should re-check state.
 */
static boolean_t
proc_init_wqptr_or_wait(struct proc *p) {
	proc_lock(p);

	if (p->p_wqptr == NULL){
		p->p_wqptr = WQPTR_IS_INITING_VALUE;
		proc_unlock(p);

		return TRUE;
	} else if (p->p_wqptr == WQPTR_IS_INITING_VALUE){
		/* init in progress elsewhere: wait for the proc_set_wqptr() wakeup */
		assert_wait(&p->p_wqptr, THREAD_UNINT);
		proc_unlock(p);
		thread_block(THREAD_CONTINUE_NULL);

		return FALSE;
	} else {
		/* already fully initialized */
		proc_unlock(p);

		return FALSE;
	}
}
141
/*
 * Noreturn trampoline handed to pthread.kext: resumes user mode via the
 * exception-return path (used when returning to an already-set-up thread).
 */
__attribute__((noreturn))
static void
pthread_returning_to_userspace(void)
{
	thread_exception_return();
}
148
/*
 * Noreturn trampoline handed to pthread.kext: first return to user mode for
 * a freshly created thread, via the bootstrap-return path.
 */
__attribute__((noreturn))
static void
pthread_bootstrap_return(void)
{
	thread_bootstrap_return();
}
155
/* Expose the kernel's global per-task thread limit to pthread.kext. */
static uint32_t
get_task_threadmax(void) {
	return task_threadmax;
}
160
/* Return the Mach task backing this BSD process. */
static task_t
proc_get_task(struct proc *p) {
	return p->task;
}
165
/*
 * Test whether the process has registered with the pthread subsystem.
 * Note: returns the raw masked P_LREGISTER bit, i.e. nonzero-or-zero,
 * not a canonical 0/1 value.
 */
static uint64_t
proc_get_register(struct proc *p) {
	return (p->p_lflag & P_LREGISTER);
}

/* Mark the process as registered (sets P_LREGISTER via proc_setregister). */
static void
proc_set_register(struct proc *p) {
	proc_setregister(p);
}
175
/* Return a pointer to the uthread's kevent wait-queue element (uu_kwe). */
static void*
uthread_get_uukwe(struct uthread *t)
{
	return &t->uu_kevent.uu_kwe;
}
181
182 static int
183 uthread_is_cancelled(struct uthread *t)
184 {
185 return (t->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL;
186 }
187
/* Thin wrapper so pthread.kext can obtain the current thread's VM map. */
static vm_map_t
_current_map(void)
{
	return current_map();
}
193
/* QoS is active for main threads on this configuration; always TRUE here. */
static boolean_t
qos_main_thread_active(void)
{
	return TRUE;
}
199
#if defined(__arm__)
/* On iOS, the stack placement depends on the address space size */
/*
 * TRUE when the (32-bit) map's maximum address matches the minimum machine
 * offset, i.e. the process runs in a 1GB address space.
 */
static uint32_t
map_is_1gb(vm_map_t map)
{
	return ((!vm_map_is_64bit(map)) && (get_map_max(map) == ml_get_max_offset(FALSE, MACHINE_MAX_OFFSET_MIN)));
}
#endif
208
/*
 * Return the requested QoS of the given uthread (or of the current thread
 * when uth is NULL), for use as the level of a usynch QoS override.
 */
static int proc_usynch_get_requested_thread_qos(struct uthread *uth)
{
	thread_t thread = uth ? uth->uu_thread : current_thread();
	int requested_qos;

	requested_qos = proc_get_thread_policy(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS);

	/*
	 * For the purposes of userspace synchronization, it doesn't make sense to
	 * place an override of UNSPECIFIED on another thread, if the current thread
	 * doesn't have any QoS set. In these cases, upgrade to
	 * THREAD_QOS_USER_INTERACTIVE.
	 */
	if (requested_qos == THREAD_QOS_UNSPECIFIED) {
		requested_qos = THREAD_QOS_USER_INTERACTIVE;
	}

	return requested_qos;
}
228
/*
 * Add a QoS override on `thread` for a usynch resource, but only after the
 * policy layer verifies that user_lock_addr still names user_lock_owner
 * (guards against racing unlocks).  Forwards to the task-policy layer.
 */
static int
proc_usynch_thread_qos_add_override_for_resource_check_owner(thread_t thread,
		int override_qos, boolean_t first_override_for_resource,
		user_addr_t resource, int resource_type,
		user_addr_t user_lock_addr, mach_port_name_t user_lock_owner)
{
	return proc_thread_qos_add_override_check_owner(thread, override_qos,
			first_override_for_resource, resource, resource_type,
			user_lock_addr, user_lock_owner);
}
239
/*
 * Add a QoS override for a usynch resource.  The target may be named either
 * by uthread (resolved to its thread) or, when uth is NULL, by tid.
 */
static boolean_t
proc_usynch_thread_qos_add_override_for_resource(task_t task, struct uthread *uth,
		uint64_t tid, int override_qos, boolean_t first_override_for_resource,
		user_addr_t resource, int resource_type)
{
	thread_t thread = uth ? uth->uu_thread : THREAD_NULL;

	return proc_thread_qos_add_override(task, thread, tid, override_qos,
			first_override_for_resource, resource, resource_type);
}
250
/* Remove one QoS override previously placed for this usynch resource. */
static boolean_t
proc_usynch_thread_qos_remove_override_for_resource(task_t task,
		struct uthread *uth, uint64_t tid, user_addr_t resource, int resource_type)
{
	thread_t thread = uth ? uth->uu_thread : THREAD_NULL;

	return proc_thread_qos_remove_override(task, thread, tid, resource, resource_type);
}
259
/* Reset (drop entirely) the QoS override state for this usynch resource. */
static boolean_t
proc_usynch_thread_qos_reset_override_for_resource(task_t task,
		struct uthread *uth, uint64_t tid, user_addr_t resource, int resource_type)
{
	thread_t thread = uth ? uth->uu_thread : THREAD_NULL;

	return proc_thread_qos_reset_override(task, thread, tid, resource, resource_type);
}
268
/* Squash (collapse to base) the QoS override on `thread` for this resource. */
static boolean_t
proc_usynch_thread_qos_squash_override_for_resource(thread_t thread,
		user_addr_t resource, int resource_type)
{
	return proc_thread_qos_squash_override(thread, resource, resource_type);
}
275
276 /* kernel (core) to kext shims */
277
/*
 * Called during kernel bootstrap; pthread.kext must already have registered
 * its function table (via pthread_kext_register), otherwise boot cannot
 * proceed and we panic.
 */
void
pthread_init(void)
{
	if (!pthread_functions) {
		panic("pthread kernel extension not loaded (function table is NULL).");
	}
	pthread_functions->pthread_init();
}
286
/* Fill in proc_info workqueue statistics for p; returns 0 or an errno. */
int
fill_procworkqueue(proc_t p, struct proc_workqueueinfo * pwqinfo)
{
	return pthread_functions->fill_procworkqueue(p, pwqinfo);
}
292
293 /*
294 * Returns true if the workqueue flags are available, and will fill
295 * in exceeded_total and exceeded_constrained.
296 */
297 boolean_t
298 workqueue_get_pwq_exceeded(void *v, boolean_t *exceeded_total,
299 boolean_t *exceeded_constrained)
300 {
301 proc_t p = v;
302 struct proc_workqueueinfo pwqinfo;
303 int err;
304
305 assert(p != NULL);
306 assert(exceeded_total != NULL);
307 assert(exceeded_constrained != NULL);
308
309 err = fill_procworkqueue(p, &pwqinfo);
310 if (err) {
311 return FALSE;
312 }
313 if (!(pwqinfo.pwq_state & WQ_FLAGS_AVAILABLE)) {
314 return FALSE;
315 }
316
317 *exceeded_total = (pwqinfo.pwq_state & WQ_EXCEEDED_TOTAL_THREAD_LIMIT);
318 *exceeded_constrained = (pwqinfo.pwq_state & WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT);
319
320 return TRUE;
321 }
322
323 uint32_t
324 workqueue_get_pwq_state_kdp(void * v)
325 {
326 static_assert((WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT << 17) == kTaskWqExceededConstrainedThreadLimit);
327 static_assert((WQ_EXCEEDED_TOTAL_THREAD_LIMIT << 17) == kTaskWqExceededTotalThreadLimit);
328 static_assert((WQ_FLAGS_AVAILABLE << 17) == kTaskWqFlagsAvailable);
329 static_assert((WQ_FLAGS_AVAILABLE | WQ_EXCEEDED_TOTAL_THREAD_LIMIT | WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT) == 0x7);
330 proc_t p = v;
331 if (pthread_functions == NULL || pthread_functions->get_pwq_state_kdp == NULL)
332 return 0;
333 else
334 return pthread_functions->get_pwq_state_kdp(p);
335 }
336
/* Tear down workqueue state during process exit. */
void
workqueue_exit(struct proc *p)
{
	pthread_functions->workqueue_exit(p);
}

/* Notify pthread.kext that the process has begun exiting. */
void
workqueue_mark_exiting(struct proc *p)
{
	pthread_functions->workqueue_mark_exiting(p);
}

/* Hint from the scheduler path that a workqueue thread yielded. */
void
workqueue_thread_yielded(void)
{
	pthread_functions->workqueue_thread_yielded();
}
354
355 sched_call_t
356 workqueue_get_sched_callback(void)
357 {
358 if (pthread_functions->workqueue_get_sched_callback) {
359 return pthread_functions->workqueue_get_sched_callback();
360 }
361 return NULL;
362 }
363
/* Initialize the per-process psynch hash (p_pthhash) at proc creation. */
void
pth_proc_hashinit(proc_t p)
{
	pthread_functions->pth_proc_hashinit(p);
}

/* Destroy the per-process psynch hash at proc teardown. */
void
pth_proc_hashdelete(proc_t p)
{
	pthread_functions->pth_proc_hashdelete(p);
}
375
/* syscall shims */

/* bsdthread_create(2): create a user pthread; forwards to pthread.kext. */
int
bsdthread_create(struct proc *p, struct bsdthread_create_args *uap, user_addr_t *retval)
{
	return pthread_functions->bsdthread_create(p, uap->func, uap->func_arg, uap->stack, uap->pthread, uap->flags, retval);
}
382
/*
 * bsdthread_register(2): register the process's pthread entry points.
 * Version >= 1 kexts provide bsdthread_register2, which additionally
 * understands the TSD offset; older kexts fall back to the original entry.
 */
int
bsdthread_register(struct proc *p, struct bsdthread_register_args *uap, __unused int32_t *retval)
{
	if (pthread_functions->version >= 1) {
		return pthread_functions->bsdthread_register2(p, uap->threadstart, uap->wqthread,
				uap->flags, uap->stack_addr_hint,
				uap->targetconc_ptr, uap->dispatchqueue_offset,
				uap->tsd_offset, retval);
	} else {
		return pthread_functions->bsdthread_register(p, uap->threadstart, uap->wqthread,
				uap->flags, uap->stack_addr_hint,
				uap->targetconc_ptr, uap->dispatchqueue_offset,
				retval);
	}
}
398
/* bsdthread_terminate(2): free a pthread's stack and signal its joiner. */
int
bsdthread_terminate(struct proc *p, struct bsdthread_terminate_args *uap, int32_t *retval)
{
	return pthread_functions->bsdthread_terminate(p, uap->stackaddr, uap->freesize, uap->port, uap->sem, retval);
}
404
/* bsdthread_ctl(2): multiplexed pthread control operations (QoS etc.). */
int
bsdthread_ctl(struct proc *p, struct bsdthread_ctl_args *uap, int *retval)
{
	return pthread_functions->bsdthread_ctl(p, uap->cmd, uap->arg1, uap->arg2, uap->arg3, retval);
}
410
411
/* thread_selfid(2): return the calling thread's unique 64-bit id. */
int
thread_selfid(struct proc *p, __unused struct thread_selfid_args *uap, uint64_t *retval)
{
	return pthread_functions->thread_selfid(p, retval);
}
417
/* workq_kernreturn(2): workqueue thread park/unpark and request operations. */
int
workq_kernreturn(struct proc *p, struct workq_kernreturn_args *uap, int32_t *retval)
{
	return pthread_functions->workq_kernreturn(p, uap->options, uap->item, uap->affinity, uap->prio, retval);
}
423
/* workq_open(2): create the process's workqueue. */
int
workq_open(struct proc *p, __unused struct workq_open_args *uap, int32_t *retval)
{
	return pthread_functions->workq_open(p, retval);
}
429
/* pthread synchroniser syscalls */

/* psynch_mutexwait(2): block on a pthread mutex. */
int
psynch_mutexwait(proc_t p, struct psynch_mutexwait_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_mutexwait(p, uap->mutex, uap->mgen, uap->ugen, uap->tid, uap->flags, retval);
}

/* psynch_mutexdrop(2): release a pthread mutex, waking a waiter. */
int
psynch_mutexdrop(proc_t p, struct psynch_mutexdrop_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_mutexdrop(p, uap->mutex, uap->mgen, uap->ugen, uap->tid, uap->flags, retval);
}

/* psynch_cvbroad(2): broadcast a pthread condition variable. */
int
psynch_cvbroad(proc_t p, struct psynch_cvbroad_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_cvbroad(p, uap->cv, uap->cvlsgen, uap->cvudgen, uap->flags, uap->mutex, uap->mugen, uap->tid, retval);
}

/* psynch_cvsignal(2): signal a pthread condition variable. */
int
psynch_cvsignal(proc_t p, struct psynch_cvsignal_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_cvsignal(p, uap->cv, uap->cvlsgen, uap->cvugen, uap->thread_port, uap->mutex, uap->mugen, uap->tid, uap->flags, retval);
}

/* psynch_cvwait(2): wait on a condition variable (optionally timed). */
int
psynch_cvwait(proc_t p, struct psynch_cvwait_args * uap, uint32_t * retval)
{
	return pthread_functions->psynch_cvwait(p, uap->cv, uap->cvlsgen, uap->cvugen, uap->mutex, uap->mugen, uap->flags, uap->sec, uap->nsec, retval);
}

/* psynch_cvclrprepost(2): clear pre-posted wakeups on a condition variable. */
int
psynch_cvclrprepost(proc_t p, struct psynch_cvclrprepost_args * uap, int *retval)
{
	return pthread_functions->psynch_cvclrprepost(p, uap->cv, uap->cvgen, uap->cvugen, uap->cvsgen, uap->prepocnt, uap->preposeq, uap->flags, retval);
}
467
/* psynch_rw_longrdlock(2): acquire a pthread rwlock for long reading. */
int
psynch_rw_longrdlock(proc_t p, struct psynch_rw_longrdlock_args * uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_longrdlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

/* psynch_rw_rdlock(2): acquire a pthread rwlock for reading. */
int
psynch_rw_rdlock(proc_t p, struct psynch_rw_rdlock_args * uap, uint32_t * retval)
{
	return pthread_functions->psynch_rw_rdlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

/* psynch_rw_unlock(2): release a pthread rwlock. */
int
psynch_rw_unlock(proc_t p, struct psynch_rw_unlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_unlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

/* psynch_rw_unlock2(2): unsupported; always fails with ENOTSUP. */
int
psynch_rw_unlock2(__unused proc_t p, __unused struct psynch_rw_unlock2_args *uap, __unused uint32_t *retval)
{
	return ENOTSUP;
}

/* psynch_rw_wrlock(2): acquire a pthread rwlock for writing. */
int
psynch_rw_wrlock(proc_t p, struct psynch_rw_wrlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_wrlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

/* psynch_rw_yieldwrlock(2): acquire a pthread rwlock for writing, yielding. */
int
psynch_rw_yieldwrlock(proc_t p, struct psynch_rw_yieldwrlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_yieldwrlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}

/*
 * upgrade/downgrade appear to be retired no-ops that always succeed —
 * presumably kept so their syscall numbers remain valid; verify against the
 * syscall table before changing.
 */
int
psynch_rw_upgrade(__unused proc_t p, __unused struct psynch_rw_upgrade_args * uap, __unused uint32_t *retval)
{
	return 0;
}

int
psynch_rw_downgrade(__unused proc_t p, __unused struct psynch_rw_downgrade_args * uap, __unused int *retval)
{
	return 0;
}
515
/* Decode a userspace pthread_priority_t into a thread QoS class + flags. */
int
thread_qos_from_pthread_priority(unsigned long priority, unsigned long *flags)
{
	return pthread_functions->thread_qos_from_pthread_priority(priority, flags);
}
521
/* Canonicalize a pthread priority value (propagation selects the variant). */
unsigned long
pthread_priority_canonicalize(unsigned long priority, boolean_t propagation)
{
	return pthread_functions->pthread_priority_canonicalize2(priority, propagation);
}
527
/*
 * Ask the kext whether the workqueue thread was unbound at the given QoS.
 * This entry is mandatory for the kernel's kevent integration, so a kext
 * missing it is a fatal mismatch: panic rather than guess.
 */
boolean_t
workq_thread_has_been_unbound(thread_t th, int qos_class)
{
	if (pthread_functions->workq_thread_has_been_unbound) {
		return pthread_functions->workq_thread_has_been_unbound(th, qos_class);
	} else {
		panic("pthread kext does not support workq_thread_has_been_unbound");
		return false;
	}
}
538
539 void
540 kdp_pthread_find_owner(thread_t thread, struct stackshot_thread_waitinfo *waitinfo)
541 {
542 if (pthread_functions->pthread_find_owner)
543 pthread_functions->pthread_find_owner(thread, waitinfo);
544 }
545
546 void *
547 kdp_pthread_get_thread_kwq(thread_t thread)
548 {
549 if (pthread_functions->pthread_get_thread_kwq)
550 return pthread_functions->pthread_get_thread_kwq(thread);
551
552 return NULL;
553 }
554
/*
 * Called by the kext just before a workqueue thread parks or terminates.
 * If the thread still owns kqueue workloops at this point, that is an
 * ownership leak — report it via kevent_exit_on_workloop_ownership_leak().
 */
static void
thread_will_park_or_terminate(thread_t thread)
{
	if (thread_owned_workloops_count(thread)) {
		(void)kevent_exit_on_workloop_ownership_leak(thread);
	}
}
562
#if defined(__arm64__)
/*
 * 128-bit relaxed atomics for pthread.kext (arm64 only, where the toolchain
 * supports _Atomic unsigned __int128).  Relaxed ordering: callers must
 * impose any ordering they need themselves.
 */
static unsigned __int128
atomic_fetch_add_128_relaxed(_Atomic unsigned __int128 *ptr, unsigned __int128 value)
{
	return atomic_fetch_add_explicit(ptr, value, memory_order_relaxed);
}

static unsigned __int128
atomic_load_128_relaxed(_Atomic unsigned __int128 *ptr)
{
	return atomic_load_explicit(ptr, memory_order_relaxed);
}
#endif
576
/*
 * The callbacks structure (defined in pthread_shims.h) contains a collection
 * of kernel functions that were not deemed sensible to expose as a KPI to all
 * kernel extensions. So the kext is given them in the form of a structure of
 * function pointers.
 */
static const struct pthread_callbacks_s pthread_callbacks = {
	.version = PTHREAD_SHIMS_VERSION,
	.config_thread_max = CONFIG_THREAD_MAX,
	.get_task_threadmax = get_task_threadmax,

	/* proc field accessors (generated above by PTHREAD_STRUCT_ACCESSOR) */
	.proc_get_threadstart = proc_get_threadstart,
	.proc_set_threadstart = proc_set_threadstart,
	.proc_get_pthsize = proc_get_pthsize,
	.proc_set_pthsize = proc_set_pthsize,
	.proc_get_wqthread = proc_get_wqthread,
	.proc_set_wqthread = proc_set_wqthread,
	.proc_get_dispatchqueue_offset = proc_get_dispatchqueue_offset,
	.proc_set_dispatchqueue_offset = proc_set_dispatchqueue_offset,
	.proc_get_wqptr = proc_get_wqptr,
	.proc_set_wqptr = proc_set_wqptr,
	.proc_get_pthhash = proc_get_pthhash,
	.proc_set_pthhash = proc_set_pthhash,
	.proc_get_task = proc_get_task,
	.proc_lock = proc_lock,
	.proc_unlock = proc_unlock,
	.proc_get_register = proc_get_register,
	.proc_set_register = proc_set_register,

	/* kernel IPI interfaces */
	.ipc_port_copyout_send = ipc_port_copyout_send,
	.task_get_ipcspace = get_task_ipcspace,
	.vm_map_page_info = vm_map_page_info,
	.vm_map_switch = vm_map_switch,
	.thread_set_wq_state32 = thread_set_wq_state32,
#if !defined(__arm__)
	.thread_set_wq_state64 = thread_set_wq_state64,
#endif

	/* uthread field accessors */
	.uthread_get_threadlist = uthread_get_threadlist,
	.uthread_set_threadlist = uthread_set_threadlist,
	.uthread_get_sigmask = uthread_get_sigmask,
	.uthread_set_sigmask = uthread_set_sigmask,
	.uthread_get_uukwe = uthread_get_uukwe,
	.uthread_get_returnval = uthread_get_returnval,
	.uthread_set_returnval = uthread_set_returnval,
	.uthread_is_cancelled = uthread_is_cancelled,

	/* user-return trampolines (noreturn wrappers defined above) */
	.thread_exception_return = pthread_returning_to_userspace,
	.thread_bootstrap_return = pthread_bootstrap_return,
	.unix_syscall_return = unix_syscall_return,

	.absolutetime_to_microtime = absolutetime_to_microtime,

	.thread_set_workq_pri = thread_set_workq_pri,
	.thread_set_workq_qos = thread_set_workq_qos,

	.get_bsdthread_info = (void*)get_bsdthread_info,
	.thread_sched_call = thread_sched_call,
	.thread_static_param = thread_static_param,
	.thread_create_workq = thread_create_workq,
	.thread_policy_set_internal = thread_policy_set_internal,
	.thread_policy_get = thread_policy_get,
	.thread_set_voucher_name = thread_set_voucher_name,

	.thread_affinity_set = thread_affinity_set,

	/* zone allocator entry points */
	.zalloc = zalloc,
	.zfree = zfree,
	.zinit = zinit,

	.workloop_fulfill_threadreq = workloop_fulfill_threadreq,

	.__pthread_testcancel = __pthread_testcancel,

	.mach_port_deallocate = mach_port_deallocate,
	.semaphore_signal_internal_trap = semaphore_signal_internal_trap,
	.current_map = _current_map,
	.thread_create = thread_create,
	.thread_resume = thread_resume,

	.convert_thread_to_port = convert_thread_to_port,
	.ml_get_max_cpus = (void*)ml_get_max_cpus,

#if defined(__arm__)
	.map_is_1gb = map_is_1gb,
#endif
#if defined(__arm64__)
	.atomic_fetch_add_128_relaxed = atomic_fetch_add_128_relaxed,
	.atomic_load_128_relaxed = atomic_load_128_relaxed,
#endif

	.proc_get_dispatchqueue_serialno_offset = proc_get_dispatchqueue_serialno_offset,
	.proc_set_dispatchqueue_serialno_offset = proc_set_dispatchqueue_serialno_offset,

	.proc_get_stack_addr_hint = proc_get_stack_addr_hint,
	.proc_set_stack_addr_hint = proc_set_stack_addr_hint,
	.proc_get_pthread_tsd_offset = proc_get_pthread_tsd_offset,
	.proc_set_pthread_tsd_offset = proc_set_pthread_tsd_offset,
	.proc_get_mach_thread_self_tsd_offset = proc_get_mach_thread_self_tsd_offset,
	.proc_set_mach_thread_self_tsd_offset = proc_set_mach_thread_self_tsd_offset,

	.thread_set_tsd_base = thread_set_tsd_base,

	.proc_usynch_get_requested_thread_qos = proc_usynch_get_requested_thread_qos,

	.qos_main_thread_active = qos_main_thread_active,

	/* usynch QoS override plumbing (wrappers defined above) */
	.proc_usynch_thread_qos_add_override_for_resource_check_owner = proc_usynch_thread_qos_add_override_for_resource_check_owner,
	.proc_usynch_thread_qos_add_override_for_resource = proc_usynch_thread_qos_add_override_for_resource,
	.proc_usynch_thread_qos_remove_override_for_resource = proc_usynch_thread_qos_remove_override_for_resource,
	.proc_usynch_thread_qos_reset_override_for_resource = proc_usynch_thread_qos_reset_override_for_resource,

	.proc_init_wqptr_or_wait = proc_init_wqptr_or_wait,

	.thread_set_tag = thread_set_tag,
	.thread_get_tag = thread_get_tag,

	.proc_usynch_thread_qos_squash_override_for_resource = proc_usynch_thread_qos_squash_override_for_resource,
	.task_get_default_manager_qos = task_get_default_manager_qos,
	.thread_create_workq_waiting = thread_create_workq_waiting,

	.proc_get_return_to_kernel_offset = proc_get_return_to_kernel_offset,
	.proc_set_return_to_kernel_offset = proc_set_return_to_kernel_offset,
	.thread_will_park_or_terminate = thread_will_park_or_terminate,

	.qos_max_parallelism = qos_max_parallelism,

	.proc_get_user_stack = proc_get_user_stack,
	.proc_set_user_stack = proc_set_user_stack,
};
708
/* Callback table handed to pthread.kext at registration time. */
pthread_callbacks_t pthread_kern = &pthread_callbacks;
/* Filled in by pthread.kext via pthread_kext_register(); NULL until then. */
pthread_functions_t pthread_functions = NULL;
711
/*
 * pthread_kext_register is called by pthread.kext upon load, it has to provide
 * us with a function pointer table of pthread internal calls. In return, this
 * file provides it with a table of function pointers it needs.
 */

void
pthread_kext_register(pthread_functions_t fns, pthread_callbacks_t *callbacks)
{
	/* one-shot: the function table must never be swapped after load */
	if (pthread_functions != NULL) {
		panic("Re-initialisation of pthread kext callbacks.");
	}

	if (callbacks != NULL) {
		*callbacks = &pthread_callbacks;
	} else {
		panic("pthread_kext_register called without callbacks pointer.");
	}

	if (fns) {
		pthread_functions = fns;
	}
}