]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (c) 2012-2016 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | ||
29 | #define PTHREAD_INTERNAL 1 | |
30 | ||
31 | #include <stdatomic.h> | |
32 | #include <kern/debug.h> | |
33 | #include <kern/mach_param.h> | |
34 | #include <kern/sched_prim.h> | |
35 | #include <kern/task.h> | |
36 | #include <kern/thread.h> | |
37 | #include <kern/affinity.h> | |
38 | #include <kern/zalloc.h> | |
39 | #include <kern/policy_internal.h> | |
40 | ||
41 | #include <machine/machine_routines.h> | |
42 | #include <mach/task.h> | |
43 | #include <mach/thread_act.h> | |
44 | #include <sys/param.h> | |
45 | #include <sys/eventvar.h> | |
46 | #include <sys/pthread_shims.h> | |
47 | #include <pthread/workqueue_internal.h> | |
48 | #include <sys/cdefs.h> | |
49 | #include <sys/proc_info.h> | |
50 | #include <sys/proc_internal.h> | |
51 | #include <sys/sysproto.h> | |
52 | #include <sys/systm.h> | |
53 | #include <vm/vm_map.h> | |
54 | #include <vm/vm_protos.h> | |
55 | #include <kern/kcdata.h> | |
56 | ||
/* version number of the in-kernel shims given to pthread.kext */
#define PTHREAD_SHIMS_VERSION 1

/* on arm, the callbacks structure has two #ifdef-arm pointers; pick the
 * retired slot name used as the length-check anchor on each architecture */
#if defined(__arm__)
#define PTHREAD_CALLBACK_MEMBER __unused_was_map_is_1gb
#else
#define PTHREAD_CALLBACK_MEMBER __unused_was_ml_get_max_cpus
#endif
66 | ||
/* compile time asserts to check the length of structures in pthread_shims.h;
 * both tables must keep exactly 100 spare pointer slots after the anchor member
 * so the kext ABI stays stable across releases */
static_assert((sizeof(struct pthread_functions_s) - offsetof(struct pthread_functions_s, psynch_rw_yieldwrlock) - sizeof(void*)) == (sizeof(void*) * 100));
static_assert((sizeof(struct pthread_callbacks_s) - offsetof(struct pthread_callbacks_s, PTHREAD_CALLBACK_MEMBER) - sizeof(void*)) == (sizeof(void*) * 100));
70 | ||
/* old pthread code had definitions for these as they don't exist in headers */
extern kern_return_t mach_port_deallocate(ipc_space_t, mach_port_name_t);
extern kern_return_t semaphore_signal_internal_trap(mach_port_name_t);
extern void thread_deallocate_safe(thread_t thread);
75 | ||
/*
 * Generates a static getter/setter pair for a single struct member, used
 * below to expose selected proc fields to pthread.kext through the
 * callbacks table without exporting the struct layout itself.
 */
#define PTHREAD_STRUCT_ACCESSOR(get, set, rettype, structtype, member) \
	static rettype \
	get(structtype x) { \
		return (x)->member; \
	} \
	static void \
	set(structtype x, rettype y) { \
		(x)->member = y; \
	}
85 | ||
/* Accessors for the per-process pthread bookkeeping fields handed to the kext. */
PTHREAD_STRUCT_ACCESSOR(proc_get_threadstart, proc_set_threadstart, user_addr_t, struct proc*, p_threadstart);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthsize, proc_set_pthsize, int, struct proc*, p_pthsize);
PTHREAD_STRUCT_ACCESSOR(proc_get_wqthread, proc_set_wqthread, user_addr_t, struct proc*, p_wqthread);
PTHREAD_STRUCT_ACCESSOR(proc_get_stack_addr_hint, proc_set_stack_addr_hint, user_addr_t, struct proc *, p_stack_addr_hint);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthread_tsd_offset, proc_set_pthread_tsd_offset, uint32_t, struct proc *, p_pth_tsd_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_mach_thread_self_tsd_offset, proc_set_mach_thread_self_tsd_offset, uint64_t, struct proc *, p_mach_thread_self_offset);
PTHREAD_STRUCT_ACCESSOR(proc_get_pthhash, proc_set_pthhash, void*, struct proc*, p_pthhash);

/* sentinel stored in the workqueue pointer while initialization is in flight */
#define WQPTR_IS_INITING_VALUE ((void *)~(uintptr_t)0)
95 | ||
/* Record the userspace dispatch-queue TSD offset for process p. */
static void
proc_set_dispatchqueue_offset(struct proc *p, uint64_t offset)
{
	p->p_dispatchqueue_offset = offset;
}
101 | ||
/* Record the userspace return-to-kernel TSD offset for process p. */
static void
proc_set_return_to_kernel_offset(struct proc *p, uint64_t offset)
{
	p->p_return_to_kernel_offset = offset;
}
107 | ||
/* Return the user stack address recorded for process p. */
static user_addr_t
proc_get_user_stack(struct proc *p)
{
	return p->user_stack;
}
113 | ||
/* Set the primary syscall return value for uthread uth. */
static void
uthread_set_returnval(struct uthread *uth, int retval)
{
	uth->uu_rval[0] = retval;
}
119 | ||
/* Return to userspace via the exception-return path; does not come back. */
__attribute__((noreturn))
static void
pthread_returning_to_userspace(void)
{
	thread_exception_return();
}
126 | ||
/* First return to userspace for a newly-created thread; does not come back. */
__attribute__((noreturn))
static void
pthread_bootstrap_return(void)
{
	thread_bootstrap_return();
}
133 | ||
/* Expose the kernel's per-task thread limit to the kext. */
static uint32_t
get_task_threadmax(void) {
	return task_threadmax;
}
138 | ||
/* Nonzero iff process p has already registered with bsdthread_register
 * (P_LREGISTER set in p_lflag). */
static uint64_t
proc_get_register(struct proc *p) {
	return (p->p_lflag & P_LREGISTER);
}
143 | ||
/* Mark process p as having completed bsdthread_register. */
static void
proc_set_register(struct proc *p) {
	proc_setregister(p);
}
148 | ||
/* Return a pointer to the uthread's embedded ksyn wait-queue element. */
static void*
uthread_get_uukwe(struct uthread *t)
{
	return &t->uu_save.uus_kwe;
}
154 | ||
155 | static int | |
156 | uthread_is_cancelled(struct uthread *t) | |
157 | { | |
158 | return (t->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL; | |
159 | } | |
160 | ||
/* Wrapper so the kext can fetch the current thread's vm_map through the table. */
static vm_map_t
_current_map(void)
{
	return current_map();
}
166 | ||
/* QoS management of the main thread is always enabled in this kernel. */
static boolean_t
qos_main_thread_active(void)
{
	return TRUE;
}
172 | ||
173 | static int proc_usynch_get_requested_thread_qos(struct uthread *uth) | |
174 | { | |
175 | thread_t thread = uth ? uth->uu_thread : current_thread(); | |
176 | int requested_qos; | |
177 | ||
178 | requested_qos = proc_get_thread_policy(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS); | |
179 | ||
180 | /* | |
181 | * For the purposes of userspace synchronization, it doesn't make sense to | |
182 | * place an override of UNSPECIFIED on another thread, if the current thread | |
183 | * doesn't have any QoS set. In these cases, upgrade to | |
184 | * THREAD_QOS_USER_INTERACTIVE. | |
185 | */ | |
186 | if (requested_qos == THREAD_QOS_UNSPECIFIED) { | |
187 | requested_qos = THREAD_QOS_USER_INTERACTIVE; | |
188 | } | |
189 | ||
190 | return requested_qos; | |
191 | } | |
192 | ||
193 | static boolean_t | |
194 | proc_usynch_thread_qos_add_override_for_resource(task_t task, struct uthread *uth, | |
195 | uint64_t tid, int override_qos, boolean_t first_override_for_resource, | |
196 | user_addr_t resource, int resource_type) | |
197 | { | |
198 | thread_t thread = uth ? uth->uu_thread : THREAD_NULL; | |
199 | ||
200 | return proc_thread_qos_add_override(task, thread, tid, override_qos, | |
201 | first_override_for_resource, resource, resource_type) == 0; | |
202 | } | |
203 | ||
204 | static boolean_t | |
205 | proc_usynch_thread_qos_remove_override_for_resource(task_t task, | |
206 | struct uthread *uth, uint64_t tid, user_addr_t resource, int resource_type) | |
207 | { | |
208 | thread_t thread = uth ? uth->uu_thread : THREAD_NULL; | |
209 | ||
210 | return proc_thread_qos_remove_override(task, thread, tid, resource, | |
211 | resource_type) == 0; | |
212 | } | |
213 | ||
214 | ||
/*
 * Assert a wait on the kwq event, optionally routing through a turnstile.
 * When tstore is provided, a turnstile is prepared with `owner` as the
 * (delayed-update) priority inheritor and the wait is asserted on the
 * turnstile's waitq; otherwise a plain deadline wait is asserted on kwq.
 * The pending block hint is set either way so stackshot can classify the
 * blocked thread. Returns the wait_result_t from the assert call.
 */
static wait_result_t
psynch_wait_prepare(uintptr_t kwq, struct turnstile **tstore,
    thread_t owner, block_hint_t block_hint, uint64_t deadline)
{
	struct turnstile *ts;
	wait_result_t wr;

	if (tstore) {
		ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL,
		    TURNSTILE_PTHREAD_MUTEX);

		/* delayed update: caller completes it via psynch_wait_update_complete */
		turnstile_update_inheritor(ts, owner,
		    (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

		thread_set_pending_block_hint(current_thread(), block_hint);

		wr = waitq_assert_wait64_leeway(&ts->ts_waitq, (event64_t)kwq,
		    THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	} else {
		thread_set_pending_block_hint(current_thread(), block_hint);

		wr = assert_wait_deadline_with_leeway((event_t)kwq, THREAD_ABORTSAFE,
		    TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
	}

	return wr;
}
242 | ||
/* Finish the delayed inheritor update started in psynch_wait_prepare. */
static void
psynch_wait_update_complete(struct turnstile *ts)
{
	assert(ts);
	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
}
249 | ||
/* Tear down the turnstile paired with an earlier psynch_wait_prepare. */
static void
psynch_wait_complete(uintptr_t kwq, struct turnstile **tstore)
{
	assert(tstore);
	turnstile_complete(kwq, tstore, NULL);
}
256 | ||
/*
 * Point the kwq's turnstile at a new owning thread for priority
 * inheritance, applying the update immediately (interlock held), then
 * release the turnstile reference.
 */
static void
psynch_wait_update_owner(uintptr_t kwq, thread_t owner,
    struct turnstile **tstore)
{
	struct turnstile *ts;

	ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL,
	    TURNSTILE_PTHREAD_MUTEX);

	turnstile_update_inheritor(ts, owner,
	    (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD));
	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
	turnstile_complete(kwq, tstore, NULL);
}
271 | ||
/* Run deferred turnstile cleanup for the current thread. */
static void
psynch_wait_cleanup(void)
{
	turnstile_cleanup();
}
277 | ||
/*
 * Wake the specific thread whose wait element is kwe. The owning uthread is
 * recovered from the embedded uu_save.uus_kwe member via __container_of.
 * With a turnstile (tstore != NULL) the woken thread is first made the
 * inheritor so priority hand-off happens before the wakeup; otherwise a
 * plain directed wakeup on the kwq event is used. Returns the wakeup kr.
 */
static kern_return_t
psynch_wait_wakeup(uintptr_t kwq, struct ksyn_waitq_element *kwe,
    struct turnstile **tstore)
{
	struct uthread *uth;
	struct turnstile *ts;
	kern_return_t kr;

	uth = __container_of(kwe, struct uthread, uu_save.uus_kwe);
	assert(uth);

	if (tstore) {
		ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL,
		    TURNSTILE_PTHREAD_MUTEX);
		turnstile_update_inheritor(ts, uth->uu_thread,
		    (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD));

		kr = waitq_wakeup64_thread(&ts->ts_waitq, (event64_t)kwq,
		    uth->uu_thread, THREAD_AWAKENED);

		turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
		turnstile_complete(kwq, tstore, NULL);
	} else {
		kr = thread_wakeup_thread((event_t)kwq, uth->uu_thread);
	}

	return kr;
}
306 | ||
307 | /* kernel (core) to kext shims */ | |
308 | ||
309 | void | |
310 | pthread_init(void) | |
311 | { | |
312 | if (!pthread_functions) { | |
313 | panic("pthread kernel extension not loaded (function table is NULL)."); | |
314 | } | |
315 | pthread_functions->pthread_init(); | |
316 | } | |
317 | ||
/* Forward per-process psynch hash setup to the kext. */
void
pth_proc_hashinit(proc_t p)
{
	pthread_functions->pth_proc_hashinit(p);
}
323 | ||
/* Forward per-process psynch hash teardown to the kext. */
void
pth_proc_hashdelete(proc_t p)
{
	pthread_functions->pth_proc_hashdelete(p);
}
329 | ||
330 | /* syscall shims */ | |
/* syscall shims */
/* Syscall shim: forward bsdthread_create to pthread.kext. */
int
bsdthread_create(struct proc *p, struct bsdthread_create_args *uap, user_addr_t *retval)
{
	return pthread_functions->bsdthread_create(p, uap->func, uap->func_arg, uap->stack, uap->pthread, uap->flags, retval);
}
336 | ||
337 | int | |
338 | bsdthread_register(struct proc *p, struct bsdthread_register_args *uap, __unused int32_t *retval) | |
339 | { | |
340 | kern_return_t kr; | |
341 | static_assert(offsetof(struct bsdthread_register_args, threadstart) + sizeof(user_addr_t) == | |
342 | offsetof(struct bsdthread_register_args, wqthread)); | |
343 | kr = machine_thread_function_pointers_convert_from_user(current_thread(), &uap->threadstart, 2); | |
344 | assert(kr == KERN_SUCCESS); | |
345 | ||
346 | if (pthread_functions->version >= 1) { | |
347 | return pthread_functions->bsdthread_register2(p, uap->threadstart, | |
348 | uap->wqthread, uap->flags, uap->stack_addr_hint, | |
349 | uap->targetconc_ptr, uap->dispatchqueue_offset, | |
350 | uap->tsd_offset, retval); | |
351 | } else { | |
352 | return pthread_functions->bsdthread_register(p, uap->threadstart, | |
353 | uap->wqthread, uap->flags, uap->stack_addr_hint, | |
354 | uap->targetconc_ptr, uap->dispatchqueue_offset, | |
355 | retval); | |
356 | } | |
357 | } | |
358 | ||
/*
 * Syscall shim: thread self-termination. Workqueue threads additionally
 * notify the workq subsystem before the kext frees the stack/ports.
 */
int
bsdthread_terminate(struct proc *p, struct bsdthread_terminate_args *uap, int32_t *retval)
{
	thread_t th = current_thread();
	if (thread_get_tag(th) & THREAD_TAG_WORKQUEUE) {
		workq_thread_terminate(p, get_bsdthread_info(th));
	}
	return pthread_functions->bsdthread_terminate(p, uap->stackaddr, uap->freesize, uap->port, uap->sem, retval);
}
368 | ||
/* Syscall shim: return the calling thread's unique 64-bit id. */
int
thread_selfid(struct proc *p, __unused struct thread_selfid_args *uap, uint64_t *retval)
{
	return pthread_functions->thread_selfid(p, retval);
}
374 | ||
375 | /* pthread synchroniser syscalls */ | |
376 | ||
/* Syscall shim: forward psynch_mutexwait to pthread.kext. */
int
psynch_mutexwait(proc_t p, struct psynch_mutexwait_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_mutexwait(p, uap->mutex, uap->mgen, uap->ugen, uap->tid, uap->flags, retval);
}
382 | ||
/* Syscall shim: forward psynch_mutexdrop to pthread.kext. */
int
psynch_mutexdrop(proc_t p, struct psynch_mutexdrop_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_mutexdrop(p, uap->mutex, uap->mgen, uap->ugen, uap->tid, uap->flags, retval);
}
388 | ||
/* Syscall shim: forward psynch_cvbroad (condvar broadcast) to pthread.kext. */
int
psynch_cvbroad(proc_t p, struct psynch_cvbroad_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_cvbroad(p, uap->cv, uap->cvlsgen, uap->cvudgen, uap->flags, uap->mutex, uap->mugen, uap->tid, retval);
}
394 | ||
/* Syscall shim: forward psynch_cvsignal (condvar signal) to pthread.kext. */
int
psynch_cvsignal(proc_t p, struct psynch_cvsignal_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_cvsignal(p, uap->cv, uap->cvlsgen, uap->cvugen, uap->thread_port, uap->mutex, uap->mugen, uap->tid, uap->flags, retval);
}
400 | ||
/* Syscall shim: forward psynch_cvwait (condvar wait) to pthread.kext. */
int
psynch_cvwait(proc_t p, struct psynch_cvwait_args * uap, uint32_t * retval)
{
	return pthread_functions->psynch_cvwait(p, uap->cv, uap->cvlsgen, uap->cvugen, uap->mutex, uap->mugen, uap->flags, uap->sec, uap->nsec, retval);
}
406 | ||
/* Syscall shim: forward psynch_cvclrprepost (clear condvar prepost) to pthread.kext. */
int
psynch_cvclrprepost(proc_t p, struct psynch_cvclrprepost_args * uap, int *retval)
{
	return pthread_functions->psynch_cvclrprepost(p, uap->cv, uap->cvgen, uap->cvugen, uap->cvsgen, uap->prepocnt, uap->preposeq, uap->flags, retval);
}
412 | ||
/* Syscall shim: forward psynch_rw_longrdlock to pthread.kext. */
int
psynch_rw_longrdlock(proc_t p, struct psynch_rw_longrdlock_args * uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_longrdlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}
418 | ||
/* Syscall shim: forward psynch_rw_rdlock to pthread.kext. */
int
psynch_rw_rdlock(proc_t p, struct psynch_rw_rdlock_args * uap, uint32_t * retval)
{
	return pthread_functions->psynch_rw_rdlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}
424 | ||
/* Syscall shim: forward psynch_rw_unlock to pthread.kext. */
int
psynch_rw_unlock(proc_t p, struct psynch_rw_unlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_unlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}
430 | ||
/* Retired syscall: psynch_rw_unlock2 is no longer supported. */
int
psynch_rw_unlock2(__unused proc_t p, __unused struct psynch_rw_unlock2_args *uap, __unused uint32_t *retval)
{
	return ENOTSUP;
}
436 | ||
/* Syscall shim: forward psynch_rw_wrlock to pthread.kext. */
int
psynch_rw_wrlock(proc_t p, struct psynch_rw_wrlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_wrlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}
442 | ||
/* Syscall shim: forward psynch_rw_yieldwrlock to pthread.kext. */
int
psynch_rw_yieldwrlock(proc_t p, struct psynch_rw_yieldwrlock_args *uap, uint32_t *retval)
{
	return pthread_functions->psynch_rw_yieldwrlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval);
}
448 | ||
/* Retired syscall: psynch_rw_upgrade is a no-op that reports success. */
int
psynch_rw_upgrade(__unused proc_t p, __unused struct psynch_rw_upgrade_args * uap, __unused uint32_t *retval)
{
	return 0;
}
454 | ||
/* Retired syscall: psynch_rw_downgrade is a no-op that reports success. */
int
psynch_rw_downgrade(__unused proc_t p, __unused struct psynch_rw_downgrade_args * uap, __unused int *retval)
{
	return 0;
}
460 | ||
461 | void | |
462 | kdp_pthread_find_owner(thread_t thread, struct stackshot_thread_waitinfo *waitinfo) | |
463 | { | |
464 | if (pthread_functions->pthread_find_owner) | |
465 | pthread_functions->pthread_find_owner(thread, waitinfo); | |
466 | } | |
467 | ||
468 | void * | |
469 | kdp_pthread_get_thread_kwq(thread_t thread) | |
470 | { | |
471 | if (pthread_functions->pthread_get_thread_kwq) | |
472 | return pthread_functions->pthread_get_thread_kwq(thread); | |
473 | ||
474 | return NULL; | |
475 | } | |
476 | ||
/*
 * Called before a workqueue thread parks or exits; if it still owns kevent
 * workloops, report the ownership leak (may terminate the process).
 */
void
thread_will_park_or_terminate(thread_t thread)
{
	if (thread_owned_workloops_count(thread)) {
		(void)kevent_exit_on_workloop_ownership_leak(thread);
	}
}
484 | ||
485 | /* | |
486 | * The callbacks structure (defined in pthread_shims.h) contains a collection | |
487 | * of kernel functions that were not deemed sensible to expose as a KPI to all | |
488 | * kernel extensions. So the kext is given them in the form of a structure of | |
489 | * function pointers. | |
490 | */ | |
static const struct pthread_callbacks_s pthread_callbacks = {
	.version = PTHREAD_SHIMS_VERSION,
	.config_thread_max = CONFIG_THREAD_MAX,
	.get_task_threadmax = get_task_threadmax,

	/* proc field accessors (generated by PTHREAD_STRUCT_ACCESSOR above) */
	.proc_get_threadstart = proc_get_threadstart,
	.proc_set_threadstart = proc_set_threadstart,
	.proc_get_pthsize = proc_get_pthsize,
	.proc_set_pthsize = proc_set_pthsize,
	.proc_get_wqthread = proc_get_wqthread,
	.proc_set_wqthread = proc_set_wqthread,
	.proc_set_dispatchqueue_offset = proc_set_dispatchqueue_offset,
	.proc_get_pthhash = proc_get_pthhash,
	.proc_set_pthhash = proc_set_pthhash,
	.proc_get_register = proc_get_register,
	.proc_set_register = proc_set_register,

	/* kernel IPI interfaces */
	.ipc_port_copyout_send = ipc_port_copyout_send,
	.task_get_ipcspace = get_task_ipcspace,
	.vm_map_page_info = vm_map_page_info,
	.thread_set_wq_state32 = thread_set_wq_state32,
#if !defined(__arm__)
	.thread_set_wq_state64 = thread_set_wq_state64,
#endif

	/* uthread helpers */
	.uthread_get_uukwe = uthread_get_uukwe,
	.uthread_set_returnval = uthread_set_returnval,
	.uthread_is_cancelled = uthread_is_cancelled,

	/* userspace-return paths (noreturn) */
	.thread_exception_return = pthread_returning_to_userspace,
	.thread_bootstrap_return = pthread_bootstrap_return,
	.unix_syscall_return = unix_syscall_return,

	.get_bsdthread_info = (void*)get_bsdthread_info,
	.thread_policy_set_internal = thread_policy_set_internal,
	.thread_policy_get = thread_policy_get,

	.__pthread_testcancel = __pthread_testcancel,

	.mach_port_deallocate = mach_port_deallocate,
	.semaphore_signal_internal_trap = semaphore_signal_internal_trap,
	.current_map = _current_map,
	.thread_create = thread_create,
	.thread_resume = thread_resume,

	.convert_thread_to_port = convert_thread_to_port,

	/* TSD offset bookkeeping */
	.proc_get_stack_addr_hint = proc_get_stack_addr_hint,
	.proc_set_stack_addr_hint = proc_set_stack_addr_hint,
	.proc_get_pthread_tsd_offset = proc_get_pthread_tsd_offset,
	.proc_set_pthread_tsd_offset = proc_set_pthread_tsd_offset,
	.proc_get_mach_thread_self_tsd_offset = proc_get_mach_thread_self_tsd_offset,
	.proc_set_mach_thread_self_tsd_offset = proc_set_mach_thread_self_tsd_offset,

	.thread_set_tsd_base = thread_set_tsd_base,

	/* QoS / override support for userspace synchronization */
	.proc_usynch_get_requested_thread_qos = proc_usynch_get_requested_thread_qos,

	.qos_main_thread_active = qos_main_thread_active,
	.thread_set_voucher_name = thread_set_voucher_name,

	.proc_usynch_thread_qos_add_override_for_resource = proc_usynch_thread_qos_add_override_for_resource,
	.proc_usynch_thread_qos_remove_override_for_resource = proc_usynch_thread_qos_remove_override_for_resource,

	.thread_set_tag = thread_set_tag,
	.thread_get_tag = thread_get_tag,

	.proc_set_return_to_kernel_offset = proc_set_return_to_kernel_offset,
	.thread_will_park_or_terminate = thread_will_park_or_terminate,

	.proc_get_user_stack = proc_get_user_stack,
	.task_findtid = task_findtid,
	.thread_deallocate_safe = thread_deallocate_safe,

	/* turnstile-aware psynch wait/wake helpers */
	.psynch_wait_prepare = psynch_wait_prepare,
	.psynch_wait_update_complete = psynch_wait_update_complete,
	.psynch_wait_complete = psynch_wait_complete,
	.psynch_wait_cleanup = psynch_wait_cleanup,
	.psynch_wait_wakeup = psynch_wait_wakeup,
	.psynch_wait_update_owner = psynch_wait_update_owner,
};
573 | ||
/* exported callback table pointer, and the kext's function table (set at
 * registration time by pthread_kext_register; NULL until the kext loads) */
pthread_callbacks_t pthread_kern = &pthread_callbacks;
pthread_functions_t pthread_functions = NULL;
576 | ||
577 | /* | |
578 | * pthread_kext_register is called by pthread.kext upon load, it has to provide | |
579 | * us with a function pointer table of pthread internal calls. In return, this | |
580 | * file provides it with a table of function pointers it needs. | |
581 | */ | |
582 | ||
583 | void | |
584 | pthread_kext_register(pthread_functions_t fns, pthread_callbacks_t *callbacks) | |
585 | { | |
586 | if (pthread_functions != NULL) { | |
587 | panic("Re-initialisation of pthread kext callbacks."); | |
588 | } | |
589 | ||
590 | if (callbacks != NULL) { | |
591 | *callbacks = &pthread_callbacks; | |
592 | } else { | |
593 | panic("pthread_kext_register called without callbacks pointer."); | |
594 | } | |
595 | ||
596 | if (fns) { | |
597 | pthread_functions = fns; | |
598 | } | |
599 | } |