/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * POSIX Pthread Library
 */

#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <mach/mach_sync_ipc.h>

#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/ulock.h>

#include <machine/vmparam.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
#if __has_include(<ptrauth.h>)
#include <ptrauth.h>
#endif // __has_include(<ptrauth.h>)
#include <os/thread_self_restrict.h>

// Default stack size is 512KB; independent of the main thread's stack size.
#define DEFAULT_STACK_SIZE (size_t)(512 * 1024)
/*
 * The pthread may be offset into a page. In that event, by contract
 * with the kernel, the allocation will extend PTHREAD_SIZE from the
 * start of the next page. There's also one page worth of allocation
 * below stacksize for the guard page. <rdar://problem/19941744>
 */
#define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct pthread_s)))
#define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size)
#define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize))
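/*
 * Illustrative sketch (not part of the original source): for a non-custom
 * stack the macros above describe one contiguous allocation laid out as
 * [guard page][stack][pthread_t], with `stackaddr` at the top of the stack:
 *
 *	void *stackaddr = ...;				// top of stack, page aligned
 *	size_t stacksize = DEFAULT_STACK_SIZE;
 *	void *base = PTHREAD_ALLOCADDR(stackaddr, stacksize);	// guard page start
 *	size_t len = PTHREAD_ALLOCSIZE(stackaddr, stacksize);	// guard + stack + pthread_t
 */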
static const pthread_attr_t _pthread_attr_default = {
	.sig       = _PTHREAD_ATTR_SIG,
	.detached  = PTHREAD_CREATE_JOINABLE,
	.inherit   = _PTHREAD_DEFAULT_INHERITSCHED,
	.policy    = _PTHREAD_DEFAULT_POLICY,
	.defaultguardpage = true,
	// compile time constant for _pthread_default_priority(0)
	.qosclass  = (1U << (THREAD_QOS_LEGACY - 1 + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT)) |
			((uint8_t)-1 & _PTHREAD_PRIORITY_PRIORITY_MASK),
};
#if PTHREAD_LAYOUT_SPI

const struct pthread_layout_offsets_s pthread_layout_offsets = {
	.plo_pthread_tsd_base_offset = offsetof(struct pthread_s, tsd),
	.plo_pthread_tsd_base_address_offset = 0,
	.plo_pthread_tsd_entry_size = sizeof(((struct pthread_s *)NULL)->tsd[0]),
};

#endif // PTHREAD_LAYOUT_SPI
//
// Global exported variables
//

// This global should be used (carefully) by anyone needing to know if a
// pthread (other than the main thread) has been created.
int __is_threaded = 0;
const int __unix_conforming = 1; // we're always conformant, but it's exported
//
// Global internal variables
//

// _pthread_list_lock protects _pthread_count, access to the __pthread_head
// list. Externally imported by pthread_cancelable.c.
struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
_pthread_lock _pthread_list_lock = _PTHREAD_LOCK_INITIALIZER;

// The main thread's pthread_t
#if VARIANT_DYLD
struct pthread_s _main_thread OS_ALIGNED(64);
#else // VARIANT_DYLD
pthread_t _main_thread_ptr;
void *(*_pthread_malloc)(size_t);
void (*_pthread_free)(void *);
#endif // VARIANT_DYLD

#if PTHREAD_DEBUG_LOG
int _pthread_debuglog;
uint64_t _pthread_debugstart;
#endif // PTHREAD_DEBUG_LOG

//
// Global static variables
//
static bool __workq_newapi;
static uint8_t default_priority;
#if !VARIANT_DYLD
static uint8_t max_priority;
static uint8_t min_priority;
#endif // !VARIANT_DYLD
static int _pthread_count = 1;
static int pthread_concurrency;
uintptr_t _pthread_ptr_munge_token;

static void (*exitf)(int) = __exit;
//
// work queue support data
//

static void
__pthread_invalid_keventfunction(void **events, int *nevents)
{
	PTHREAD_CLIENT_CRASH(0, "Invalid kqworkq setup");
}

static void
__pthread_invalid_workloopfunction(uint64_t *workloop_id, void **events,
		int *nevents)
{
	PTHREAD_CLIENT_CRASH(0, "Invalid kqwl setup");
}

static pthread_workqueue_function2_t __libdispatch_workerfunction;
static pthread_workqueue_function_kevent_t __libdispatch_keventfunction =
		&__pthread_invalid_keventfunction;
static pthread_workqueue_function_workloop_t __libdispatch_workloopfunction =
		&__pthread_invalid_workloopfunction;
static int __pthread_supported_features; // supported feature set
#if defined(__i386__) || defined(__x86_64__)
static mach_vm_address_t __pthread_stack_hint = 0xB0000000;
#elif defined(__arm__) || defined(__arm64__)
static mach_vm_address_t __pthread_stack_hint = 0x30000000;
#else
#error no __pthread_stack_hint for this architecture
#endif
//
// Function prototypes
//

// pthread primitives
static inline void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs,
		void *stack, size_t stacksize, void *freeaddr, size_t freesize);

#if VARIANT_DYLD
static void _pthread_set_self_dyld(void);
#endif // VARIANT_DYLD
static inline void _pthread_set_self_internal(pthread_t);

static inline void __pthread_started_thread(pthread_t t);

static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;

static inline void _pthread_introspection_thread_create(pthread_t t);
static inline void _pthread_introspection_thread_start(pthread_t t);
static inline void _pthread_introspection_thread_terminate(pthread_t t);
static inline void _pthread_introspection_thread_destroy(pthread_t t);
/*
 * Flags field passed to bsdthread_create and back in pthread_start
 * 31  <---------------------------------> 0
 * _________________________________________
 * | flags(8) | policy(8) | importance(16) |
 * -----------------------------------------
 */
#define PTHREAD_START_CUSTOM		0x01000000 // <rdar://problem/34501401>
#define PTHREAD_START_SETSCHED		0x02000000
// was PTHREAD_START_DETACHED		0x04000000
#define PTHREAD_START_QOSCLASS		0x08000000
#define PTHREAD_START_TSD_BASE_SET	0x10000000
#define PTHREAD_START_SUSPENDED	0x20000000
#define PTHREAD_START_QOSCLASS_MASK	0x00ffffff
#define PTHREAD_START_POLICY_BITSHIFT	16
#define PTHREAD_START_POLICY_MASK	0xff
#define PTHREAD_START_IMPORTANCE_MASK	0xffff
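/*
 * Illustrative sketch (not part of the original source): packing the
 * start-flags word the way _pthread_create() below does for a thread with an
 * explicit scheduling policy; the policy and importance values are made up.
 *
 *	int policy = SCHED_RR, importance = 31;
 *	unsigned int flags = PTHREAD_START_CUSTOM | PTHREAD_START_SETSCHED;
 *	flags |= ((policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
 *	flags |= (importance & PTHREAD_START_IMPORTANCE_MASK);
 */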
#pragma mark pthread attrs

int
pthread_attr_destroy(pthread_attr_t *attr)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		attr->sig = 0;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*detachstate = attr->detached;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*inheritsched = attr->inherit;
		ret = 0;
	}
	return ret;
}

static OS_ALWAYS_INLINE
void
_pthread_attr_get_schedparam(const pthread_attr_t *attr,
		struct sched_param *param)
{
	if (attr->schedset) {
		*param = attr->param;
	} else {
		param->sched_priority = default_priority;
		param->quantum = 10; /* quantum isn't public yet */
	}
}

int
pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		_pthread_attr_get_schedparam(attr, param);
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*policy = attr->policy;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_init(pthread_attr_t *attr)
{
	*attr = _pthread_attr_default;
	return 0;
}
int
pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			(detachstate == PTHREAD_CREATE_JOINABLE ||
			detachstate == PTHREAD_CREATE_DETACHED)) {
		attr->detached = detachstate;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			(inheritsched == PTHREAD_INHERIT_SCHED ||
			inheritsched == PTHREAD_EXPLICIT_SCHED)) {
		attr->inherit = inheritsched;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		/* TODO: Validate sched_param fields */
		attr->param = *param;
		attr->schedset = 1;
		ret = 0;
	}
	return ret;
}

#define _PTHREAD_POLICY_IS_FIXEDPRI(x) ((x) == SCHED_RR || (x) == SCHED_FIFO)
int
pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG && (policy == SCHED_OTHER ||
			policy == SCHED_RR || policy == SCHED_FIFO)) {
		if (!_PTHREAD_POLICY_IS_FIXEDPRI(policy)) {
			/* non-fixedpri policy should remove cpupercent */
			attr->cpupercentset = 0;
		}
		attr->policy = policy;
		attr->policyset = 1;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setscope(pthread_attr_t *attr, int scope)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		if (scope == PTHREAD_SCOPE_SYSTEM) {
			// No attribute yet for the scope.
			ret = 0;
		} else if (scope == PTHREAD_SCOPE_PROCESS) {
			ret = ENOTSUP;
		}
	}
	return ret;
}

int
pthread_attr_getscope(const pthread_attr_t *attr, int *scope)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*scope = PTHREAD_SCOPE_SYSTEM;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = attr->stackaddr;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			((mach_vm_address_t)stackaddr & vm_page_mask) == 0) {
		attr->stackaddr = stackaddr;
		attr->defaultguardpage = false;
		attr->guardsize = 0;
		ret = 0;
	}
	return ret;
}

static size_t
_pthread_attr_stacksize(const pthread_attr_t *attr)
{
	return attr->stacksize ? attr->stacksize : DEFAULT_STACK_SIZE;
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stacksize = _pthread_attr_stacksize(attr);
		ret = 0;
	}
	return ret;
}
int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
#if TARGET_OS_OSX
	// If the caller is doing something reasonable, help them out.
	if (stacksize % 0x1000 == 0) {
		stacksize = round_page(stacksize);
	}
#endif // TARGET_OS_OSX

	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			((stacksize & vm_page_mask) == 0) &&
			stacksize >= PTHREAD_STACK_MIN) {
		attr->stacksize = stacksize;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
		*stacksize = _pthread_attr_stacksize(attr);
		ret = 0;
	}
	return ret;
}
// Per SUSv3, the stackaddr is the base address, the lowest addressable byte
// address. This is not the same as in pthread_attr_setstackaddr.
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			(((mach_vm_address_t)stackaddr & vm_page_mask) == 0) &&
			((stacksize & vm_page_mask) == 0) &&
			stacksize >= PTHREAD_STACK_MIN) {
		attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
		attr->stacksize = stacksize;
		ret = 0;
	}
	return ret;
}
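/*
 * Illustrative usage sketch (not in the original source): note the asymmetry
 * with pthread_attr_setstackaddr() above, which takes the top-of-stack
 * address, while pthread_attr_setstack() takes the lowest address.
 *
 *	pthread_attr_t attr;
 *	pthread_attr_init(&attr);
 *	size_t size = 16 * vm_page_size;	// page multiple, >= PTHREAD_STACK_MIN
 *	void *base = valloc(size);		// page-aligned lowest address
 *	pthread_attr_setstack(&attr, base, size);
 */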
int
pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
{
#if TARGET_OS_OSX
	// If the caller is doing something reasonable, help them out.
	if (guardsize % 0x1000 == 0) {
		guardsize = round_page(guardsize);
	}
#endif // TARGET_OS_OSX

	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			(guardsize & vm_page_mask) == 0) {
		/* Guardsize of 0 is valid, means no guard */
		attr->defaultguardpage = false;
		attr->guardsize = guardsize;
		ret = 0;
	}
	return ret;
}

static size_t
_pthread_attr_guardsize(const pthread_attr_t *attr)
{
	return attr->defaultguardpage ? vm_page_size : attr->guardsize;
}

int
pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*guardsize = _pthread_attr_guardsize(attr);
		ret = 0;
	}
	return ret;
}
int
pthread_attr_setcpupercent_np(pthread_attr_t *attr, int percent,
		unsigned long refillms)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG && percent < UINT8_MAX &&
			refillms < _PTHREAD_ATTR_REFILLMS_MAX && attr->policyset &&
			_PTHREAD_POLICY_IS_FIXEDPRI(attr->policy)) {
		attr->cpupercent = percent;
		attr->refillms = (uint32_t)(refillms & 0x00ffffff);
		attr->cpupercentset = 1;
		ret = 0;
	}
	return ret;
}
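/*
 * Illustrative usage sketch (not in the original source): the CPU percentage
 * limit only applies to fixed-priority policies, so the policy must be set
 * (recording policyset) before this call can succeed.
 *
 *	pthread_attr_t attr;
 *	pthread_attr_init(&attr);
 *	pthread_attr_setschedpolicy(&attr, SCHED_RR);
 *	pthread_attr_setcpupercent_np(&attr, 50, 10);	// 50% per 10ms refill period
 */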
#pragma mark pthread lifetime

// Allocate a thread structure, stack and guard page.
//
// The thread structure may optionally be placed in the same allocation as the
// stack, residing above the top of the stack. This cannot be done if a
// custom stack address is provided.
//
// Similarly the guard page cannot be allocated if a custom stack address is
// provided.
//
// The allocated thread structure is initialized with values that indicate how
// it should be freed.
static pthread_t
_pthread_allocate(const pthread_attr_t *attrs, void **stack,
		bool from_mach_thread)
{
	mach_vm_address_t allocaddr = __pthread_stack_hint;
	size_t allocsize, guardsize, stacksize, pthreadoff;
	kern_return_t kr;
	pthread_t t;

	if (os_unlikely(attrs->stacksize != 0 &&
			attrs->stacksize < PTHREAD_STACK_MIN)) {
		PTHREAD_CLIENT_CRASH(attrs->stacksize, "Stack size in attrs is too small");
	}

	if (os_unlikely((mach_vm_address_t)attrs->stackaddr & vm_page_mask)) {
		PTHREAD_CLIENT_CRASH(attrs->stackaddr, "Unaligned stack addr in attrs");
	}

	// Allocate a pthread structure if necessary

	if (attrs->stackaddr != NULL) {
		allocsize = PTHREAD_SIZE;
		guardsize = 0;
		pthreadoff = 0;
		// <rdar://problem/42588315> if the attrs struct specifies a custom
		// stack address but not a custom size, using ->stacksize here instead
		// of _pthread_attr_stacksize stores stacksize as zero, indicating
		// that the stack size is unknown.
		stacksize = attrs->stacksize;
	} else {
		guardsize = _pthread_attr_guardsize(attrs);
		stacksize = _pthread_attr_stacksize(attrs) + PTHREAD_T_OFFSET;
		pthreadoff = stacksize + guardsize;
		allocsize = pthreadoff + PTHREAD_SIZE;
		allocsize = mach_vm_round_page(allocsize);
	}

	kr = mach_vm_map(mach_task_self(), &allocaddr, allocsize, vm_page_size - 1,
			VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
			0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS) {
		kr = mach_vm_allocate(mach_task_self(), &allocaddr, allocsize,
				VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
	} else if (__syscall_logger && !from_mach_thread) {
		// libsyscall will not output malloc stack logging events when
		// VM_MEMORY_STACK is passed in to facilitate mach thread promotion.
		// To avoid losing the stack traces for normal p-thread create
		// operations, libpthread must pretend to be the vm syscall and log
		// the allocations. <rdar://36418708>
		int eventTypeFlags = stack_logging_type_vm_allocate |
				stack_logging_type_mapped_file_or_shared_mem;
		__syscall_logger(eventTypeFlags | VM_MAKE_TAG(VM_MEMORY_STACK),
				(uintptr_t)mach_task_self(), (uintptr_t)allocsize, 0,
				(uintptr_t)allocaddr, 0);
	}

	if (kr != KERN_SUCCESS) {
		*stack = NULL;
		return NULL;
	} else if (__syscall_logger && !from_mach_thread) {
		// libsyscall will not output malloc stack logging events when
		// VM_MEMORY_STACK is passed in to facilitate mach thread promotion.
		// To avoid losing the stack traces for normal p-thread create
		// operations, libpthread must pretend to be the vm syscall and log
		// the allocations. <rdar://36418708>
		int eventTypeFlags = stack_logging_type_vm_allocate;
		__syscall_logger(eventTypeFlags | VM_MAKE_TAG(VM_MEMORY_STACK),
				(uintptr_t)mach_task_self(), (uintptr_t)allocsize, 0,
				(uintptr_t)allocaddr, 0);
	}

	// The stack grows down.
	// Set the guard page at the lowest address of the
	// newly allocated stack. Return the highest address
	// of the stack.
	if (guardsize) {
		(void)mach_vm_protect(mach_task_self(), allocaddr, guardsize,
				FALSE, VM_PROT_NONE);
	}

	// Thread structure resides at the top of the stack (when using a
	// custom stack, allocsize == PTHREAD_SIZE, so this places the pthread_t
	// at allocaddr).
	t = (pthread_t)(allocaddr + pthreadoff);
	if (attrs->stackaddr) {
		*stack = attrs->stackaddr;
	} else {
		*stack = t;
	}

	_pthread_struct_init(t, attrs, *stack, stacksize, allocaddr, allocsize);
	return t;
}
static void
_pthread_deallocate(pthread_t t, bool from_mach_thread)
{
	kern_return_t ret;

	// Don't free the main thread.
	if (t != main_thread()) {
		if (!from_mach_thread) { // see __pthread_add_thread
			_pthread_introspection_thread_destroy(t);
		}
		ret = mach_vm_deallocate(mach_task_self(), t->freeaddr, t->freesize);
		if (ret != KERN_SUCCESS) {
			PTHREAD_INTERNAL_CRASH(ret, "Unable to deallocate stack");
		}
	}
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreturn-stack-address"

OS_NOINLINE
static void*
_pthread_current_stack_address(void)
{
	int a;
	return &a;
}

#pragma clang diagnostic pop

static void
_pthread_joiner_wake(pthread_t thread)
{
	uint32_t *exit_gate = &thread->tl_exit_gate;

	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, exit_gate, 0);
		if (ret == 0 || ret == -ENOENT) {
			return;
		}
		if (ret != -EINTR) {
			PTHREAD_INTERNAL_CRASH(-ret, "pthread_join() wake failure");
		}
	}
}

static void
_pthread_dealloc_reply_port(pthread_t self)
{
	mach_port_t port = _pthread_tsd_slot(self, MIG_REPLY);
	if (port != MACH_PORT_NULL) {
		// this will also set the TSD to MACH_PORT_NULL
		mig_dealloc_reply_port(port);
	}
}

static void
_pthread_dealloc_special_reply_port(pthread_t self)
{
	mach_port_t port = _pthread_tsd_slot(self, MACH_SPECIAL_REPLY);
	if (port != MACH_PORT_NULL) {
		_pthread_tsd_slot(self, MACH_SPECIAL_REPLY) = MACH_PORT_NULL;
		thread_destruct_special_reply_port(port, THREAD_SPECIAL_REPLY_PORT_ALL);
	}
}
// Terminates the thread if called from the currently running thread.
OS_NORETURN OS_NOINLINE OS_NOT_TAIL_CALLED
static void
_pthread_terminate(pthread_t t, void *exit_value)
{
	_pthread_introspection_thread_terminate(t);

	uintptr_t freeaddr = (uintptr_t)t->freeaddr;
	size_t freesize = t->freesize;
	bool should_exit;

	// the size of just the stack
	size_t freesize_stack = t->freesize;

	// We usually pass our structure+stack to bsdthread_terminate to free, but
	// if we get told to keep the pthread_t structure around then we need to
	// adjust the free size and addr in the pthread_t to just refer to the
	// structure and not the stack. If we do end up deallocating the
	// structure, this is useless work since no one can read the result, but we
	// can't do it after the call to pthread_remove_thread because it isn't
	// safe to dereference t after that.
	if ((void*)t > t->freeaddr && (void*)t < t->freeaddr + t->freesize){
		// Check to ensure the pthread structure itself is part of the
		// allocation described by freeaddr/freesize, in which case we split and
		// only deallocate the area below the pthread structure. In the event of a
		// custom stack, the freeaddr/size will be the pthread structure itself, in
		// which case we shouldn't free anything (the final else case).
		freesize_stack = trunc_page((uintptr_t)t - (uintptr_t)freeaddr);

		// describe just the remainder for deallocation when the pthread_t goes away
		t->freeaddr += freesize_stack;
		t->freesize -= freesize_stack;
	} else if (t == main_thread()) {
		freeaddr = t->stackaddr - pthread_get_stacksize_np(t);
		uintptr_t stackborder = trunc_page((uintptr_t)_pthread_current_stack_address());
		freesize_stack = stackborder - freeaddr;
	} else {
		freesize_stack = 0;
	}

	mach_port_t kport = _pthread_tsd_slot(t, MACH_THREAD_SELF);
	bool keep_thread_struct = false, needs_wake = false;
	semaphore_t custom_stack_sema = MACH_PORT_NULL;

	_pthread_dealloc_special_reply_port(t);
	_pthread_dealloc_reply_port(t);

	_pthread_lock_lock(&_pthread_list_lock);

	// This piece of code interacts with pthread_join. It will always:
	// - set tl_exit_gate to MACH_PORT_DEAD (thread exited)
	// - set tl_exit_value to the value passed to pthread_exit()
	// - decrement _pthread_count, so that we can exit the process when all
	//   threads exited even if not all of them were joined.
	t->tl_exit_gate = MACH_PORT_DEAD;
	t->tl_exit_value = exit_value;
	should_exit = (--_pthread_count <= 0);

	// If we see a joiner, we prepost that the join has to succeed,
	// and the joiner is committed to finish (even if it was canceled)
	if (t->tl_join_ctx) {
		custom_stack_sema = _pthread_joiner_prepost_wake(t); // unsets tl_joinable
		needs_wake = true;
	}

	// Joinable threads that have no joiner yet are kept on the thread list
	// so that pthread_join() can later discover the thread when it is joined,
	// and will have to do the pthread_t cleanup.
	if (t->tl_joinable) {
		t->tl_joiner_cleans_up = keep_thread_struct = true;
	} else {
		TAILQ_REMOVE(&__pthread_head, t, tl_plist);
	}

	_pthread_lock_unlock(&_pthread_list_lock);

	if (needs_wake) {
		// When we found a waiter, we want to drop the very contended list lock
		// before we do the syscall in _pthread_joiner_wake(). Then, we decide
		// who gets to cleanup the pthread_t between the joiner and the exiting
		// thread:
		// - the joiner tries to set tl_join_ctx to NULL
		// - the exiting thread tries to set tl_joiner_cleans_up to true
		// Whoever does it first commits the other guy to cleanup the pthread_t
		_pthread_joiner_wake(t);
		_pthread_lock_lock(&_pthread_list_lock);
		if (t->tl_join_ctx) {
			t->tl_joiner_cleans_up = true;
			keep_thread_struct = true;
		}
		_pthread_lock_unlock(&_pthread_list_lock);
	}

	//
	// /!\ dereferencing `t` past this point is not safe /!\
	//

	if (keep_thread_struct || t == main_thread()) {
		// Use the adjusted freesize of just the stack that we computed above.
		freesize = freesize_stack;
	} else {
		_pthread_introspection_thread_destroy(t);
	}

	// Check if there is nothing to free because the thread has a custom
	// stack allocation and is joinable.
	if (freesize == 0) {
		freeaddr = 0;
	}
	if (should_exit) {
		exitf(0);
	}
	__bsdthread_terminate((void *)freeaddr, freesize, kport, custom_stack_sema);
	PTHREAD_INTERNAL_CRASH(t, "thread didn't terminate");
}

OS_NORETURN
static void
_pthread_terminate_invoke(pthread_t t, void *exit_value)
{
	void *p = NULL;
	// <rdar://problem/25688492> During pthread termination there is a race
	// between pthread_join and pthread_terminate; if the joiner is responsible
	// for cleaning up the pthread_t struct, then it may destroy some part of the
	// stack with it on 16k OSes. So that this doesn't cause _pthread_terminate()
	// to crash because its stack has been removed from under its feet, just make
	// sure termination happens in a part of the stack that is not on the same
	// page as the pthread_t.
	if (trunc_page((uintptr_t)__builtin_frame_address(0)) ==
			trunc_page((uintptr_t)t)) {
		p = alloca(PTHREAD_T_OFFSET);
	}
	// And this __asm__ volatile is needed to stop the compiler from optimising
	// away the alloca() completely.
	__asm__ volatile ("" : : "r"(p));
	_pthread_terminate(t, exit_value);
}
#pragma mark pthread start / body

void
_pthread_start(pthread_t self, mach_port_t kport,
		__unused void *(*fun)(void *), __unused void *arg,
		__unused size_t stacksize, unsigned int pflags)
{
	if (os_unlikely(pflags & PTHREAD_START_SUSPENDED)) {
		PTHREAD_INTERNAL_CRASH(pflags,
				"kernel without PTHREAD_START_SUSPENDED support");
	}
	if (os_unlikely((pflags & PTHREAD_START_TSD_BASE_SET) == 0)) {
		PTHREAD_INTERNAL_CRASH(pflags,
				"thread_set_tsd_base() wasn't called by the kernel");
	}
	PTHREAD_DEBUG_ASSERT(MACH_PORT_VALID(kport));
	PTHREAD_DEBUG_ASSERT(_pthread_tsd_slot(self, MACH_THREAD_SELF) == kport);
	_pthread_validate_signature(self);
	_pthread_markcancel_if_canceled(self, kport);

	_pthread_set_self_internal(self);
	__pthread_started_thread(self);
	_pthread_exit(self, (self->fun)(self->arg));
}

static inline void
_pthread_struct_init(pthread_t t, const pthread_attr_t *attrs,
		void *stackaddr, size_t stacksize, void *freeaddr, size_t freesize)
{
	_pthread_init_signature(t);
	_pthread_tsd_slot(t, PTHREAD_SELF) = t;
	_pthread_tsd_slot(t, ERRNO) = &t->err_no;
	if (attrs->schedset == 0) {
		_pthread_tsd_slot(t, PTHREAD_QOS_CLASS) = attrs->qosclass;
	} else {
		_pthread_tsd_slot(t, PTHREAD_QOS_CLASS) =
				_pthread_unspecified_priority();
	}
	_pthread_tsd_slot(t, PTR_MUNGE) = _pthread_ptr_munge_token;
	t->tl_has_custom_stack = (attrs->stackaddr != NULL);

	_pthread_lock_init(&t->lock);

	t->stackaddr = stackaddr;
	t->stackbottom = stackaddr - stacksize;
	t->freeaddr = freeaddr;
	t->freesize = freesize;

	t->guardsize = _pthread_attr_guardsize(attrs);
	t->tl_joinable = (attrs->detached == PTHREAD_CREATE_JOINABLE);
	t->inherit = attrs->inherit;
	t->tl_policy = attrs->policy;
	t->schedset = attrs->schedset;
	_pthread_attr_get_schedparam(attrs, &t->tl_param);
	t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
}
#pragma mark pthread public interface

/* Non portable public api to know whether this process has (or had) at least
 * one thread apart from the main thread. There could be a race if a thread is
 * in the process of being created at the time of the call. It does not tell
 * whether there is more than one thread at this point in time.
 */
int
pthread_is_threaded_np(void)
{
	return __is_threaded;
}

mach_port_t
pthread_mach_thread_np(pthread_t t)
{
	mach_port_t kport = MACH_PORT_NULL;
	(void)_pthread_is_valid(t, &kport);
	return kport;
}

pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
	pthread_t p = NULL;

	/* No need to wait as mach port is already known */
	_pthread_lock_lock(&_pthread_list_lock);

	TAILQ_FOREACH(p, &__pthread_head, tl_plist) {
		if (_pthread_tsd_slot(p, MACH_THREAD_SELF) == kernel_thread) {
			break;
		}
	}

	_pthread_lock_unlock(&_pthread_list_lock);

	return p;
}
size_t
pthread_get_stacksize_np(pthread_t t)
{
	size_t size = 0;

	if (t == NULL) {
		return ESRCH; // XXX bug?
	}

#if TARGET_OS_OSX
	// The default rlimit based allocations will be provided with a stacksize
	// of the current limit and a freesize of the max. However, custom
	// allocations will just have the guard page to free. If we aren't in the
	// latter case, call into rlimit to determine the current stack size. In
	// the event that the current limit == max limit then we'll fall down the
	// fast path, but since it's unlikely that the limit is going to be lowered
	// after it's been changed to the max, we should be fine.
	//
	// Of course, on arm rlim_cur == rlim_max and there's only the one guard
	// page. So, we can skip all this there.
	if (t == main_thread()) {
		size_t stacksize = t->stackaddr - t->stackbottom;

		if (stacksize + vm_page_size != t->freesize) {
			// We want to call getrlimit() just once, as it's relatively
			// expensive.
			static size_t rlimit_stack;

			if (rlimit_stack == 0) {
				struct rlimit limit;
				int ret = getrlimit(RLIMIT_STACK, &limit);

				if (ret == 0) {
					rlimit_stack = (size_t) limit.rlim_cur;
				}
			}

			if (rlimit_stack == 0 || rlimit_stack > t->freesize) {
				return stacksize;
			} else {
				return round_page(rlimit_stack);
			}
		}
	}
#endif /* TARGET_OS_OSX */

	if (t == pthread_self() || t == main_thread()) {
		size = t->stackaddr - t->stackbottom;
		goto out;
	}

	if (_pthread_validate_thread_and_list_lock(t)) {
		size = t->stackaddr - t->stackbottom;
		_pthread_lock_unlock(&_pthread_list_lock);
	}

out:
	// <rdar://problem/42588315> binary compatibility issues force us to return
	// DEFAULT_STACK_SIZE here when we do not know the size of the stack
	return size ? size : DEFAULT_STACK_SIZE;
}

void *
pthread_get_stackaddr_np(pthread_t t)
{
	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == main_thread()) {
		return t->stackaddr;
	}

	if (!_pthread_validate_thread_and_list_lock(t)) {
		return (void *)(uintptr_t)ESRCH; // XXX bug?
	}

	void *addr = t->stackaddr;
	_pthread_lock_unlock(&_pthread_list_lock);
	return addr;
}

pthread_t
pthread_main_thread_np(void)
{
	return main_thread();
}

/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
	return pthread_self() == main_thread();
}
static int
_pthread_threadid_slow(pthread_t thread, uint64_t *thread_id)
{
	unsigned int info_count = THREAD_IDENTIFIER_INFO_COUNT;
	mach_port_t thport = _pthread_tsd_slot(thread, MACH_THREAD_SELF);
	struct thread_identifier_info info;
	kern_return_t kr;

	kr = thread_info(thport, THREAD_IDENTIFIER_INFO,
			(thread_info_t)&info, &info_count);
	if (kr == KERN_SUCCESS && info.thread_id) {
		*thread_id = info.thread_id;
#if __LP64__
		os_atomic_store(&thread->thread_id, info.thread_id, relaxed);
#else
		os_atomic_store_wide(&thread->thread_id, info.thread_id, relaxed);
#endif
		return 0;
	}
	return EINVAL;
}

/*
 * if we are passed in a pthread_t that is NULL, then we return the current
 * thread's thread_id. So folks don't have to call pthread_self, in addition to
 * us doing it, if they just want their thread_id.
 */
int
pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
{
	int res = 0;
	pthread_t self = pthread_self();

	if (thread_id == NULL) {
		return EINVAL;
	}

	if (thread == NULL || thread == self) {
		*thread_id = self->thread_id;
	} else if (!_pthread_validate_thread_and_list_lock(thread)) {
		res = ESRCH;
	} else {
#if __LP64__
		*thread_id = os_atomic_load(&thread->thread_id, relaxed);
#else
		*thread_id = os_atomic_load_wide(&thread->thread_id, relaxed);
#endif
		if (os_unlikely(*thread_id == 0)) {
			// there is a race at init because the thread sets its own TID.
			// correct this by asking mach
			res = _pthread_threadid_slow(thread, thread_id);
		}
		_pthread_lock_unlock(&_pthread_list_lock);
	}
	return res;
}
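/*
 * Illustrative usage sketch (not in the original source):
 *
 *	uint64_t tid;
 *	pthread_threadid_np(NULL, &tid);	// NULL means "the calling thread"
 */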
int
pthread_cpu_number_np(size_t *cpu_id)
{
	if (cpu_id == NULL) {
		errno = EINVAL;
		return errno;
	}

	*cpu_id = _os_cpu_number();
	return 0;
}

int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
	if (thread == pthread_self()) {
		strlcpy(threadname, thread->pthread_name, len);
		return 0;
	}

	if (!_pthread_validate_thread_and_list_lock(thread)) {
		return ESRCH;
	}

	strlcpy(threadname, thread->pthread_name, len);
	_pthread_lock_unlock(&_pthread_list_lock);
	return 0;
}
int
pthread_setname_np(const char *name)
{
	int res;
	pthread_t self = pthread_self();

	size_t len = 0;
	if (name != NULL) {
		len = strlen(name);
	}

	_pthread_validate_signature(self);

	res = __proc_info(5, getpid(), 2, (uint64_t)0, (void*)name, (int)len);
	if (res == 0) {
		if (len > 0) {
			strlcpy(self->pthread_name, name, MAXTHREADNAMESIZE);
		} else {
			bzero(self->pthread_name, MAXTHREADNAMESIZE);
		}
	}
	return res;
}
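/*
 * Illustrative usage sketch (not in the original source): unlike on some
 * other platforms, this variant of pthread_setname_np() only names the
 * calling thread, so it takes no pthread_t argument.
 *
 *	pthread_setname_np("com.example.worker");
 */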
#if TARGET_OS_OSX

void
pthread_jit_write_protect_np(int enable)
{
	if (!os_thread_self_restrict_rwx_is_supported()) {
		return;
	}

	if (enable) {
		os_thread_self_restrict_rwx_to_rx();
	} else {
		os_thread_self_restrict_rwx_to_rw();
	}
}

int pthread_jit_write_protect_supported_np()
{
	return os_thread_self_restrict_rwx_is_supported();
}

#endif // TARGET_OS_OSX
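/*
 * Illustrative usage sketch (not in the original source): a JIT toggles the
 * calling thread's view of MAP_JIT memory between writable and executable
 * around code emission (macOS only, and only where supported).
 *
 *	pthread_jit_write_protect_np(0);	// make MAP_JIT regions writable
 *	// ... emit instructions ...
 *	pthread_jit_write_protect_np(1);	// back to executable
 */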
static inline void
__pthread_add_thread(pthread_t t, mach_port_t self, bool from_mach_thread)
{
	_pthread_lock_lock(&_pthread_list_lock, self);
	TAILQ_INSERT_TAIL(&__pthread_head, t, tl_plist);
	_pthread_count++;
	_pthread_lock_unlock(&_pthread_list_lock, self);

	if (!from_mach_thread) {
		// PR-26275485: Mach threads will likely crash trying to run
		// introspection code. Since the fall out from the introspection
		// code not seeing the injected thread is likely less than crashing
		// in the introspection code, just don't make the call.
		_pthread_introspection_thread_create(t);
	}
}

static inline void
__pthread_undo_add_thread(pthread_t t, mach_port_t self)
{
	_pthread_lock_lock(&_pthread_list_lock, self);
	TAILQ_REMOVE(&__pthread_head, t, tl_plist);
	_pthread_count--;
	_pthread_lock_unlock(&_pthread_list_lock, self);
}

static inline void
__pthread_started_thread(pthread_t t)
{
	mach_port_t kport = _pthread_tsd_slot(t, MACH_THREAD_SELF);
	if (os_unlikely(!MACH_PORT_VALID(kport))) {
		PTHREAD_CLIENT_CRASH(kport,
				"Unable to allocate thread port, possible port leak");
	}
	_pthread_introspection_thread_start(t);
}
#define _PTHREAD_CREATE_NONE              0x0
#define _PTHREAD_CREATE_FROM_MACH_THREAD  0x1
#define _PTHREAD_CREATE_SUSPENDED         0x2

static int
_pthread_create(pthread_t *thread, const pthread_attr_t *attrs,
		void *(*start_routine)(void *), void *arg, unsigned int create_flags)
{
	pthread_t t = NULL;
	void *stack = NULL;
	bool from_mach_thread = (create_flags & _PTHREAD_CREATE_FROM_MACH_THREAD);
	mach_port_t self_kport;
	int rc = 0;

	if (attrs == NULL) {
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}

	unsigned int flags = PTHREAD_START_CUSTOM;
	if (attrs->schedset != 0) {
		struct sched_param p;
		_pthread_attr_get_schedparam(attrs, &p);
		flags |= PTHREAD_START_SETSCHED;
		flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
		flags |= (p.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
	} else if (attrs->qosclass != 0) {
		flags |= PTHREAD_START_QOSCLASS;
		flags |= (attrs->qosclass & PTHREAD_START_QOSCLASS_MASK);
	}
	if (create_flags & _PTHREAD_CREATE_SUSPENDED) {
		flags |= PTHREAD_START_SUSPENDED;
	}

	__is_threaded = 1;

	t = _pthread_allocate(attrs, &stack, from_mach_thread);
	if (t == NULL) {
		return EAGAIN;
	}

	if (os_unlikely(from_mach_thread)) {
		self_kport = mach_thread_self();
	} else {
		self_kport = _pthread_mach_thread_self_direct();
	}

	t->arg = arg;
	t->fun = start_routine;
	__pthread_add_thread(t, self_kport, from_mach_thread);

	if (__bsdthread_create(start_routine, arg, stack, t, flags) ==
			(pthread_t)-1) {
		if (errno == EMFILE) {
			PTHREAD_CLIENT_CRASH(0,
					"Unable to allocate thread port, possible port leak");
		}
		__pthread_undo_add_thread(t, self_kport);
		_pthread_deallocate(t, from_mach_thread);
		t = NULL;
		rc = EAGAIN;
	}
	if (from_mach_thread) {
		mach_port_deallocate(mach_task_self(), self_kport);
	}

	// n.b. if a thread is created detached and exits, t will be invalid
	*thread = t;
	return rc;
}
int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
	unsigned int flags = _PTHREAD_CREATE_NONE;
	return _pthread_create(thread, attr, start_routine, arg, flags);
}

int
pthread_create_from_mach_thread(pthread_t *thread, const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
	unsigned int flags = _PTHREAD_CREATE_FROM_MACH_THREAD;
	return _pthread_create(thread, attr, start_routine, arg, flags);
}

int
pthread_create_suspended_np(pthread_t *thread, const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
	unsigned int flags = _PTHREAD_CREATE_SUSPENDED;
	return _pthread_create(thread, attr, start_routine, arg, flags);
}
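/*
 * Illustrative usage sketch (not in the original source):
 *
 *	static void *worker(void *arg) { return arg; }
 *
 *	pthread_t th;
 *	if (pthread_create(&th, NULL, worker, NULL) == 0) {
 *		pthread_join(th, NULL);	// reclaims the thread's stack and pthread_t
 *	}
 */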
int
pthread_detach(pthread_t thread)
{
	int res = 0;
	bool join = false, wake = false;

	if (!_pthread_validate_thread_and_list_lock(thread)) {
		return ESRCH;
	}

	if (!thread->tl_joinable) {
		res = EINVAL;
	} else if (thread->tl_exit_gate == MACH_PORT_DEAD) {
		// Join the thread if it's already exited.
		join = true;
	} else {
		thread->tl_joinable = false; // _pthread_joiner_prepost_wake uses this
		if (thread->tl_join_ctx) {
			(void)_pthread_joiner_prepost_wake(thread);
			wake = true;
		}
	}
	_pthread_lock_unlock(&_pthread_list_lock);

	if (join) {
		pthread_join(thread, NULL);
	} else if (wake) {
		_pthread_joiner_wake(thread);
	}
	return res;
}

int
pthread_kill(pthread_t th, int sig)
{
	if (sig < 0 || sig > NSIG) {
		return EINVAL;
	}

	mach_port_t kport = MACH_PORT_NULL;
	if (!_pthread_is_valid(th, &kport)) {
		return ESRCH;
	}

	int ret = __pthread_kill(kport, sig);

	if (ret == -1) {
		ret = errno;
	}
	return ret;
}

int
__pthread_workqueue_setkill(int enable)
{
	return __bsdthread_ctl(BSDTHREAD_CTL_WORKQ_ALLOW_KILL, enable, 0, 0);
}
/*
 * Terminate a thread.
 */
static void
_pthread_exit(pthread_t self, void *exit_value)
{
	struct __darwin_pthread_handler_rec *handler;

	// Disable signal delivery while we clean up
	__disable_threadsignal(1);

	// Set cancel state to disable and type to deferred
	_pthread_setcancelstate_exit(self, exit_value);

	while ((handler = self->__cleanup_stack) != 0) {
		(handler->__routine)(handler->__arg);
		self->__cleanup_stack = handler->__next;
	}

	_pthread_tsd_cleanup(self);

	// Clear per-thread semaphore cache
	os_put_cached_semaphore(SEMAPHORE_NULL);

	_pthread_terminate_invoke(self, exit_value);
}

void
pthread_exit(void *exit_value)
{
	pthread_t self = pthread_self();
	if (os_unlikely(self->wqthread)) {
		PTHREAD_CLIENT_CRASH(0, "pthread_exit() called from a thread "
				"not created by pthread_create()");
	}
	_pthread_validate_signature(self);
	_pthread_exit(self, exit_value);
}

int
pthread_self_is_exiting_np(void)
{
	return (os_atomic_load(&pthread_self()->cancel_state, relaxed) &
			_PTHREAD_CANCEL_EXITING) != 0;
}
int
pthread_getschedparam(pthread_t thread, int *policy, struct sched_param *param)
{
	if (!_pthread_validate_thread_and_list_lock(thread)) {
		return ESRCH;
	}

	if (policy) *policy = thread->tl_policy;
	if (param) *param = thread->tl_param;
	_pthread_lock_unlock(&_pthread_list_lock);
	return 0;
}

static int
pthread_setschedparam_internal(pthread_t thread, mach_port_t kport, int policy,
		const struct sched_param *param)
{
	policy_base_data_t bases;
	policy_base_t base;
	mach_msg_type_number_t count;
	kern_return_t ret;

	if (os_unlikely(thread->wqthread)) {
		return ENOTSUP;
	}

	switch (policy) {
	case SCHED_OTHER:
		bases.ts.base_priority = param->sched_priority;
		base = (policy_base_t)&bases.ts;
		count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	case SCHED_FIFO:
		bases.fifo.base_priority = param->sched_priority;
		base = (policy_base_t)&bases.fifo;
		count = POLICY_FIFO_BASE_COUNT;
		break;
	case SCHED_RR:
		bases.rr.base_priority = param->sched_priority;
		/* quantum isn't public yet */
		bases.rr.quantum = param->quantum;
		base = (policy_base_t)&bases.rr;
		count = POLICY_RR_BASE_COUNT;
		break;
	default:
		return EINVAL;
	}
	ret = thread_policy(kport, policy, base, count, TRUE);
	return (ret != KERN_SUCCESS) ? EINVAL : 0;
}
int
pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
{
	mach_port_t kport = MACH_PORT_NULL;
	int bypass = 1;

	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == main_thread()) {
		_pthread_validate_signature(t);
		kport = _pthread_tsd_slot(t, MACH_THREAD_SELF);
	} else {
		bypass = 0;
		if (!_pthread_is_valid(t, &kport)) {
			return ESRCH;
		}
	}

	int res = pthread_setschedparam_internal(t, kport, policy, param);
	if (res) return res;

	if (bypass) {
		_pthread_lock_lock(&_pthread_list_lock);
	} else if (!_pthread_validate_thread_and_list_lock(t)) {
		// Ensure the thread is still valid.
		return ESRCH;
	}

	t->tl_policy = policy;
	t->tl_param = *param;
	_pthread_lock_unlock(&_pthread_list_lock);
	return 0;
}

int
sched_get_priority_min(int policy)
{
	return default_priority - 16;
}

int
sched_get_priority_max(int policy)
{
	return default_priority + 16;
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}
OS_NOINLINE
void
_pthread_set_self(pthread_t p)
{
#if VARIANT_DYLD
	if (os_likely(!p)) {
		return _pthread_set_self_dyld();
	}
#endif // VARIANT_DYLD
	_pthread_set_self_internal(p);
	_thread_set_tsd_base(&p->tsd[0]);
}

#if VARIANT_DYLD
// _pthread_set_self_dyld is noinline+noexport to allow the option for
// static libsyscall to adopt this as the entry point from mach_init if
// desired.
OS_NOINLINE
static void
_pthread_set_self_dyld(void)
{
	pthread_t p = main_thread();
	p->thread_id = __thread_selfid();

	if (os_unlikely(p->thread_id == -1ull)) {
		PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
	}

	// <rdar://problem/40930651> pthread self and the errno address are the
	// bare minimum TSD setup that dyld needs to actually function. Without
	// this, TSD access will fail and crash if it uses bits of Libc prior to
	// library initialization. __pthread_init will finish the initialization
	// during library init.
	_pthread_tsd_slot(p, PTHREAD_SELF) = p;
	_pthread_tsd_slot(p, ERRNO) = &p->err_no;
	_thread_set_tsd_base(&p->tsd[0]);
}
#endif // VARIANT_DYLD

static inline void
_pthread_set_self_internal(pthread_t p)
{
#if __LP64__
	os_atomic_store(&p->thread_id, __thread_selfid(), relaxed);
#else
	os_atomic_store_wide(&p->thread_id, __thread_selfid(), relaxed);
#endif

	if (os_unlikely(p->thread_id == -1ull)) {
		PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
	}
}
// <rdar://problem/28984807> pthread_once should have an acquire barrier
OS_ALWAYS_INLINE
static inline void
_os_once_acquire(os_once_t *predicate, void *context, os_function_t function)
{
	if (OS_EXPECT(os_atomic_load(predicate, acquire), ~0l) != ~0l) {
		_os_once(predicate, context, function);
		OS_COMPILER_CAN_ASSUME(*predicate == ~0l);
	}
}

struct _pthread_once_context {
	pthread_once_t *pthread_once;
	void (*routine)(void);
};

static void
__pthread_once_handler(void *context)
{
	struct _pthread_once_context *ctx = context;
	pthread_cleanup_push((void*)__os_once_reset, &ctx->pthread_once->once);
	ctx->routine();
	pthread_cleanup_pop(0);
	ctx->pthread_once->sig = _PTHREAD_ONCE_SIG;
}

int
pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
	struct _pthread_once_context ctx = { once_control, init_routine };
	do {
		_os_once_acquire(&once_control->once, &ctx, __pthread_once_handler);
	} while (once_control->sig == _PTHREAD_ONCE_SIG_init);
	return 0;
}
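/*
 * Illustrative usage sketch (not in the original source): init_table() runs
 * exactly once; every caller returns with acquire semantics relative to it.
 *
 *	static pthread_once_t once = PTHREAD_ONCE_INIT;
 *	static void init_table(void) { ... }	// one-time setup
 *
 *	pthread_once(&once, init_table);
 */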
int
pthread_getconcurrency(void)
{
	return pthread_concurrency;
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0) {
		return EINVAL;
	}
	pthread_concurrency = new_level;
	return 0;
}
#if !VARIANT_DYLD
/*
 * Perform package initialization - called automatically when application starts
 */

static unsigned long
_pthread_strtoul(const char *p, const char **endptr, int base)
{
	uintptr_t val = 0;

	// Expect hex string starting with "0x"
	if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') {
		p += 2;
		while (1) {
			char c = *p;
			if ('0' <= c && c <= '9') {
				val = (val << 4) + (c - '0');
			} else if ('a' <= c && c <= 'f') {
				val = (val << 4) + (c - 'a' + 10);
			} else if ('A' <= c && c <= 'F') {
				val = (val << 4) + (c - 'A' + 10);
			} else {
				break;
			}
			++p;
		}
	}

	*endptr = (char *)p;
	return val;
}
static int
parse_main_stack_params(const char *apple[],
		void **stackaddr,
		size_t *stacksize,
		void **allocaddr,
		size_t *allocsize)
{
	const char *p = _simple_getenv(apple, "main_stack");
	if (!p) return 0;

	int ret = 0;
	const char *s = p;

	*stackaddr = (void *)_pthread_strtoul(s, &s, 16);
	if (*s != ',') goto out;

	*stacksize = _pthread_strtoul(s + 1, &s, 16);
	if (*s != ',') goto out;

	*allocaddr = (void *)_pthread_strtoul(s + 1, &s, 16);
	if (*s != ',') goto out;

	*allocsize = _pthread_strtoul(s + 1, &s, 16);
	if (*s != ',' && *s != 0) goto out;

	ret = 1;
out:
	bzero((char *)p, strlen(p));
	return ret;
}
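/*
 * Illustrative sketch (not in the original source): the kernel publishes the
 * main thread's stack layout in the apple[] vector as four comma-separated
 * hex fields; the values below are made up.
 *
 *	main_stack=0x7ff7bfc00000,0x800000,0x7ff7bb400000,0x4800000
 *	           (stackaddr)    (stacksize) (allocaddr) (allocsize)
 */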
static void
parse_ptr_munge_params(const char *envp[], const char *apple[])
{
	const char *p, *s;
	uintptr_t token = 0;
	p = _simple_getenv(apple, "ptr_munge");
	if (p) {
		token = _pthread_strtoul(p, &s, 16);
		bzero((char *)p, strlen(p));
	}
	/*
	 * In DEBUG we allow the environment variable to override the kernel
	 * security setting, including setting it to 0 which is helpful during
	 * debugging sessions.
	 *
	 * For other cases, the token must be set by the kernel or the environment
	 * variable to a non 0 value.
	 */
#if !DEBUG
	if (!token) {
#endif
		p = _simple_getenv(envp, "PTHREAD_PTR_MUNGE_TOKEN");
		if (p) {
			uintptr_t t = _pthread_strtoul(p, &s, 16);
			if (t) token = t;
		}
#if !DEBUG
	}
	if (!token) {
		PTHREAD_INTERNAL_CRASH(token, "Token from the kernel is 0");
	}
#endif // !DEBUG
	_pthread_ptr_munge_token = token;
	// we need to refresh the main thread signature now that we changed
	// the munge token. We need to do it while TSAN will not look at it
	_pthread_init_signature(_main_thread_ptr);
}
int
__pthread_init(const struct _libpthread_functions *pthread_funcs,
		const char *envp[], const char *apple[],
		const struct ProgramVars *vars __unused)
{
	// Save our provided pushed-down functions
	if (pthread_funcs) {
		exitf = pthread_funcs->exit;

		if (pthread_funcs->version >= 2) {
			_pthread_malloc = pthread_funcs->malloc;
			_pthread_free = pthread_funcs->free;
		}
	}

	// libpthread.a in dyld "owns" the main thread structure itself and sets
	// up the tsd to point to it. So take the pthread_self() from there
	// and make it our main thread point.
	pthread_t thread = _pthread_self_direct();
	if (os_unlikely(thread == NULL)) {
		PTHREAD_INTERNAL_CRASH(0, "PTHREAD_SELF TSD not initialized");
	}
	_main_thread_ptr = thread;
	// this needs to be done early so that pthread_self() works in TSAN
	_pthread_init_signature(thread);

	//
	// Get host information
	//

	kern_return_t kr;
	host_flavor_t flavor = HOST_PRIORITY_INFO;
	mach_msg_type_number_t count = HOST_PRIORITY_INFO_COUNT;
	host_priority_info_data_t priority_info;
	host_t host = mach_host_self();
	kr = host_info(host, flavor, (host_info_t)&priority_info, &count);
	if (kr != KERN_SUCCESS) {
		PTHREAD_INTERNAL_CRASH(kr, "host_info() failed");
	}
	default_priority = (uint8_t)priority_info.user_priority;
	min_priority = (uint8_t)priority_info.minimum_priority;
	max_priority = (uint8_t)priority_info.maximum_priority;
	mach_port_deallocate(mach_task_self(), host);

	//
	// Set up the main thread structure
	//

	// Get the address and size of the main thread's stack from the kernel.
	void *stackaddr = 0;
	size_t stacksize = 0;
	void *allocaddr = 0;
	size_t allocsize = 0;
	if (!parse_main_stack_params(apple, &stackaddr, &stacksize, &allocaddr, &allocsize) ||
			stackaddr == NULL || stacksize == 0) {
		// Fall back to previous behavior.
		size_t len = sizeof(stackaddr);
		int mib[] = { CTL_KERN, KERN_USRSTACK };
		if (__sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0) {
#if defined(__LP64__)
			stackaddr = (void *)USRSTACK64;
#else
			stackaddr = (void *)USRSTACK;
#endif
		}
		stacksize = DFLSSIZ;
		allocaddr = 0;
		allocsize = 0;
	}

	// Initialize random ptr_munge token from the kernel.
	parse_ptr_munge_params(envp, apple);

	PTHREAD_DEBUG_ASSERT(_pthread_attr_default.qosclass ==
			_pthread_default_priority(0));
	_pthread_struct_init(thread, &_pthread_attr_default,
			stackaddr, stacksize, allocaddr, allocsize);
	thread->tl_joinable = true;

	// Finish initialization with common code that is reinvoked on the
	// child side of a fork.

	// Finishes initialization of main thread attributes.
	// Initializes the thread list and add the main thread.
	// Calls _pthread_set_self() to prepare the main thread for execution.
	_pthread_main_thread_init(thread);

	struct _pthread_registration_data registration_data;
	// Set up kernel entry points with __bsdthread_register.
	_pthread_bsdthread_init(&registration_data);

	// Have pthread_key and pthread_mutex do their init envvar checks.
	_pthread_key_global_init(envp);
	_pthread_mutex_global_init(envp, &registration_data);

#if PTHREAD_DEBUG_LOG
	_SIMPLE_STRING path = _simple_salloc();
	_simple_sprintf(path, "/var/tmp/libpthread.%d.log", getpid());
	_pthread_debuglog = open(_simple_string(path),
			O_WRONLY | O_APPEND | O_CREAT | O_NOFOLLOW | O_CLOEXEC, 0666);
	_simple_sfree(path);
	_pthread_debugstart = mach_absolute_time();
#endif // PTHREAD_DEBUG_LOG

	return 0;
}
#endif // !VARIANT_DYLD
void
_pthread_main_thread_init(pthread_t p)
{
	TAILQ_INIT(&__pthread_head);
	_pthread_lock_init(&_pthread_list_lock);
	_pthread_lock_init(&p->lock);
	p->__cleanup_stack = NULL;
	p->tl_join_ctx = NULL;
	p->tl_exit_gate = MACH_PORT_NULL;
	_pthread_tsd_slot(p, MACH_THREAD_SELF) = mach_thread_self();
	_pthread_tsd_slot(p, MIG_REPLY) = mach_reply_port();
	_pthread_tsd_slot(p, MACH_SPECIAL_REPLY) = MACH_PORT_NULL;
	_pthread_tsd_slot(p, SEMAPHORE_CACHE) = SEMAPHORE_NULL;

	// Initialize the list of threads with the new main thread.
	TAILQ_INSERT_HEAD(&__pthread_head, p, tl_plist);
	_pthread_count = 1;

	_pthread_introspection_thread_start(p);
}

void
_pthread_main_thread_postfork_init(pthread_t p)
{
	_pthread_main_thread_init(p);
	_pthread_set_self_internal(p);
}
// Libsystem knows about this symbol and exports it to libsyscall
int
pthread_current_stack_contains_np(const void *addr, size_t length)
{
	uintptr_t begin = (uintptr_t) addr, end;
	uintptr_t stack_base = (uintptr_t) _pthread_self_direct()->stackbottom;
	uintptr_t stack_top = (uintptr_t) _pthread_self_direct()->stackaddr;

	if (stack_base == stack_top) {
		return -ENOTSUP;
	}

	if (__builtin_add_overflow(begin, length, &end)) {
		return -EINVAL;
	}

	return stack_base <= begin && end <= stack_top;
}
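/*
 * Illustrative usage sketch (not in the original source):
 *
 *	char buf[64];
 *	if (pthread_current_stack_contains_np(buf, sizeof(buf)) == 1) {
 *		// buf lies entirely within the calling thread's stack
 *	}
 */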
// Libsystem knows about this symbol and exports it to libsyscall
void
_pthread_clear_qos_tsd(mach_port_t port)
{
	pthread_priority_t pp = _pthread_unspecified_priority();

	if (port == MACH_PORT_NULL || _pthread_mach_thread_self_direct() == port) {
		/* Clear the current thread's TSD, that can be done inline. */
		_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, pp);
	} else {
		pthread_t p;

		_pthread_lock_lock(&_pthread_list_lock);

		TAILQ_FOREACH(p, &__pthread_head, tl_plist) {
			mach_port_t kp = _pthread_tsd_slot(p, MACH_THREAD_SELF);
			if (port == kp) {
				_pthread_tsd_slot(p, PTHREAD_QOS_CLASS) = pp;
				break;
			}
		}

		_pthread_lock_unlock(&_pthread_list_lock);
	}
}
#pragma mark pthread/stack_np.h public interface

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__arm64__)
#if __ARM64_ARCH_8_32__
/*
 * arm64_32 uses 64-bit sizes for the frame pointer and
 * return address of a stack frame.
 */
typedef uint64_t frame_data_addr_t;
#else
typedef uintptr_t frame_data_addr_t;
#endif

struct frame_data {
	frame_data_addr_t frame_addr_next;
	frame_data_addr_t ret_addr;
};
#else
#error ********** Unimplemented architecture
#endif

uintptr_t
pthread_stack_frame_decode_np(uintptr_t frame_addr, uintptr_t *return_addr)
{
	struct frame_data *frame = (struct frame_data *)frame_addr;

	if (return_addr) {
#if __has_feature(ptrauth_calls)
		*return_addr = (uintptr_t)ptrauth_strip((void *)frame->ret_addr,
				ptrauth_key_return_address);
#else
		*return_addr = (uintptr_t)frame->ret_addr;
#endif /* __has_feature(ptrauth_calls) */
	}

#if __has_feature(ptrauth_calls)
	return (uintptr_t)ptrauth_strip((void *)frame->frame_addr_next,
			ptrauth_key_frame_pointer);
#endif /* __has_feature(ptrauth_calls) */
	return (uintptr_t)frame->frame_addr_next;
}
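/*
 * Illustrative usage sketch (not in the original source): walking the
 * caller's frame chain, assuming frame pointers are present and the chain
 * terminates at 0.
 *
 *	uintptr_t ret, fp = (uintptr_t)__builtin_frame_address(0);
 *	while (fp) {
 *		fp = pthread_stack_frame_decode_np(fp, &ret);
 *		// `ret` is the (ptrauth-stripped) return address of that frame
 *	}
 */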
#pragma mark pthread workqueue support routines

void
_pthread_bsdthread_init(struct _pthread_registration_data *data)
{
	bzero(data, sizeof(*data));
	data->version = sizeof(struct _pthread_registration_data);
	data->dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
	data->return_to_kernel_offset = __TSD_RETURN_TO_KERNEL * sizeof(void *);
	data->tsd_offset = offsetof(struct pthread_s, tsd);
	data->mach_thread_self_offset = __TSD_MACH_THREAD_SELF * sizeof(void *);
	data->joinable_offset_bits = CHAR_BIT * (offsetof(struct pthread_s, tl_policy) + 1);

	int rv = __bsdthread_register(thread_start, start_wqthread, (int)PTHREAD_SIZE,
			(void*)data, (uintptr_t)sizeof(*data), data->dispatch_queue_offset);

	if (rv > 0) {
		int required_features =
				PTHREAD_FEATURE_FINEPRIO |
				PTHREAD_FEATURE_BSDTHREADCTL |
				PTHREAD_FEATURE_SETSELF |
				PTHREAD_FEATURE_QOS_MAINTENANCE |
				PTHREAD_FEATURE_QOS_DEFAULT;
		if ((rv & required_features) != required_features) {
			PTHREAD_INTERNAL_CRASH(rv, "Missing required kernel support");
		}
		__pthread_supported_features = rv;
	}

	/*
	 * TODO: differentiate between (-1, EINVAL) after fork (which has the side
	 * effect of resetting the child's stack_addr_hint before bailing out) and
	 * (-1, EINVAL) because of invalid arguments. We'd probably like to treat
	 * the latter as fatal.
	 *
	 * <rdar://problem/36451838>
	 */

	pthread_priority_t main_qos = (pthread_priority_t)data->main_qos;

	if (_pthread_priority_thread_qos(main_qos) != THREAD_QOS_UNSPECIFIED) {
		_pthread_set_main_qos(main_qos);
		_pthread_tsd_slot(main_thread(), PTHREAD_QOS_CLASS) = main_qos;
	}

	if (data->stack_addr_hint) {
		__pthread_stack_hint = data->stack_addr_hint;
	}

	if (__libdispatch_workerfunction != NULL) {
		// prepare the kernel for workq action
		(void)__workq_open();
	}
}
static void
_pthread_wqthread_legacy_worker_wrap(pthread_priority_t pp)
{
	/* Old thread priorities are inverted from where we have them in
	 * the new flexible priority scheme. The highest priority is zero,
	 * up to 2, with background at 3.
	 */
	pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;
	bool overcommit = (pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
	int opts = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;

	switch (_pthread_priority_thread_qos(pp)) {
	case THREAD_QOS_USER_INITIATED:
		return (*func)(WORKQ_HIGH_PRIOQUEUE, opts, NULL);
	case THREAD_QOS_LEGACY:
		/* B&I builders can't pass a QOS_CLASS_DEFAULT thread to dispatch, for fear of the QoS being
		 * picked up by NSThread (et al) and transported around the system. So change the TSD to
		 * make this thread look like QOS_CLASS_USER_INITIATED even though it will still run as legacy.
		 */
		_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS,
				_pthread_priority_make_from_thread_qos(THREAD_QOS_USER_INITIATED, 0, 0));
		return (*func)(WORKQ_DEFAULT_PRIOQUEUE, opts, NULL);
	case THREAD_QOS_UTILITY:
		return (*func)(WORKQ_LOW_PRIOQUEUE, opts, NULL);
	case THREAD_QOS_BACKGROUND:
		return (*func)(WORKQ_BG_PRIOQUEUE, opts, NULL);
	}
	PTHREAD_INTERNAL_CRASH(pp, "Invalid pthread priority for the legacy interface");
}

static inline pthread_priority_t
_pthread_wqthread_priority(int flags)
{
	pthread_priority_t pp = 0;
	thread_qos_t qos;

	if (flags & WQ_FLAG_THREAD_KEVENT) {
		pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
	}
	if (flags & WQ_FLAG_THREAD_EVENT_MANAGER) {
		return pp | _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
	}

	if (flags & WQ_FLAG_THREAD_OVERCOMMIT) {
		pp |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
	}
	if (flags & WQ_FLAG_THREAD_PRIO_QOS) {
		qos = (thread_qos_t)(flags & WQ_FLAG_THREAD_PRIO_MASK);
		pp = _pthread_priority_make_from_thread_qos(qos, 0, pp);
	} else if (flags & WQ_FLAG_THREAD_PRIO_SCHED) {
		pp |= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
		pp |= (flags & WQ_FLAG_THREAD_PRIO_MASK);
	} else {
		PTHREAD_INTERNAL_CRASH(flags, "Missing priority");
	}
	return pp;
}
static void
_pthread_wqthread_setup(pthread_t self, mach_port_t kport, void *stacklowaddr,
		int flags)
{
	void *stackaddr = self;
	size_t stacksize = (uintptr_t)self - (uintptr_t)stacklowaddr;

	_pthread_struct_init(self, &_pthread_attr_default, stackaddr, stacksize,
			PTHREAD_ALLOCADDR(stackaddr, stacksize),
			PTHREAD_ALLOCSIZE(stackaddr, stacksize));

	_pthread_tsd_slot(self, MACH_THREAD_SELF) = kport;
	self->wqthread = 1;
	self->wqkillset = 0;
	self->tl_joinable = false;

	// Update the running thread count and set childrun bit.
	if (os_unlikely((flags & WQ_FLAG_THREAD_TSD_BASE_SET) == 0)) {
		PTHREAD_INTERNAL_CRASH(flags,
				"thread_set_tsd_base() wasn't called by the kernel");
	}
	_pthread_set_self_internal(self);
	__pthread_add_thread(self, kport, false);
	__pthread_started_thread(self);
}

OS_NORETURN OS_NOINLINE
static void
_pthread_wqthread_exit(pthread_t self)
{
	const thread_qos_t WORKQ_THREAD_QOS_CLEANUP = THREAD_QOS_LEGACY;
	pthread_priority_t pp = _pthread_tsd_slot(self, PTHREAD_QOS_CLASS);
	thread_qos_t qos;

	qos = _pthread_priority_thread_qos(pp);
	if (qos == THREAD_QOS_UNSPECIFIED || qos > WORKQ_THREAD_QOS_CLEANUP) {
		// Reset QoS to something low for the cleanup process
		pp = _pthread_priority_make_from_thread_qos(WORKQ_THREAD_QOS_CLEANUP, 0, 0);
		_pthread_tsd_slot(self, PTHREAD_QOS_CLASS) = pp;
	}

	_pthread_exit(self, NULL);
}

// workqueue entry point from kernel
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr,
		void *keventlist, int flags, int nkevents)
{
	if ((flags & WQ_FLAG_THREAD_REUSE) == 0) {
		_pthread_wqthread_setup(self, kport, stacklowaddr, flags);
	}

	pthread_priority_t pp;

	if (flags & WQ_FLAG_THREAD_OUTSIDEQOS) {
		self->wq_outsideqos = 1;
		pp = _pthread_priority_make_from_thread_qos(THREAD_QOS_LEGACY, 0,
				_PTHREAD_PRIORITY_FALLBACK_FLAG);
	} else {
		self->wq_outsideqos = 0;
		pp = _pthread_wqthread_priority(flags);
	}

	self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (void *)pp;

	// Try hard to avoid spills onto the stack, to keep the used stack space minimal.
	if (os_unlikely(nkevents == WORKQ_EXIT_THREAD_NKEVENT)) {
		_pthread_wqthread_exit(self);
	} else if (flags & WQ_FLAG_THREAD_WORKLOOP) {
		kqueue_id_t *kqidptr = (kqueue_id_t *)keventlist - 1;
		self->fun = (void *(*)(void*))__libdispatch_workloopfunction;
		self->arg = keventlist;
		self->wq_nevents = nkevents;
		(*__libdispatch_workloopfunction)(kqidptr, &self->arg, &self->wq_nevents);
		__workq_kernreturn(WQOPS_THREAD_WORKLOOP_RETURN, self->arg, self->wq_nevents, 0);
	} else if (flags & WQ_FLAG_THREAD_KEVENT) {
		self->fun = (void *(*)(void*))__libdispatch_keventfunction;
		self->arg = keventlist;
		self->wq_nevents = nkevents;
		(*__libdispatch_keventfunction)(&self->arg, &self->wq_nevents);
		__workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, self->arg, self->wq_nevents, 0);
	} else {
		self->fun = (void *(*)(void*))__libdispatch_workerfunction;
		self->arg = (void *)(uintptr_t)pp;
		self->wq_nevents = 0;
		if (os_likely(__workq_newapi)) {
			(*__libdispatch_workerfunction)(pp);
		} else {
			_pthread_wqthread_legacy_worker_wrap(pp);
		}
		__workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
	}

	_os_set_crash_log_cause_and_message(self->err_no,
			"BUG IN LIBPTHREAD: __workq_kernreturn returned");
	/*
	 * 52858993: we should never return but the compiler insists on outlining,
	 * so the __builtin_trap() is in _start_wqthread in pthread_asm.s
	 */
}

#pragma mark pthread workqueue API for libdispatch

_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN == WQ_KEVENT_LIST_LEN,
		"Kernel and userland should agree on the event list size");

void
pthread_workqueue_setdispatchoffset_np(int offset)
{
	__workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP, NULL, offset, 0x00);
}

int
pthread_workqueue_setup(struct pthread_workqueue_config *cfg, size_t cfg_size)
{
	int rv = EBUSY;
	struct workq_dispatch_config wdc_cfg;
	size_t min_size = 0;

	if (cfg_size < sizeof(uint32_t)) {
		return EINVAL;
	}

	switch (cfg->version) {
	case 1:
		min_size = offsetof(struct pthread_workqueue_config, queue_label_offs);
		break;
	case 2:
		min_size = sizeof(struct pthread_workqueue_config);
		break;
	default:
		return EINVAL;
	}

	if (!cfg || cfg_size < min_size) {
		return EINVAL;
	}

	if (cfg->flags & ~PTHREAD_WORKQUEUE_CONFIG_SUPPORTED_FLAGS ||
			cfg->version < PTHREAD_WORKQUEUE_CONFIG_MIN_SUPPORTED_VERSION) {
		return ENOTSUP;
	}

	if (__libdispatch_workerfunction == NULL) {
		__workq_newapi = true;

		wdc_cfg.wdc_version = WORKQ_DISPATCH_CONFIG_VERSION;
		wdc_cfg.wdc_flags = 0;
		wdc_cfg.wdc_queue_serialno_offs = cfg->queue_serialno_offs;
#if WORKQ_DISPATCH_CONFIG_VERSION >= 2
		wdc_cfg.wdc_queue_label_offs = cfg->queue_label_offs;
#endif

		// Tell the kernel about dispatch internals
		rv = (int) __workq_kernreturn(WQOPS_SETUP_DISPATCH, &wdc_cfg,
				sizeof(wdc_cfg), 0);
		if (rv == -1) {
			return errno;
		} else {
			__libdispatch_keventfunction = cfg->kevent_cb;
			__libdispatch_workloopfunction = cfg->workloop_cb;
			__libdispatch_workerfunction = cfg->workq_cb;

			// Prepare the kernel for workq action
			(void)__workq_open();
			if (__is_threaded == 0) {
				__is_threaded = 1;
			}

			return 0;
		}
	}

	return rv;
}

int
_pthread_workqueue_init_with_workloop(pthread_workqueue_function2_t queue_func,
		pthread_workqueue_function_kevent_t kevent_func,
		pthread_workqueue_function_workloop_t workloop_func,
		int offset, int flags)
{
	struct pthread_workqueue_config cfg = {
		.version = PTHREAD_WORKQUEUE_CONFIG_VERSION,
		.flags = 0,
		.workq_cb = queue_func,
		.kevent_cb = kevent_func,
		.workloop_cb = workloop_func,
		.queue_serialno_offs = offset,
		.queue_label_offs = 0,
	};

	return pthread_workqueue_setup(&cfg, sizeof(cfg));
}

int
_pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func,
		pthread_workqueue_function_kevent_t kevent_func,
		int offset, int flags)
{
	return _pthread_workqueue_init_with_workloop(queue_func, kevent_func,
			NULL, offset, flags);
}

int
_pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flags)
{
	return _pthread_workqueue_init_with_kevent(func, NULL, offset, flags);
}
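
/*
 * Illustrative sketch (not part of libpthread, compiled out): the three
 * _pthread_workqueue_init* entry points above funnel into
 * pthread_workqueue_setup(); a minimal client that only needs a worker
 * function could use the narrowest one. example_worker is hypothetical.
 */
#if 0
static void
example_worker(pthread_priority_t priority)
{
	// Pop and run work appropriate for `priority` here.
}

static int
example_minimal_init(void)
{
	return _pthread_workqueue_init(example_worker, 0, 0);
}
#endif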

int
pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func)
{
	struct pthread_workqueue_config cfg = {
		.version = PTHREAD_WORKQUEUE_CONFIG_VERSION,
		.flags = 0,
		.workq_cb = (uint64_t)(pthread_workqueue_function2_t)worker_func,
		.kevent_cb = 0,
		.workloop_cb = 0,
		.queue_serialno_offs = 0,
		.queue_label_offs = 0,
	};

	return pthread_workqueue_setup(&cfg, sizeof(cfg));
}

int
_pthread_workqueue_supported(void)
{
	if (os_unlikely(!__pthread_supported_features)) {
		PTHREAD_INTERNAL_CRASH(0, "libpthread has not been initialized");
	}

	return __pthread_supported_features;
}

int
pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads)
{
	int res = 0;

	// Cannot add threads without a worker function registered.
	if (__libdispatch_workerfunction == NULL) {
		return EPERM;
	}

	pthread_priority_t kp = 0;
	int compat_priority = queue_priority & WQ_FLAG_THREAD_PRIO_MASK;
	int flags = 0;

	if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
		flags = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
	}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
	kp = _pthread_qos_class_encode_workqueue(compat_priority, flags);
#pragma clang diagnostic pop

	res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)kp);
	if (res == -1) {
		res = errno;
	}
	return res;
}

bool
_pthread_workqueue_should_narrow(pthread_priority_t pri)
{
	int res = __workq_kernreturn(WQOPS_SHOULD_NARROW, NULL, (int)pri, 0);
	if (res == -1) {
		return false;
	}
	return res;
}

int
_pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
{
	int res = 0;

	if (__libdispatch_workerfunction == NULL) {
		return EPERM;
	}

	// <rdar://problem/37687655> Legacy simulators fail to boot
	//
	// Older sims set the deprecated _PTHREAD_PRIORITY_ROOTQUEUE_FLAG wrongly,
	// which is aliased to _PTHREAD_PRIORITY_SCHED_PRI_FLAG and that XNU
	// validates and rejects.
	//
	// As a workaround, forcefully unset this bit, which cannot be set here anyway.
	priority &= ~_PTHREAD_PRIORITY_SCHED_PRI_FLAG;

	res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)priority);
	if (res == -1) {
		res = errno;
	}
	return res;
}

int
_pthread_workqueue_set_event_manager_priority(pthread_priority_t priority)
{
	int res = __workq_kernreturn(WQOPS_SET_EVENT_MANAGER_PRIORITY, NULL,
			(int)priority, 0);
	if (res == -1) {
		res = errno;
	}
	return res;
}

int
_pthread_workloop_create(uint64_t workloop_id, uint64_t options, pthread_attr_t *attr)
{
	struct kqueue_workloop_params params = {
		.kqwlp_version = sizeof(struct kqueue_workloop_params),
		.kqwlp_id = workloop_id,
		.kqwlp_flags = 0,
	};

	if (!attr) {
		return EINVAL;
	}

	if (attr->schedset) {
		params.kqwlp_flags |= KQ_WORKLOOP_CREATE_SCHED_PRI;
		params.kqwlp_sched_pri = attr->param.sched_priority;
	}

	if (attr->policyset) {
		params.kqwlp_flags |= KQ_WORKLOOP_CREATE_SCHED_POL;
		params.kqwlp_sched_pol = attr->policy;
	}

	if (attr->cpupercentset) {
		params.kqwlp_flags |= KQ_WORKLOOP_CREATE_CPU_PERCENT;
		params.kqwlp_cpu_percent = attr->cpupercent;
		params.kqwlp_cpu_refillms = attr->refillms;
	}

	int res = __kqueue_workloop_ctl(KQ_WORKLOOP_CREATE, 0, &params,
			sizeof(params));
	if (res == -1) {
		res = errno;
	}
	return res;
}
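
/*
 * Illustrative sketch (not part of libpthread, compiled out): pairing the
 * workloop create/destroy SPI with a pthread attribute that carries a
 * scheduling priority. The identifier and priority values are arbitrary
 * example choices.
 */
#if 0
static int
example_workloop_lifecycle(void)
{
	pthread_attr_t attr;
	struct sched_param param = { .sched_priority = 31 }; // example value

	pthread_attr_init(&attr);
	pthread_attr_setschedparam(&attr, &param); // marks the attr's schedparam as set

	uint64_t wl_id = 0x1234; // hypothetical caller-chosen workloop id
	int rc = _pthread_workloop_create(wl_id, 0, &attr);
	if (rc == 0) {
		rc = _pthread_workloop_destroy(wl_id);
	}
	pthread_attr_destroy(&attr);
	return rc;
}
#endif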

int
_pthread_workloop_destroy(uint64_t workloop_id)
{
	struct kqueue_workloop_params params = {
		.kqwlp_version = sizeof(struct kqueue_workloop_params),
		.kqwlp_id = workloop_id,
	};

	int res = __kqueue_workloop_ctl(KQ_WORKLOOP_DESTROY, 0, &params,
			sizeof(params));
	if (res == -1) {
		res = errno;
	}
	return res;
}

#pragma mark Introspection SPI for libpthread.

static pthread_introspection_hook_t _pthread_introspection_hook;

pthread_introspection_hook_t
pthread_introspection_hook_install(pthread_introspection_hook_t hook)
{
	pthread_introspection_hook_t prev;
	prev = _pthread_atomic_xchg_ptr((void**)&_pthread_introspection_hook, hook);
	return prev;
}

static void
_pthread_introspection_call_hook(unsigned int event,
		pthread_t thread, void *addr, size_t size)
{
	pthread_t self = pthread_self();
	uint16_t old = self->introspection;
	self->introspection = (uint16_t)event;
	_pthread_introspection_hook(event, thread, addr, size);
	self->introspection = old;
}

static void
_pthread_introspection_hook_callout_thread_create(pthread_t t)
{
	_pthread_introspection_call_hook(PTHREAD_INTROSPECTION_THREAD_CREATE, t, t,
			PTHREAD_SIZE);
}

void
_pthread_introspection_thread_create(pthread_t t)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_create(t);
}

static void
_pthread_introspection_hook_callout_thread_start(pthread_t t)
{
	size_t freesize;
	void *freeaddr;

	if (t == main_thread()) {
		size_t stacksize = t->stackaddr - t->stackbottom;
		freesize = stacksize + t->guardsize;
		freeaddr = t->stackaddr - freesize;
	} else {
		freesize = t->freesize - PTHREAD_SIZE;
		freeaddr = t->freeaddr;
	}
	_pthread_introspection_call_hook(PTHREAD_INTROSPECTION_THREAD_START, t,
			freeaddr, freesize);
}

void
_pthread_introspection_thread_start(pthread_t t)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_start(t);
}

static void
_pthread_introspection_hook_callout_thread_terminate(pthread_t t)
{
	size_t freesize;
	void *freeaddr;

	if (t == main_thread()) {
		size_t stacksize = t->stackaddr - t->stackbottom;
		freesize = stacksize + t->guardsize;
		freeaddr = t->stackaddr - freesize;
	} else {
		freesize = t->freesize - PTHREAD_SIZE;
		freeaddr = t->freeaddr;
	}
	_pthread_introspection_call_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE, t,
			freeaddr, freesize);
}

void
_pthread_introspection_thread_terminate(pthread_t t)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_terminate(t);
}

static void
_pthread_introspection_hook_callout_thread_destroy(pthread_t t)
{
	_pthread_introspection_call_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY, t, t,
			PTHREAD_SIZE);
}

void
_pthread_introspection_thread_destroy(pthread_t t)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_destroy(t);
}

#pragma mark libplatform shims
#if !VARIANT_DYLD

#include <platform/string.h>

// pthread_setup initializes large structures to 0,
// which the compiler turns into a library call to memset.
//
// To avoid linking against Libc, provide simple wrappers
// that call through to the libplatform primitives.

void *
memset(void *b, int c, size_t len)
{
	return _platform_memset(b, c, len);
}

void
bzero(void *s, size_t n)
{
	_platform_bzero(s, n);
}

void *
memcpy(void *a, const void *b, unsigned long s)
{
	return _platform_memmove(a, b, s);
}

#endif // !VARIANT_DYLD