2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 * POSIX Pthread Library
55 #include "workqueue_private.h"
56 #include "introspection_private.h"
57 #include "qos_private.h"
58 #include "tsd_private.h"
59 #include "pthread/stack_np.h"
60 #include "offsets.h" // included to validate the offsets at build time
66 #include <mach/mach_init.h>
67 #include <mach/mach_vm.h>
68 #include <mach/mach_sync_ipc.h>
70 #include <sys/resource.h>
71 #include <sys/sysctl.h>
72 #include <sys/queue.h>
73 #include <sys/ulock.h>
75 #include <machine/vmparam.h>
76 #define __APPLE_API_PRIVATE
77 #include <machine/cpu_capabilities.h>
78 #if __has_include(<ptrauth.h>)
80 #endif // __has_include(<ptrauth.h>)
83 #include <platform/string.h>
84 #include <platform/compat.h>
86 #include <stack_logging.h>
88 // Defined in libsyscall; initialized in libmalloc
89 extern malloc_logger_t
*__syscall_logger
;
91 extern int __sysctl(int *name
, u_int namelen
, void *oldp
, size_t *oldlenp
,
92 void *newp
, size_t newlen
);
93 extern void __exit(int) __attribute__((noreturn
));
94 extern int __pthread_kill(mach_port_t
, int);
96 extern void _pthread_joiner_wake(pthread_t thread
);
99 PTHREAD_NOEXPORT
extern struct _pthread
*_main_thread_ptr
;
100 #define main_thread() (_main_thread_ptr)
101 #endif // VARIANT_DYLD
103 // Default stack size is 512KB; independent of the main thread's stack size.
104 #define DEFAULT_STACK_SIZE (size_t)(512 * 1024)
112 * The pthread may be offset into a page. In that event, by contract
113 * with the kernel, the allocation will extend PTHREAD_SIZE from the
114 * start of the next page. There's also one page worth of allocation
115 * below stacksize for the guard page. <rdar://problem/19941744>
117 #define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
118 #define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size)
119 #define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize))
121 static const pthread_attr_t _pthread_attr_default
= {
122 .sig
= _PTHREAD_ATTR_SIG
,
124 .detached
= PTHREAD_CREATE_JOINABLE
,
125 .inherit
= _PTHREAD_DEFAULT_INHERITSCHED
,
126 .policy
= _PTHREAD_DEFAULT_POLICY
,
127 .defaultguardpage
= true,
128 // compile time constant for _pthread_default_priority(0)
129 .qosclass
= (1U << (THREAD_QOS_LEGACY
- 1 + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT
)) |
130 ((uint8_t)-1 & _PTHREAD_PRIORITY_PRIORITY_MASK
),
133 #if PTHREAD_LAYOUT_SPI
135 const struct pthread_layout_offsets_s pthread_layout_offsets
= {
137 .plo_pthread_tsd_base_offset
= offsetof(struct _pthread
, tsd
),
138 .plo_pthread_tsd_base_address_offset
= 0,
139 .plo_pthread_tsd_entry_size
= sizeof(((struct _pthread
*)NULL
)->tsd
[0]),
142 #endif // PTHREAD_LAYOUT_SPI
145 // Global exported variables
148 // This global should be used (carefully) by anyone needing to know if a
149 // pthread (other than the main thread) has been created.
150 int __is_threaded
= 0;
151 int __unix_conforming
= 0;
154 // Global internal variables
157 // _pthread_list_lock protects _pthread_count, access to the __pthread_head
158 // list. Externally imported by pthread_cancelable.c.
159 struct __pthread_list __pthread_head
= TAILQ_HEAD_INITIALIZER(__pthread_head
);
160 _pthread_lock _pthread_list_lock
= _PTHREAD_LOCK_INITIALIZER
;
165 // The main thread's pthread_t
166 struct _pthread _main_thread
__attribute__((aligned(64))) = { };
167 #define main_thread() (&_main_thread)
168 #else // VARIANT_DYLD
169 struct _pthread
*_main_thread_ptr
;
170 #endif // VARIANT_DYLD
172 #if PTHREAD_DEBUG_LOG
174 int _pthread_debuglog
;
175 uint64_t _pthread_debugstart
;
179 // Global static variables
181 static bool __workq_newapi
;
182 static uint8_t default_priority
;
184 static uint8_t max_priority
;
185 static uint8_t min_priority
;
186 #endif // !VARIANT_DYLD
187 static int _pthread_count
= 1;
188 static int pthread_concurrency
;
189 static uintptr_t _pthread_ptr_munge_token
;
191 static void (*exitf
)(int) = __exit
;
193 static void *(*_pthread_malloc
)(size_t) = NULL
;
194 static void (*_pthread_free
)(void *) = NULL
;
195 #endif // !VARIANT_DYLD
197 // work queue support data
200 __pthread_invalid_keventfunction(void **events
, int *nevents
)
202 PTHREAD_CLIENT_CRASH(0, "Invalid kqworkq setup");
207 __pthread_invalid_workloopfunction(uint64_t *workloop_id
, void **events
, int *nevents
)
209 PTHREAD_CLIENT_CRASH(0, "Invalid kqwl setup");
211 static pthread_workqueue_function2_t __libdispatch_workerfunction
;
212 static pthread_workqueue_function_kevent_t __libdispatch_keventfunction
= &__pthread_invalid_keventfunction
;
213 static pthread_workqueue_function_workloop_t __libdispatch_workloopfunction
= &__pthread_invalid_workloopfunction
;
214 static int __pthread_supported_features
; // supported feature set
216 #if defined(__i386__) || defined(__x86_64__)
217 static mach_vm_address_t __pthread_stack_hint
= 0xB0000000;
218 #elif defined(__arm__) || defined(__arm64__)
219 static mach_vm_address_t __pthread_stack_hint
= 0x30000000;
221 #error no __pthread_stack_hint for this architecture
225 // Function prototypes
228 // pthread primitives
229 static inline void _pthread_struct_init(pthread_t t
, const pthread_attr_t
*attrs
,
230 void *stack
, size_t stacksize
, void *freeaddr
, size_t freesize
);
233 static void _pthread_set_self_dyld(void);
234 #endif // VARIANT_DYLD
235 static inline void _pthread_set_self_internal(pthread_t
);
237 static void _pthread_dealloc_reply_port(pthread_t t
);
238 static void _pthread_dealloc_special_reply_port(pthread_t t
);
240 static inline void __pthread_started_thread(pthread_t t
);
242 static void _pthread_exit(pthread_t self
, void *value_ptr
) __dead2
;
244 static inline void _pthread_introspection_thread_create(pthread_t t
);
245 static inline void _pthread_introspection_thread_start(pthread_t t
);
246 static inline void _pthread_introspection_thread_terminate(pthread_t t
);
247 static inline void _pthread_introspection_thread_destroy(pthread_t t
);
249 extern void _pthread_set_self(pthread_t
);
250 extern void start_wqthread(pthread_t self
, mach_port_t kport
, void *stackaddr
, void *unused
, int reuse
); // trampoline into _pthread_wqthread
251 extern void thread_start(pthread_t self
, mach_port_t kport
, void *(*fun
)(void *), void * funarg
, size_t stacksize
, unsigned int flags
); // trampoline into _pthread_start
254 * Flags filed passed to bsdthread_create and back in pthread_start
255 * 31 <---------------------------------> 0
256 * _________________________________________
257 * | flags(8) | policy(8) | importance(16) |
258 * -----------------------------------------
260 #define PTHREAD_START_CUSTOM 0x01000000 // <rdar://problem/34501401>
261 #define PTHREAD_START_SETSCHED 0x02000000
262 // was PTHREAD_START_DETACHED 0x04000000
263 #define PTHREAD_START_QOSCLASS 0x08000000
264 #define PTHREAD_START_TSD_BASE_SET 0x10000000
265 #define PTHREAD_START_SUSPENDED 0x20000000
266 #define PTHREAD_START_QOSCLASS_MASK 0x00ffffff
267 #define PTHREAD_START_POLICY_BITSHIFT 16
268 #define PTHREAD_START_POLICY_MASK 0xff
269 #define PTHREAD_START_IMPORTANCE_MASK 0xffff
271 extern pthread_t
__bsdthread_create(void *(*func
)(void *), void * func_arg
, void * stack
, pthread_t thread
, unsigned int flags
);
272 extern int __bsdthread_register(void (*)(pthread_t
, mach_port_t
, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t
, mach_port_t
, void *, void *, int), int,void (*)(pthread_t
, mach_port_t
, void *(*)(void *), void *, size_t, unsigned int), int32_t *,__uint64_t
);
273 extern int __bsdthread_terminate(void * freeaddr
, size_t freesize
, mach_port_t kport
, mach_port_t joinsem
);
274 extern __uint64_t
__thread_selfid( void );
277 _Static_assert(offsetof(struct _pthread
, tsd
) == 224, "TSD LP64 offset");
279 _Static_assert(offsetof(struct _pthread
, tsd
) == 176, "TSD ILP32 offset");
281 _Static_assert(offsetof(struct _pthread
, tsd
) + _PTHREAD_STRUCT_DIRECT_THREADID_OFFSET
282 == offsetof(struct _pthread
, thread_id
),
283 "_PTHREAD_STRUCT_DIRECT_THREADID_OFFSET is correct");
285 #pragma mark pthread attrs
287 _Static_assert(sizeof(struct _pthread_attr_t
) == sizeof(__darwin_pthread_attr_t
),
288 "internal pthread_attr_t == external pthread_attr_t");
291 pthread_attr_destroy(pthread_attr_t
*attr
)
294 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
302 pthread_attr_getdetachstate(const pthread_attr_t
*attr
, int *detachstate
)
305 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
306 *detachstate
= attr
->detached
;
313 pthread_attr_getinheritsched(const pthread_attr_t
*attr
, int *inheritsched
)
316 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
317 *inheritsched
= attr
->inherit
;
323 static PTHREAD_ALWAYS_INLINE
void
324 _pthread_attr_get_schedparam(const pthread_attr_t
*attr
,
325 struct sched_param
*param
)
327 if (attr
->schedset
) {
328 *param
= attr
->param
;
330 param
->sched_priority
= default_priority
;
331 param
->quantum
= 10; /* quantum isn't public yet */
336 pthread_attr_getschedparam(const pthread_attr_t
*attr
, struct sched_param
*param
)
339 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
340 _pthread_attr_get_schedparam(attr
, param
);
347 pthread_attr_getschedpolicy(const pthread_attr_t
*attr
, int *policy
)
350 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
351 *policy
= attr
->policy
;
358 pthread_attr_init(pthread_attr_t
*attr
)
360 *attr
= _pthread_attr_default
;
365 pthread_attr_setdetachstate(pthread_attr_t
*attr
, int detachstate
)
368 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
369 (detachstate
== PTHREAD_CREATE_JOINABLE
||
370 detachstate
== PTHREAD_CREATE_DETACHED
)) {
371 attr
->detached
= detachstate
;
378 pthread_attr_setinheritsched(pthread_attr_t
*attr
, int inheritsched
)
381 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
382 (inheritsched
== PTHREAD_INHERIT_SCHED
||
383 inheritsched
== PTHREAD_EXPLICIT_SCHED
)) {
384 attr
->inherit
= inheritsched
;
391 pthread_attr_setschedparam(pthread_attr_t
*attr
, const struct sched_param
*param
)
394 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
395 /* TODO: Validate sched_param fields */
396 attr
->param
= *param
;
404 pthread_attr_setschedpolicy(pthread_attr_t
*attr
, int policy
)
407 if (attr
->sig
== _PTHREAD_ATTR_SIG
&& (policy
== SCHED_OTHER
||
408 policy
== SCHED_RR
|| policy
== SCHED_FIFO
)) {
409 if (!_PTHREAD_POLICY_IS_FIXEDPRI(policy
)) {
410 /* non-fixedpri policy should remove cpupercent */
411 attr
->cpupercentset
= 0;
413 attr
->policy
= policy
;
421 pthread_attr_setscope(pthread_attr_t
*attr
, int scope
)
424 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
425 if (scope
== PTHREAD_SCOPE_SYSTEM
) {
426 // No attribute yet for the scope.
428 } else if (scope
== PTHREAD_SCOPE_PROCESS
) {
436 pthread_attr_getscope(const pthread_attr_t
*attr
, int *scope
)
439 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
440 *scope
= PTHREAD_SCOPE_SYSTEM
;
447 pthread_attr_getstackaddr(const pthread_attr_t
*attr
, void **stackaddr
)
450 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
451 *stackaddr
= attr
->stackaddr
;
458 pthread_attr_setstackaddr(pthread_attr_t
*attr
, void *stackaddr
)
461 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
462 ((uintptr_t)stackaddr
% vm_page_size
) == 0) {
463 attr
->stackaddr
= stackaddr
;
464 attr
->defaultguardpage
= false;
472 _pthread_attr_stacksize(const pthread_attr_t
*attr
)
474 return attr
->stacksize
? attr
->stacksize
: DEFAULT_STACK_SIZE
;
478 pthread_attr_getstacksize(const pthread_attr_t
*attr
, size_t *stacksize
)
481 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
482 *stacksize
= _pthread_attr_stacksize(attr
);
489 pthread_attr_setstacksize(pthread_attr_t
*attr
, size_t stacksize
)
492 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
493 (stacksize
% vm_page_size
) == 0 &&
494 stacksize
>= PTHREAD_STACK_MIN
) {
495 attr
->stacksize
= stacksize
;
502 pthread_attr_getstack(const pthread_attr_t
*attr
, void **stackaddr
, size_t * stacksize
)
505 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
506 *stackaddr
= (void *)((uintptr_t)attr
->stackaddr
- attr
->stacksize
);
507 *stacksize
= _pthread_attr_stacksize(attr
);
513 // Per SUSv3, the stackaddr is the base address, the lowest addressable byte
514 // address. This is not the same as in pthread_attr_setstackaddr.
516 pthread_attr_setstack(pthread_attr_t
*attr
, void *stackaddr
, size_t stacksize
)
519 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
520 ((uintptr_t)stackaddr
% vm_page_size
) == 0 &&
521 (stacksize
% vm_page_size
) == 0 &&
522 stacksize
>= PTHREAD_STACK_MIN
) {
523 attr
->stackaddr
= (void *)((uintptr_t)stackaddr
+ stacksize
);
524 attr
->stacksize
= stacksize
;
531 pthread_attr_setguardsize(pthread_attr_t
*attr
, size_t guardsize
)
534 if (attr
->sig
== _PTHREAD_ATTR_SIG
&& (guardsize
% vm_page_size
) == 0) {
535 /* Guardsize of 0 is valid, means no guard */
536 attr
->defaultguardpage
= false;
537 attr
->guardsize
= guardsize
;
544 _pthread_attr_guardsize(const pthread_attr_t
*attr
)
546 return attr
->defaultguardpage
? vm_page_size
: attr
->guardsize
;
550 pthread_attr_getguardsize(const pthread_attr_t
*attr
, size_t *guardsize
)
553 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
554 *guardsize
= _pthread_attr_guardsize(attr
);
561 pthread_attr_setcpupercent_np(pthread_attr_t
*attr
, int percent
,
562 unsigned long refillms
)
565 if (attr
->sig
== _PTHREAD_ATTR_SIG
&& percent
< UINT8_MAX
&&
566 refillms
< _PTHREAD_ATTR_REFILLMS_MAX
&& attr
->policyset
&&
567 _PTHREAD_POLICY_IS_FIXEDPRI(attr
->policy
)) {
568 attr
->cpupercent
= percent
;
569 attr
->refillms
= (uint32_t)(refillms
& 0x00ffffff);
570 attr
->cpupercentset
= 1;
576 #pragma mark pthread lifetime
578 // Allocate a thread structure, stack and guard page.
580 // The thread structure may optionally be placed in the same allocation as the
581 // stack, residing above the top of the stack. This cannot be done if a
582 // custom stack address is provided.
584 // Similarly the guard page cannot be allocated if a custom stack address is
587 // The allocated thread structure is initialized with values that indicate how
588 // it should be freed.
591 _pthread_allocate(const pthread_attr_t
*attrs
, void **stack
,
592 bool from_mach_thread
)
594 mach_vm_address_t allocaddr
= __pthread_stack_hint
;
595 size_t allocsize
, guardsize
, stacksize
, pthreadoff
;
599 if (os_unlikely(attrs
->stacksize
!= 0 &&
600 attrs
->stacksize
< PTHREAD_STACK_MIN
)) {
601 PTHREAD_CLIENT_CRASH(attrs
->stacksize
, "Stack size in attrs is too small");
604 if (os_unlikely(((uintptr_t)attrs
->stackaddr
% vm_page_size
) != 0)) {
605 PTHREAD_CLIENT_CRASH(attrs
->stacksize
, "Unaligned stack addr in attrs");
608 // Allocate a pthread structure if necessary
610 if (attrs
->stackaddr
!= NULL
) {
611 allocsize
= PTHREAD_SIZE
;
614 // <rdar://problem/42588315> if the attrs struct specifies a custom
615 // stack address but not a custom size, using ->stacksize here instead
616 // of _pthread_attr_stacksize stores stacksize as zero, indicating
617 // that the stack size is unknown.
618 stacksize
= attrs
->stacksize
;
620 guardsize
= _pthread_attr_guardsize(attrs
);
621 stacksize
= _pthread_attr_stacksize(attrs
) + PTHREAD_T_OFFSET
;
622 pthreadoff
= stacksize
+ guardsize
;
623 allocsize
= pthreadoff
+ PTHREAD_SIZE
;
624 allocsize
= mach_vm_round_page(allocsize
);
627 kr
= mach_vm_map(mach_task_self(), &allocaddr
, allocsize
, vm_page_size
- 1,
628 VM_MAKE_TAG(VM_MEMORY_STACK
)| VM_FLAGS_ANYWHERE
, MEMORY_OBJECT_NULL
,
629 0, FALSE
, VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
631 if (kr
!= KERN_SUCCESS
) {
632 kr
= mach_vm_allocate(mach_task_self(), &allocaddr
, allocsize
,
633 VM_MAKE_TAG(VM_MEMORY_STACK
)| VM_FLAGS_ANYWHERE
);
634 } else if (__syscall_logger
&& !from_mach_thread
) {
635 // libsyscall will not output malloc stack logging events when
636 // VM_MEMORY_STACK is passed in to facilitate mach thread promotion.
637 // To avoid losing the stack traces for normal p-thread create
638 // operations, libpthread must pretend to be the vm syscall and log
639 // the allocations. <rdar://36418708>
640 int eventTypeFlags
= stack_logging_type_vm_allocate
|
641 stack_logging_type_mapped_file_or_shared_mem
;
642 __syscall_logger(eventTypeFlags
| VM_MAKE_TAG(VM_MEMORY_STACK
),
643 (uintptr_t)mach_task_self(), (uintptr_t)allocsize
, 0,
644 (uintptr_t)allocaddr
, 0);
647 if (kr
!= KERN_SUCCESS
) {
650 } else if (__syscall_logger
&& !from_mach_thread
) {
651 // libsyscall will not output malloc stack logging events when
652 // VM_MEMORY_STACK is passed in to facilitate mach thread promotion.
653 // To avoid losing the stack traces for normal p-thread create
654 // operations, libpthread must pretend to be the vm syscall and log
655 // the allocations. <rdar://36418708>
656 int eventTypeFlags
= stack_logging_type_vm_allocate
;
657 __syscall_logger(eventTypeFlags
| VM_MAKE_TAG(VM_MEMORY_STACK
),
658 (uintptr_t)mach_task_self(), (uintptr_t)allocsize
, 0,
659 (uintptr_t)allocaddr
, 0);
662 // The stack grows down.
663 // Set the guard page at the lowest address of the
664 // newly allocated stack. Return the highest address
667 (void)mach_vm_protect(mach_task_self(), allocaddr
, guardsize
,
668 FALSE
, VM_PROT_NONE
);
671 // Thread structure resides at the top of the stack (when using a
672 // custom stack, allocsize == PTHREAD_SIZE, so places the pthread_t
674 t
= (pthread_t
)(allocaddr
+ pthreadoff
);
675 if (attrs
->stackaddr
) {
676 *stack
= attrs
->stackaddr
;
681 _pthread_struct_init(t
, attrs
, *stack
, stacksize
, allocaddr
, allocsize
);
687 _pthread_deallocate(pthread_t t
, bool from_mach_thread
)
691 // Don't free the main thread.
692 if (t
!= main_thread()) {
693 if (!from_mach_thread
) { // see __pthread_add_thread
694 _pthread_introspection_thread_destroy(t
);
696 ret
= mach_vm_deallocate(mach_task_self(), t
->freeaddr
, t
->freesize
);
697 if (ret
!= KERN_SUCCESS
) {
698 PTHREAD_INTERNAL_CRASH(ret
, "Unable to deallocate stack");
703 #pragma clang diagnostic push
704 #pragma clang diagnostic ignored "-Wreturn-stack-address"
708 _pthread_current_stack_address(void)
714 #pragma clang diagnostic pop
// Wakes a thread blocked in pthread_join() on `thread`'s exit gate.
// NOTE(review): this extraction dropped interior lines here (the gutter
// numbers jump 717->719->722); the surrounding control flow (e.g. any
// retry structure around __ulock_wake) is not visible — confirm against
// the original file. Code below is preserved verbatim.
717 _pthread_joiner_wake(pthread_t thread
)
// exit_gate is the ulock word the joiner waits on.
719 uint32_t *exit_gate
= &thread
->tl_exit_gate
;
// ULF_NO_ERRNO makes __ulock_wake return a negative error code directly
// instead of setting errno.
722 int ret
= __ulock_wake(UL_UNFAIR_LOCK
| ULF_NO_ERRNO
, exit_gate
, 0);
// 0 = woke a waiter; -ENOENT = no waiter present — both are fine.
723 if (ret
== 0 || ret
== -ENOENT
) {
// Any other error is fatal: the joiner could block forever.
727 PTHREAD_INTERNAL_CRASH(-ret
, "pthread_join() wake failure");
732 // Terminates the thread if called from the currently running thread.
733 PTHREAD_NORETURN PTHREAD_NOINLINE PTHREAD_NOT_TAIL_CALLED
735 _pthread_terminate(pthread_t t
, void *exit_value
)
737 _pthread_introspection_thread_terminate(t
);
739 uintptr_t freeaddr
= (uintptr_t)t
->freeaddr
;
740 size_t freesize
= t
->freesize
;
743 // the size of just the stack
744 size_t freesize_stack
= t
->freesize
;
746 // We usually pass our structure+stack to bsdthread_terminate to free, but
747 // if we get told to keep the pthread_t structure around then we need to
748 // adjust the free size and addr in the pthread_t to just refer to the
749 // structure and not the stack. If we do end up deallocating the
750 // structure, this is useless work since no one can read the result, but we
751 // can't do it after the call to pthread_remove_thread because it isn't
752 // safe to dereference t after that.
753 if ((void*)t
> t
->freeaddr
&& (void*)t
< t
->freeaddr
+ t
->freesize
){
754 // Check to ensure the pthread structure itself is part of the
755 // allocation described by freeaddr/freesize, in which case we split and
756 // only deallocate the area below the pthread structure. In the event of a
757 // custom stack, the freeaddr/size will be the pthread structure itself, in
758 // which case we shouldn't free anything (the final else case).
759 freesize_stack
= trunc_page((uintptr_t)t
- (uintptr_t)freeaddr
);
761 // describe just the remainder for deallocation when the pthread_t goes away
762 t
->freeaddr
+= freesize_stack
;
763 t
->freesize
-= freesize_stack
;
764 } else if (t
== main_thread()) {
765 freeaddr
= t
->stackaddr
- pthread_get_stacksize_np(t
);
766 uintptr_t stackborder
= trunc_page((uintptr_t)_pthread_current_stack_address());
767 freesize_stack
= stackborder
- freeaddr
;
772 mach_port_t kport
= _pthread_kernel_thread(t
);
773 bool keep_thread_struct
= false, needs_wake
= false;
774 semaphore_t custom_stack_sema
= MACH_PORT_NULL
;
776 _pthread_dealloc_special_reply_port(t
);
777 _pthread_dealloc_reply_port(t
);
779 _PTHREAD_LOCK(_pthread_list_lock
);
781 // This piece of code interacts with pthread_join. It will always:
782 // - set tl_exit_gate to MACH_PORT_DEAD (thread exited)
783 // - set tl_exit_value to the value passed to pthread_exit()
784 // - decrement _pthread_count, so that we can exit the process when all
785 // threads exited even if not all of them were joined.
786 t
->tl_exit_gate
= MACH_PORT_DEAD
;
787 t
->tl_exit_value
= exit_value
;
788 should_exit
= (--_pthread_count
<= 0);
790 // If we see a joiner, we prepost that the join has to succeed,
791 // and the joiner is committed to finish (even if it was canceled)
792 if (t
->tl_join_ctx
) {
793 custom_stack_sema
= _pthread_joiner_prepost_wake(t
); // unsets tl_joinable
797 // Joinable threads that have no joiner yet are kept on the thread list
798 // so that pthread_join() can later discover the thread when it is joined,
799 // and will have to do the pthread_t cleanup.
800 if (t
->tl_joinable
) {
801 t
->tl_joiner_cleans_up
= keep_thread_struct
= true;
803 TAILQ_REMOVE(&__pthread_head
, t
, tl_plist
);
806 _PTHREAD_UNLOCK(_pthread_list_lock
);
809 // When we found a waiter, we want to drop the very contended list lock
810 // before we do the syscall in _pthread_joiner_wake(). Then, we decide
811 // who gets to cleanup the pthread_t between the joiner and the exiting
813 // - the joiner tries to set tl_join_ctx to NULL
814 // - the exiting thread tries to set tl_joiner_cleans_up to true
815 // Whoever does it first commits the other guy to cleanup the pthread_t
816 _pthread_joiner_wake(t
);
817 _PTHREAD_LOCK(_pthread_list_lock
);
818 if (t
->tl_join_ctx
) {
819 t
->tl_joiner_cleans_up
= true;
820 keep_thread_struct
= true;
822 _PTHREAD_UNLOCK(_pthread_list_lock
);
826 // /!\ dereferencing `t` past this point is not safe /!\
829 if (keep_thread_struct
|| t
== main_thread()) {
830 // Use the adjusted freesize of just the stack that we computed above.
831 freesize
= freesize_stack
;
833 _pthread_introspection_thread_destroy(t
);
836 // Check if there is nothing to free because the thread has a custom
837 // stack allocation and is joinable.
844 __bsdthread_terminate((void *)freeaddr
, freesize
, kport
, custom_stack_sema
);
845 PTHREAD_INTERNAL_CRASH(t
, "thread didn't terminate");
850 _pthread_terminate_invoke(pthread_t t
, void *exit_value
)
854 // <rdar://problem/25688492> During pthread termination there is a race
855 // between pthread_join and pthread_terminate; if the joiner is responsible
856 // for cleaning up the pthread_t struct, then it may destroy some part of the
857 // stack with it on 16k OSes. So that this doesn't cause _pthread_terminate()
858 // to crash because its stack has been removed from under its feet, just make
859 // sure termination happens in a part of the stack that is not on the same
860 // page as the pthread_t.
861 if (trunc_page((uintptr_t)__builtin_frame_address(0)) ==
862 trunc_page((uintptr_t)t
)) {
863 p
= alloca(PTHREAD_T_OFFSET
);
865 // And this __asm__ volatile is needed to stop the compiler from optimising
866 // away the alloca() completely.
867 __asm__
volatile ("" : : "r"(p
) );
869 _pthread_terminate(t
, exit_value
);
872 #pragma mark pthread start / body
876 _pthread_start(pthread_t self
, mach_port_t kport
,
877 __unused
void *(*fun
)(void *), __unused
void *arg
,
878 __unused
size_t stacksize
, unsigned int pflags
)
880 if (os_unlikely(pflags
& PTHREAD_START_SUSPENDED
)) {
881 PTHREAD_INTERNAL_CRASH(pflags
,
882 "kernel without PTHREAD_START_SUSPENDED support");
884 if (os_unlikely((pflags
& PTHREAD_START_TSD_BASE_SET
) == 0)) {
885 PTHREAD_INTERNAL_CRASH(pflags
,
886 "thread_set_tsd_base() wasn't called by the kernel");
888 PTHREAD_DEBUG_ASSERT(MACH_PORT_VALID(kport
));
889 PTHREAD_DEBUG_ASSERT(_pthread_kernel_thread(self
) == kport
);
890 _pthread_markcancel_if_canceled(self
, kport
);
892 _pthread_set_self_internal(self
);
893 __pthread_started_thread(self
);
894 _pthread_exit(self
, (self
->fun
)(self
->arg
));
897 PTHREAD_ALWAYS_INLINE
899 _pthread_struct_init(pthread_t t
, const pthread_attr_t
*attrs
,
900 void *stackaddr
, size_t stacksize
, void *freeaddr
, size_t freesize
)
902 PTHREAD_DEBUG_ASSERT(t
->sig
!= _PTHREAD_SIG
);
904 t
->sig
= _PTHREAD_SIG
;
905 t
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_SELF
] = t
;
906 t
->tsd
[_PTHREAD_TSD_SLOT_ERRNO
] = &t
->err_no
;
907 if (attrs
->schedset
== 0) {
908 t
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = attrs
->qosclass
;
910 t
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] =
911 _pthread_unspecified_priority();
913 t
->tsd
[_PTHREAD_TSD_SLOT_PTR_MUNGE
] = _pthread_ptr_munge_token
;
914 t
->tl_has_custom_stack
= (attrs
->stackaddr
!= NULL
);
916 _PTHREAD_LOCK_INIT(t
->lock
);
918 t
->stackaddr
= stackaddr
;
919 t
->stackbottom
= stackaddr
- stacksize
;
920 t
->freeaddr
= freeaddr
;
921 t
->freesize
= freesize
;
923 t
->guardsize
= _pthread_attr_guardsize(attrs
);
924 t
->tl_joinable
= (attrs
->detached
== PTHREAD_CREATE_JOINABLE
);
925 t
->inherit
= attrs
->inherit
;
926 t
->tl_policy
= attrs
->policy
;
927 t
->schedset
= attrs
->schedset
;
928 _pthread_attr_get_schedparam(attrs
, &t
->tl_param
);
929 t
->cancel_state
= PTHREAD_CANCEL_ENABLE
| PTHREAD_CANCEL_DEFERRED
;
932 #pragma mark pthread public interface
934 /* Need to deprecate this in future */
936 _pthread_is_threaded(void)
938 return __is_threaded
;
941 /* Non portable public api to know whether this process has(had) atleast one thread
942 * apart from main thread. There could be race if there is a thread in the process of
943 * creation at the time of call . It does not tell whether there are more than one thread
944 * at this point of time.
947 pthread_is_threaded_np(void)
949 return __is_threaded
;
953 PTHREAD_NOEXPORT_VARIANT
955 pthread_mach_thread_np(pthread_t t
)
957 mach_port_t kport
= MACH_PORT_NULL
;
958 (void)_pthread_is_valid(t
, &kport
);
962 PTHREAD_NOEXPORT_VARIANT
964 pthread_from_mach_thread_np(mach_port_t kernel_thread
)
966 struct _pthread
*p
= NULL
;
968 /* No need to wait as mach port is already known */
969 _PTHREAD_LOCK(_pthread_list_lock
);
971 TAILQ_FOREACH(p
, &__pthread_head
, tl_plist
) {
972 if (_pthread_kernel_thread(p
) == kernel_thread
) {
977 _PTHREAD_UNLOCK(_pthread_list_lock
);
982 PTHREAD_NOEXPORT_VARIANT
984 pthread_get_stacksize_np(pthread_t t
)
989 return ESRCH
; // XXX bug?
993 // The default rlimit based allocations will be provided with a stacksize
994 // of the current limit and a freesize of the max. However, custom
995 // allocations will just have the guard page to free. If we aren't in the
996 // latter case, call into rlimit to determine the current stack size. In
997 // the event that the current limit == max limit then we'll fall down the
998 // fast path, but since it's unlikely that the limit is going to be lowered
999 // after it's been change to the max, we should be fine.
1001 // Of course, on arm rlim_cur == rlim_max and there's only the one guard
1002 // page. So, we can skip all this there.
1003 if (t
== main_thread()) {
1004 size_t stacksize
= t
->stackaddr
- t
->stackbottom
;
1006 if (stacksize
+ vm_page_size
!= t
->freesize
) {
1007 // We want to call getrlimit() just once, as it's relatively
1009 static size_t rlimit_stack
;
1011 if (rlimit_stack
== 0) {
1012 struct rlimit limit
;
1013 int ret
= getrlimit(RLIMIT_STACK
, &limit
);
1016 rlimit_stack
= (size_t) limit
.rlim_cur
;
1020 if (rlimit_stack
== 0 || rlimit_stack
> t
->freesize
) {
1023 return round_page(rlimit_stack
);
1027 #endif /* TARGET_OS_OSX */
1029 if (t
== pthread_self() || t
== main_thread()) {
1030 size
= t
->stackaddr
- t
->stackbottom
;;
1034 if (_pthread_validate_thread_and_list_lock(t
)) {
1035 size
= t
->stackaddr
- t
->stackbottom
;;
1036 _PTHREAD_UNLOCK(_pthread_list_lock
);
1040 // <rdar://problem/42588315> binary compatibility issues force us to return
1041 // DEFAULT_STACK_SIZE here when we do not know the size of the stack
1042 return size
? size
: DEFAULT_STACK_SIZE
;
1045 PTHREAD_NOEXPORT_VARIANT
1047 pthread_get_stackaddr_np(pthread_t t
)
1049 // since the main thread will not get de-allocated from underneath us
1050 if (t
== pthread_self() || t
== main_thread()) {
1051 return t
->stackaddr
;
1054 if (!_pthread_validate_thread_and_list_lock(t
)) {
1055 return (void *)(uintptr_t)ESRCH
; // XXX bug?
1058 void *addr
= t
->stackaddr
;
1059 _PTHREAD_UNLOCK(_pthread_list_lock
);
1065 _pthread_reply_port(pthread_t t
)
1069 p
= _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY
);
1071 p
= t
->tsd
[_PTHREAD_TSD_SLOT_MIG_REPLY
];
1073 return (mach_port_t
)(uintptr_t)p
;
1077 _pthread_set_reply_port(pthread_t t
, mach_port_t reply_port
)
1079 void *p
= (void *)(uintptr_t)reply_port
;
1081 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY
, p
);
1083 t
->tsd
[_PTHREAD_TSD_SLOT_MIG_REPLY
] = p
;
1088 _pthread_dealloc_reply_port(pthread_t t
)
1090 mach_port_t reply_port
= _pthread_reply_port(t
);
1091 if (reply_port
!= MACH_PORT_NULL
) {
1092 mig_dealloc_reply_port(reply_port
);
1097 _pthread_special_reply_port(pthread_t t
)
1101 p
= _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY
);
1103 p
= t
->tsd
[_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY
];
1105 return (mach_port_t
)(uintptr_t)p
;
1109 _pthread_dealloc_special_reply_port(pthread_t t
)
1111 mach_port_t special_reply_port
= _pthread_special_reply_port(t
);
1112 if (special_reply_port
!= MACH_PORT_NULL
) {
1113 thread_destruct_special_reply_port(special_reply_port
,
1114 THREAD_SPECIAL_REPLY_PORT_ALL
);
1119 pthread_main_thread_np(void)
1121 return main_thread();
/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
	return pthread_self() == main_thread();
}
1133 _pthread_threadid_slow(pthread_t thread
, uint64_t *thread_id
)
1135 unsigned int info_count
= THREAD_IDENTIFIER_INFO_COUNT
;
1136 mach_port_t thport
= _pthread_kernel_thread(thread
);
1137 struct thread_identifier_info info
;
1140 kr
= thread_info(thport
, THREAD_IDENTIFIER_INFO
,
1141 (thread_info_t
)&info
, &info_count
);
1142 if (kr
== KERN_SUCCESS
&& info
.thread_id
) {
1143 *thread_id
= info
.thread_id
;
1144 os_atomic_store(&thread
->thread_id
, info
.thread_id
, relaxed
);
1151 * if we are passed in a pthread_t that is NULL, then we return the current
1152 * thread's thread_id. So folks don't have to call pthread_self, in addition to
1153 * us doing it, if they just want their thread_id.
1155 PTHREAD_NOEXPORT_VARIANT
1157 pthread_threadid_np(pthread_t thread
, uint64_t *thread_id
)
1160 pthread_t self
= pthread_self();
1162 if (thread_id
== NULL
) {
1166 if (thread
== NULL
|| thread
== self
) {
1167 *thread_id
= self
->thread_id
;
1168 } else if (!_pthread_validate_thread_and_list_lock(thread
)) {
1171 *thread_id
= os_atomic_load(&thread
->thread_id
, relaxed
);
1172 if (os_unlikely(*thread_id
== 0)) {
1173 // there is a race at init because the thread sets its own TID.
1174 // correct this by asking mach
1175 res
= _pthread_threadid_slow(thread
, thread_id
);
1177 _PTHREAD_UNLOCK(_pthread_list_lock
);
1182 PTHREAD_NOEXPORT_VARIANT
1184 pthread_getname_np(pthread_t thread
, char *threadname
, size_t len
)
1186 if (thread
== pthread_self()) {
1187 strlcpy(threadname
, thread
->pthread_name
, len
);
1191 if (!_pthread_validate_thread_and_list_lock(thread
)) {
1195 strlcpy(threadname
, thread
->pthread_name
, len
);
1196 _PTHREAD_UNLOCK(_pthread_list_lock
);
1202 pthread_setname_np(const char *name
)
1205 pthread_t self
= pthread_self();
1212 /* protytype is in pthread_internals.h */
1213 res
= __proc_info(5, getpid(), 2, (uint64_t)0, (void*)name
, (int)len
);
1216 strlcpy(self
->pthread_name
, name
, MAXTHREADNAMESIZE
);
1218 bzero(self
->pthread_name
, MAXTHREADNAMESIZE
);
1225 PTHREAD_ALWAYS_INLINE
1227 __pthread_add_thread(pthread_t t
, bool from_mach_thread
)
1229 if (from_mach_thread
) {
1230 _PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock
);
1232 _PTHREAD_LOCK(_pthread_list_lock
);
1235 TAILQ_INSERT_TAIL(&__pthread_head
, t
, tl_plist
);
1238 if (from_mach_thread
) {
1239 _PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock
);
1241 _PTHREAD_UNLOCK(_pthread_list_lock
);
1244 if (!from_mach_thread
) {
1245 // PR-26275485: Mach threads will likely crash trying to run
1246 // introspection code. Since the fall out from the introspection
1247 // code not seeing the injected thread is likely less than crashing
1248 // in the introspection code, just don't make the call.
1249 _pthread_introspection_thread_create(t
);
1253 PTHREAD_ALWAYS_INLINE
1255 __pthread_undo_add_thread(pthread_t t
, bool from_mach_thread
)
1257 if (from_mach_thread
) {
1258 _PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock
);
1260 _PTHREAD_LOCK(_pthread_list_lock
);
1263 TAILQ_REMOVE(&__pthread_head
, t
, tl_plist
);
1266 if (from_mach_thread
) {
1267 _PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock
);
1269 _PTHREAD_UNLOCK(_pthread_list_lock
);
1273 PTHREAD_ALWAYS_INLINE
1275 __pthread_started_thread(pthread_t t
)
1277 mach_port_t kport
= _pthread_kernel_thread(t
);
1278 if (os_unlikely(!MACH_PORT_VALID(kport
))) {
1279 PTHREAD_CLIENT_CRASH(kport
,
1280 "Unable to allocate thread port, possible port leak");
1282 _pthread_introspection_thread_start(t
);
1285 #define _PTHREAD_CREATE_NONE 0x0
1286 #define _PTHREAD_CREATE_FROM_MACH_THREAD 0x1
1287 #define _PTHREAD_CREATE_SUSPENDED 0x2
1290 _pthread_create(pthread_t
*thread
, const pthread_attr_t
*attrs
,
1291 void *(*start_routine
)(void *), void *arg
, unsigned int create_flags
)
1295 bool from_mach_thread
= (create_flags
& _PTHREAD_CREATE_FROM_MACH_THREAD
);
1297 if (attrs
== NULL
) {
1298 attrs
= &_pthread_attr_default
;
1299 } else if (attrs
->sig
!= _PTHREAD_ATTR_SIG
) {
1303 unsigned int flags
= PTHREAD_START_CUSTOM
;
1304 if (attrs
->schedset
!= 0) {
1305 struct sched_param p
;
1306 _pthread_attr_get_schedparam(attrs
, &p
);
1307 flags
|= PTHREAD_START_SETSCHED
;
1308 flags
|= ((attrs
->policy
& PTHREAD_START_POLICY_MASK
) << PTHREAD_START_POLICY_BITSHIFT
);
1309 flags
|= (p
.sched_priority
& PTHREAD_START_IMPORTANCE_MASK
);
1310 } else if (attrs
->qosclass
!= 0) {
1311 flags
|= PTHREAD_START_QOSCLASS
;
1312 flags
|= (attrs
->qosclass
& PTHREAD_START_QOSCLASS_MASK
);
1314 if (create_flags
& _PTHREAD_CREATE_SUSPENDED
) {
1315 flags
|= PTHREAD_START_SUSPENDED
;
1320 t
=_pthread_allocate(attrs
, &stack
, from_mach_thread
);
1326 t
->fun
= start_routine
;
1327 __pthread_add_thread(t
, from_mach_thread
);
1329 if (__bsdthread_create(start_routine
, arg
, stack
, t
, flags
) ==
1331 if (errno
== EMFILE
) {
1332 PTHREAD_CLIENT_CRASH(0,
1333 "Unable to allocate thread port, possible port leak");
1335 __pthread_undo_add_thread(t
, from_mach_thread
);
1336 _pthread_deallocate(t
, from_mach_thread
);
1340 // n.b. if a thread is created detached and exits, t will be invalid
1346 pthread_create(pthread_t
*thread
, const pthread_attr_t
*attr
,
1347 void *(*start_routine
)(void *), void *arg
)
1349 unsigned int flags
= _PTHREAD_CREATE_NONE
;
1350 return _pthread_create(thread
, attr
, start_routine
, arg
, flags
);
1354 pthread_create_from_mach_thread(pthread_t
*thread
, const pthread_attr_t
*attr
,
1355 void *(*start_routine
)(void *), void *arg
)
1357 unsigned int flags
= _PTHREAD_CREATE_FROM_MACH_THREAD
;
1358 return _pthread_create(thread
, attr
, start_routine
, arg
, flags
);
1362 pthread_create_suspended_np(pthread_t
*thread
, const pthread_attr_t
*attr
,
1363 void *(*start_routine
)(void *), void *arg
)
1365 unsigned int flags
= _PTHREAD_CREATE_SUSPENDED
;
1366 return _pthread_create(thread
, attr
, start_routine
, arg
, flags
);
1370 PTHREAD_NOEXPORT_VARIANT
1372 pthread_detach(pthread_t thread
)
1375 bool join
= false, wake
= false;
1377 if (!_pthread_validate_thread_and_list_lock(thread
)) {
1381 if (!thread
->tl_joinable
) {
1383 } else if (thread
->tl_exit_gate
== MACH_PORT_DEAD
) {
1384 // Join the thread if it's already exited.
1387 thread
->tl_joinable
= false; // _pthread_joiner_prepost_wake uses this
1388 if (thread
->tl_join_ctx
) {
1389 (void)_pthread_joiner_prepost_wake(thread
);
1393 _PTHREAD_UNLOCK(_pthread_list_lock
);
1396 pthread_join(thread
, NULL
);
1398 _pthread_joiner_wake(thread
);
1403 PTHREAD_NOEXPORT_VARIANT
1405 pthread_kill(pthread_t th
, int sig
)
1407 if (sig
< 0 || sig
> NSIG
) {
1411 mach_port_t kport
= MACH_PORT_NULL
;
1413 if (!_pthread_is_valid(th
, &kport
)) {
1418 int ret
= __pthread_kill(kport
, sig
);
1426 PTHREAD_NOEXPORT_VARIANT
1428 __pthread_workqueue_setkill(int enable
)
1431 return __bsdthread_ctl(BSDTHREAD_CTL_WORKQ_ALLOW_KILL
, enable
, 0, 0);
1436 /* For compatibility... */
1441 return pthread_self();
1445 * Terminate a thread.
1447 extern int __disable_threadsignal(int);
1451 _pthread_exit(pthread_t self
, void *exit_value
)
1453 struct __darwin_pthread_handler_rec
*handler
;
1455 // Disable signal delivery while we clean up
1456 __disable_threadsignal(1);
1458 // Set cancel state to disable and type to deferred
1459 _pthread_setcancelstate_exit(self
, exit_value
);
1461 while ((handler
= self
->__cleanup_stack
) != 0) {
1462 (handler
->__routine
)(handler
->__arg
);
1463 self
->__cleanup_stack
= handler
->__next
;
1465 _pthread_tsd_cleanup(self
);
1467 // Clear per-thread semaphore cache
1468 os_put_cached_semaphore(SEMAPHORE_NULL
);
1470 _pthread_terminate_invoke(self
, exit_value
);
1474 pthread_exit(void *exit_value
)
1476 pthread_t self
= pthread_self();
1477 if (os_unlikely(self
->wqthread
)) {
1478 PTHREAD_CLIENT_CRASH(0, "pthread_exit() called from a thread "
1479 "not created by pthread_create()");
1481 _pthread_exit(self
, exit_value
);
1485 PTHREAD_NOEXPORT_VARIANT
1487 pthread_getschedparam(pthread_t thread
, int *policy
, struct sched_param
*param
)
1489 if (!_pthread_validate_thread_and_list_lock(thread
)) {
1493 if (policy
) *policy
= thread
->tl_policy
;
1494 if (param
) *param
= thread
->tl_param
;
1495 _PTHREAD_UNLOCK(_pthread_list_lock
);
1501 PTHREAD_ALWAYS_INLINE
1503 pthread_setschedparam_internal(pthread_t thread
, mach_port_t kport
, int policy
,
1504 const struct sched_param
*param
)
1506 policy_base_data_t bases
;
1508 mach_msg_type_number_t count
;
1513 bases
.ts
.base_priority
= param
->sched_priority
;
1514 base
= (policy_base_t
)&bases
.ts
;
1515 count
= POLICY_TIMESHARE_BASE_COUNT
;
1518 bases
.fifo
.base_priority
= param
->sched_priority
;
1519 base
= (policy_base_t
)&bases
.fifo
;
1520 count
= POLICY_FIFO_BASE_COUNT
;
1523 bases
.rr
.base_priority
= param
->sched_priority
;
1524 /* quantum isn't public yet */
1525 bases
.rr
.quantum
= param
->quantum
;
1526 base
= (policy_base_t
)&bases
.rr
;
1527 count
= POLICY_RR_BASE_COUNT
;
1532 ret
= thread_policy(kport
, policy
, base
, count
, TRUE
);
1533 return (ret
!= KERN_SUCCESS
) ? EINVAL
: 0;
1536 PTHREAD_NOEXPORT_VARIANT
1538 pthread_setschedparam(pthread_t t
, int policy
, const struct sched_param
*param
)
1540 mach_port_t kport
= MACH_PORT_NULL
;
1543 // since the main thread will not get de-allocated from underneath us
1544 if (t
== pthread_self() || t
== main_thread()) {
1545 kport
= _pthread_kernel_thread(t
);
1548 if (!_pthread_is_valid(t
, &kport
)) {
1553 int res
= pthread_setschedparam_internal(t
, kport
, policy
, param
);
1554 if (res
) return res
;
1557 _PTHREAD_LOCK(_pthread_list_lock
);
1558 } else if (!_pthread_validate_thread_and_list_lock(t
)) {
1559 // Ensure the thread is still valid.
1563 t
->tl_policy
= policy
;
1564 t
->tl_param
= *param
;
1565 _PTHREAD_UNLOCK(_pthread_list_lock
);
1571 sched_get_priority_min(int policy
)
1573 return default_priority
- 16;
1577 sched_get_priority_max(int policy
)
1579 return default_priority
+ 16;
1583 pthread_equal(pthread_t t1
, pthread_t t2
)
1589 * Force LLVM not to optimise this to a call to __pthread_set_self, if it does
1590 * then _pthread_set_self won't be bound when secondary threads try and start up.
1594 _pthread_set_self(pthread_t p
)
1597 if (os_likely(!p
)) {
1598 return _pthread_set_self_dyld();
1600 #endif // VARIANT_DYLD
1601 _pthread_set_self_internal(p
);
1602 _thread_set_tsd_base(&p
->tsd
[0]);
1606 // _pthread_set_self_dyld is noinline+noexport to allow the option for
1607 // static libsyscall to adopt this as the entry point from mach_init if
1609 PTHREAD_NOINLINE PTHREAD_NOEXPORT
1611 _pthread_set_self_dyld(void)
1613 pthread_t p
= main_thread();
1614 p
->thread_id
= __thread_selfid();
1616 if (os_unlikely(p
->thread_id
== -1ull)) {
1617 PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
1620 // <rdar://problem/40930651> pthread self and the errno address are the
1621 // bare minimium TSD setup that dyld needs to actually function. Without
1622 // this, TSD access will fail and crash if it uses bits of Libc prior to
1623 // library initialization. __pthread_init will finish the initialization
1624 // during library init.
1625 p
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_SELF
] = p
;
1626 p
->tsd
[_PTHREAD_TSD_SLOT_ERRNO
] = &p
->err_no
;
1627 _thread_set_tsd_base(&p
->tsd
[0]);
1629 #endif // VARIANT_DYLD
1631 PTHREAD_ALWAYS_INLINE
1633 _pthread_set_self_internal(pthread_t p
)
1635 os_atomic_store(&p
->thread_id
, __thread_selfid(), relaxed
);
1637 if (os_unlikely(p
->thread_id
== -1ull)) {
1638 PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
1643 // <rdar://problem/28984807> pthread_once should have an acquire barrier
1644 PTHREAD_ALWAYS_INLINE
1646 _os_once_acquire(os_once_t
*predicate
, void *context
, os_function_t function
)
1648 if (OS_EXPECT(os_atomic_load(predicate
, acquire
), ~0l) != ~0l) {
1649 _os_once(predicate
, context
, function
);
1650 OS_COMPILER_CAN_ASSUME(*predicate
== ~0l);
1654 struct _pthread_once_context
{
1655 pthread_once_t
*pthread_once
;
1656 void (*routine
)(void);
1660 __pthread_once_handler(void *context
)
1662 struct _pthread_once_context
*ctx
= context
;
1663 pthread_cleanup_push((void*)__os_once_reset
, &ctx
->pthread_once
->once
);
1665 pthread_cleanup_pop(0);
1666 ctx
->pthread_once
->sig
= _PTHREAD_ONCE_SIG
;
1669 PTHREAD_NOEXPORT_VARIANT
1671 pthread_once(pthread_once_t
*once_control
, void (*init_routine
)(void))
1673 struct _pthread_once_context ctx
= { once_control
, init_routine
};
1675 _os_once_acquire(&once_control
->once
, &ctx
, __pthread_once_handler
);
1676 } while (once_control
->sig
== _PTHREAD_ONCE_SIG_init
);
1682 pthread_getconcurrency(void)
1684 return pthread_concurrency
;
1688 pthread_setconcurrency(int new_level
)
1690 if (new_level
< 0) {
1693 pthread_concurrency
= new_level
;
1697 #if !defined(VARIANT_STATIC)
1701 if (_pthread_malloc
) {
1702 return _pthread_malloc(sz
);
1711 if (_pthread_free
) {
1715 #endif // VARIANT_STATIC
1718 * Perform package initialization - called automatically when application starts
1720 struct ProgramVars
; /* forward reference */
/*
 * Minimal hex parser used during early init (before the full C library
 * is usable). Accepts only strings with an explicit "0x" prefix when
 * base is 16 or 0; anything else parses as 0. *endptr is set to the
 * first character not consumed (the start of the string when no "0x"
 * prefix was found).
 */
static unsigned long
_pthread_strtoul(const char *p, const char **endptr, int base)
{
	unsigned long value = 0;

	// Expect hex string starting with "0x"
	if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') {
		for (p += 2; ; p++) {
			char ch = *p;
			int digit;
			if (ch >= '0' && ch <= '9') {
				digit = ch - '0';
			} else if (ch >= 'a' && ch <= 'f') {
				digit = ch - 'a' + 10;
			} else if (ch >= 'A' && ch <= 'F') {
				digit = ch - 'A' + 10;
			} else {
				break; // first non-hex character ends the number
			}
			value = (value << 4) + (unsigned long)digit;
		}
	}

	*endptr = (const char *)p;
	return value;
}
1751 parse_main_stack_params(const char *apple
[],
1757 const char *p
= _simple_getenv(apple
, "main_stack");
1763 *stackaddr
= _pthread_strtoul(s
, &s
, 16);
1764 if (*s
!= ',') goto out
;
1766 *stacksize
= _pthread_strtoul(s
+ 1, &s
, 16);
1767 if (*s
!= ',') goto out
;
1769 *allocaddr
= _pthread_strtoul(s
+ 1, &s
, 16);
1770 if (*s
!= ',') goto out
;
1772 *allocsize
= _pthread_strtoul(s
+ 1, &s
, 16);
1773 if (*s
!= ',' && *s
!= 0) goto out
;
1777 bzero((char *)p
, strlen(p
));
1782 parse_ptr_munge_params(const char *envp
[], const char *apple
[])
1785 p
= _simple_getenv(apple
, "ptr_munge");
1787 _pthread_ptr_munge_token
= _pthread_strtoul(p
, &s
, 16);
1788 bzero((char *)p
, strlen(p
));
1791 if (_pthread_ptr_munge_token
) return;
1793 p
= _simple_getenv(envp
, "PTHREAD_PTR_MUNGE_TOKEN");
1795 uintptr_t t
= _pthread_strtoul(p
, &s
, 16);
1796 if (t
) _pthread_ptr_munge_token
= t
;
1801 __pthread_init(const struct _libpthread_functions
*pthread_funcs
,
1802 const char *envp
[], const char *apple
[],
1803 const struct ProgramVars
*vars __unused
)
1805 // Save our provided pushed-down functions
1806 if (pthread_funcs
) {
1807 exitf
= pthread_funcs
->exit
;
1809 if (pthread_funcs
->version
>= 2) {
1810 _pthread_malloc
= pthread_funcs
->malloc
;
1811 _pthread_free
= pthread_funcs
->free
;
1816 // Get host information
1820 host_flavor_t flavor
= HOST_PRIORITY_INFO
;
1821 mach_msg_type_number_t count
= HOST_PRIORITY_INFO_COUNT
;
1822 host_priority_info_data_t priority_info
;
1823 host_t host
= mach_host_self();
1824 kr
= host_info(host
, flavor
, (host_info_t
)&priority_info
, &count
);
1825 if (kr
!= KERN_SUCCESS
) {
1826 PTHREAD_INTERNAL_CRASH(kr
, "host_info() failed");
1828 default_priority
= (uint8_t)priority_info
.user_priority
;
1829 min_priority
= (uint8_t)priority_info
.minimum_priority
;
1830 max_priority
= (uint8_t)priority_info
.maximum_priority
;
1832 mach_port_deallocate(mach_task_self(), host
);
1835 // Set up the main thread structure
1838 // Get the address and size of the main thread's stack from the kernel.
1839 void *stackaddr
= 0;
1840 size_t stacksize
= 0;
1841 void *allocaddr
= 0;
1842 size_t allocsize
= 0;
1843 if (!parse_main_stack_params(apple
, &stackaddr
, &stacksize
, &allocaddr
, &allocsize
) ||
1844 stackaddr
== NULL
|| stacksize
== 0) {
1845 // Fall back to previous bevhaior.
1846 size_t len
= sizeof(stackaddr
);
1847 int mib
[] = { CTL_KERN
, KERN_USRSTACK
};
1848 if (__sysctl(mib
, 2, &stackaddr
, &len
, NULL
, 0) != 0) {
1849 #if defined(__LP64__)
1850 stackaddr
= (void *)USRSTACK64
;
1852 stackaddr
= (void *)USRSTACK
;
1855 stacksize
= DFLSSIZ
;
1860 // Initialize random ptr_munge token from the kernel.
1861 parse_ptr_munge_params(envp
, apple
);
1863 // libpthread.a in dyld "owns" the main thread structure itself and sets
1864 // up the tsd to point to it. So take the pthread_self() from there
1865 // and make it our main thread point.
1866 pthread_t thread
= (pthread_t
)_pthread_getspecific_direct(
1867 _PTHREAD_TSD_SLOT_PTHREAD_SELF
);
1868 if (os_unlikely(thread
== NULL
)) {
1869 PTHREAD_INTERNAL_CRASH(0, "PTHREAD_SELF TSD not initialized");
1871 _main_thread_ptr
= thread
;
1873 PTHREAD_DEBUG_ASSERT(_pthread_attr_default
.qosclass
==
1874 _pthread_default_priority(0));
1875 _pthread_struct_init(thread
, &_pthread_attr_default
,
1876 stackaddr
, stacksize
, allocaddr
, allocsize
);
1877 thread
->tl_joinable
= true;
1879 // Finish initialization with common code that is reinvoked on the
1880 // child side of a fork.
1882 // Finishes initialization of main thread attributes.
1883 // Initializes the thread list and add the main thread.
1884 // Calls _pthread_set_self() to prepare the main thread for execution.
1885 _pthread_main_thread_init(thread
);
1887 struct _pthread_registration_data registration_data
;
1888 // Set up kernel entry points with __bsdthread_register.
1889 _pthread_bsdthread_init(®istration_data
);
1891 // Have pthread_key and pthread_mutex do their init envvar checks.
1892 _pthread_key_global_init(envp
);
1893 _pthread_mutex_global_init(envp
, ®istration_data
);
1895 #if PTHREAD_DEBUG_LOG
1896 _SIMPLE_STRING path
= _simple_salloc();
1897 _simple_sprintf(path
, "/var/tmp/libpthread.%d.log", getpid());
1898 _pthread_debuglog
= open(_simple_string(path
),
1899 O_WRONLY
| O_APPEND
| O_CREAT
| O_NOFOLLOW
| O_CLOEXEC
, 0666);
1900 _simple_sfree(path
);
1901 _pthread_debugstart
= mach_absolute_time();
1906 #endif // !VARIANT_DYLD
1908 PTHREAD_NOEXPORT
void
1909 _pthread_main_thread_init(pthread_t p
)
1911 TAILQ_INIT(&__pthread_head
);
1912 _PTHREAD_LOCK_INIT(_pthread_list_lock
);
1913 _PTHREAD_LOCK_INIT(p
->lock
);
1914 _pthread_set_kernel_thread(p
, mach_thread_self());
1915 _pthread_set_reply_port(p
, mach_reply_port());
1916 p
->__cleanup_stack
= NULL
;
1917 p
->tl_join_ctx
= NULL
;
1918 p
->tl_exit_gate
= MACH_PORT_NULL
;
1919 p
->tsd
[__TSD_SEMAPHORE_CACHE
] = (void*)(uintptr_t)SEMAPHORE_NULL
;
1920 p
->tsd
[__TSD_MACH_SPECIAL_REPLY
] = 0;
1922 // Initialize the list of threads with the new main thread.
1923 TAILQ_INSERT_HEAD(&__pthread_head
, p
, tl_plist
);
1926 _pthread_introspection_thread_start(p
);
1931 _pthread_main_thread_postfork_init(pthread_t p
)
1933 _pthread_main_thread_init(p
);
1934 _pthread_set_self_internal(p
);
1952 pthread_yield_np(void)
1957 // Libsystem knows about this symbol and exports it to libsyscall
1959 pthread_current_stack_contains_np(const void *addr
, size_t length
)
1961 uintptr_t begin
= (uintptr_t) addr
, end
;
1962 uintptr_t stack_base
= (uintptr_t) _pthread_self_direct()->stackbottom
;
1963 uintptr_t stack_top
= (uintptr_t) _pthread_self_direct()->stackaddr
;
1965 if (stack_base
== stack_top
) {
1969 if (__builtin_add_overflow(begin
, length
, &end
)) {
1973 return stack_base
<= begin
&& end
<= stack_top
;
1978 // Libsystem knows about this symbol and exports it to libsyscall
1979 PTHREAD_NOEXPORT_VARIANT
1981 _pthread_clear_qos_tsd(mach_port_t thread_port
)
1983 if (thread_port
== MACH_PORT_NULL
|| (uintptr_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF
) == thread_port
) {
1984 /* Clear the current thread's TSD, that can be done inline. */
1985 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
,
1986 _pthread_unspecified_priority());
1990 _PTHREAD_LOCK(_pthread_list_lock
);
1992 TAILQ_FOREACH(p
, &__pthread_head
, tl_plist
) {
1993 mach_port_t kp
= _pthread_kernel_thread(p
);
1994 if (thread_port
== kp
) {
1995 p
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] =
1996 _pthread_unspecified_priority();
2001 _PTHREAD_UNLOCK(_pthread_list_lock
);
2006 #pragma mark pthread/stack_np.h public interface
2009 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__arm64__)
2010 #if __ARM64_ARCH_8_32__
2012 * arm64_32 uses 64-bit sizes for the frame pointer and
2013 * return address of a stack frame.
2015 typedef uint64_t frame_data_addr_t
;
2017 typedef uintptr_t frame_data_addr_t
;
2021 frame_data_addr_t frame_addr_next
;
2022 frame_data_addr_t ret_addr
;
2025 #error ********** Unimplemented architecture
2029 pthread_stack_frame_decode_np(uintptr_t frame_addr
, uintptr_t *return_addr
)
2031 struct frame_data
*frame
= (struct frame_data
*)frame_addr
;
2034 #if __has_feature(ptrauth_calls)
2035 *return_addr
= (uintptr_t)ptrauth_strip((void *)frame
->ret_addr
,
2036 ptrauth_key_return_address
);
2038 *return_addr
= (uintptr_t)frame
->ret_addr
;
2039 #endif /* __has_feature(ptrauth_calls) */
2042 #if __has_feature(ptrauth_calls)
2043 return (uintptr_t)ptrauth_strip((void *)frame
->frame_addr_next
,
2044 ptrauth_key_frame_pointer
);
2045 #endif /* __has_feature(ptrauth_calls) */
2046 return (uintptr_t)frame
->frame_addr_next
;
2050 #pragma mark pthread workqueue support routines
2053 PTHREAD_NOEXPORT
void
2054 _pthread_bsdthread_init(struct _pthread_registration_data
*data
)
2056 bzero(data
, sizeof(*data
));
2057 data
->version
= sizeof(struct _pthread_registration_data
);
2058 data
->dispatch_queue_offset
= __PTK_LIBDISPATCH_KEY0
* sizeof(void *);
2059 data
->return_to_kernel_offset
= __TSD_RETURN_TO_KERNEL
* sizeof(void *);
2060 data
->tsd_offset
= offsetof(struct _pthread
, tsd
);
2061 data
->mach_thread_self_offset
= __TSD_MACH_THREAD_SELF
* sizeof(void *);
2063 int rv
= __bsdthread_register(thread_start
, start_wqthread
, (int)PTHREAD_SIZE
,
2064 (void*)data
, (uintptr_t)sizeof(*data
), data
->dispatch_queue_offset
);
2067 int required_features
=
2068 PTHREAD_FEATURE_FINEPRIO
|
2069 PTHREAD_FEATURE_BSDTHREADCTL
|
2070 PTHREAD_FEATURE_SETSELF
|
2071 PTHREAD_FEATURE_QOS_MAINTENANCE
|
2072 PTHREAD_FEATURE_QOS_DEFAULT
;
2073 if ((rv
& required_features
) != required_features
) {
2074 PTHREAD_INTERNAL_CRASH(rv
, "Missing required kernel support");
2076 __pthread_supported_features
= rv
;
2080 * TODO: differentiate between (-1, EINVAL) after fork (which has the side
2081 * effect of resetting the child's stack_addr_hint before bailing out) and
2082 * (-1, EINVAL) because of invalid arguments. We'd probably like to treat
2083 * the latter as fatal.
2085 * <rdar://problem/36451838>
2088 pthread_priority_t main_qos
= (pthread_priority_t
)data
->main_qos
;
2090 if (_pthread_priority_thread_qos(main_qos
) != THREAD_QOS_UNSPECIFIED
) {
2091 _pthread_set_main_qos(main_qos
);
2092 main_thread()->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = main_qos
;
2095 if (data
->stack_addr_hint
) {
2096 __pthread_stack_hint
= data
->stack_addr_hint
;
2099 if (__libdispatch_workerfunction
!= NULL
) {
2100 // prepare the kernel for workq action
2101 (void)__workq_open();
2107 _pthread_wqthread_legacy_worker_wrap(pthread_priority_t pp
)
2109 /* Old thread priorities are inverted from where we have them in
2110 * the new flexible priority scheme. The highest priority is zero,
2111 * up to 2, with background at 3.
2113 pthread_workqueue_function_t func
= (pthread_workqueue_function_t
)__libdispatch_workerfunction
;
2114 bool overcommit
= (pp
& _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
);
2115 int opts
= overcommit
? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT
: 0;
2117 switch (_pthread_priority_thread_qos(pp
)) {
2118 case THREAD_QOS_USER_INITIATED
:
2119 return (*func
)(WORKQ_HIGH_PRIOQUEUE
, opts
, NULL
);
2120 case THREAD_QOS_LEGACY
:
2121 /* B&I builders can't pass a QOS_CLASS_DEFAULT thread to dispatch, for fear of the QoS being
2122 * picked up by NSThread (et al) and transported around the system. So change the TSD to
2123 * make this thread look like QOS_CLASS_USER_INITIATED even though it will still run as legacy.
2125 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
,
2126 _pthread_priority_make_from_thread_qos(THREAD_QOS_USER_INITIATED
, 0, 0));
2127 return (*func
)(WORKQ_DEFAULT_PRIOQUEUE
, opts
, NULL
);
2128 case THREAD_QOS_UTILITY
:
2129 return (*func
)(WORKQ_LOW_PRIOQUEUE
, opts
, NULL
);
2130 case THREAD_QOS_BACKGROUND
:
2131 return (*func
)(WORKQ_BG_PRIOQUEUE
, opts
, NULL
);
2133 PTHREAD_INTERNAL_CRASH(pp
, "Invalid pthread priority for the legacy interface");
2136 PTHREAD_ALWAYS_INLINE
2137 static inline pthread_priority_t
2138 _pthread_wqthread_priority(int flags
)
2140 pthread_priority_t pp
= 0;
2143 if (flags
& WQ_FLAG_THREAD_KEVENT
) {
2144 pp
|= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG
;
2146 if (flags
& WQ_FLAG_THREAD_EVENT_MANAGER
) {
2147 return pp
| _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG
;
2150 if (flags
& WQ_FLAG_THREAD_OVERCOMMIT
) {
2151 pp
|= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
;
2153 if (flags
& WQ_FLAG_THREAD_PRIO_QOS
) {
2154 qos
= (thread_qos_t
)(flags
& WQ_FLAG_THREAD_PRIO_MASK
);
2155 pp
= _pthread_priority_make_from_thread_qos(qos
, 0, pp
);
2156 } else if (flags
& WQ_FLAG_THREAD_PRIO_SCHED
) {
2157 pp
|= _PTHREAD_PRIORITY_SCHED_PRI_MASK
;
2158 pp
|= (flags
& WQ_FLAG_THREAD_PRIO_MASK
);
2160 PTHREAD_INTERNAL_CRASH(flags
, "Missing priority");
2167 _pthread_wqthread_setup(pthread_t self
, mach_port_t kport
, void *stacklowaddr
,
2170 void *stackaddr
= self
;
2171 size_t stacksize
= (uintptr_t)self
- (uintptr_t)stacklowaddr
;
2173 _pthread_struct_init(self
, &_pthread_attr_default
, stackaddr
, stacksize
,
2174 PTHREAD_ALLOCADDR(stackaddr
, stacksize
),
2175 PTHREAD_ALLOCSIZE(stackaddr
, stacksize
));
2177 _pthread_set_kernel_thread(self
, kport
);
2179 self
->wqkillset
= 0;
2180 self
->tl_joinable
= false;
2182 // Update the running thread count and set childrun bit.
2183 if (os_unlikely((flags
& WQ_FLAG_THREAD_TSD_BASE_SET
) == 0)) {
2184 PTHREAD_INTERNAL_CRASH(flags
,
2185 "thread_set_tsd_base() wasn't called by the kernel");
2187 _pthread_set_self_internal(self
);
2188 __pthread_add_thread(self
, false);
2189 __pthread_started_thread(self
);
2192 PTHREAD_NORETURN PTHREAD_NOINLINE
2194 _pthread_wqthread_exit(pthread_t self
)
2196 pthread_priority_t pp
;
2199 pp
= (pthread_priority_t
)self
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
];
2200 qos
= _pthread_priority_thread_qos(pp
);
2201 if (qos
== THREAD_QOS_UNSPECIFIED
|| qos
> WORKQ_THREAD_QOS_CLEANUP
) {
2202 // Reset QoS to something low for the cleanup process
2203 pp
= _pthread_priority_make_from_thread_qos(WORKQ_THREAD_QOS_CLEANUP
, 0, 0);
2204 self
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = (void *)pp
;
2207 _pthread_exit(self
, NULL
);
2210 // workqueue entry point from kernel
2212 _pthread_wqthread(pthread_t self
, mach_port_t kport
, void *stacklowaddr
,
2213 void *keventlist
, int flags
, int nkevents
)
2215 if ((flags
& WQ_FLAG_THREAD_REUSE
) == 0) {
2216 _pthread_wqthread_setup(self
, kport
, stacklowaddr
, flags
);
2219 pthread_priority_t pp
;
2221 if (flags
& WQ_FLAG_THREAD_OUTSIDEQOS
) {
2222 self
->wq_outsideqos
= 1;
2223 pp
= _pthread_priority_make_from_thread_qos(THREAD_QOS_LEGACY
, 0,
2224 _PTHREAD_PRIORITY_FALLBACK_FLAG
);
2226 self
->wq_outsideqos
= 0;
2227 pp
= _pthread_wqthread_priority(flags
);
2230 self
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = (void *)pp
;
2232 // avoid spills on the stack hard to keep used stack space minimal
2233 if (os_unlikely(nkevents
== WORKQ_EXIT_THREAD_NKEVENT
)) {
2234 _pthread_wqthread_exit(self
);
2235 } else if (flags
& WQ_FLAG_THREAD_WORKLOOP
) {
2236 kqueue_id_t
*kqidptr
= (kqueue_id_t
*)keventlist
- 1;
2237 self
->fun
= (void *(*)(void*))__libdispatch_workloopfunction
;
2238 self
->arg
= keventlist
;
2239 self
->wq_nevents
= nkevents
;
2240 (*__libdispatch_workloopfunction
)(kqidptr
, &self
->arg
, &self
->wq_nevents
);
2241 __workq_kernreturn(WQOPS_THREAD_WORKLOOP_RETURN
, self
->arg
, self
->wq_nevents
, 0);
2242 } else if (flags
& WQ_FLAG_THREAD_KEVENT
) {
2243 self
->fun
= (void *(*)(void*))__libdispatch_keventfunction
;
2244 self
->arg
= keventlist
;
2245 self
->wq_nevents
= nkevents
;
2246 (*__libdispatch_keventfunction
)(&self
->arg
, &self
->wq_nevents
);
2247 __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN
, self
->arg
, self
->wq_nevents
, 0);
2249 self
->fun
= (void *(*)(void*))__libdispatch_workerfunction
;
2250 self
->arg
= (void *)(uintptr_t)pp
;
2251 self
->wq_nevents
= 0;
2252 if (os_likely(__workq_newapi
)) {
2253 (*__libdispatch_workerfunction
)(pp
);
2255 _pthread_wqthread_legacy_worker_wrap(pp
);
2257 __workq_kernreturn(WQOPS_THREAD_RETURN
, NULL
, 0, 0);
2260 _os_set_crash_log_cause_and_message(self
->err_no
,
2261 "BUG IN LIBPTHREAD: __workq_kernreturn returned");
2263 * 52858993: we should never return but the compiler insists on outlining,
2264 * so the __builtin_trap() is in _start_wqthread in pthread_asm.s
2269 #pragma mark pthread workqueue API for libdispatch
2272 _Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN
== WQ_KEVENT_LIST_LEN
,
2273 "Kernel and userland should agree on the event list size");
2276 pthread_workqueue_setdispatchoffset_np(int offset
)
2278 __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP
, NULL
, offset
, 0x00);
2282 pthread_workqueue_setup(struct pthread_workqueue_config
*cfg
, size_t cfg_size
)
2285 struct workq_dispatch_config wdc_cfg
;
2286 size_t min_size
= 0;
2288 if (cfg_size
< sizeof(uint32_t)) {
2292 switch (cfg
->version
) {
2294 min_size
= offsetof(struct pthread_workqueue_config
, queue_label_offs
);
2297 min_size
= sizeof(struct pthread_workqueue_config
);
2303 if (!cfg
|| cfg_size
< min_size
) {
2307 if (cfg
->flags
& ~PTHREAD_WORKQUEUE_CONFIG_SUPPORTED_FLAGS
||
2308 cfg
->version
< PTHREAD_WORKQUEUE_CONFIG_MIN_SUPPORTED_VERSION
) {
2312 if (__libdispatch_workerfunction
== NULL
) {
2313 __workq_newapi
= true;
2315 wdc_cfg
.wdc_version
= WORKQ_DISPATCH_CONFIG_VERSION
;
2316 wdc_cfg
.wdc_flags
= 0;
2317 wdc_cfg
.wdc_queue_serialno_offs
= cfg
->queue_serialno_offs
;
2318 #if WORKQ_DISPATCH_CONFIG_VERSION >= 2
2319 wdc_cfg
.wdc_queue_label_offs
= cfg
->queue_label_offs
;
2322 // Tell the kernel about dispatch internals
2323 rv
= (int) __workq_kernreturn(WQOPS_SETUP_DISPATCH
, &wdc_cfg
, sizeof(wdc_cfg
), 0);
2327 __libdispatch_keventfunction
= cfg
->kevent_cb
;
2328 __libdispatch_workloopfunction
= cfg
->workloop_cb
;
2329 __libdispatch_workerfunction
= cfg
->workq_cb
;
2331 // Prepare the kernel for workq action
2332 (void)__workq_open();
2333 if (__is_threaded
== 0) {
2345 _pthread_workqueue_init_with_workloop(pthread_workqueue_function2_t queue_func
,
2346 pthread_workqueue_function_kevent_t kevent_func
,
2347 pthread_workqueue_function_workloop_t workloop_func
,
2348 int offset
, int flags
)
2350 struct pthread_workqueue_config cfg
= {
2351 .version
= PTHREAD_WORKQUEUE_CONFIG_VERSION
,
2353 .workq_cb
= queue_func
,
2354 .kevent_cb
= kevent_func
,
2355 .workloop_cb
= workloop_func
,
2356 .queue_serialno_offs
= offset
,
2357 .queue_label_offs
= 0,
2360 return pthread_workqueue_setup(&cfg
, sizeof(cfg
));
2364 _pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func
,
2365 pthread_workqueue_function_kevent_t kevent_func
,
2366 int offset
, int flags
)
2368 return _pthread_workqueue_init_with_workloop(queue_func
, kevent_func
, NULL
, offset
, flags
);
2372 _pthread_workqueue_init(pthread_workqueue_function2_t func
, int offset
, int flags
)
2374 return _pthread_workqueue_init_with_kevent(func
, NULL
, offset
, flags
);
2378 pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func
)
2380 struct pthread_workqueue_config cfg
= {
2381 .version
= PTHREAD_WORKQUEUE_CONFIG_VERSION
,
2383 .workq_cb
= (uint64_t)(pthread_workqueue_function2_t
)worker_func
,
2386 .queue_serialno_offs
= 0,
2387 .queue_label_offs
= 0,
2390 return pthread_workqueue_setup(&cfg
, sizeof(cfg
));
2394 _pthread_workqueue_supported(void)
2396 if (os_unlikely(!__pthread_supported_features
)) {
2397 PTHREAD_INTERNAL_CRASH(0, "libpthread has not been initialized");
2400 return __pthread_supported_features
;
2404 pthread_workqueue_addthreads_np(int queue_priority
, int options
, int numthreads
)
2408 // Cannot add threads without a worker function registered.
2409 if (__libdispatch_workerfunction
== NULL
) {
2413 pthread_priority_t kp
= 0;
2414 int compat_priority
= queue_priority
& WQ_FLAG_THREAD_PRIO_MASK
;
2417 if (options
& WORKQ_ADDTHREADS_OPTION_OVERCOMMIT
) {
2418 flags
= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
;
2421 #pragma clang diagnostic push
2422 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2423 kp
= _pthread_qos_class_encode_workqueue(compat_priority
, flags
);
2424 #pragma clang diagnostic pop
2426 res
= __workq_kernreturn(WQOPS_QUEUE_REQTHREADS
, NULL
, numthreads
, (int)kp
);
2434 _pthread_workqueue_should_narrow(pthread_priority_t pri
)
2436 int res
= __workq_kernreturn(WQOPS_SHOULD_NARROW
, NULL
, (int)pri
, 0);
2444 _pthread_workqueue_addthreads(int numthreads
, pthread_priority_t priority
)
2448 if (__libdispatch_workerfunction
== NULL
) {
2453 // <rdar://problem/37687655> Legacy simulators fail to boot
2455 // Older sims set the deprecated _PTHREAD_PRIORITY_ROOTQUEUE_FLAG wrongly,
2456 // which is aliased to _PTHREAD_PRIORITY_SCHED_PRI_FLAG and that XNU
2457 // validates and rejects.
2459 // As a workaround, forcefully unset this bit that cannot be set here
2461 priority
&= ~_PTHREAD_PRIORITY_SCHED_PRI_FLAG
;
2464 res
= __workq_kernreturn(WQOPS_QUEUE_REQTHREADS
, NULL
, numthreads
, (int)priority
);
2472 _pthread_workqueue_set_event_manager_priority(pthread_priority_t priority
)
2474 int res
= __workq_kernreturn(WQOPS_SET_EVENT_MANAGER_PRIORITY
, NULL
, (int)priority
, 0);
2482 _pthread_workloop_create(uint64_t workloop_id
, uint64_t options
, pthread_attr_t
*attr
)
2484 struct kqueue_workloop_params params
= {
2485 .kqwlp_version
= sizeof(struct kqueue_workloop_params
),
2486 .kqwlp_id
= workloop_id
,
2494 if (attr
->schedset
) {
2495 params
.kqwlp_flags
|= KQ_WORKLOOP_CREATE_SCHED_PRI
;
2496 params
.kqwlp_sched_pri
= attr
->param
.sched_priority
;
2499 if (attr
->policyset
) {
2500 params
.kqwlp_flags
|= KQ_WORKLOOP_CREATE_SCHED_POL
;
2501 params
.kqwlp_sched_pol
= attr
->policy
;
2504 if (attr
->cpupercentset
) {
2505 params
.kqwlp_flags
|= KQ_WORKLOOP_CREATE_CPU_PERCENT
;
2506 params
.kqwlp_cpu_percent
= attr
->cpupercent
;
2507 params
.kqwlp_cpu_refillms
= attr
->refillms
;
2510 int res
= __kqueue_workloop_ctl(KQ_WORKLOOP_CREATE
, 0, ¶ms
,
2519 _pthread_workloop_destroy(uint64_t workloop_id
)
2521 struct kqueue_workloop_params params
= {
2522 .kqwlp_version
= sizeof(struct kqueue_workloop_params
),
2523 .kqwlp_id
= workloop_id
,
2526 int res
= __kqueue_workloop_ctl(KQ_WORKLOOP_DESTROY
, 0, ¶ms
,
2535 #pragma mark Introspection SPI for libpthread.
2538 static pthread_introspection_hook_t _pthread_introspection_hook
;
2540 pthread_introspection_hook_t
2541 pthread_introspection_hook_install(pthread_introspection_hook_t hook
)
2543 pthread_introspection_hook_t prev
;
2544 prev
= _pthread_atomic_xchg_ptr((void**)&_pthread_introspection_hook
, hook
);
2550 _pthread_introspection_hook_callout_thread_create(pthread_t t
)
2552 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_CREATE
, t
, t
,
2557 _pthread_introspection_thread_create(pthread_t t
)
2559 if (os_fastpath(!_pthread_introspection_hook
)) return;
2560 _pthread_introspection_hook_callout_thread_create(t
);
2565 _pthread_introspection_hook_callout_thread_start(pthread_t t
)
2569 if (t
== main_thread()) {
2570 size_t stacksize
= t
->stackaddr
- t
->stackbottom
;
2571 freesize
= stacksize
+ t
->guardsize
;
2572 freeaddr
= t
->stackaddr
- freesize
;
2574 freesize
= t
->freesize
- PTHREAD_SIZE
;
2575 freeaddr
= t
->freeaddr
;
2577 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_START
, t
,
2578 freeaddr
, freesize
);
2582 _pthread_introspection_thread_start(pthread_t t
)
2584 if (os_fastpath(!_pthread_introspection_hook
)) return;
2585 _pthread_introspection_hook_callout_thread_start(t
);
2590 _pthread_introspection_hook_callout_thread_terminate(pthread_t t
)
2594 if (t
== main_thread()) {
2595 size_t stacksize
= t
->stackaddr
- t
->stackbottom
;
2596 freesize
= stacksize
+ t
->guardsize
;
2597 freeaddr
= t
->stackaddr
- freesize
;
2599 freesize
= t
->freesize
- PTHREAD_SIZE
;
2600 freeaddr
= t
->freeaddr
;
2602 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE
, t
,
2603 freeaddr
, freesize
);
2607 _pthread_introspection_thread_terminate(pthread_t t
)
2609 if (os_fastpath(!_pthread_introspection_hook
)) return;
2610 _pthread_introspection_hook_callout_thread_terminate(t
);
2615 _pthread_introspection_hook_callout_thread_destroy(pthread_t t
)
2617 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY
, t
, t
,
2622 _pthread_introspection_thread_destroy(pthread_t t
)
2624 if (os_fastpath(!_pthread_introspection_hook
)) return;
2625 _pthread_introspection_hook_callout_thread_destroy(t
);
#pragma mark libplatform shims

#include <platform/string.h>

// pthread_setup initializes large structures to 0,
// which the compiler turns into a library call to memset.
//
// To avoid linking against Libc, provide a simple wrapper
// that calls through to the libplatform primitives
/*
 * memset shim: forwards to libplatform so that compiler-emitted memset calls
 * inside libpthread do not create a link dependency on Libc.
 */
void *
memset(void *b, int c, size_t len)
{
	return _platform_memset(b, c, len);
}
/*
 * bzero shim: forwards to libplatform to avoid a link dependency on Libc.
 */
void
bzero(void *s, size_t n)
{
	_platform_bzero(s, n);
}
/*
 * memcpy shim: forwards to libplatform's memmove (which also tolerates
 * overlapping regions) to avoid a link dependency on Libc.
 */
void *
memcpy(void* a, const void* b, unsigned long s)
{
	return _platform_memmove(a, b, s);
}
2664 #endif // !VARIANT_DYLD