2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 * POSIX Pthread Library
55 #include "workqueue_private.h"
56 #include "introspection_private.h"
57 #include "qos_private.h"
58 #include "tsd_private.h"
59 #include "pthread/stack_np.h"
60 #include "offsets.h" // included to validate the offsets at build time
66 #include <mach/mach_init.h>
67 #include <mach/mach_vm.h>
68 #include <mach/mach_sync_ipc.h>
70 #include <sys/resource.h>
71 #include <sys/sysctl.h>
72 #include <sys/queue.h>
73 #include <sys/ulock.h>
75 #include <machine/vmparam.h>
76 #define __APPLE_API_PRIVATE
77 #include <machine/cpu_capabilities.h>
78 #if __has_include(<ptrauth.h>)
80 #endif // __has_include(<ptrauth.h>)
83 #include <platform/string.h>
84 #include <platform/compat.h>
86 #include <stack_logging.h>
88 // Defined in libsyscall; initialized in libmalloc
89 extern malloc_logger_t
*__syscall_logger
;
91 extern int __sysctl(int *name
, u_int namelen
, void *oldp
, size_t *oldlenp
,
92 void *newp
, size_t newlen
);
93 extern void __exit(int) __attribute__((noreturn
));
94 extern int __pthread_kill(mach_port_t
, int);
96 extern void _pthread_joiner_wake(pthread_t thread
);
99 PTHREAD_NOEXPORT
extern struct _pthread
*_main_thread_ptr
;
100 #define main_thread() (_main_thread_ptr)
101 #endif // VARIANT_DYLD
103 // Default stack size is 512KB; independent of the main thread's stack size.
104 #define DEFAULT_STACK_SIZE (size_t)(512 * 1024)
112 * The pthread may be offset into a page. In that event, by contract
113 * with the kernel, the allocation will extend PTHREAD_SIZE from the
114 * start of the next page. There's also one page worth of allocation
115 * below stacksize for the guard page. <rdar://problem/19941744>
117 #define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
118 #define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size)
119 #define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize))
121 static const pthread_attr_t _pthread_attr_default
= {
122 .sig
= _PTHREAD_ATTR_SIG
,
124 .detached
= PTHREAD_CREATE_JOINABLE
,
125 .inherit
= _PTHREAD_DEFAULT_INHERITSCHED
,
126 .policy
= _PTHREAD_DEFAULT_POLICY
,
127 .defaultguardpage
= true,
128 // compile time constant for _pthread_default_priority(0)
129 .qosclass
= (1U << (THREAD_QOS_LEGACY
- 1 + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT
)) |
130 ((uint8_t)-1 & _PTHREAD_PRIORITY_PRIORITY_MASK
),
133 #if PTHREAD_LAYOUT_SPI
135 const struct pthread_layout_offsets_s pthread_layout_offsets
= {
137 .plo_pthread_tsd_base_offset
= offsetof(struct _pthread
, tsd
),
138 .plo_pthread_tsd_base_address_offset
= 0,
139 .plo_pthread_tsd_entry_size
= sizeof(((struct _pthread
*)NULL
)->tsd
[0]),
142 #endif // PTHREAD_LAYOUT_SPI
145 // Global exported variables
148 // This global should be used (carefully) by anyone needing to know if a
149 // pthread (other than the main thread) has been created.
150 int __is_threaded
= 0;
151 int __unix_conforming
= 0;
154 // Global internal variables
157 // _pthread_list_lock protects _pthread_count, access to the __pthread_head
158 // list. Externally imported by pthread_cancelable.c.
159 struct __pthread_list __pthread_head
= TAILQ_HEAD_INITIALIZER(__pthread_head
);
160 _pthread_lock _pthread_list_lock
= _PTHREAD_LOCK_INITIALIZER
;
165 // The main thread's pthread_t
166 struct _pthread _main_thread
__attribute__((aligned(64))) = { };
167 #define main_thread() (&_main_thread)
168 #else // VARIANT_DYLD
169 struct _pthread
*_main_thread_ptr
;
170 #endif // VARIANT_DYLD
172 #if PTHREAD_DEBUG_LOG
174 int _pthread_debuglog
;
175 uint64_t _pthread_debugstart
;
179 // Global static variables
181 static bool __workq_newapi
;
182 static uint8_t default_priority
;
184 static uint8_t max_priority
;
185 static uint8_t min_priority
;
186 #endif // !VARIANT_DYLD
187 static int _pthread_count
= 1;
188 static int pthread_concurrency
;
189 uintptr_t _pthread_ptr_munge_token
;
191 static void (*exitf
)(int) = __exit
;
193 static void *(*_pthread_malloc
)(size_t) = NULL
;
194 static void (*_pthread_free
)(void *) = NULL
;
195 #endif // !VARIANT_DYLD
197 // work queue support data
// Default kevent callback installed before libdispatch registers a real one;
// being called at all means the kqworkq registration was never completed.
static void
__pthread_invalid_keventfunction(void **events, int *nevents)
{
	PTHREAD_CLIENT_CRASH(0, "Invalid kqworkq setup");
}
207 __pthread_invalid_workloopfunction(uint64_t *workloop_id
, void **events
, int *nevents
)
209 PTHREAD_CLIENT_CRASH(0, "Invalid kqwl setup");
211 static pthread_workqueue_function2_t __libdispatch_workerfunction
;
212 static pthread_workqueue_function_kevent_t __libdispatch_keventfunction
= &__pthread_invalid_keventfunction
;
213 static pthread_workqueue_function_workloop_t __libdispatch_workloopfunction
= &__pthread_invalid_workloopfunction
;
214 static int __pthread_supported_features
; // supported feature set
216 #if defined(__i386__) || defined(__x86_64__)
217 static mach_vm_address_t __pthread_stack_hint
= 0xB0000000;
218 #elif defined(__arm__) || defined(__arm64__)
219 static mach_vm_address_t __pthread_stack_hint
= 0x30000000;
221 #error no __pthread_stack_hint for this architecture
225 // Function prototypes
228 // pthread primitives
229 static inline void _pthread_struct_init(pthread_t t
, const pthread_attr_t
*attrs
,
230 void *stack
, size_t stacksize
, void *freeaddr
, size_t freesize
);
233 static void _pthread_set_self_dyld(void);
234 #endif // VARIANT_DYLD
235 static inline void _pthread_set_self_internal(pthread_t
);
237 static void _pthread_dealloc_reply_port(pthread_t t
);
238 static void _pthread_dealloc_special_reply_port(pthread_t t
);
240 static inline void __pthread_started_thread(pthread_t t
);
242 static void _pthread_exit(pthread_t self
, void *value_ptr
) __dead2
;
244 static inline void _pthread_introspection_thread_create(pthread_t t
);
245 static inline void _pthread_introspection_thread_start(pthread_t t
);
246 static inline void _pthread_introspection_thread_terminate(pthread_t t
);
247 static inline void _pthread_introspection_thread_destroy(pthread_t t
);
249 extern void _pthread_set_self(pthread_t
);
250 extern void start_wqthread(pthread_t self
, mach_port_t kport
, void *stackaddr
, void *unused
, int reuse
); // trampoline into _pthread_wqthread
251 extern void thread_start(pthread_t self
, mach_port_t kport
, void *(*fun
)(void *), void * funarg
, size_t stacksize
, unsigned int flags
); // trampoline into _pthread_start
254 * Flags filed passed to bsdthread_create and back in pthread_start
255 * 31 <---------------------------------> 0
256 * _________________________________________
257 * | flags(8) | policy(8) | importance(16) |
258 * -----------------------------------------
260 #define PTHREAD_START_CUSTOM 0x01000000 // <rdar://problem/34501401>
261 #define PTHREAD_START_SETSCHED 0x02000000
262 // was PTHREAD_START_DETACHED 0x04000000
263 #define PTHREAD_START_QOSCLASS 0x08000000
264 #define PTHREAD_START_TSD_BASE_SET 0x10000000
265 #define PTHREAD_START_SUSPENDED 0x20000000
266 #define PTHREAD_START_QOSCLASS_MASK 0x00ffffff
267 #define PTHREAD_START_POLICY_BITSHIFT 16
268 #define PTHREAD_START_POLICY_MASK 0xff
269 #define PTHREAD_START_IMPORTANCE_MASK 0xffff
271 extern pthread_t
__bsdthread_create(void *(*func
)(void *), void * func_arg
, void * stack
, pthread_t thread
, unsigned int flags
);
272 extern int __bsdthread_register(void (*)(pthread_t
, mach_port_t
, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t
, mach_port_t
, void *, void *, int), int,void (*)(pthread_t
, mach_port_t
, void *(*)(void *), void *, size_t, unsigned int), int32_t *,__uint64_t
);
273 extern int __bsdthread_terminate(void * freeaddr
, size_t freesize
, mach_port_t kport
, mach_port_t joinsem
);
274 extern __uint64_t
__thread_selfid( void );
277 _Static_assert(offsetof(struct _pthread
, tsd
) == 224, "TSD LP64 offset");
279 _Static_assert(offsetof(struct _pthread
, tsd
) == 176, "TSD ILP32 offset");
281 _Static_assert(offsetof(struct _pthread
, tsd
) + _PTHREAD_STRUCT_DIRECT_THREADID_OFFSET
282 == offsetof(struct _pthread
, thread_id
),
283 "_PTHREAD_STRUCT_DIRECT_THREADID_OFFSET is correct");
285 #pragma mark pthread attrs
287 _Static_assert(sizeof(struct _pthread_attr_t
) == sizeof(__darwin_pthread_attr_t
),
288 "internal pthread_attr_t == external pthread_attr_t");
291 pthread_attr_destroy(pthread_attr_t
*attr
)
294 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
302 pthread_attr_getdetachstate(const pthread_attr_t
*attr
, int *detachstate
)
305 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
306 *detachstate
= attr
->detached
;
313 pthread_attr_getinheritsched(const pthread_attr_t
*attr
, int *inheritsched
)
316 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
317 *inheritsched
= attr
->inherit
;
323 static PTHREAD_ALWAYS_INLINE
void
324 _pthread_attr_get_schedparam(const pthread_attr_t
*attr
,
325 struct sched_param
*param
)
327 if (attr
->schedset
) {
328 *param
= attr
->param
;
330 param
->sched_priority
= default_priority
;
331 param
->quantum
= 10; /* quantum isn't public yet */
336 pthread_attr_getschedparam(const pthread_attr_t
*attr
, struct sched_param
*param
)
339 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
340 _pthread_attr_get_schedparam(attr
, param
);
347 pthread_attr_getschedpolicy(const pthread_attr_t
*attr
, int *policy
)
350 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
351 *policy
= attr
->policy
;
358 pthread_attr_init(pthread_attr_t
*attr
)
360 *attr
= _pthread_attr_default
;
365 pthread_attr_setdetachstate(pthread_attr_t
*attr
, int detachstate
)
368 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
369 (detachstate
== PTHREAD_CREATE_JOINABLE
||
370 detachstate
== PTHREAD_CREATE_DETACHED
)) {
371 attr
->detached
= detachstate
;
378 pthread_attr_setinheritsched(pthread_attr_t
*attr
, int inheritsched
)
381 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
382 (inheritsched
== PTHREAD_INHERIT_SCHED
||
383 inheritsched
== PTHREAD_EXPLICIT_SCHED
)) {
384 attr
->inherit
= inheritsched
;
391 pthread_attr_setschedparam(pthread_attr_t
*attr
, const struct sched_param
*param
)
394 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
395 /* TODO: Validate sched_param fields */
396 attr
->param
= *param
;
404 pthread_attr_setschedpolicy(pthread_attr_t
*attr
, int policy
)
407 if (attr
->sig
== _PTHREAD_ATTR_SIG
&& (policy
== SCHED_OTHER
||
408 policy
== SCHED_RR
|| policy
== SCHED_FIFO
)) {
409 if (!_PTHREAD_POLICY_IS_FIXEDPRI(policy
)) {
410 /* non-fixedpri policy should remove cpupercent */
411 attr
->cpupercentset
= 0;
413 attr
->policy
= policy
;
421 pthread_attr_setscope(pthread_attr_t
*attr
, int scope
)
424 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
425 if (scope
== PTHREAD_SCOPE_SYSTEM
) {
426 // No attribute yet for the scope.
428 } else if (scope
== PTHREAD_SCOPE_PROCESS
) {
436 pthread_attr_getscope(const pthread_attr_t
*attr
, int *scope
)
439 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
440 *scope
= PTHREAD_SCOPE_SYSTEM
;
447 pthread_attr_getstackaddr(const pthread_attr_t
*attr
, void **stackaddr
)
450 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
451 *stackaddr
= attr
->stackaddr
;
458 pthread_attr_setstackaddr(pthread_attr_t
*attr
, void *stackaddr
)
461 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
462 ((uintptr_t)stackaddr
% vm_page_size
) == 0) {
463 attr
->stackaddr
= stackaddr
;
464 attr
->defaultguardpage
= false;
472 _pthread_attr_stacksize(const pthread_attr_t
*attr
)
474 return attr
->stacksize
? attr
->stacksize
: DEFAULT_STACK_SIZE
;
478 pthread_attr_getstacksize(const pthread_attr_t
*attr
, size_t *stacksize
)
481 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
482 *stacksize
= _pthread_attr_stacksize(attr
);
489 pthread_attr_setstacksize(pthread_attr_t
*attr
, size_t stacksize
)
492 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
493 (stacksize
% vm_page_size
) == 0 &&
494 stacksize
>= PTHREAD_STACK_MIN
) {
495 attr
->stacksize
= stacksize
;
502 pthread_attr_getstack(const pthread_attr_t
*attr
, void **stackaddr
, size_t * stacksize
)
505 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
506 *stackaddr
= (void *)((uintptr_t)attr
->stackaddr
- attr
->stacksize
);
507 *stacksize
= _pthread_attr_stacksize(attr
);
513 // Per SUSv3, the stackaddr is the base address, the lowest addressable byte
514 // address. This is not the same as in pthread_attr_setstackaddr.
516 pthread_attr_setstack(pthread_attr_t
*attr
, void *stackaddr
, size_t stacksize
)
519 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
520 ((uintptr_t)stackaddr
% vm_page_size
) == 0 &&
521 (stacksize
% vm_page_size
) == 0 &&
522 stacksize
>= PTHREAD_STACK_MIN
) {
523 attr
->stackaddr
= (void *)((uintptr_t)stackaddr
+ stacksize
);
524 attr
->stacksize
= stacksize
;
531 pthread_attr_setguardsize(pthread_attr_t
*attr
, size_t guardsize
)
534 if (attr
->sig
== _PTHREAD_ATTR_SIG
&& (guardsize
% vm_page_size
) == 0) {
535 /* Guardsize of 0 is valid, means no guard */
536 attr
->defaultguardpage
= false;
537 attr
->guardsize
= guardsize
;
544 _pthread_attr_guardsize(const pthread_attr_t
*attr
)
546 return attr
->defaultguardpage
? vm_page_size
: attr
->guardsize
;
550 pthread_attr_getguardsize(const pthread_attr_t
*attr
, size_t *guardsize
)
553 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
554 *guardsize
= _pthread_attr_guardsize(attr
);
561 pthread_attr_setcpupercent_np(pthread_attr_t
*attr
, int percent
,
562 unsigned long refillms
)
565 if (attr
->sig
== _PTHREAD_ATTR_SIG
&& percent
< UINT8_MAX
&&
566 refillms
< _PTHREAD_ATTR_REFILLMS_MAX
&& attr
->policyset
&&
567 _PTHREAD_POLICY_IS_FIXEDPRI(attr
->policy
)) {
568 attr
->cpupercent
= percent
;
569 attr
->refillms
= (uint32_t)(refillms
& 0x00ffffff);
570 attr
->cpupercentset
= 1;
576 #pragma mark pthread lifetime
578 // Allocate a thread structure, stack and guard page.
580 // The thread structure may optionally be placed in the same allocation as the
581 // stack, residing above the top of the stack. This cannot be done if a
582 // custom stack address is provided.
584 // Similarly the guard page cannot be allocated if a custom stack address is
587 // The allocated thread structure is initialized with values that indicate how
588 // it should be freed.
591 _pthread_allocate(const pthread_attr_t
*attrs
, void **stack
,
592 bool from_mach_thread
)
594 mach_vm_address_t allocaddr
= __pthread_stack_hint
;
595 size_t allocsize
, guardsize
, stacksize
, pthreadoff
;
599 if (os_unlikely(attrs
->stacksize
!= 0 &&
600 attrs
->stacksize
< PTHREAD_STACK_MIN
)) {
601 PTHREAD_CLIENT_CRASH(attrs
->stacksize
, "Stack size in attrs is too small");
604 if (os_unlikely(((uintptr_t)attrs
->stackaddr
% vm_page_size
) != 0)) {
605 PTHREAD_CLIENT_CRASH(attrs
->stacksize
, "Unaligned stack addr in attrs");
608 // Allocate a pthread structure if necessary
610 if (attrs
->stackaddr
!= NULL
) {
611 allocsize
= PTHREAD_SIZE
;
614 // <rdar://problem/42588315> if the attrs struct specifies a custom
615 // stack address but not a custom size, using ->stacksize here instead
616 // of _pthread_attr_stacksize stores stacksize as zero, indicating
617 // that the stack size is unknown.
618 stacksize
= attrs
->stacksize
;
620 guardsize
= _pthread_attr_guardsize(attrs
);
621 stacksize
= _pthread_attr_stacksize(attrs
) + PTHREAD_T_OFFSET
;
622 pthreadoff
= stacksize
+ guardsize
;
623 allocsize
= pthreadoff
+ PTHREAD_SIZE
;
624 allocsize
= mach_vm_round_page(allocsize
);
627 kr
= mach_vm_map(mach_task_self(), &allocaddr
, allocsize
, vm_page_size
- 1,
628 VM_MAKE_TAG(VM_MEMORY_STACK
)| VM_FLAGS_ANYWHERE
, MEMORY_OBJECT_NULL
,
629 0, FALSE
, VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
631 if (kr
!= KERN_SUCCESS
) {
632 kr
= mach_vm_allocate(mach_task_self(), &allocaddr
, allocsize
,
633 VM_MAKE_TAG(VM_MEMORY_STACK
)| VM_FLAGS_ANYWHERE
);
634 } else if (__syscall_logger
&& !from_mach_thread
) {
635 // libsyscall will not output malloc stack logging events when
636 // VM_MEMORY_STACK is passed in to facilitate mach thread promotion.
637 // To avoid losing the stack traces for normal p-thread create
638 // operations, libpthread must pretend to be the vm syscall and log
639 // the allocations. <rdar://36418708>
640 int eventTypeFlags
= stack_logging_type_vm_allocate
|
641 stack_logging_type_mapped_file_or_shared_mem
;
642 __syscall_logger(eventTypeFlags
| VM_MAKE_TAG(VM_MEMORY_STACK
),
643 (uintptr_t)mach_task_self(), (uintptr_t)allocsize
, 0,
644 (uintptr_t)allocaddr
, 0);
647 if (kr
!= KERN_SUCCESS
) {
650 } else if (__syscall_logger
&& !from_mach_thread
) {
651 // libsyscall will not output malloc stack logging events when
652 // VM_MEMORY_STACK is passed in to facilitate mach thread promotion.
653 // To avoid losing the stack traces for normal p-thread create
654 // operations, libpthread must pretend to be the vm syscall and log
655 // the allocations. <rdar://36418708>
656 int eventTypeFlags
= stack_logging_type_vm_allocate
;
657 __syscall_logger(eventTypeFlags
| VM_MAKE_TAG(VM_MEMORY_STACK
),
658 (uintptr_t)mach_task_self(), (uintptr_t)allocsize
, 0,
659 (uintptr_t)allocaddr
, 0);
662 // The stack grows down.
663 // Set the guard page at the lowest address of the
664 // newly allocated stack. Return the highest address
667 (void)mach_vm_protect(mach_task_self(), allocaddr
, guardsize
,
668 FALSE
, VM_PROT_NONE
);
671 // Thread structure resides at the top of the stack (when using a
672 // custom stack, allocsize == PTHREAD_SIZE, so places the pthread_t
674 t
= (pthread_t
)(allocaddr
+ pthreadoff
);
675 if (attrs
->stackaddr
) {
676 *stack
= attrs
->stackaddr
;
681 _pthread_struct_init(t
, attrs
, *stack
, stacksize
, allocaddr
, allocsize
);
687 _pthread_deallocate(pthread_t t
, bool from_mach_thread
)
691 // Don't free the main thread.
692 if (t
!= main_thread()) {
693 if (!from_mach_thread
) { // see __pthread_add_thread
694 _pthread_introspection_thread_destroy(t
);
696 ret
= mach_vm_deallocate(mach_task_self(), t
->freeaddr
, t
->freesize
);
697 if (ret
!= KERN_SUCCESS
) {
698 PTHREAD_INTERNAL_CRASH(ret
, "Unable to deallocate stack");
703 #pragma clang diagnostic push
704 #pragma clang diagnostic ignored "-Wreturn-stack-address"
708 _pthread_current_stack_address(void)
714 #pragma clang diagnostic pop
717 _pthread_joiner_wake(pthread_t thread
)
719 uint32_t *exit_gate
= &thread
->tl_exit_gate
;
722 int ret
= __ulock_wake(UL_UNFAIR_LOCK
| ULF_NO_ERRNO
, exit_gate
, 0);
723 if (ret
== 0 || ret
== -ENOENT
) {
727 PTHREAD_INTERNAL_CRASH(-ret
, "pthread_join() wake failure");
732 // Terminates the thread if called from the currently running thread.
733 PTHREAD_NORETURN PTHREAD_NOINLINE PTHREAD_NOT_TAIL_CALLED
735 _pthread_terminate(pthread_t t
, void *exit_value
)
737 _pthread_introspection_thread_terminate(t
);
739 uintptr_t freeaddr
= (uintptr_t)t
->freeaddr
;
740 size_t freesize
= t
->freesize
;
743 // the size of just the stack
744 size_t freesize_stack
= t
->freesize
;
746 // We usually pass our structure+stack to bsdthread_terminate to free, but
747 // if we get told to keep the pthread_t structure around then we need to
748 // adjust the free size and addr in the pthread_t to just refer to the
749 // structure and not the stack. If we do end up deallocating the
750 // structure, this is useless work since no one can read the result, but we
751 // can't do it after the call to pthread_remove_thread because it isn't
752 // safe to dereference t after that.
753 if ((void*)t
> t
->freeaddr
&& (void*)t
< t
->freeaddr
+ t
->freesize
){
754 // Check to ensure the pthread structure itself is part of the
755 // allocation described by freeaddr/freesize, in which case we split and
756 // only deallocate the area below the pthread structure. In the event of a
757 // custom stack, the freeaddr/size will be the pthread structure itself, in
758 // which case we shouldn't free anything (the final else case).
759 freesize_stack
= trunc_page((uintptr_t)t
- (uintptr_t)freeaddr
);
761 // describe just the remainder for deallocation when the pthread_t goes away
762 t
->freeaddr
+= freesize_stack
;
763 t
->freesize
-= freesize_stack
;
764 } else if (t
== main_thread()) {
765 freeaddr
= t
->stackaddr
- pthread_get_stacksize_np(t
);
766 uintptr_t stackborder
= trunc_page((uintptr_t)_pthread_current_stack_address());
767 freesize_stack
= stackborder
- freeaddr
;
772 mach_port_t kport
= _pthread_kernel_thread(t
);
773 bool keep_thread_struct
= false, needs_wake
= false;
774 semaphore_t custom_stack_sema
= MACH_PORT_NULL
;
776 _pthread_dealloc_special_reply_port(t
);
777 _pthread_dealloc_reply_port(t
);
779 _PTHREAD_LOCK(_pthread_list_lock
);
781 // This piece of code interacts with pthread_join. It will always:
782 // - set tl_exit_gate to MACH_PORT_DEAD (thread exited)
783 // - set tl_exit_value to the value passed to pthread_exit()
784 // - decrement _pthread_count, so that we can exit the process when all
785 // threads exited even if not all of them were joined.
786 t
->tl_exit_gate
= MACH_PORT_DEAD
;
787 t
->tl_exit_value
= exit_value
;
788 should_exit
= (--_pthread_count
<= 0);
790 // If we see a joiner, we prepost that the join has to succeed,
791 // and the joiner is committed to finish (even if it was canceled)
792 if (t
->tl_join_ctx
) {
793 custom_stack_sema
= _pthread_joiner_prepost_wake(t
); // unsets tl_joinable
797 // Joinable threads that have no joiner yet are kept on the thread list
798 // so that pthread_join() can later discover the thread when it is joined,
799 // and will have to do the pthread_t cleanup.
800 if (t
->tl_joinable
) {
801 t
->tl_joiner_cleans_up
= keep_thread_struct
= true;
803 TAILQ_REMOVE(&__pthread_head
, t
, tl_plist
);
806 _PTHREAD_UNLOCK(_pthread_list_lock
);
809 // When we found a waiter, we want to drop the very contended list lock
810 // before we do the syscall in _pthread_joiner_wake(). Then, we decide
811 // who gets to cleanup the pthread_t between the joiner and the exiting
813 // - the joiner tries to set tl_join_ctx to NULL
814 // - the exiting thread tries to set tl_joiner_cleans_up to true
815 // Whoever does it first commits the other guy to cleanup the pthread_t
816 _pthread_joiner_wake(t
);
817 _PTHREAD_LOCK(_pthread_list_lock
);
818 if (t
->tl_join_ctx
) {
819 t
->tl_joiner_cleans_up
= true;
820 keep_thread_struct
= true;
822 _PTHREAD_UNLOCK(_pthread_list_lock
);
826 // /!\ dereferencing `t` past this point is not safe /!\
829 if (keep_thread_struct
|| t
== main_thread()) {
830 // Use the adjusted freesize of just the stack that we computed above.
831 freesize
= freesize_stack
;
833 _pthread_introspection_thread_destroy(t
);
836 // Check if there is nothing to free because the thread has a custom
837 // stack allocation and is joinable.
844 __bsdthread_terminate((void *)freeaddr
, freesize
, kport
, custom_stack_sema
);
845 PTHREAD_INTERNAL_CRASH(t
, "thread didn't terminate");
850 _pthread_terminate_invoke(pthread_t t
, void *exit_value
)
854 // <rdar://problem/25688492> During pthread termination there is a race
855 // between pthread_join and pthread_terminate; if the joiner is responsible
856 // for cleaning up the pthread_t struct, then it may destroy some part of the
857 // stack with it on 16k OSes. So that this doesn't cause _pthread_terminate()
858 // to crash because its stack has been removed from under its feet, just make
859 // sure termination happens in a part of the stack that is not on the same
860 // page as the pthread_t.
861 if (trunc_page((uintptr_t)__builtin_frame_address(0)) ==
862 trunc_page((uintptr_t)t
)) {
863 p
= alloca(PTHREAD_T_OFFSET
);
865 // And this __asm__ volatile is needed to stop the compiler from optimising
866 // away the alloca() completely.
867 __asm__
volatile ("" : : "r"(p
) );
869 _pthread_terminate(t
, exit_value
);
872 #pragma mark pthread start / body
876 _pthread_start(pthread_t self
, mach_port_t kport
,
877 __unused
void *(*fun
)(void *), __unused
void *arg
,
878 __unused
size_t stacksize
, unsigned int pflags
)
880 if (os_unlikely(pflags
& PTHREAD_START_SUSPENDED
)) {
881 PTHREAD_INTERNAL_CRASH(pflags
,
882 "kernel without PTHREAD_START_SUSPENDED support");
884 if (os_unlikely((pflags
& PTHREAD_START_TSD_BASE_SET
) == 0)) {
885 PTHREAD_INTERNAL_CRASH(pflags
,
886 "thread_set_tsd_base() wasn't called by the kernel");
888 PTHREAD_DEBUG_ASSERT(MACH_PORT_VALID(kport
));
889 PTHREAD_DEBUG_ASSERT(_pthread_kernel_thread(self
) == kport
);
890 _pthread_validate_signature(self
);
891 _pthread_markcancel_if_canceled(self
, kport
);
893 _pthread_set_self_internal(self
);
894 __pthread_started_thread(self
);
895 _pthread_exit(self
, (self
->fun
)(self
->arg
));
898 PTHREAD_ALWAYS_INLINE
900 _pthread_struct_init(pthread_t t
, const pthread_attr_t
*attrs
,
901 void *stackaddr
, size_t stacksize
, void *freeaddr
, size_t freesize
)
903 _pthread_init_signature(t
);
904 t
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_SELF
] = t
;
905 t
->tsd
[_PTHREAD_TSD_SLOT_ERRNO
] = &t
->err_no
;
906 if (attrs
->schedset
== 0) {
907 t
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = attrs
->qosclass
;
909 t
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] =
910 _pthread_unspecified_priority();
912 t
->tsd
[_PTHREAD_TSD_SLOT_PTR_MUNGE
] = _pthread_ptr_munge_token
;
913 t
->tl_has_custom_stack
= (attrs
->stackaddr
!= NULL
);
915 _PTHREAD_LOCK_INIT(t
->lock
);
917 t
->stackaddr
= stackaddr
;
918 t
->stackbottom
= stackaddr
- stacksize
;
919 t
->freeaddr
= freeaddr
;
920 t
->freesize
= freesize
;
922 t
->guardsize
= _pthread_attr_guardsize(attrs
);
923 t
->tl_joinable
= (attrs
->detached
== PTHREAD_CREATE_JOINABLE
);
924 t
->inherit
= attrs
->inherit
;
925 t
->tl_policy
= attrs
->policy
;
926 t
->schedset
= attrs
->schedset
;
927 _pthread_attr_get_schedparam(attrs
, &t
->tl_param
);
928 t
->cancel_state
= PTHREAD_CANCEL_ENABLE
| PTHREAD_CANCEL_DEFERRED
;
931 #pragma mark pthread public interface
933 /* Need to deprecate this in future */
935 _pthread_is_threaded(void)
937 return __is_threaded
;
940 /* Non portable public api to know whether this process has(had) atleast one thread
941 * apart from main thread. There could be race if there is a thread in the process of
942 * creation at the time of call . It does not tell whether there are more than one thread
943 * at this point of time.
946 pthread_is_threaded_np(void)
948 return __is_threaded
;
952 PTHREAD_NOEXPORT_VARIANT
954 pthread_mach_thread_np(pthread_t t
)
956 mach_port_t kport
= MACH_PORT_NULL
;
957 (void)_pthread_is_valid(t
, &kport
);
961 PTHREAD_NOEXPORT_VARIANT
963 pthread_from_mach_thread_np(mach_port_t kernel_thread
)
965 struct _pthread
*p
= NULL
;
967 /* No need to wait as mach port is already known */
968 _PTHREAD_LOCK(_pthread_list_lock
);
970 TAILQ_FOREACH(p
, &__pthread_head
, tl_plist
) {
971 if (_pthread_kernel_thread(p
) == kernel_thread
) {
976 _PTHREAD_UNLOCK(_pthread_list_lock
);
981 PTHREAD_NOEXPORT_VARIANT
983 pthread_get_stacksize_np(pthread_t t
)
988 return ESRCH
; // XXX bug?
992 // The default rlimit based allocations will be provided with a stacksize
993 // of the current limit and a freesize of the max. However, custom
994 // allocations will just have the guard page to free. If we aren't in the
995 // latter case, call into rlimit to determine the current stack size. In
996 // the event that the current limit == max limit then we'll fall down the
997 // fast path, but since it's unlikely that the limit is going to be lowered
998 // after it's been change to the max, we should be fine.
1000 // Of course, on arm rlim_cur == rlim_max and there's only the one guard
1001 // page. So, we can skip all this there.
1002 if (t
== main_thread()) {
1003 size_t stacksize
= t
->stackaddr
- t
->stackbottom
;
1005 if (stacksize
+ vm_page_size
!= t
->freesize
) {
1006 // We want to call getrlimit() just once, as it's relatively
1008 static size_t rlimit_stack
;
1010 if (rlimit_stack
== 0) {
1011 struct rlimit limit
;
1012 int ret
= getrlimit(RLIMIT_STACK
, &limit
);
1015 rlimit_stack
= (size_t) limit
.rlim_cur
;
1019 if (rlimit_stack
== 0 || rlimit_stack
> t
->freesize
) {
1022 return round_page(rlimit_stack
);
1026 #endif /* TARGET_OS_OSX */
1028 if (t
== pthread_self() || t
== main_thread()) {
1029 size
= t
->stackaddr
- t
->stackbottom
;;
1033 if (_pthread_validate_thread_and_list_lock(t
)) {
1034 size
= t
->stackaddr
- t
->stackbottom
;;
1035 _PTHREAD_UNLOCK(_pthread_list_lock
);
1039 // <rdar://problem/42588315> binary compatibility issues force us to return
1040 // DEFAULT_STACK_SIZE here when we do not know the size of the stack
1041 return size
? size
: DEFAULT_STACK_SIZE
;
1044 PTHREAD_NOEXPORT_VARIANT
1046 pthread_get_stackaddr_np(pthread_t t
)
1048 // since the main thread will not get de-allocated from underneath us
1049 if (t
== pthread_self() || t
== main_thread()) {
1050 return t
->stackaddr
;
1053 if (!_pthread_validate_thread_and_list_lock(t
)) {
1054 return (void *)(uintptr_t)ESRCH
; // XXX bug?
1057 void *addr
= t
->stackaddr
;
1058 _PTHREAD_UNLOCK(_pthread_list_lock
);
1064 _pthread_reply_port(pthread_t t
)
1068 p
= _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY
);
1070 p
= t
->tsd
[_PTHREAD_TSD_SLOT_MIG_REPLY
];
1072 return (mach_port_t
)(uintptr_t)p
;
1076 _pthread_set_reply_port(pthread_t t
, mach_port_t reply_port
)
1078 void *p
= (void *)(uintptr_t)reply_port
;
1080 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY
, p
);
1082 t
->tsd
[_PTHREAD_TSD_SLOT_MIG_REPLY
] = p
;
1087 _pthread_dealloc_reply_port(pthread_t t
)
1089 mach_port_t reply_port
= _pthread_reply_port(t
);
1090 if (reply_port
!= MACH_PORT_NULL
) {
1091 mig_dealloc_reply_port(reply_port
);
1096 _pthread_special_reply_port(pthread_t t
)
1100 p
= _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY
);
1102 p
= t
->tsd
[_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY
];
1104 return (mach_port_t
)(uintptr_t)p
;
1108 _pthread_dealloc_special_reply_port(pthread_t t
)
1110 mach_port_t special_reply_port
= _pthread_special_reply_port(t
);
1111 if (special_reply_port
!= MACH_PORT_NULL
) {
1112 thread_destruct_special_reply_port(special_reply_port
,
1113 THREAD_SPECIAL_REPLY_PORT_ALL
);
1118 pthread_main_thread_np(void)
1120 return main_thread();
/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
	return pthread_self() == main_thread();
}
1132 _pthread_threadid_slow(pthread_t thread
, uint64_t *thread_id
)
1134 unsigned int info_count
= THREAD_IDENTIFIER_INFO_COUNT
;
1135 mach_port_t thport
= _pthread_kernel_thread(thread
);
1136 struct thread_identifier_info info
;
1139 kr
= thread_info(thport
, THREAD_IDENTIFIER_INFO
,
1140 (thread_info_t
)&info
, &info_count
);
1141 if (kr
== KERN_SUCCESS
&& info
.thread_id
) {
1142 *thread_id
= info
.thread_id
;
1143 os_atomic_store(&thread
->thread_id
, info
.thread_id
, relaxed
);
1150 * if we are passed in a pthread_t that is NULL, then we return the current
1151 * thread's thread_id. So folks don't have to call pthread_self, in addition to
1152 * us doing it, if they just want their thread_id.
1154 PTHREAD_NOEXPORT_VARIANT
1156 pthread_threadid_np(pthread_t thread
, uint64_t *thread_id
)
1159 pthread_t self
= pthread_self();
1161 if (thread_id
== NULL
) {
1165 if (thread
== NULL
|| thread
== self
) {
1166 *thread_id
= self
->thread_id
;
1167 } else if (!_pthread_validate_thread_and_list_lock(thread
)) {
1170 *thread_id
= os_atomic_load(&thread
->thread_id
, relaxed
);
1171 if (os_unlikely(*thread_id
== 0)) {
1172 // there is a race at init because the thread sets its own TID.
1173 // correct this by asking mach
1174 res
= _pthread_threadid_slow(thread
, thread_id
);
1176 _PTHREAD_UNLOCK(_pthread_list_lock
);
1181 PTHREAD_NOEXPORT_VARIANT
1183 pthread_getname_np(pthread_t thread
, char *threadname
, size_t len
)
1185 if (thread
== pthread_self()) {
1186 strlcpy(threadname
, thread
->pthread_name
, len
);
1190 if (!_pthread_validate_thread_and_list_lock(thread
)) {
1194 strlcpy(threadname
, thread
->pthread_name
, len
);
1195 _PTHREAD_UNLOCK(_pthread_list_lock
);
1201 pthread_setname_np(const char *name
)
1204 pthread_t self
= pthread_self();
1211 _pthread_validate_signature(self
);
1213 /* protytype is in pthread_internals.h */
1214 res
= __proc_info(5, getpid(), 2, (uint64_t)0, (void*)name
, (int)len
);
1217 strlcpy(self
->pthread_name
, name
, MAXTHREADNAMESIZE
);
1219 bzero(self
->pthread_name
, MAXTHREADNAMESIZE
);
1226 PTHREAD_ALWAYS_INLINE
1228 __pthread_add_thread(pthread_t t
, bool from_mach_thread
)
1230 if (from_mach_thread
) {
1231 _PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock
);
1233 _PTHREAD_LOCK(_pthread_list_lock
);
1236 TAILQ_INSERT_TAIL(&__pthread_head
, t
, tl_plist
);
1239 if (from_mach_thread
) {
1240 _PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock
);
1242 _PTHREAD_UNLOCK(_pthread_list_lock
);
1245 if (!from_mach_thread
) {
1246 // PR-26275485: Mach threads will likely crash trying to run
1247 // introspection code. Since the fall out from the introspection
1248 // code not seeing the injected thread is likely less than crashing
1249 // in the introspection code, just don't make the call.
1250 _pthread_introspection_thread_create(t
);
1254 PTHREAD_ALWAYS_INLINE
1256 __pthread_undo_add_thread(pthread_t t
, bool from_mach_thread
)
1258 if (from_mach_thread
) {
1259 _PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock
);
1261 _PTHREAD_LOCK(_pthread_list_lock
);
1264 TAILQ_REMOVE(&__pthread_head
, t
, tl_plist
);
1267 if (from_mach_thread
) {
1268 _PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock
);
1270 _PTHREAD_UNLOCK(_pthread_list_lock
);
1274 PTHREAD_ALWAYS_INLINE
1276 __pthread_started_thread(pthread_t t
)
1278 mach_port_t kport
= _pthread_kernel_thread(t
);
1279 if (os_unlikely(!MACH_PORT_VALID(kport
))) {
1280 PTHREAD_CLIENT_CRASH(kport
,
1281 "Unable to allocate thread port, possible port leak");
1283 _pthread_introspection_thread_start(t
);
1286 #define _PTHREAD_CREATE_NONE 0x0
1287 #define _PTHREAD_CREATE_FROM_MACH_THREAD 0x1
1288 #define _PTHREAD_CREATE_SUSPENDED 0x2
1291 _pthread_create(pthread_t
*thread
, const pthread_attr_t
*attrs
,
1292 void *(*start_routine
)(void *), void *arg
, unsigned int create_flags
)
1296 bool from_mach_thread
= (create_flags
& _PTHREAD_CREATE_FROM_MACH_THREAD
);
1298 if (attrs
== NULL
) {
1299 attrs
= &_pthread_attr_default
;
1300 } else if (attrs
->sig
!= _PTHREAD_ATTR_SIG
) {
1304 unsigned int flags
= PTHREAD_START_CUSTOM
;
1305 if (attrs
->schedset
!= 0) {
1306 struct sched_param p
;
1307 _pthread_attr_get_schedparam(attrs
, &p
);
1308 flags
|= PTHREAD_START_SETSCHED
;
1309 flags
|= ((attrs
->policy
& PTHREAD_START_POLICY_MASK
) << PTHREAD_START_POLICY_BITSHIFT
);
1310 flags
|= (p
.sched_priority
& PTHREAD_START_IMPORTANCE_MASK
);
1311 } else if (attrs
->qosclass
!= 0) {
1312 flags
|= PTHREAD_START_QOSCLASS
;
1313 flags
|= (attrs
->qosclass
& PTHREAD_START_QOSCLASS_MASK
);
1315 if (create_flags
& _PTHREAD_CREATE_SUSPENDED
) {
1316 flags
|= PTHREAD_START_SUSPENDED
;
1321 t
=_pthread_allocate(attrs
, &stack
, from_mach_thread
);
1327 t
->fun
= start_routine
;
1328 __pthread_add_thread(t
, from_mach_thread
);
1330 if (__bsdthread_create(start_routine
, arg
, stack
, t
, flags
) ==
1332 if (errno
== EMFILE
) {
1333 PTHREAD_CLIENT_CRASH(0,
1334 "Unable to allocate thread port, possible port leak");
1336 __pthread_undo_add_thread(t
, from_mach_thread
);
1337 _pthread_deallocate(t
, from_mach_thread
);
1341 // n.b. if a thread is created detached and exits, t will be invalid
1347 pthread_create(pthread_t
*thread
, const pthread_attr_t
*attr
,
1348 void *(*start_routine
)(void *), void *arg
)
1350 unsigned int flags
= _PTHREAD_CREATE_NONE
;
1351 return _pthread_create(thread
, attr
, start_routine
, arg
, flags
);
1355 pthread_create_from_mach_thread(pthread_t
*thread
, const pthread_attr_t
*attr
,
1356 void *(*start_routine
)(void *), void *arg
)
1358 unsigned int flags
= _PTHREAD_CREATE_FROM_MACH_THREAD
;
1359 return _pthread_create(thread
, attr
, start_routine
, arg
, flags
);
1363 pthread_create_suspended_np(pthread_t
*thread
, const pthread_attr_t
*attr
,
1364 void *(*start_routine
)(void *), void *arg
)
1366 unsigned int flags
= _PTHREAD_CREATE_SUSPENDED
;
1367 return _pthread_create(thread
, attr
, start_routine
, arg
, flags
);
1371 PTHREAD_NOEXPORT_VARIANT
1373 pthread_detach(pthread_t thread
)
1376 bool join
= false, wake
= false;
1378 if (!_pthread_validate_thread_and_list_lock(thread
)) {
1382 if (!thread
->tl_joinable
) {
1384 } else if (thread
->tl_exit_gate
== MACH_PORT_DEAD
) {
1385 // Join the thread if it's already exited.
1388 thread
->tl_joinable
= false; // _pthread_joiner_prepost_wake uses this
1389 if (thread
->tl_join_ctx
) {
1390 (void)_pthread_joiner_prepost_wake(thread
);
1394 _PTHREAD_UNLOCK(_pthread_list_lock
);
1397 pthread_join(thread
, NULL
);
1399 _pthread_joiner_wake(thread
);
1404 PTHREAD_NOEXPORT_VARIANT
1406 pthread_kill(pthread_t th
, int sig
)
1408 if (sig
< 0 || sig
> NSIG
) {
1412 mach_port_t kport
= MACH_PORT_NULL
;
1414 if (!_pthread_is_valid(th
, &kport
)) {
1419 int ret
= __pthread_kill(kport
, sig
);
1427 PTHREAD_NOEXPORT_VARIANT
1429 __pthread_workqueue_setkill(int enable
)
1432 return __bsdthread_ctl(BSDTHREAD_CTL_WORKQ_ALLOW_KILL
, enable
, 0, 0);
1437 /* For compatibility... */
1442 return pthread_self();
1446 * Terminate a thread.
1448 extern int __disable_threadsignal(int);
1452 _pthread_exit(pthread_t self
, void *exit_value
)
1454 struct __darwin_pthread_handler_rec
*handler
;
1456 // Disable signal delivery while we clean up
1457 __disable_threadsignal(1);
1459 // Set cancel state to disable and type to deferred
1460 _pthread_setcancelstate_exit(self
, exit_value
);
1462 while ((handler
= self
->__cleanup_stack
) != 0) {
1463 (handler
->__routine
)(handler
->__arg
);
1464 self
->__cleanup_stack
= handler
->__next
;
1466 _pthread_tsd_cleanup(self
);
1468 // Clear per-thread semaphore cache
1469 os_put_cached_semaphore(SEMAPHORE_NULL
);
1471 _pthread_terminate_invoke(self
, exit_value
);
1475 pthread_exit(void *exit_value
)
1477 pthread_t self
= pthread_self();
1478 if (os_unlikely(self
->wqthread
)) {
1479 PTHREAD_CLIENT_CRASH(0, "pthread_exit() called from a thread "
1480 "not created by pthread_create()");
1482 _pthread_validate_signature(self
);
1483 _pthread_exit(self
, exit_value
);
1487 PTHREAD_NOEXPORT_VARIANT
1489 pthread_getschedparam(pthread_t thread
, int *policy
, struct sched_param
*param
)
1491 if (!_pthread_validate_thread_and_list_lock(thread
)) {
1495 if (policy
) *policy
= thread
->tl_policy
;
1496 if (param
) *param
= thread
->tl_param
;
1497 _PTHREAD_UNLOCK(_pthread_list_lock
);
1503 PTHREAD_ALWAYS_INLINE
1505 pthread_setschedparam_internal(pthread_t thread
, mach_port_t kport
, int policy
,
1506 const struct sched_param
*param
)
1508 policy_base_data_t bases
;
1510 mach_msg_type_number_t count
;
1515 bases
.ts
.base_priority
= param
->sched_priority
;
1516 base
= (policy_base_t
)&bases
.ts
;
1517 count
= POLICY_TIMESHARE_BASE_COUNT
;
1520 bases
.fifo
.base_priority
= param
->sched_priority
;
1521 base
= (policy_base_t
)&bases
.fifo
;
1522 count
= POLICY_FIFO_BASE_COUNT
;
1525 bases
.rr
.base_priority
= param
->sched_priority
;
1526 /* quantum isn't public yet */
1527 bases
.rr
.quantum
= param
->quantum
;
1528 base
= (policy_base_t
)&bases
.rr
;
1529 count
= POLICY_RR_BASE_COUNT
;
1534 ret
= thread_policy(kport
, policy
, base
, count
, TRUE
);
1535 return (ret
!= KERN_SUCCESS
) ? EINVAL
: 0;
1538 PTHREAD_NOEXPORT_VARIANT
1540 pthread_setschedparam(pthread_t t
, int policy
, const struct sched_param
*param
)
1542 mach_port_t kport
= MACH_PORT_NULL
;
1545 // since the main thread will not get de-allocated from underneath us
1546 if (t
== pthread_self() || t
== main_thread()) {
1547 _pthread_validate_signature(t
);
1548 kport
= _pthread_kernel_thread(t
);
1551 if (!_pthread_is_valid(t
, &kport
)) {
1556 int res
= pthread_setschedparam_internal(t
, kport
, policy
, param
);
1557 if (res
) return res
;
1560 _PTHREAD_LOCK(_pthread_list_lock
);
1561 } else if (!_pthread_validate_thread_and_list_lock(t
)) {
1562 // Ensure the thread is still valid.
1566 t
->tl_policy
= policy
;
1567 t
->tl_param
= *param
;
1568 _PTHREAD_UNLOCK(_pthread_list_lock
);
1574 sched_get_priority_min(int policy
)
1576 return default_priority
- 16;
1580 sched_get_priority_max(int policy
)
1582 return default_priority
+ 16;
// Two pthread_t handles refer to the same thread iff they are pointer-equal.
int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}
1592 * Force LLVM not to optimise this to a call to __pthread_set_self, if it does
1593 * then _pthread_set_self won't be bound when secondary threads try and start up.
1597 _pthread_set_self(pthread_t p
)
1600 if (os_likely(!p
)) {
1601 return _pthread_set_self_dyld();
1603 #endif // VARIANT_DYLD
1604 _pthread_set_self_internal(p
);
1605 _thread_set_tsd_base(&p
->tsd
[0]);
1609 // _pthread_set_self_dyld is noinline+noexport to allow the option for
1610 // static libsyscall to adopt this as the entry point from mach_init if
1612 PTHREAD_NOINLINE PTHREAD_NOEXPORT
1614 _pthread_set_self_dyld(void)
1616 pthread_t p
= main_thread();
1617 p
->thread_id
= __thread_selfid();
1619 if (os_unlikely(p
->thread_id
== -1ull)) {
1620 PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
1623 // <rdar://problem/40930651> pthread self and the errno address are the
1624 // bare minimium TSD setup that dyld needs to actually function. Without
1625 // this, TSD access will fail and crash if it uses bits of Libc prior to
1626 // library initialization. __pthread_init will finish the initialization
1627 // during library init.
1628 p
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_SELF
] = p
;
1629 p
->tsd
[_PTHREAD_TSD_SLOT_ERRNO
] = &p
->err_no
;
1630 _thread_set_tsd_base(&p
->tsd
[0]);
1632 #endif // VARIANT_DYLD
1634 PTHREAD_ALWAYS_INLINE
1636 _pthread_set_self_internal(pthread_t p
)
1638 os_atomic_store(&p
->thread_id
, __thread_selfid(), relaxed
);
1640 if (os_unlikely(p
->thread_id
== -1ull)) {
1641 PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
1646 // <rdar://problem/28984807> pthread_once should have an acquire barrier
1647 PTHREAD_ALWAYS_INLINE
1649 _os_once_acquire(os_once_t
*predicate
, void *context
, os_function_t function
)
1651 if (OS_EXPECT(os_atomic_load(predicate
, acquire
), ~0l) != ~0l) {
1652 _os_once(predicate
, context
, function
);
1653 OS_COMPILER_CAN_ASSUME(*predicate
== ~0l);
1657 struct _pthread_once_context
{
1658 pthread_once_t
*pthread_once
;
1659 void (*routine
)(void);
1663 __pthread_once_handler(void *context
)
1665 struct _pthread_once_context
*ctx
= context
;
1666 pthread_cleanup_push((void*)__os_once_reset
, &ctx
->pthread_once
->once
);
1668 pthread_cleanup_pop(0);
1669 ctx
->pthread_once
->sig
= _PTHREAD_ONCE_SIG
;
1672 PTHREAD_NOEXPORT_VARIANT
1674 pthread_once(pthread_once_t
*once_control
, void (*init_routine
)(void))
1676 struct _pthread_once_context ctx
= { once_control
, init_routine
};
1678 _os_once_acquire(&once_control
->once
, &ctx
, __pthread_once_handler
);
1679 } while (once_control
->sig
== _PTHREAD_ONCE_SIG_init
);
1685 pthread_getconcurrency(void)
1687 return pthread_concurrency
;
1691 pthread_setconcurrency(int new_level
)
1693 if (new_level
< 0) {
1696 pthread_concurrency
= new_level
;
1700 #if !defined(VARIANT_STATIC)
1704 if (_pthread_malloc
) {
1705 return _pthread_malloc(sz
);
1714 if (_pthread_free
) {
1718 #endif // VARIANT_STATIC
1721 * Perform package initialization - called automatically when application starts
1723 struct ProgramVars
; /* forward reference */
// Minimal strtoul replacement usable before libc is fully initialized.
// Only parses hexadecimal values of the form "0x..." (base must be 16 or 0);
// anything else yields 0 with *endptr left at the start of the input.
// *endptr is set to the first unconsumed character.
static unsigned long
_pthread_strtoul(const char *p, const char **endptr, int base)
{
	uintptr_t val = 0;

	// Expect hex string starting with "0x"
	if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') {
		p += 2;
		while (1) {
			char c = *p;
			if ('0' <= c && c <= '9') {
				val = (val << 4) + (c - '0');
			} else if ('a' <= c && c <= 'f') {
				val = (val << 4) + (c - 'a' + 10);
			} else if ('A' <= c && c <= 'F') {
				val = (val << 4) + (c - 'A' + 10);
			} else {
				break; // first non-hex character ends the number
			}
			p++;
		}
	}

	*endptr = (char *)p;
	return val;
}
1754 parse_main_stack_params(const char *apple
[],
1760 const char *p
= _simple_getenv(apple
, "main_stack");
1766 *stackaddr
= _pthread_strtoul(s
, &s
, 16);
1767 if (*s
!= ',') goto out
;
1769 *stacksize
= _pthread_strtoul(s
+ 1, &s
, 16);
1770 if (*s
!= ',') goto out
;
1772 *allocaddr
= _pthread_strtoul(s
+ 1, &s
, 16);
1773 if (*s
!= ',') goto out
;
1775 *allocsize
= _pthread_strtoul(s
+ 1, &s
, 16);
1776 if (*s
!= ',' && *s
!= 0) goto out
;
1780 bzero((char *)p
, strlen(p
));
1785 parse_ptr_munge_params(const char *envp
[], const char *apple
[])
1788 uintptr_t token
= 0;
1789 p
= _simple_getenv(apple
, "ptr_munge");
1791 token
= _pthread_strtoul(p
, &s
, 16);
1792 bzero((char *)p
, strlen(p
));
1797 p
= _simple_getenv(envp
, "PTHREAD_PTR_MUNGE_TOKEN");
1799 uintptr_t t
= _pthread_strtoul(p
, &s
, 16);
1806 PTHREAD_INTERNAL_CRASH(token
, "Token from the kernel is 0");
1810 _pthread_ptr_munge_token
= token
;
1811 // we need to refresh the main thread signature now that we changed
1812 // the munge token. We need to do it while TSAN will not look at it
1813 _pthread_init_signature(_main_thread_ptr
);
1817 __pthread_init(const struct _libpthread_functions
*pthread_funcs
,
1818 const char *envp
[], const char *apple
[],
1819 const struct ProgramVars
*vars __unused
)
1821 // Save our provided pushed-down functions
1822 if (pthread_funcs
) {
1823 exitf
= pthread_funcs
->exit
;
1825 if (pthread_funcs
->version
>= 2) {
1826 _pthread_malloc
= pthread_funcs
->malloc
;
1827 _pthread_free
= pthread_funcs
->free
;
1831 // libpthread.a in dyld "owns" the main thread structure itself and sets
1832 // up the tsd to point to it. So take the pthread_self() from there
1833 // and make it our main thread point.
1834 pthread_t thread
= (pthread_t
)_pthread_getspecific_direct(
1835 _PTHREAD_TSD_SLOT_PTHREAD_SELF
);
1836 if (os_unlikely(thread
== NULL
)) {
1837 PTHREAD_INTERNAL_CRASH(0, "PTHREAD_SELF TSD not initialized");
1839 _main_thread_ptr
= thread
;
1840 // this needs to be done early so that pthread_self() works in TSAN
1841 _pthread_init_signature(thread
);
1844 // Get host information
1848 host_flavor_t flavor
= HOST_PRIORITY_INFO
;
1849 mach_msg_type_number_t count
= HOST_PRIORITY_INFO_COUNT
;
1850 host_priority_info_data_t priority_info
;
1851 host_t host
= mach_host_self();
1852 kr
= host_info(host
, flavor
, (host_info_t
)&priority_info
, &count
);
1853 if (kr
!= KERN_SUCCESS
) {
1854 PTHREAD_INTERNAL_CRASH(kr
, "host_info() failed");
1856 default_priority
= (uint8_t)priority_info
.user_priority
;
1857 min_priority
= (uint8_t)priority_info
.minimum_priority
;
1858 max_priority
= (uint8_t)priority_info
.maximum_priority
;
1860 mach_port_deallocate(mach_task_self(), host
);
1863 // Set up the main thread structure
1866 // Get the address and size of the main thread's stack from the kernel.
1867 void *stackaddr
= 0;
1868 size_t stacksize
= 0;
1869 void *allocaddr
= 0;
1870 size_t allocsize
= 0;
1871 if (!parse_main_stack_params(apple
, &stackaddr
, &stacksize
, &allocaddr
, &allocsize
) ||
1872 stackaddr
== NULL
|| stacksize
== 0) {
1873 // Fall back to previous bevhaior.
1874 size_t len
= sizeof(stackaddr
);
1875 int mib
[] = { CTL_KERN
, KERN_USRSTACK
};
1876 if (__sysctl(mib
, 2, &stackaddr
, &len
, NULL
, 0) != 0) {
1877 #if defined(__LP64__)
1878 stackaddr
= (void *)USRSTACK64
;
1880 stackaddr
= (void *)USRSTACK
;
1883 stacksize
= DFLSSIZ
;
1888 // Initialize random ptr_munge token from the kernel.
1889 parse_ptr_munge_params(envp
, apple
);
1891 PTHREAD_DEBUG_ASSERT(_pthread_attr_default
.qosclass
==
1892 _pthread_default_priority(0));
1893 _pthread_struct_init(thread
, &_pthread_attr_default
,
1894 stackaddr
, stacksize
, allocaddr
, allocsize
);
1895 thread
->tl_joinable
= true;
1897 // Finish initialization with common code that is reinvoked on the
1898 // child side of a fork.
1900 // Finishes initialization of main thread attributes.
1901 // Initializes the thread list and add the main thread.
1902 // Calls _pthread_set_self() to prepare the main thread for execution.
1903 _pthread_main_thread_init(thread
);
1905 struct _pthread_registration_data registration_data
;
1906 // Set up kernel entry points with __bsdthread_register.
1907 _pthread_bsdthread_init(®istration_data
);
1909 // Have pthread_key and pthread_mutex do their init envvar checks.
1910 _pthread_key_global_init(envp
);
1911 _pthread_mutex_global_init(envp
, ®istration_data
);
1913 #if PTHREAD_DEBUG_LOG
1914 _SIMPLE_STRING path
= _simple_salloc();
1915 _simple_sprintf(path
, "/var/tmp/libpthread.%d.log", getpid());
1916 _pthread_debuglog
= open(_simple_string(path
),
1917 O_WRONLY
| O_APPEND
| O_CREAT
| O_NOFOLLOW
| O_CLOEXEC
, 0666);
1918 _simple_sfree(path
);
1919 _pthread_debugstart
= mach_absolute_time();
1924 #endif // !VARIANT_DYLD
1926 PTHREAD_NOEXPORT
void
1927 _pthread_main_thread_init(pthread_t p
)
1929 TAILQ_INIT(&__pthread_head
);
1930 _PTHREAD_LOCK_INIT(_pthread_list_lock
);
1931 _PTHREAD_LOCK_INIT(p
->lock
);
1932 _pthread_set_kernel_thread(p
, mach_thread_self());
1933 _pthread_set_reply_port(p
, mach_reply_port());
1934 p
->__cleanup_stack
= NULL
;
1935 p
->tl_join_ctx
= NULL
;
1936 p
->tl_exit_gate
= MACH_PORT_NULL
;
1937 p
->tsd
[__TSD_SEMAPHORE_CACHE
] = (void*)(uintptr_t)SEMAPHORE_NULL
;
1938 p
->tsd
[__TSD_MACH_SPECIAL_REPLY
] = 0;
1940 // Initialize the list of threads with the new main thread.
1941 TAILQ_INSERT_HEAD(&__pthread_head
, p
, tl_plist
);
1944 _pthread_introspection_thread_start(p
);
1949 _pthread_main_thread_postfork_init(pthread_t p
)
1951 _pthread_main_thread_init(p
);
1952 _pthread_set_self_internal(p
);
1970 pthread_yield_np(void)
1975 // Libsystem knows about this symbol and exports it to libsyscall
1977 pthread_current_stack_contains_np(const void *addr
, size_t length
)
1979 uintptr_t begin
= (uintptr_t) addr
, end
;
1980 uintptr_t stack_base
= (uintptr_t) _pthread_self_direct()->stackbottom
;
1981 uintptr_t stack_top
= (uintptr_t) _pthread_self_direct()->stackaddr
;
1983 if (stack_base
== stack_top
) {
1987 if (__builtin_add_overflow(begin
, length
, &end
)) {
1991 return stack_base
<= begin
&& end
<= stack_top
;
1996 // Libsystem knows about this symbol and exports it to libsyscall
1997 PTHREAD_NOEXPORT_VARIANT
1999 _pthread_clear_qos_tsd(mach_port_t thread_port
)
2001 if (thread_port
== MACH_PORT_NULL
|| (uintptr_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF
) == thread_port
) {
2002 /* Clear the current thread's TSD, that can be done inline. */
2003 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
,
2004 _pthread_unspecified_priority());
2008 _PTHREAD_LOCK(_pthread_list_lock
);
2010 TAILQ_FOREACH(p
, &__pthread_head
, tl_plist
) {
2011 mach_port_t kp
= _pthread_kernel_thread(p
);
2012 if (thread_port
== kp
) {
2013 p
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] =
2014 _pthread_unspecified_priority();
2019 _PTHREAD_UNLOCK(_pthread_list_lock
);
2024 #pragma mark pthread/stack_np.h public interface
2027 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__arm64__)
2028 #if __ARM64_ARCH_8_32__
2030 * arm64_32 uses 64-bit sizes for the frame pointer and
2031 * return address of a stack frame.
2033 typedef uint64_t frame_data_addr_t
;
2035 typedef uintptr_t frame_data_addr_t
;
2039 frame_data_addr_t frame_addr_next
;
2040 frame_data_addr_t ret_addr
;
2043 #error ********** Unimplemented architecture
2047 pthread_stack_frame_decode_np(uintptr_t frame_addr
, uintptr_t *return_addr
)
2049 struct frame_data
*frame
= (struct frame_data
*)frame_addr
;
2052 #if __has_feature(ptrauth_calls)
2053 *return_addr
= (uintptr_t)ptrauth_strip((void *)frame
->ret_addr
,
2054 ptrauth_key_return_address
);
2056 *return_addr
= (uintptr_t)frame
->ret_addr
;
2057 #endif /* __has_feature(ptrauth_calls) */
2060 #if __has_feature(ptrauth_calls)
2061 return (uintptr_t)ptrauth_strip((void *)frame
->frame_addr_next
,
2062 ptrauth_key_frame_pointer
);
2063 #endif /* __has_feature(ptrauth_calls) */
2064 return (uintptr_t)frame
->frame_addr_next
;
2068 #pragma mark pthread workqueue support routines
2071 PTHREAD_NOEXPORT
void
2072 _pthread_bsdthread_init(struct _pthread_registration_data
*data
)
2074 bzero(data
, sizeof(*data
));
2075 data
->version
= sizeof(struct _pthread_registration_data
);
2076 data
->dispatch_queue_offset
= __PTK_LIBDISPATCH_KEY0
* sizeof(void *);
2077 data
->return_to_kernel_offset
= __TSD_RETURN_TO_KERNEL
* sizeof(void *);
2078 data
->tsd_offset
= offsetof(struct _pthread
, tsd
);
2079 data
->mach_thread_self_offset
= __TSD_MACH_THREAD_SELF
* sizeof(void *);
2081 int rv
= __bsdthread_register(thread_start
, start_wqthread
, (int)PTHREAD_SIZE
,
2082 (void*)data
, (uintptr_t)sizeof(*data
), data
->dispatch_queue_offset
);
2085 int required_features
=
2086 PTHREAD_FEATURE_FINEPRIO
|
2087 PTHREAD_FEATURE_BSDTHREADCTL
|
2088 PTHREAD_FEATURE_SETSELF
|
2089 PTHREAD_FEATURE_QOS_MAINTENANCE
|
2090 PTHREAD_FEATURE_QOS_DEFAULT
;
2091 if ((rv
& required_features
) != required_features
) {
2092 PTHREAD_INTERNAL_CRASH(rv
, "Missing required kernel support");
2094 __pthread_supported_features
= rv
;
2098 * TODO: differentiate between (-1, EINVAL) after fork (which has the side
2099 * effect of resetting the child's stack_addr_hint before bailing out) and
2100 * (-1, EINVAL) because of invalid arguments. We'd probably like to treat
2101 * the latter as fatal.
2103 * <rdar://problem/36451838>
2106 pthread_priority_t main_qos
= (pthread_priority_t
)data
->main_qos
;
2108 if (_pthread_priority_thread_qos(main_qos
) != THREAD_QOS_UNSPECIFIED
) {
2109 _pthread_set_main_qos(main_qos
);
2110 main_thread()->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = main_qos
;
2113 if (data
->stack_addr_hint
) {
2114 __pthread_stack_hint
= data
->stack_addr_hint
;
2117 if (__libdispatch_workerfunction
!= NULL
) {
2118 // prepare the kernel for workq action
2119 (void)__workq_open();
2125 _pthread_wqthread_legacy_worker_wrap(pthread_priority_t pp
)
2127 /* Old thread priorities are inverted from where we have them in
2128 * the new flexible priority scheme. The highest priority is zero,
2129 * up to 2, with background at 3.
2131 pthread_workqueue_function_t func
= (pthread_workqueue_function_t
)__libdispatch_workerfunction
;
2132 bool overcommit
= (pp
& _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
);
2133 int opts
= overcommit
? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT
: 0;
2135 switch (_pthread_priority_thread_qos(pp
)) {
2136 case THREAD_QOS_USER_INITIATED
:
2137 return (*func
)(WORKQ_HIGH_PRIOQUEUE
, opts
, NULL
);
2138 case THREAD_QOS_LEGACY
:
2139 /* B&I builders can't pass a QOS_CLASS_DEFAULT thread to dispatch, for fear of the QoS being
2140 * picked up by NSThread (et al) and transported around the system. So change the TSD to
2141 * make this thread look like QOS_CLASS_USER_INITIATED even though it will still run as legacy.
2143 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
,
2144 _pthread_priority_make_from_thread_qos(THREAD_QOS_USER_INITIATED
, 0, 0));
2145 return (*func
)(WORKQ_DEFAULT_PRIOQUEUE
, opts
, NULL
);
2146 case THREAD_QOS_UTILITY
:
2147 return (*func
)(WORKQ_LOW_PRIOQUEUE
, opts
, NULL
);
2148 case THREAD_QOS_BACKGROUND
:
2149 return (*func
)(WORKQ_BG_PRIOQUEUE
, opts
, NULL
);
2151 PTHREAD_INTERNAL_CRASH(pp
, "Invalid pthread priority for the legacy interface");
2154 PTHREAD_ALWAYS_INLINE
2155 static inline pthread_priority_t
2156 _pthread_wqthread_priority(int flags
)
2158 pthread_priority_t pp
= 0;
2161 if (flags
& WQ_FLAG_THREAD_KEVENT
) {
2162 pp
|= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG
;
2164 if (flags
& WQ_FLAG_THREAD_EVENT_MANAGER
) {
2165 return pp
| _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG
;
2168 if (flags
& WQ_FLAG_THREAD_OVERCOMMIT
) {
2169 pp
|= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
;
2171 if (flags
& WQ_FLAG_THREAD_PRIO_QOS
) {
2172 qos
= (thread_qos_t
)(flags
& WQ_FLAG_THREAD_PRIO_MASK
);
2173 pp
= _pthread_priority_make_from_thread_qos(qos
, 0, pp
);
2174 } else if (flags
& WQ_FLAG_THREAD_PRIO_SCHED
) {
2175 pp
|= _PTHREAD_PRIORITY_SCHED_PRI_MASK
;
2176 pp
|= (flags
& WQ_FLAG_THREAD_PRIO_MASK
);
2178 PTHREAD_INTERNAL_CRASH(flags
, "Missing priority");
2185 _pthread_wqthread_setup(pthread_t self
, mach_port_t kport
, void *stacklowaddr
,
2188 void *stackaddr
= self
;
2189 size_t stacksize
= (uintptr_t)self
- (uintptr_t)stacklowaddr
;
2191 _pthread_struct_init(self
, &_pthread_attr_default
, stackaddr
, stacksize
,
2192 PTHREAD_ALLOCADDR(stackaddr
, stacksize
),
2193 PTHREAD_ALLOCSIZE(stackaddr
, stacksize
));
2195 _pthread_set_kernel_thread(self
, kport
);
2197 self
->wqkillset
= 0;
2198 self
->tl_joinable
= false;
2200 // Update the running thread count and set childrun bit.
2201 if (os_unlikely((flags
& WQ_FLAG_THREAD_TSD_BASE_SET
) == 0)) {
2202 PTHREAD_INTERNAL_CRASH(flags
,
2203 "thread_set_tsd_base() wasn't called by the kernel");
2205 _pthread_set_self_internal(self
);
2206 __pthread_add_thread(self
, false);
2207 __pthread_started_thread(self
);
2210 PTHREAD_NORETURN PTHREAD_NOINLINE
2212 _pthread_wqthread_exit(pthread_t self
)
2214 pthread_priority_t pp
;
2217 pp
= (pthread_priority_t
)self
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
];
2218 qos
= _pthread_priority_thread_qos(pp
);
2219 if (qos
== THREAD_QOS_UNSPECIFIED
|| qos
> WORKQ_THREAD_QOS_CLEANUP
) {
2220 // Reset QoS to something low for the cleanup process
2221 pp
= _pthread_priority_make_from_thread_qos(WORKQ_THREAD_QOS_CLEANUP
, 0, 0);
2222 self
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = (void *)pp
;
2225 _pthread_exit(self
, NULL
);
2228 // workqueue entry point from kernel
2230 _pthread_wqthread(pthread_t self
, mach_port_t kport
, void *stacklowaddr
,
2231 void *keventlist
, int flags
, int nkevents
)
2233 if ((flags
& WQ_FLAG_THREAD_REUSE
) == 0) {
2234 _pthread_wqthread_setup(self
, kport
, stacklowaddr
, flags
);
2237 pthread_priority_t pp
;
2239 if (flags
& WQ_FLAG_THREAD_OUTSIDEQOS
) {
2240 self
->wq_outsideqos
= 1;
2241 pp
= _pthread_priority_make_from_thread_qos(THREAD_QOS_LEGACY
, 0,
2242 _PTHREAD_PRIORITY_FALLBACK_FLAG
);
2244 self
->wq_outsideqos
= 0;
2245 pp
= _pthread_wqthread_priority(flags
);
2248 self
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = (void *)pp
;
2250 // avoid spills on the stack hard to keep used stack space minimal
2251 if (os_unlikely(nkevents
== WORKQ_EXIT_THREAD_NKEVENT
)) {
2252 _pthread_wqthread_exit(self
);
2253 } else if (flags
& WQ_FLAG_THREAD_WORKLOOP
) {
2254 kqueue_id_t
*kqidptr
= (kqueue_id_t
*)keventlist
- 1;
2255 self
->fun
= (void *(*)(void*))__libdispatch_workloopfunction
;
2256 self
->arg
= keventlist
;
2257 self
->wq_nevents
= nkevents
;
2258 (*__libdispatch_workloopfunction
)(kqidptr
, &self
->arg
, &self
->wq_nevents
);
2259 __workq_kernreturn(WQOPS_THREAD_WORKLOOP_RETURN
, self
->arg
, self
->wq_nevents
, 0);
2260 } else if (flags
& WQ_FLAG_THREAD_KEVENT
) {
2261 self
->fun
= (void *(*)(void*))__libdispatch_keventfunction
;
2262 self
->arg
= keventlist
;
2263 self
->wq_nevents
= nkevents
;
2264 (*__libdispatch_keventfunction
)(&self
->arg
, &self
->wq_nevents
);
2265 __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN
, self
->arg
, self
->wq_nevents
, 0);
2267 self
->fun
= (void *(*)(void*))__libdispatch_workerfunction
;
2268 self
->arg
= (void *)(uintptr_t)pp
;
2269 self
->wq_nevents
= 0;
2270 if (os_likely(__workq_newapi
)) {
2271 (*__libdispatch_workerfunction
)(pp
);
2273 _pthread_wqthread_legacy_worker_wrap(pp
);
2275 __workq_kernreturn(WQOPS_THREAD_RETURN
, NULL
, 0, 0);
2278 _os_set_crash_log_cause_and_message(self
->err_no
,
2279 "BUG IN LIBPTHREAD: __workq_kernreturn returned");
2281 * 52858993: we should never return but the compiler insists on outlining,
2282 * so the __builtin_trap() is in _start_wqthread in pthread_asm.s
2287 #pragma mark pthread workqueue API for libdispatch
2290 _Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN
== WQ_KEVENT_LIST_LEN
,
2291 "Kernel and userland should agree on the event list size");
2294 pthread_workqueue_setdispatchoffset_np(int offset
)
2296 __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP
, NULL
, offset
, 0x00);
2300 pthread_workqueue_setup(struct pthread_workqueue_config
*cfg
, size_t cfg_size
)
2303 struct workq_dispatch_config wdc_cfg
;
2304 size_t min_size
= 0;
2306 if (cfg_size
< sizeof(uint32_t)) {
2310 switch (cfg
->version
) {
2312 min_size
= offsetof(struct pthread_workqueue_config
, queue_label_offs
);
2315 min_size
= sizeof(struct pthread_workqueue_config
);
2321 if (!cfg
|| cfg_size
< min_size
) {
2325 if (cfg
->flags
& ~PTHREAD_WORKQUEUE_CONFIG_SUPPORTED_FLAGS
||
2326 cfg
->version
< PTHREAD_WORKQUEUE_CONFIG_MIN_SUPPORTED_VERSION
) {
2330 if (__libdispatch_workerfunction
== NULL
) {
2331 __workq_newapi
= true;
2333 wdc_cfg
.wdc_version
= WORKQ_DISPATCH_CONFIG_VERSION
;
2334 wdc_cfg
.wdc_flags
= 0;
2335 wdc_cfg
.wdc_queue_serialno_offs
= cfg
->queue_serialno_offs
;
2336 #if WORKQ_DISPATCH_CONFIG_VERSION >= 2
2337 wdc_cfg
.wdc_queue_label_offs
= cfg
->queue_label_offs
;
2340 // Tell the kernel about dispatch internals
2341 rv
= (int) __workq_kernreturn(WQOPS_SETUP_DISPATCH
, &wdc_cfg
, sizeof(wdc_cfg
), 0);
2345 __libdispatch_keventfunction
= cfg
->kevent_cb
;
2346 __libdispatch_workloopfunction
= cfg
->workloop_cb
;
2347 __libdispatch_workerfunction
= cfg
->workq_cb
;
2349 // Prepare the kernel for workq action
2350 (void)__workq_open();
2351 if (__is_threaded
== 0) {
2363 _pthread_workqueue_init_with_workloop(pthread_workqueue_function2_t queue_func
,
2364 pthread_workqueue_function_kevent_t kevent_func
,
2365 pthread_workqueue_function_workloop_t workloop_func
,
2366 int offset
, int flags
)
2368 struct pthread_workqueue_config cfg
= {
2369 .version
= PTHREAD_WORKQUEUE_CONFIG_VERSION
,
2371 .workq_cb
= queue_func
,
2372 .kevent_cb
= kevent_func
,
2373 .workloop_cb
= workloop_func
,
2374 .queue_serialno_offs
= offset
,
2375 .queue_label_offs
= 0,
2378 return pthread_workqueue_setup(&cfg
, sizeof(cfg
));
2382 _pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func
,
2383 pthread_workqueue_function_kevent_t kevent_func
,
2384 int offset
, int flags
)
2386 return _pthread_workqueue_init_with_workloop(queue_func
, kevent_func
, NULL
, offset
, flags
);
2390 _pthread_workqueue_init(pthread_workqueue_function2_t func
, int offset
, int flags
)
2392 return _pthread_workqueue_init_with_kevent(func
, NULL
, offset
, flags
);
2396 pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func
)
2398 struct pthread_workqueue_config cfg
= {
2399 .version
= PTHREAD_WORKQUEUE_CONFIG_VERSION
,
2401 .workq_cb
= (uint64_t)(pthread_workqueue_function2_t
)worker_func
,
2404 .queue_serialno_offs
= 0,
2405 .queue_label_offs
= 0,
2408 return pthread_workqueue_setup(&cfg
, sizeof(cfg
));
2412 _pthread_workqueue_supported(void)
2414 if (os_unlikely(!__pthread_supported_features
)) {
2415 PTHREAD_INTERNAL_CRASH(0, "libpthread has not been initialized");
2418 return __pthread_supported_features
;
2422 pthread_workqueue_addthreads_np(int queue_priority
, int options
, int numthreads
)
2426 // Cannot add threads without a worker function registered.
2427 if (__libdispatch_workerfunction
== NULL
) {
2431 pthread_priority_t kp
= 0;
2432 int compat_priority
= queue_priority
& WQ_FLAG_THREAD_PRIO_MASK
;
2435 if (options
& WORKQ_ADDTHREADS_OPTION_OVERCOMMIT
) {
2436 flags
= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
;
2439 #pragma clang diagnostic push
2440 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2441 kp
= _pthread_qos_class_encode_workqueue(compat_priority
, flags
);
2442 #pragma clang diagnostic pop
2444 res
= __workq_kernreturn(WQOPS_QUEUE_REQTHREADS
, NULL
, numthreads
, (int)kp
);
2452 _pthread_workqueue_should_narrow(pthread_priority_t pri
)
2454 int res
= __workq_kernreturn(WQOPS_SHOULD_NARROW
, NULL
, (int)pri
, 0);
2462 _pthread_workqueue_addthreads(int numthreads
, pthread_priority_t priority
)
2466 if (__libdispatch_workerfunction
== NULL
) {
2471 // <rdar://problem/37687655> Legacy simulators fail to boot
2473 // Older sims set the deprecated _PTHREAD_PRIORITY_ROOTQUEUE_FLAG wrongly,
2474 // which is aliased to _PTHREAD_PRIORITY_SCHED_PRI_FLAG and that XNU
2475 // validates and rejects.
2477 // As a workaround, forcefully unset this bit that cannot be set here
2479 priority
&= ~_PTHREAD_PRIORITY_SCHED_PRI_FLAG
;
2482 res
= __workq_kernreturn(WQOPS_QUEUE_REQTHREADS
, NULL
, numthreads
, (int)priority
);
2490 _pthread_workqueue_set_event_manager_priority(pthread_priority_t priority
)
2492 int res
= __workq_kernreturn(WQOPS_SET_EVENT_MANAGER_PRIORITY
, NULL
, (int)priority
, 0);
2500 _pthread_workloop_create(uint64_t workloop_id
, uint64_t options
, pthread_attr_t
*attr
)
2502 struct kqueue_workloop_params params
= {
2503 .kqwlp_version
= sizeof(struct kqueue_workloop_params
),
2504 .kqwlp_id
= workloop_id
,
2512 if (attr
->schedset
) {
2513 params
.kqwlp_flags
|= KQ_WORKLOOP_CREATE_SCHED_PRI
;
2514 params
.kqwlp_sched_pri
= attr
->param
.sched_priority
;
2517 if (attr
->policyset
) {
2518 params
.kqwlp_flags
|= KQ_WORKLOOP_CREATE_SCHED_POL
;
2519 params
.kqwlp_sched_pol
= attr
->policy
;
2522 if (attr
->cpupercentset
) {
2523 params
.kqwlp_flags
|= KQ_WORKLOOP_CREATE_CPU_PERCENT
;
2524 params
.kqwlp_cpu_percent
= attr
->cpupercent
;
2525 params
.kqwlp_cpu_refillms
= attr
->refillms
;
2528 int res
= __kqueue_workloop_ctl(KQ_WORKLOOP_CREATE
, 0, ¶ms
,
2537 _pthread_workloop_destroy(uint64_t workloop_id
)
2539 struct kqueue_workloop_params params
= {
2540 .kqwlp_version
= sizeof(struct kqueue_workloop_params
),
2541 .kqwlp_id
= workloop_id
,
2544 int res
= __kqueue_workloop_ctl(KQ_WORKLOOP_DESTROY
, 0, ¶ms
,
2553 #pragma mark Introspection SPI for libpthread.
2556 static pthread_introspection_hook_t _pthread_introspection_hook
;
2558 pthread_introspection_hook_t
2559 pthread_introspection_hook_install(pthread_introspection_hook_t hook
)
2561 pthread_introspection_hook_t prev
;
2562 prev
= _pthread_atomic_xchg_ptr((void**)&_pthread_introspection_hook
, hook
);
2568 _pthread_introspection_hook_callout_thread_create(pthread_t t
)
2570 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_CREATE
, t
, t
,
2575 _pthread_introspection_thread_create(pthread_t t
)
2577 if (os_fastpath(!_pthread_introspection_hook
)) return;
2578 _pthread_introspection_hook_callout_thread_create(t
);
2583 _pthread_introspection_hook_callout_thread_start(pthread_t t
)
2587 if (t
== main_thread()) {
2588 size_t stacksize
= t
->stackaddr
- t
->stackbottom
;
2589 freesize
= stacksize
+ t
->guardsize
;
2590 freeaddr
= t
->stackaddr
- freesize
;
2592 freesize
= t
->freesize
- PTHREAD_SIZE
;
2593 freeaddr
= t
->freeaddr
;
2595 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_START
, t
,
2596 freeaddr
, freesize
);
2600 _pthread_introspection_thread_start(pthread_t t
)
2602 if (os_fastpath(!_pthread_introspection_hook
)) return;
2603 _pthread_introspection_hook_callout_thread_start(t
);
2608 _pthread_introspection_hook_callout_thread_terminate(pthread_t t
)
2612 if (t
== main_thread()) {
2613 size_t stacksize
= t
->stackaddr
- t
->stackbottom
;
2614 freesize
= stacksize
+ t
->guardsize
;
2615 freeaddr
= t
->stackaddr
- freesize
;
2617 freesize
= t
->freesize
- PTHREAD_SIZE
;
2618 freeaddr
= t
->freeaddr
;
2620 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE
, t
,
2621 freeaddr
, freesize
);
2625 _pthread_introspection_thread_terminate(pthread_t t
)
2627 if (os_fastpath(!_pthread_introspection_hook
)) return;
2628 _pthread_introspection_hook_callout_thread_terminate(t
);
2633 _pthread_introspection_hook_callout_thread_destroy(pthread_t t
)
2635 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY
, t
, t
,
2640 _pthread_introspection_thread_destroy(pthread_t t
)
2642 if (os_fastpath(!_pthread_introspection_hook
)) return;
2643 _pthread_introspection_hook_callout_thread_destroy(t
);
2648 #pragma mark libplatform shims
2650 #include <platform/string.h>
2652 // pthread_setup initializes large structures to 0,
2653 // which the compiler turns into a library call to memset.
2655 // To avoid linking against Libc, provide a simple wrapper
2656 // that calls through to the libplatform primitives
/*
 * Thin memset shim: routes the compiler-generated memset calls to the
 * libplatform primitive so libpthread need not link against Libc.
 */
void *
memset(void *b, int c, size_t len)
{
	return _platform_memset(b, c, len);
}
/*
 * Thin bzero shim over the libplatform primitive (avoids a Libc
 * dependency).
 */
void
bzero(void *s, size_t n)
{
	_platform_bzero(s, n);
}
/*
 * Thin memcpy shim over the libplatform move primitive (avoids a Libc
 * dependency; _platform_memmove also tolerates overlap).
 */
void *
memcpy(void *a, const void *b, unsigned long s)
{
	return _platform_memmove(a, b, s);
}
2682 #endif // !VARIANT_DYLD