2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 * Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1997 by Open Software Foundation, Inc.
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 * POSIX Pthread Library
55 #include "workqueue_private.h"
56 #include "introspection_private.h"
57 #include "qos_private.h"
58 #include "tsd_private.h"
59 #include "pthread/stack_np.h"
60 #include "offsets.h" // included to validate the offsets at build time
66 #include <mach/mach_init.h>
67 #include <mach/mach_vm.h>
68 #include <mach/mach_sync_ipc.h>
70 #include <sys/resource.h>
71 #include <sys/sysctl.h>
72 #include <sys/queue.h>
73 #include <sys/ulock.h>
75 #include <machine/vmparam.h>
76 #define __APPLE_API_PRIVATE
77 #include <machine/cpu_capabilities.h>
80 #include <platform/string.h>
81 #include <platform/compat.h>
83 extern int __sysctl(int *name
, u_int namelen
, void *oldp
, size_t *oldlenp
,
84 void *newp
, size_t newlen
);
85 extern void __exit(int) __attribute__((noreturn
));
86 extern int __pthread_kill(mach_port_t
, int);
88 extern void _pthread_joiner_wake(pthread_t thread
);
91 PTHREAD_NOEXPORT
extern struct _pthread
*_main_thread_ptr
;
92 #define main_thread() (_main_thread_ptr)
93 #endif // VARIANT_DYLD
95 // Default stack size is 512KB; independent of the main thread's stack size.
96 #define DEFAULT_STACK_SIZE (size_t)(512 * 1024)
104 * The pthread may be offset into a page. In that event, by contract
105 * with the kernel, the allocation will extend PTHREAD_SIZE from the
106 * start of the next page. There's also one page worth of allocation
107 * below stacksize for the guard page. <rdar://problem/19941744>
109 #define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
110 #define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size)
111 #define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize))
113 static const pthread_attr_t _pthread_attr_default
= {
114 .sig
= _PTHREAD_ATTR_SIG
,
116 .detached
= PTHREAD_CREATE_JOINABLE
,
117 .inherit
= _PTHREAD_DEFAULT_INHERITSCHED
,
118 .policy
= _PTHREAD_DEFAULT_POLICY
,
119 .defaultguardpage
= true,
120 // compile time constant for _pthread_default_priority(0)
121 .qosclass
= (1U << (THREAD_QOS_LEGACY
- 1 + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT
)) |
122 ((uint8_t)-1 & _PTHREAD_PRIORITY_PRIORITY_MASK
),
125 #if PTHREAD_LAYOUT_SPI
127 const struct pthread_layout_offsets_s pthread_layout_offsets
= {
129 .plo_pthread_tsd_base_offset
= offsetof(struct _pthread
, tsd
),
130 .plo_pthread_tsd_base_address_offset
= 0,
131 .plo_pthread_tsd_entry_size
= sizeof(((struct _pthread
*)NULL
)->tsd
[0]),
134 #endif // PTHREAD_LAYOUT_SPI
137 // Global exported variables
140 // This global should be used (carefully) by anyone needing to know if a
141 // pthread (other than the main thread) has been created.
142 int __is_threaded
= 0;
143 int __unix_conforming
= 0;
146 // Global internal variables
149 // _pthread_list_lock protects _pthread_count, access to the __pthread_head
150 // list. Externally imported by pthread_cancelable.c.
151 struct __pthread_list __pthread_head
= TAILQ_HEAD_INITIALIZER(__pthread_head
);
152 _pthread_lock _pthread_list_lock
= _PTHREAD_LOCK_INITIALIZER
;
157 // The main thread's pthread_t
158 struct _pthread _main_thread
__attribute__((aligned(64))) = { };
159 #define main_thread() (&_main_thread)
160 #else // VARIANT_DYLD
161 struct _pthread
*_main_thread_ptr
;
162 #endif // VARIANT_DYLD
164 #if PTHREAD_DEBUG_LOG
166 int _pthread_debuglog
;
167 uint64_t _pthread_debugstart
;
171 // Global static variables
173 static bool __workq_newapi
;
174 static uint8_t default_priority
;
176 static uint8_t max_priority
;
177 static uint8_t min_priority
;
178 #endif // !VARIANT_DYLD
179 static int _pthread_count
= 1;
180 static int pthread_concurrency
;
181 static uintptr_t _pthread_ptr_munge_token
;
183 static void (*exitf
)(int) = __exit
;
185 static void *(*_pthread_malloc
)(size_t) = NULL
;
186 static void (*_pthread_free
)(void *) = NULL
;
187 #endif // !VARIANT_DYLD
189 // work queue support data
// Placeholder kqworkq event handler: installed until libdispatch registers a
// real kevent function. Being called at all means workqueue setup is invalid,
// so crash immediately with a descriptive message.
static void
__pthread_invalid_keventfunction(void **events, int *nevents)
{
	PTHREAD_CLIENT_CRASH(0, "Invalid kqworkq setup");
}
199 __pthread_invalid_workloopfunction(uint64_t *workloop_id
, void **events
, int *nevents
)
201 PTHREAD_CLIENT_CRASH(0, "Invalid kqwl setup");
203 static pthread_workqueue_function2_t __libdispatch_workerfunction
;
204 static pthread_workqueue_function_kevent_t __libdispatch_keventfunction
= &__pthread_invalid_keventfunction
;
205 static pthread_workqueue_function_workloop_t __libdispatch_workloopfunction
= &__pthread_invalid_workloopfunction
;
206 static int __libdispatch_offset
;
207 static int __pthread_supported_features
; // supported feature set
209 #if defined(__i386__) || defined(__x86_64__)
210 static mach_vm_address_t __pthread_stack_hint
= 0xB0000000;
212 #error no __pthread_stack_hint for this architecture
216 // Function prototypes
219 // pthread primitives
220 static inline void _pthread_struct_init(pthread_t t
, const pthread_attr_t
*attrs
,
221 void *stack
, size_t stacksize
, void *freeaddr
, size_t freesize
);
224 static void _pthread_set_self_dyld(void);
225 #endif // VARIANT_DYLD
226 static inline void _pthread_set_self_internal(pthread_t
, bool needs_tsd_base_set
);
228 static void _pthread_dealloc_reply_port(pthread_t t
);
229 static void _pthread_dealloc_special_reply_port(pthread_t t
);
231 static inline void __pthread_started_thread(pthread_t t
);
233 static void _pthread_exit(pthread_t self
, void *value_ptr
) __dead2
;
235 static inline void _pthread_introspection_thread_create(pthread_t t
);
236 static inline void _pthread_introspection_thread_start(pthread_t t
);
237 static inline void _pthread_introspection_thread_terminate(pthread_t t
);
238 static inline void _pthread_introspection_thread_destroy(pthread_t t
);
240 extern void _pthread_set_self(pthread_t
);
241 extern void start_wqthread(pthread_t self
, mach_port_t kport
, void *stackaddr
, void *unused
, int reuse
); // trampoline into _pthread_wqthread
242 extern void thread_start(pthread_t self
, mach_port_t kport
, void *(*fun
)(void *), void * funarg
, size_t stacksize
, unsigned int flags
); // trampoline into _pthread_start
245 * Flags filed passed to bsdthread_create and back in pthread_start
246 * 31 <---------------------------------> 0
247 * _________________________________________
248 * | flags(8) | policy(8) | importance(16) |
249 * -----------------------------------------
251 #define PTHREAD_START_CUSTOM 0x01000000 // <rdar://problem/34501401>
252 #define PTHREAD_START_SETSCHED 0x02000000
253 // was PTHREAD_START_DETACHED 0x04000000
254 #define PTHREAD_START_QOSCLASS 0x08000000
255 #define PTHREAD_START_TSD_BASE_SET 0x10000000
256 #define PTHREAD_START_SUSPENDED 0x20000000
257 #define PTHREAD_START_QOSCLASS_MASK 0x00ffffff
258 #define PTHREAD_START_POLICY_BITSHIFT 16
259 #define PTHREAD_START_POLICY_MASK 0xff
260 #define PTHREAD_START_IMPORTANCE_MASK 0xffff
262 #if (!defined(__OPEN_SOURCE__) && TARGET_OS_OSX) || OS_VARIANT_RESOLVED // 40703288
263 static int pthread_setschedparam_internal(pthread_t
, mach_port_t
, int,
264 const struct sched_param
*);
267 extern pthread_t
__bsdthread_create(void *(*func
)(void *), void * func_arg
, void * stack
, pthread_t thread
, unsigned int flags
);
268 extern int __bsdthread_register(void (*)(pthread_t
, mach_port_t
, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t
, mach_port_t
, void *, void *, int), int,void (*)(pthread_t
, mach_port_t
, void *(*)(void *), void *, size_t, unsigned int), int32_t *,__uint64_t
);
269 extern int __bsdthread_terminate(void * freeaddr
, size_t freesize
, mach_port_t kport
, mach_port_t joinsem
);
270 extern __uint64_t
__thread_selfid( void );
273 _Static_assert(offsetof(struct _pthread
, tsd
) == 224, "TSD LP64 offset");
275 _Static_assert(offsetof(struct _pthread
, tsd
) == 176, "TSD ILP32 offset");
277 _Static_assert(offsetof(struct _pthread
, tsd
) + _PTHREAD_STRUCT_DIRECT_THREADID_OFFSET
278 == offsetof(struct _pthread
, thread_id
),
279 "_PTHREAD_STRUCT_DIRECT_THREADID_OFFSET is correct");
281 #pragma mark pthread attrs
283 _Static_assert(sizeof(struct _pthread_attr_t
) == sizeof(__darwin_pthread_attr_t
),
284 "internal pthread_attr_t == external pthread_attr_t");
287 pthread_attr_destroy(pthread_attr_t
*attr
)
290 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
298 pthread_attr_getdetachstate(const pthread_attr_t
*attr
, int *detachstate
)
301 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
302 *detachstate
= attr
->detached
;
309 pthread_attr_getinheritsched(const pthread_attr_t
*attr
, int *inheritsched
)
312 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
313 *inheritsched
= attr
->inherit
;
319 static PTHREAD_ALWAYS_INLINE
void
320 _pthread_attr_get_schedparam(const pthread_attr_t
*attr
,
321 struct sched_param
*param
)
323 if (attr
->schedset
) {
324 *param
= attr
->param
;
326 param
->sched_priority
= default_priority
;
327 param
->quantum
= 10; /* quantum isn't public yet */
332 pthread_attr_getschedparam(const pthread_attr_t
*attr
, struct sched_param
*param
)
335 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
336 _pthread_attr_get_schedparam(attr
, param
);
343 pthread_attr_getschedpolicy(const pthread_attr_t
*attr
, int *policy
)
346 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
347 *policy
= attr
->policy
;
354 pthread_attr_init(pthread_attr_t
*attr
)
356 *attr
= _pthread_attr_default
;
361 pthread_attr_setdetachstate(pthread_attr_t
*attr
, int detachstate
)
364 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
365 (detachstate
== PTHREAD_CREATE_JOINABLE
||
366 detachstate
== PTHREAD_CREATE_DETACHED
)) {
367 attr
->detached
= detachstate
;
374 pthread_attr_setinheritsched(pthread_attr_t
*attr
, int inheritsched
)
377 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
378 (inheritsched
== PTHREAD_INHERIT_SCHED
||
379 inheritsched
== PTHREAD_EXPLICIT_SCHED
)) {
380 attr
->inherit
= inheritsched
;
387 pthread_attr_setschedparam(pthread_attr_t
*attr
, const struct sched_param
*param
)
390 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
391 /* TODO: Validate sched_param fields */
392 attr
->param
= *param
;
400 pthread_attr_setschedpolicy(pthread_attr_t
*attr
, int policy
)
403 if (attr
->sig
== _PTHREAD_ATTR_SIG
&& (policy
== SCHED_OTHER
||
404 policy
== SCHED_RR
|| policy
== SCHED_FIFO
)) {
405 if (!_PTHREAD_POLICY_IS_FIXEDPRI(policy
)) {
406 /* non-fixedpri policy should remove cpupercent */
407 attr
->cpupercentset
= 0;
409 attr
->policy
= policy
;
417 pthread_attr_setscope(pthread_attr_t
*attr
, int scope
)
420 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
421 if (scope
== PTHREAD_SCOPE_SYSTEM
) {
422 // No attribute yet for the scope.
424 } else if (scope
== PTHREAD_SCOPE_PROCESS
) {
432 pthread_attr_getscope(const pthread_attr_t
*attr
, int *scope
)
435 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
436 *scope
= PTHREAD_SCOPE_SYSTEM
;
443 pthread_attr_getstackaddr(const pthread_attr_t
*attr
, void **stackaddr
)
446 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
447 *stackaddr
= attr
->stackaddr
;
454 pthread_attr_setstackaddr(pthread_attr_t
*attr
, void *stackaddr
)
457 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
458 ((uintptr_t)stackaddr
% vm_page_size
) == 0) {
459 attr
->stackaddr
= stackaddr
;
460 attr
->defaultguardpage
= false;
468 _pthread_attr_stacksize(const pthread_attr_t
*attr
)
470 return attr
->stacksize
? attr
->stacksize
: DEFAULT_STACK_SIZE
;
474 pthread_attr_getstacksize(const pthread_attr_t
*attr
, size_t *stacksize
)
477 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
478 *stacksize
= _pthread_attr_stacksize(attr
);
485 pthread_attr_setstacksize(pthread_attr_t
*attr
, size_t stacksize
)
488 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
489 (stacksize
% vm_page_size
) == 0 &&
490 stacksize
>= PTHREAD_STACK_MIN
) {
491 attr
->stacksize
= stacksize
;
498 pthread_attr_getstack(const pthread_attr_t
*attr
, void **stackaddr
, size_t * stacksize
)
501 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
502 *stackaddr
= (void *)((uintptr_t)attr
->stackaddr
- attr
->stacksize
);
503 *stacksize
= _pthread_attr_stacksize(attr
);
509 // Per SUSv3, the stackaddr is the base address, the lowest addressable byte
510 // address. This is not the same as in pthread_attr_setstackaddr.
512 pthread_attr_setstack(pthread_attr_t
*attr
, void *stackaddr
, size_t stacksize
)
515 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
516 ((uintptr_t)stackaddr
% vm_page_size
) == 0 &&
517 (stacksize
% vm_page_size
) == 0 &&
518 stacksize
>= PTHREAD_STACK_MIN
) {
519 attr
->stackaddr
= (void *)((uintptr_t)stackaddr
+ stacksize
);
520 attr
->stacksize
= stacksize
;
527 pthread_attr_setguardsize(pthread_attr_t
*attr
, size_t guardsize
)
530 if (attr
->sig
== _PTHREAD_ATTR_SIG
&& (guardsize
% vm_page_size
) == 0) {
531 /* Guardsize of 0 is valid, means no guard */
532 attr
->defaultguardpage
= false;
533 attr
->guardsize
= guardsize
;
540 _pthread_attr_guardsize(const pthread_attr_t
*attr
)
542 return attr
->defaultguardpage
? vm_page_size
: attr
->guardsize
;
546 pthread_attr_getguardsize(const pthread_attr_t
*attr
, size_t *guardsize
)
549 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
550 *guardsize
= _pthread_attr_guardsize(attr
);
557 pthread_attr_setcpupercent_np(pthread_attr_t
*attr
, int percent
,
558 unsigned long refillms
)
561 if (attr
->sig
== _PTHREAD_ATTR_SIG
&& percent
< UINT8_MAX
&&
562 refillms
< _PTHREAD_ATTR_REFILLMS_MAX
&& attr
->policyset
&&
563 _PTHREAD_POLICY_IS_FIXEDPRI(attr
->policy
)) {
564 attr
->cpupercent
= percent
;
565 attr
->refillms
= (uint32_t)(refillms
& 0x00ffffff);
566 attr
->cpupercentset
= 1;
572 #pragma mark pthread lifetime
574 // Allocate a thread structure, stack and guard page.
576 // The thread structure may optionally be placed in the same allocation as the
577 // stack, residing above the top of the stack. This cannot be done if a
578 // custom stack address is provided.
580 // Similarly the guard page cannot be allocated if a custom stack address is
583 // The allocated thread structure is initialized with values that indicate how
584 // it should be freed.
587 _pthread_allocate(const pthread_attr_t
*attrs
, void **stack
)
589 mach_vm_address_t allocaddr
= __pthread_stack_hint
;
590 size_t allocsize
, guardsize
, stacksize
, pthreadoff
;
594 PTHREAD_ASSERT(attrs
->stacksize
== 0 ||
595 attrs
->stacksize
>= PTHREAD_STACK_MIN
);
597 // Allocate a pthread structure if necessary
599 if (attrs
->stackaddr
!= NULL
) {
600 PTHREAD_ASSERT(((uintptr_t)attrs
->stackaddr
% vm_page_size
) == 0);
601 allocsize
= PTHREAD_SIZE
;
604 // <rdar://problem/42588315> if the attrs struct specifies a custom
605 // stack address but not a custom size, using ->stacksize here instead
606 // of _pthread_attr_stacksize stores stacksize as zero, indicating
607 // that the stack size is unknown.
608 stacksize
= attrs
->stacksize
;
610 guardsize
= _pthread_attr_guardsize(attrs
);
611 stacksize
= _pthread_attr_stacksize(attrs
) + PTHREAD_T_OFFSET
;
612 pthreadoff
= stacksize
+ guardsize
;
613 allocsize
= pthreadoff
+ PTHREAD_SIZE
;
614 allocsize
= mach_vm_round_page(allocsize
);
617 kr
= mach_vm_map(mach_task_self(), &allocaddr
, allocsize
, vm_page_size
- 1,
618 VM_MAKE_TAG(VM_MEMORY_STACK
)| VM_FLAGS_ANYWHERE
, MEMORY_OBJECT_NULL
,
619 0, FALSE
, VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
621 if (kr
!= KERN_SUCCESS
) {
622 kr
= mach_vm_allocate(mach_task_self(), &allocaddr
, allocsize
,
623 VM_MAKE_TAG(VM_MEMORY_STACK
)| VM_FLAGS_ANYWHERE
);
625 if (kr
!= KERN_SUCCESS
) {
630 // The stack grows down.
631 // Set the guard page at the lowest address of the
632 // newly allocated stack. Return the highest address
635 (void)mach_vm_protect(mach_task_self(), allocaddr
, guardsize
,
636 FALSE
, VM_PROT_NONE
);
639 // Thread structure resides at the top of the stack (when using a
640 // custom stack, allocsize == PTHREAD_SIZE, so places the pthread_t
642 t
= (pthread_t
)(allocaddr
+ pthreadoff
);
643 if (attrs
->stackaddr
) {
644 *stack
= attrs
->stackaddr
;
649 _pthread_struct_init(t
, attrs
, *stack
, stacksize
, allocaddr
, allocsize
);
655 _pthread_deallocate(pthread_t t
, bool from_mach_thread
)
659 // Don't free the main thread.
660 if (t
!= main_thread()) {
661 if (!from_mach_thread
) { // see __pthread_add_thread
662 _pthread_introspection_thread_destroy(t
);
664 ret
= mach_vm_deallocate(mach_task_self(), t
->freeaddr
, t
->freesize
);
665 PTHREAD_ASSERT(ret
== KERN_SUCCESS
);
669 #pragma clang diagnostic push
670 #pragma clang diagnostic ignored "-Wreturn-stack-address"
674 _pthread_current_stack_address(void)
680 #pragma clang diagnostic pop
683 _pthread_joiner_wake(pthread_t thread
)
685 uint32_t *exit_gate
= &thread
->tl_exit_gate
;
688 int ret
= __ulock_wake(UL_UNFAIR_LOCK
| ULF_NO_ERRNO
, exit_gate
, 0);
689 if (ret
== 0 || ret
== -ENOENT
) {
693 PTHREAD_INTERNAL_CRASH(-ret
, "pthread_join() wake failure");
698 // Terminates the thread if called from the currently running thread.
699 PTHREAD_NORETURN PTHREAD_NOINLINE PTHREAD_NOT_TAIL_CALLED
701 _pthread_terminate(pthread_t t
, void *exit_value
)
703 PTHREAD_ASSERT(t
== pthread_self());
705 _pthread_introspection_thread_terminate(t
);
707 uintptr_t freeaddr
= (uintptr_t)t
->freeaddr
;
708 size_t freesize
= t
->freesize
;
711 // the size of just the stack
712 size_t freesize_stack
= t
->freesize
;
714 // We usually pass our structure+stack to bsdthread_terminate to free, but
715 // if we get told to keep the pthread_t structure around then we need to
716 // adjust the free size and addr in the pthread_t to just refer to the
717 // structure and not the stack. If we do end up deallocating the
718 // structure, this is useless work since no one can read the result, but we
719 // can't do it after the call to pthread_remove_thread because it isn't
720 // safe to dereference t after that.
721 if ((void*)t
> t
->freeaddr
&& (void*)t
< t
->freeaddr
+ t
->freesize
){
722 // Check to ensure the pthread structure itself is part of the
723 // allocation described by freeaddr/freesize, in which case we split and
724 // only deallocate the area below the pthread structure. In the event of a
725 // custom stack, the freeaddr/size will be the pthread structure itself, in
726 // which case we shouldn't free anything (the final else case).
727 freesize_stack
= trunc_page((uintptr_t)t
- (uintptr_t)freeaddr
);
729 // describe just the remainder for deallocation when the pthread_t goes away
730 t
->freeaddr
+= freesize_stack
;
731 t
->freesize
-= freesize_stack
;
732 } else if (t
== main_thread()) {
733 freeaddr
= t
->stackaddr
- pthread_get_stacksize_np(t
);
734 uintptr_t stackborder
= trunc_page((uintptr_t)_pthread_current_stack_address());
735 freesize_stack
= stackborder
- freeaddr
;
740 mach_port_t kport
= _pthread_kernel_thread(t
);
741 bool keep_thread_struct
= false, needs_wake
= false;
742 semaphore_t custom_stack_sema
= MACH_PORT_NULL
;
744 _pthread_dealloc_special_reply_port(t
);
745 _pthread_dealloc_reply_port(t
);
747 _PTHREAD_LOCK(_pthread_list_lock
);
749 // This piece of code interacts with pthread_join. It will always:
750 // - set tl_exit_gate to MACH_PORT_DEAD (thread exited)
751 // - set tl_exit_value to the value passed to pthread_exit()
752 // - decrement _pthread_count, so that we can exit the process when all
753 // threads exited even if not all of them were joined.
754 t
->tl_exit_gate
= MACH_PORT_DEAD
;
755 t
->tl_exit_value
= exit_value
;
756 should_exit
= (--_pthread_count
<= 0);
758 // If we see a joiner, we prepost that the join has to succeed,
759 // and the joiner is committed to finish (even if it was canceled)
760 if (t
->tl_join_ctx
) {
761 custom_stack_sema
= _pthread_joiner_prepost_wake(t
); // unsets tl_joinable
765 // Joinable threads that have no joiner yet are kept on the thread list
766 // so that pthread_join() can later discover the thread when it is joined,
767 // and will have to do the pthread_t cleanup.
768 if (t
->tl_joinable
) {
769 t
->tl_joiner_cleans_up
= keep_thread_struct
= true;
771 TAILQ_REMOVE(&__pthread_head
, t
, tl_plist
);
774 _PTHREAD_UNLOCK(_pthread_list_lock
);
777 // When we found a waiter, we want to drop the very contended list lock
778 // before we do the syscall in _pthread_joiner_wake(). Then, we decide
779 // who gets to cleanup the pthread_t between the joiner and the exiting
781 // - the joiner tries to set tl_join_ctx to NULL
782 // - the exiting thread tries to set tl_joiner_cleans_up to true
783 // Whoever does it first commits the other guy to cleanup the pthread_t
784 _pthread_joiner_wake(t
);
785 _PTHREAD_LOCK(_pthread_list_lock
);
786 if (t
->tl_join_ctx
) {
787 t
->tl_joiner_cleans_up
= true;
788 keep_thread_struct
= true;
790 _PTHREAD_UNLOCK(_pthread_list_lock
);
794 // /!\ dereferencing `t` past this point is not safe /!\
797 if (keep_thread_struct
|| t
== main_thread()) {
798 // Use the adjusted freesize of just the stack that we computed above.
799 freesize
= freesize_stack
;
801 _pthread_introspection_thread_destroy(t
);
804 // Check if there is nothing to free because the thread has a custom
805 // stack allocation and is joinable.
812 __bsdthread_terminate((void *)freeaddr
, freesize
, kport
, custom_stack_sema
);
813 PTHREAD_INTERNAL_CRASH(t
, "thread didn't terminate");
818 _pthread_terminate_invoke(pthread_t t
, void *exit_value
)
822 // <rdar://problem/25688492> During pthread termination there is a race
823 // between pthread_join and pthread_terminate; if the joiner is responsible
824 // for cleaning up the pthread_t struct, then it may destroy some part of the
825 // stack with it on 16k OSes. So that this doesn't cause _pthread_terminate()
826 // to crash because its stack has been removed from under its feet, just make
827 // sure termination happens in a part of the stack that is not on the same
828 // page as the pthread_t.
829 if (trunc_page((uintptr_t)__builtin_frame_address(0)) ==
830 trunc_page((uintptr_t)t
)) {
831 p
= alloca(PTHREAD_T_OFFSET
);
833 // And this __asm__ volatile is needed to stop the compiler from optimising
834 // away the alloca() completely.
835 __asm__
volatile ("" : : "r"(p
) );
837 _pthread_terminate(t
, exit_value
);
840 #pragma mark pthread start / body
843 * Create and start execution of a new thread.
845 PTHREAD_NOINLINE PTHREAD_NORETURN
847 _pthread_body(pthread_t self
, bool needs_tsd_base_set
)
849 _pthread_set_self_internal(self
, needs_tsd_base_set
);
850 __pthread_started_thread(self
);
851 _pthread_exit(self
, (self
->fun
)(self
->arg
));
856 _pthread_start(pthread_t self
, mach_port_t kport
,
857 __unused
void *(*fun
)(void *), __unused
void *arg
,
858 __unused
size_t stacksize
, unsigned int pflags
)
860 bool thread_tsd_bsd_set
= (bool)(pflags
& PTHREAD_START_TSD_BASE_SET
);
862 if (os_unlikely(pflags
& PTHREAD_START_SUSPENDED
)) {
863 PTHREAD_INTERNAL_CRASH(0,
864 "kernel without PTHREAD_START_SUSPENDED support");
867 PTHREAD_ASSERT(MACH_PORT_VALID(kport
));
868 PTHREAD_ASSERT(_pthread_kernel_thread(self
) == kport
);
870 // will mark the thread initialized
871 _pthread_markcancel_if_canceled(self
, kport
);
873 _pthread_body(self
, !thread_tsd_bsd_set
);
876 PTHREAD_ALWAYS_INLINE
878 _pthread_struct_init(pthread_t t
, const pthread_attr_t
*attrs
,
879 void *stackaddr
, size_t stacksize
, void *freeaddr
, size_t freesize
)
882 PTHREAD_ASSERT(t
->sig
!= _PTHREAD_SIG
);
885 t
->sig
= _PTHREAD_SIG
;
886 t
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_SELF
] = t
;
887 t
->tsd
[_PTHREAD_TSD_SLOT_ERRNO
] = &t
->err_no
;
888 if (attrs
->schedset
== 0) {
889 t
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = attrs
->qosclass
;
891 t
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] =
892 _pthread_unspecified_priority();
894 t
->tsd
[_PTHREAD_TSD_SLOT_PTR_MUNGE
] = _pthread_ptr_munge_token
;
895 t
->tl_has_custom_stack
= (attrs
->stackaddr
!= NULL
);
897 _PTHREAD_LOCK_INIT(t
->lock
);
899 t
->stackaddr
= stackaddr
;
900 t
->stackbottom
= stackaddr
- stacksize
;
901 t
->freeaddr
= freeaddr
;
902 t
->freesize
= freesize
;
904 t
->guardsize
= _pthread_attr_guardsize(attrs
);
905 t
->tl_joinable
= (attrs
->detached
== PTHREAD_CREATE_JOINABLE
);
906 t
->inherit
= attrs
->inherit
;
907 t
->tl_policy
= attrs
->policy
;
908 t
->schedset
= attrs
->schedset
;
909 _pthread_attr_get_schedparam(attrs
, &t
->tl_param
);
910 t
->cancel_state
= PTHREAD_CANCEL_ENABLE
| PTHREAD_CANCEL_DEFERRED
;
913 #pragma mark pthread public interface
915 /* Need to deprecate this in future */
917 _pthread_is_threaded(void)
919 return __is_threaded
;
922 /* Non portable public api to know whether this process has(had) atleast one thread
923 * apart from main thread. There could be race if there is a thread in the process of
924 * creation at the time of call . It does not tell whether there are more than one thread
925 * at this point of time.
928 pthread_is_threaded_np(void)
930 return __is_threaded
;
934 PTHREAD_NOEXPORT_VARIANT
936 pthread_mach_thread_np(pthread_t t
)
938 mach_port_t kport
= MACH_PORT_NULL
;
939 (void)_pthread_is_valid(t
, &kport
);
943 PTHREAD_NOEXPORT_VARIANT
945 pthread_from_mach_thread_np(mach_port_t kernel_thread
)
947 struct _pthread
*p
= NULL
;
949 /* No need to wait as mach port is already known */
950 _PTHREAD_LOCK(_pthread_list_lock
);
952 TAILQ_FOREACH(p
, &__pthread_head
, tl_plist
) {
953 if (_pthread_kernel_thread(p
) == kernel_thread
) {
958 _PTHREAD_UNLOCK(_pthread_list_lock
);
963 PTHREAD_NOEXPORT_VARIANT
965 pthread_get_stacksize_np(pthread_t t
)
968 size_t stacksize
= t
->stackaddr
- t
->stackbottom
;
971 return ESRCH
; // XXX bug?
974 #if !defined(__arm__) && !defined(__arm64__)
975 // The default rlimit based allocations will be provided with a stacksize
976 // of the current limit and a freesize of the max. However, custom
977 // allocations will just have the guard page to free. If we aren't in the
978 // latter case, call into rlimit to determine the current stack size. In
979 // the event that the current limit == max limit then we'll fall down the
980 // fast path, but since it's unlikely that the limit is going to be lowered
981 // after it's been change to the max, we should be fine.
983 // Of course, on arm rlim_cur == rlim_max and there's only the one guard
984 // page. So, we can skip all this there.
985 if (t
== main_thread() && stacksize
+ vm_page_size
!= t
->freesize
) {
986 // We want to call getrlimit() just once, as it's relatively expensive
987 static size_t rlimit_stack
;
989 if (rlimit_stack
== 0) {
991 int ret
= getrlimit(RLIMIT_STACK
, &limit
);
994 rlimit_stack
= (size_t) limit
.rlim_cur
;
998 if (rlimit_stack
== 0 || rlimit_stack
> t
->freesize
) {
1001 return rlimit_stack
;
1004 #endif /* !defined(__arm__) && !defined(__arm64__) */
1006 if (t
== pthread_self() || t
== main_thread()) {
1011 if (_pthread_validate_thread_and_list_lock(t
)) {
1013 _PTHREAD_UNLOCK(_pthread_list_lock
);
1015 size
= ESRCH
; // XXX bug?
1019 // <rdar://problem/42588315> binary compatibility issues force us to return
1020 // DEFAULT_STACK_SIZE here when we do not know the size of the stack
1021 return size
? size
: DEFAULT_STACK_SIZE
;
1024 PTHREAD_NOEXPORT_VARIANT
1026 pthread_get_stackaddr_np(pthread_t t
)
1028 // since the main thread will not get de-allocated from underneath us
1029 if (t
== pthread_self() || t
== main_thread()) {
1030 return t
->stackaddr
;
1033 if (!_pthread_validate_thread_and_list_lock(t
)) {
1034 return (void *)(uintptr_t)ESRCH
; // XXX bug?
1037 void *addr
= t
->stackaddr
;
1038 _PTHREAD_UNLOCK(_pthread_list_lock
);
1044 _pthread_reply_port(pthread_t t
)
1048 p
= _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY
);
1050 p
= t
->tsd
[_PTHREAD_TSD_SLOT_MIG_REPLY
];
1052 return (mach_port_t
)(uintptr_t)p
;
1056 _pthread_set_reply_port(pthread_t t
, mach_port_t reply_port
)
1058 void *p
= (void *)(uintptr_t)reply_port
;
1060 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY
, p
);
1062 t
->tsd
[_PTHREAD_TSD_SLOT_MIG_REPLY
] = p
;
1067 _pthread_dealloc_reply_port(pthread_t t
)
1069 mach_port_t reply_port
= _pthread_reply_port(t
);
1070 if (reply_port
!= MACH_PORT_NULL
) {
1071 mig_dealloc_reply_port(reply_port
);
1076 _pthread_special_reply_port(pthread_t t
)
1080 p
= _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY
);
1082 p
= t
->tsd
[_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY
];
1084 return (mach_port_t
)(uintptr_t)p
;
1088 _pthread_dealloc_special_reply_port(pthread_t t
)
1090 mach_port_t special_reply_port
= _pthread_special_reply_port(t
);
1091 if (special_reply_port
!= MACH_PORT_NULL
) {
1092 thread_destruct_special_reply_port(special_reply_port
,
1093 THREAD_SPECIAL_REPLY_PORT_ALL
);
1098 pthread_main_thread_np(void)
1100 return main_thread();
1103 /* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
	// Non-zero exactly when the caller is the main thread.
	return pthread_self() == main_thread();
}
1112 * if we are passed in a pthread_t that is NULL, then we return the current
1113 * thread's thread_id. So folks don't have to call pthread_self, in addition to
1114 * us doing it, if they just want their thread_id.
1116 PTHREAD_NOEXPORT_VARIANT
1118 pthread_threadid_np(pthread_t thread
, uint64_t *thread_id
)
1121 pthread_t self
= pthread_self();
1123 if (thread_id
== NULL
) {
1127 if (thread
== NULL
|| thread
== self
) {
1128 *thread_id
= self
->thread_id
;
1129 } else if (!_pthread_validate_thread_and_list_lock(thread
)) {
1132 if (thread
->thread_id
== 0) {
1135 *thread_id
= thread
->thread_id
;
1137 _PTHREAD_UNLOCK(_pthread_list_lock
);
1142 PTHREAD_NOEXPORT_VARIANT
1144 pthread_getname_np(pthread_t thread
, char *threadname
, size_t len
)
1146 if (thread
== pthread_self()) {
1147 strlcpy(threadname
, thread
->pthread_name
, len
);
1151 if (!_pthread_validate_thread_and_list_lock(thread
)) {
1155 strlcpy(threadname
, thread
->pthread_name
, len
);
1156 _PTHREAD_UNLOCK(_pthread_list_lock
);
1162 pthread_setname_np(const char *name
)
1165 pthread_t self
= pthread_self();
1172 /* protytype is in pthread_internals.h */
1173 res
= __proc_info(5, getpid(), 2, (uint64_t)0, (void*)name
, (int)len
);
1176 strlcpy(self
->pthread_name
, name
, MAXTHREADNAMESIZE
);
1178 bzero(self
->pthread_name
, MAXTHREADNAMESIZE
);
1185 PTHREAD_ALWAYS_INLINE
1187 __pthread_add_thread(pthread_t t
, bool from_mach_thread
)
1189 if (from_mach_thread
) {
1190 _PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock
);
1192 _PTHREAD_LOCK(_pthread_list_lock
);
1195 TAILQ_INSERT_TAIL(&__pthread_head
, t
, tl_plist
);
1198 if (from_mach_thread
) {
1199 _PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock
);
1201 _PTHREAD_UNLOCK(_pthread_list_lock
);
1204 if (!from_mach_thread
) {
1205 // PR-26275485: Mach threads will likely crash trying to run
1206 // introspection code. Since the fall out from the introspection
1207 // code not seeing the injected thread is likely less than crashing
1208 // in the introspection code, just don't make the call.
1209 _pthread_introspection_thread_create(t
);
1213 PTHREAD_ALWAYS_INLINE
1215 __pthread_undo_add_thread(pthread_t t
, bool from_mach_thread
)
1217 if (from_mach_thread
) {
1218 _PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock
);
1220 _PTHREAD_LOCK(_pthread_list_lock
);
1223 TAILQ_REMOVE(&__pthread_head
, t
, tl_plist
);
1226 if (from_mach_thread
) {
1227 _PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock
);
1229 _PTHREAD_UNLOCK(_pthread_list_lock
);
1233 PTHREAD_ALWAYS_INLINE
1235 __pthread_started_thread(pthread_t t
)
1237 mach_port_t kport
= _pthread_kernel_thread(t
);
1238 if (os_slowpath(!MACH_PORT_VALID(kport
))) {
1239 PTHREAD_CLIENT_CRASH(kport
,
1240 "Unable to allocate thread port, possible port leak");
1242 _pthread_introspection_thread_start(t
);
1245 #define _PTHREAD_CREATE_NONE 0x0
1246 #define _PTHREAD_CREATE_FROM_MACH_THREAD 0x1
1247 #define _PTHREAD_CREATE_SUSPENDED 0x2
1250 _pthread_create(pthread_t
*thread
, const pthread_attr_t
*attrs
,
1251 void *(*start_routine
)(void *), void *arg
, unsigned int create_flags
)
1255 bool from_mach_thread
= (create_flags
& _PTHREAD_CREATE_FROM_MACH_THREAD
);
1257 if (attrs
== NULL
) {
1258 attrs
= &_pthread_attr_default
;
1259 } else if (attrs
->sig
!= _PTHREAD_ATTR_SIG
) {
1263 unsigned int flags
= PTHREAD_START_CUSTOM
;
1264 if (attrs
->schedset
!= 0) {
1265 struct sched_param p
;
1266 _pthread_attr_get_schedparam(attrs
, &p
);
1267 flags
|= PTHREAD_START_SETSCHED
;
1268 flags
|= ((attrs
->policy
& PTHREAD_START_POLICY_MASK
) << PTHREAD_START_POLICY_BITSHIFT
);
1269 flags
|= (p
.sched_priority
& PTHREAD_START_IMPORTANCE_MASK
);
1270 } else if (attrs
->qosclass
!= 0) {
1271 flags
|= PTHREAD_START_QOSCLASS
;
1272 flags
|= (attrs
->qosclass
& PTHREAD_START_QOSCLASS_MASK
);
1274 if (create_flags
& _PTHREAD_CREATE_SUSPENDED
) {
1275 flags
|= PTHREAD_START_SUSPENDED
;
1280 t
=_pthread_allocate(attrs
, &stack
);
1286 t
->fun
= start_routine
;
1287 __pthread_add_thread(t
, from_mach_thread
);
1289 if (__bsdthread_create(start_routine
, arg
, stack
, t
, flags
) ==
1291 if (errno
== EMFILE
) {
1292 PTHREAD_CLIENT_CRASH(0,
1293 "Unable to allocate thread port, possible port leak");
1295 __pthread_undo_add_thread(t
, from_mach_thread
);
1296 _pthread_deallocate(t
, from_mach_thread
);
1300 if (create_flags
& _PTHREAD_CREATE_SUSPENDED
) {
1301 _pthread_markcancel_if_canceled(t
, _pthread_kernel_thread(t
));
1304 // n.b. if a thread is created detached and exits, t will be invalid
1310 pthread_create(pthread_t
*thread
, const pthread_attr_t
*attr
,
1311 void *(*start_routine
)(void *), void *arg
)
1313 unsigned int flags
= _PTHREAD_CREATE_NONE
;
1314 return _pthread_create(thread
, attr
, start_routine
, arg
, flags
);
1318 pthread_create_from_mach_thread(pthread_t
*thread
, const pthread_attr_t
*attr
,
1319 void *(*start_routine
)(void *), void *arg
)
1321 unsigned int flags
= _PTHREAD_CREATE_FROM_MACH_THREAD
;
1322 return _pthread_create(thread
, attr
, start_routine
, arg
, flags
);
1325 #if !defined(__OPEN_SOURCE__) && TARGET_OS_OSX // 40703288
1326 /* Functions defined in machine-dependent files. */
1327 PTHREAD_NOEXPORT
void _pthread_setup_suspended(pthread_t th
, void (*f
)(pthread_t
), void *sp
);
1331 _pthread_suspended_body(pthread_t self
)
1333 _pthread_set_self(self
);
1334 __pthread_started_thread(self
);
1335 _pthread_exit(self
, (self
->fun
)(self
->arg
));
1339 _pthread_create_suspended_np(pthread_t
*thread
, const pthread_attr_t
*attrs
,
1340 void *(*start_routine
)(void *), void *arg
)
1344 mach_port_t kernel_thread
= MACH_PORT_NULL
;
1346 if (attrs
== NULL
) {
1347 attrs
= &_pthread_attr_default
;
1348 } else if (attrs
->sig
!= _PTHREAD_ATTR_SIG
) {
1352 t
= _pthread_allocate(attrs
, &stack
);
1357 if (thread_create(mach_task_self(), &kernel_thread
) != KERN_SUCCESS
) {
1358 _pthread_deallocate(t
, false);
1362 _pthread_set_kernel_thread(t
, kernel_thread
);
1363 (void)pthread_setschedparam_internal(t
, kernel_thread
,
1364 t
->tl_policy
, &t
->tl_param
);
1369 t
->fun
= start_routine
;
1370 t
->cancel_state
|= _PTHREAD_CANCEL_INITIALIZED
;
1371 __pthread_add_thread(t
, false);
1373 // Set up a suspended thread.
1374 _pthread_setup_suspended(t
, _pthread_suspended_body
, stack
);
1378 #endif // !defined(__OPEN_SOURCE__) && TARGET_OS_OSX
1381 pthread_create_suspended_np(pthread_t
*thread
, const pthread_attr_t
*attr
,
1382 void *(*start_routine
)(void *), void *arg
)
1384 #if !defined(__OPEN_SOURCE__) && TARGET_OS_OSX // 40703288
1385 if (_os_xbs_chrooted
) {
1386 return _pthread_create_suspended_np(thread
, attr
, start_routine
, arg
);
1389 unsigned int flags
= _PTHREAD_CREATE_SUSPENDED
;
1390 return _pthread_create(thread
, attr
, start_routine
, arg
, flags
);
1394 PTHREAD_NOEXPORT_VARIANT
1396 pthread_detach(pthread_t thread
)
1399 bool join
= false, wake
= false;
1401 if (!_pthread_validate_thread_and_list_lock(thread
)) {
1405 if (!thread
->tl_joinable
) {
1407 } else if (thread
->tl_exit_gate
== MACH_PORT_DEAD
) {
1408 // Join the thread if it's already exited.
1411 thread
->tl_joinable
= false; // _pthread_joiner_prepost_wake uses this
1412 if (thread
->tl_join_ctx
) {
1413 (void)_pthread_joiner_prepost_wake(thread
);
1417 _PTHREAD_UNLOCK(_pthread_list_lock
);
1420 pthread_join(thread
, NULL
);
1422 _pthread_joiner_wake(thread
);
1427 PTHREAD_NOEXPORT_VARIANT
1429 pthread_kill(pthread_t th
, int sig
)
1431 if (sig
< 0 || sig
> NSIG
) {
1435 mach_port_t kport
= MACH_PORT_NULL
;
1436 if (!_pthread_is_valid(th
, &kport
)) {
1437 return ESRCH
; // Not a valid thread.
1440 // Don't signal workqueue threads.
1441 if (th
->wqthread
!= 0 && th
->wqkillset
== 0) {
1445 int ret
= __pthread_kill(kport
, sig
);
1453 PTHREAD_NOEXPORT_VARIANT
1455 __pthread_workqueue_setkill(int enable
)
1457 pthread_t self
= pthread_self();
1459 _PTHREAD_LOCK(self
->lock
);
1460 self
->wqkillset
= enable
? 1 : 0;
1461 _PTHREAD_UNLOCK(self
->lock
);
/* For compatibility... */
// NOTE(review): surrounding lines are missing from this extraction; the
// visible body (`return pthread_self();`) matches the legacy _pthread_self
// compatibility shim — confirm the exact signature against upstream.
pthread_t
_pthread_self(void)
{
	return pthread_self();
}
1476 * Terminate a thread.
1478 extern int __disable_threadsignal(int);
1482 _pthread_exit(pthread_t self
, void *exit_value
)
1484 struct __darwin_pthread_handler_rec
*handler
;
1486 // Disable signal delivery while we clean up
1487 __disable_threadsignal(1);
1489 // Set cancel state to disable and type to deferred
1490 _pthread_setcancelstate_exit(self
, exit_value
);
1492 while ((handler
= self
->__cleanup_stack
) != 0) {
1493 (handler
->__routine
)(handler
->__arg
);
1494 self
->__cleanup_stack
= handler
->__next
;
1496 _pthread_tsd_cleanup(self
);
1498 // Clear per-thread semaphore cache
1499 os_put_cached_semaphore(SEMAPHORE_NULL
);
1501 _pthread_terminate_invoke(self
, exit_value
);
1505 pthread_exit(void *exit_value
)
1507 pthread_t self
= pthread_self();
1508 if (os_unlikely(self
->wqthread
)) {
1509 PTHREAD_CLIENT_CRASH(0, "pthread_exit() called from a thread "
1510 "not created by pthread_create()");
1512 _pthread_exit(self
, exit_value
);
1516 PTHREAD_NOEXPORT_VARIANT
1518 pthread_getschedparam(pthread_t thread
, int *policy
, struct sched_param
*param
)
1520 if (!_pthread_validate_thread_and_list_lock(thread
)) {
1524 if (policy
) *policy
= thread
->tl_policy
;
1525 if (param
) *param
= thread
->tl_param
;
1526 _PTHREAD_UNLOCK(_pthread_list_lock
);
1532 PTHREAD_ALWAYS_INLINE
1534 pthread_setschedparam_internal(pthread_t thread
, mach_port_t kport
, int policy
,
1535 const struct sched_param
*param
)
1537 policy_base_data_t bases
;
1539 mach_msg_type_number_t count
;
1544 bases
.ts
.base_priority
= param
->sched_priority
;
1545 base
= (policy_base_t
)&bases
.ts
;
1546 count
= POLICY_TIMESHARE_BASE_COUNT
;
1549 bases
.fifo
.base_priority
= param
->sched_priority
;
1550 base
= (policy_base_t
)&bases
.fifo
;
1551 count
= POLICY_FIFO_BASE_COUNT
;
1554 bases
.rr
.base_priority
= param
->sched_priority
;
1555 /* quantum isn't public yet */
1556 bases
.rr
.quantum
= param
->quantum
;
1557 base
= (policy_base_t
)&bases
.rr
;
1558 count
= POLICY_RR_BASE_COUNT
;
1563 ret
= thread_policy(kport
, policy
, base
, count
, TRUE
);
1564 return (ret
!= KERN_SUCCESS
) ? EINVAL
: 0;
1567 PTHREAD_NOEXPORT_VARIANT
1569 pthread_setschedparam(pthread_t t
, int policy
, const struct sched_param
*param
)
1571 mach_port_t kport
= MACH_PORT_NULL
;
1574 // since the main thread will not get de-allocated from underneath us
1575 if (t
== pthread_self() || t
== main_thread()) {
1576 kport
= _pthread_kernel_thread(t
);
1579 if (!_pthread_is_valid(t
, &kport
)) {
1584 int res
= pthread_setschedparam_internal(t
, kport
, policy
, param
);
1585 if (res
) return res
;
1588 _PTHREAD_LOCK(_pthread_list_lock
);
1589 } else if (!_pthread_validate_thread_and_list_lock(t
)) {
1590 // Ensure the thread is still valid.
1594 t
->tl_policy
= policy
;
1595 t
->tl_param
= *param
;
1596 _PTHREAD_UNLOCK(_pthread_list_lock
);
1602 sched_get_priority_min(int policy
)
1604 return default_priority
- 16;
1608 sched_get_priority_max(int policy
)
1610 return default_priority
+ 16;
// Two pthread_t handles name the same thread iff they are identical.
int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}
1620 * Force LLVM not to optimise this to a call to __pthread_set_self, if it does
1621 * then _pthread_set_self won't be bound when secondary threads try and start up.
1625 _pthread_set_self(pthread_t p
)
1628 if (os_likely(!p
)) {
1629 return _pthread_set_self_dyld();
1631 #endif // VARIANT_DYLD
1632 _pthread_set_self_internal(p
, true);
1636 // _pthread_set_self_dyld is noinline+noexport to allow the option for
1637 // static libsyscall to adopt this as the entry point from mach_init if
1639 PTHREAD_NOINLINE PTHREAD_NOEXPORT
1641 _pthread_set_self_dyld(void)
1643 pthread_t p
= main_thread();
1644 p
->thread_id
= __thread_selfid();
1646 if (os_unlikely(p
->thread_id
== -1ull)) {
1647 PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
1650 // <rdar://problem/40930651> pthread self and the errno address are the
1651 // bare minimium TSD setup that dyld needs to actually function. Without
1652 // this, TSD access will fail and crash if it uses bits of Libc prior to
1653 // library initialization. __pthread_init will finish the initialization
1654 // during library init.
1655 p
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_SELF
] = p
;
1656 p
->tsd
[_PTHREAD_TSD_SLOT_ERRNO
] = &p
->err_no
;
1657 _thread_set_tsd_base(&p
->tsd
[0]);
1659 #endif // VARIANT_DYLD
1661 PTHREAD_ALWAYS_INLINE
1663 _pthread_set_self_internal(pthread_t p
, bool needs_tsd_base_set
)
1665 p
->thread_id
= __thread_selfid();
1667 if (os_unlikely(p
->thread_id
== -1ull)) {
1668 PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
1671 if (needs_tsd_base_set
) {
1672 _thread_set_tsd_base(&p
->tsd
[0]);
1677 // <rdar://problem/28984807> pthread_once should have an acquire barrier
1678 PTHREAD_ALWAYS_INLINE
1680 _os_once_acquire(os_once_t
*predicate
, void *context
, os_function_t function
)
1682 if (OS_EXPECT(os_atomic_load(predicate
, acquire
), ~0l) != ~0l) {
1683 _os_once(predicate
, context
, function
);
1684 OS_COMPILER_CAN_ASSUME(*predicate
== ~0l);
1688 struct _pthread_once_context
{
1689 pthread_once_t
*pthread_once
;
1690 void (*routine
)(void);
1694 __pthread_once_handler(void *context
)
1696 struct _pthread_once_context
*ctx
= context
;
1697 pthread_cleanup_push((void*)__os_once_reset
, &ctx
->pthread_once
->once
);
1699 pthread_cleanup_pop(0);
1700 ctx
->pthread_once
->sig
= _PTHREAD_ONCE_SIG
;
1703 PTHREAD_NOEXPORT_VARIANT
1705 pthread_once(pthread_once_t
*once_control
, void (*init_routine
)(void))
1707 struct _pthread_once_context ctx
= { once_control
, init_routine
};
1709 _os_once_acquire(&once_control
->once
, &ctx
, __pthread_once_handler
);
1710 } while (once_control
->sig
== _PTHREAD_ONCE_SIG_init
);
1716 pthread_getconcurrency(void)
1718 return pthread_concurrency
;
1722 pthread_setconcurrency(int new_level
)
1724 if (new_level
< 0) {
1727 pthread_concurrency
= new_level
;
1731 #if !defined(VARIANT_STATIC)
1735 if (_pthread_malloc
) {
1736 return _pthread_malloc(sz
);
1745 if (_pthread_free
) {
1749 #endif // VARIANT_STATIC
1752 * Perform package initialization - called automatically when application starts
1754 struct ProgramVars
; /* forward reference */
// Minimal hex parser usable before Libc is initialized. Only accepts a
// value prefixed with "0x" (for base 16 or 0); otherwise consumes nothing
// and returns 0. *endptr is left at the first unconsumed character.
static unsigned long
_pthread_strtoul(const char *p, const char **endptr, int base)
{
	unsigned long val = 0;

	// Expect hex string starting with "0x"
	if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') {
		p += 2;
		while (1) {
			char c = *p;
			if ('0' <= c && c <= '9') {
				val = (val << 4) + (c - '0');
			} else if ('a' <= c && c <= 'f') {
				val = (val << 4) + (c - 'a' + 10);
			} else if ('A' <= c && c <= 'F') {
				val = (val << 4) + (c - 'A' + 10);
			} else {
				break;
			}
			p++;
		}
	}

	*endptr = (char *)p;
	return val;
}
1785 parse_main_stack_params(const char *apple
[],
1791 const char *p
= _simple_getenv(apple
, "main_stack");
1797 *stackaddr
= _pthread_strtoul(s
, &s
, 16);
1798 if (*s
!= ',') goto out
;
1800 *stacksize
= _pthread_strtoul(s
+ 1, &s
, 16);
1801 if (*s
!= ',') goto out
;
1803 *allocaddr
= _pthread_strtoul(s
+ 1, &s
, 16);
1804 if (*s
!= ',') goto out
;
1806 *allocsize
= _pthread_strtoul(s
+ 1, &s
, 16);
1807 if (*s
!= ',' && *s
!= 0) goto out
;
1811 bzero((char *)p
, strlen(p
));
1816 parse_ptr_munge_params(const char *envp
[], const char *apple
[])
1819 p
= _simple_getenv(apple
, "ptr_munge");
1821 _pthread_ptr_munge_token
= _pthread_strtoul(p
, &s
, 16);
1822 bzero((char *)p
, strlen(p
));
1825 if (_pthread_ptr_munge_token
) return;
1827 p
= _simple_getenv(envp
, "PTHREAD_PTR_MUNGE_TOKEN");
1829 uintptr_t t
= _pthread_strtoul(p
, &s
, 16);
1830 if (t
) _pthread_ptr_munge_token
= t
;
1835 __pthread_init(const struct _libpthread_functions
*pthread_funcs
,
1836 const char *envp
[], const char *apple
[],
1837 const struct ProgramVars
*vars __unused
)
1839 // Save our provided pushed-down functions
1840 if (pthread_funcs
) {
1841 exitf
= pthread_funcs
->exit
;
1843 if (pthread_funcs
->version
>= 2) {
1844 _pthread_malloc
= pthread_funcs
->malloc
;
1845 _pthread_free
= pthread_funcs
->free
;
1850 // Get host information
1854 host_flavor_t flavor
= HOST_PRIORITY_INFO
;
1855 mach_msg_type_number_t count
= HOST_PRIORITY_INFO_COUNT
;
1856 host_priority_info_data_t priority_info
;
1857 host_t host
= mach_host_self();
1858 kr
= host_info(host
, flavor
, (host_info_t
)&priority_info
, &count
);
1859 if (kr
!= KERN_SUCCESS
) {
1860 PTHREAD_INTERNAL_CRASH(kr
, "host_info() failed");
1862 default_priority
= (uint8_t)priority_info
.user_priority
;
1863 min_priority
= (uint8_t)priority_info
.minimum_priority
;
1864 max_priority
= (uint8_t)priority_info
.maximum_priority
;
1866 mach_port_deallocate(mach_task_self(), host
);
1869 // Set up the main thread structure
1872 // Get the address and size of the main thread's stack from the kernel.
1873 void *stackaddr
= 0;
1874 size_t stacksize
= 0;
1875 void *allocaddr
= 0;
1876 size_t allocsize
= 0;
1877 if (!parse_main_stack_params(apple
, &stackaddr
, &stacksize
, &allocaddr
, &allocsize
) ||
1878 stackaddr
== NULL
|| stacksize
== 0) {
1879 // Fall back to previous bevhaior.
1880 size_t len
= sizeof(stackaddr
);
1881 int mib
[] = { CTL_KERN
, KERN_USRSTACK
};
1882 if (__sysctl(mib
, 2, &stackaddr
, &len
, NULL
, 0) != 0) {
1883 #if defined(__LP64__)
1884 stackaddr
= (void *)USRSTACK64
;
1886 stackaddr
= (void *)USRSTACK
;
1889 stacksize
= DFLSSIZ
;
1894 // Initialize random ptr_munge token from the kernel.
1895 parse_ptr_munge_params(envp
, apple
);
1897 // libpthread.a in dyld "owns" the main thread structure itself and sets
1898 // up the tsd to point to it. So take the pthread_self() from there
1899 // and make it our main thread point.
1900 pthread_t thread
= (pthread_t
)_pthread_getspecific_direct(
1901 _PTHREAD_TSD_SLOT_PTHREAD_SELF
);
1902 PTHREAD_ASSERT(thread
);
1903 _main_thread_ptr
= thread
;
1905 PTHREAD_ASSERT(_pthread_attr_default
.qosclass
==
1906 _pthread_default_priority(0));
1907 _pthread_struct_init(thread
, &_pthread_attr_default
,
1908 stackaddr
, stacksize
, allocaddr
, allocsize
);
1909 thread
->tl_joinable
= true;
1911 // Finish initialization with common code that is reinvoked on the
1912 // child side of a fork.
1914 // Finishes initialization of main thread attributes.
1915 // Initializes the thread list and add the main thread.
1916 // Calls _pthread_set_self() to prepare the main thread for execution.
1917 _pthread_main_thread_init(thread
);
1919 struct _pthread_registration_data registration_data
;
1920 // Set up kernel entry points with __bsdthread_register.
1921 _pthread_bsdthread_init(®istration_data
);
1923 // Have pthread_key and pthread_mutex do their init envvar checks.
1924 _pthread_key_global_init(envp
);
1925 _pthread_mutex_global_init(envp
, ®istration_data
);
1927 #if PTHREAD_DEBUG_LOG
1928 _SIMPLE_STRING path
= _simple_salloc();
1929 _simple_sprintf(path
, "/var/tmp/libpthread.%d.log", getpid());
1930 _pthread_debuglog
= open(_simple_string(path
),
1931 O_WRONLY
| O_APPEND
| O_CREAT
| O_NOFOLLOW
| O_CLOEXEC
, 0666);
1932 _simple_sfree(path
);
1933 _pthread_debugstart
= mach_absolute_time();
1938 #endif // !VARIANT_DYLD
1940 PTHREAD_NOEXPORT
void
1941 _pthread_main_thread_init(pthread_t p
)
1943 TAILQ_INIT(&__pthread_head
);
1944 _PTHREAD_LOCK_INIT(_pthread_list_lock
);
1945 _PTHREAD_LOCK_INIT(p
->lock
);
1946 _pthread_set_kernel_thread(p
, mach_thread_self());
1947 _pthread_set_reply_port(p
, mach_reply_port());
1948 p
->__cleanup_stack
= NULL
;
1949 p
->tl_join_ctx
= NULL
;
1950 p
->tl_exit_gate
= MACH_PORT_NULL
;
1951 p
->tsd
[__TSD_SEMAPHORE_CACHE
] = (void*)SEMAPHORE_NULL
;
1952 p
->tsd
[__TSD_MACH_SPECIAL_REPLY
] = 0;
1953 p
->cancel_state
|= _PTHREAD_CANCEL_INITIALIZED
;
1955 // Initialize the list of threads with the new main thread.
1956 TAILQ_INSERT_HEAD(&__pthread_head
, p
, tl_plist
);
1959 _pthread_introspection_thread_start(p
);
// Yield the processor to another runnable thread.
// NOTE(review): body lost to extraction; upstream forwards to the
// scheduler yield primitive — verify the exact call used.
void
pthread_yield_np(void)
{
	sched_yield();
}
1984 // Libsystem knows about this symbol and exports it to libsyscall
1985 PTHREAD_NOEXPORT_VARIANT
1987 _pthread_clear_qos_tsd(mach_port_t thread_port
)
1989 if (thread_port
== MACH_PORT_NULL
|| (uintptr_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF
) == thread_port
) {
1990 /* Clear the current thread's TSD, that can be done inline. */
1991 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
,
1992 _pthread_unspecified_priority());
1996 _PTHREAD_LOCK(_pthread_list_lock
);
1998 TAILQ_FOREACH(p
, &__pthread_head
, tl_plist
) {
1999 mach_port_t kp
= _pthread_kernel_thread(p
);
2000 if (thread_port
== kp
) {
2001 p
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] =
2002 _pthread_unspecified_priority();
2007 _PTHREAD_UNLOCK(_pthread_list_lock
);
2012 #pragma mark pthread/stack_np.h public interface
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__arm64__)
typedef uintptr_t frame_data_addr_t;

// Layout of one frame record in the frame-pointer chain: the saved
// caller frame pointer followed by the saved return address.
struct frame_data {
	frame_data_addr_t frame_addr_next;
	frame_data_addr_t ret_addr;
};
#else
#error ********** Unimplemented architecture
#endif

// Decode one stack frame: optionally store its return address through
// return_addr and hand back the caller's frame record address so the
// walk can continue.
uintptr_t
pthread_stack_frame_decode_np(uintptr_t frame_addr, uintptr_t *return_addr)
{
	struct frame_data *frame = (struct frame_data *)frame_addr;

	if (return_addr) {
		*return_addr = (uintptr_t)frame->ret_addr;
	}

	return (uintptr_t)frame->frame_addr_next;
}
2039 #pragma mark pthread workqueue support routines
2042 PTHREAD_NOEXPORT
void
2043 _pthread_bsdthread_init(struct _pthread_registration_data
*data
)
2045 bzero(data
, sizeof(*data
));
2046 data
->version
= sizeof(struct _pthread_registration_data
);
2047 data
->dispatch_queue_offset
= __PTK_LIBDISPATCH_KEY0
* sizeof(void *);
2048 data
->return_to_kernel_offset
= __TSD_RETURN_TO_KERNEL
* sizeof(void *);
2049 data
->tsd_offset
= offsetof(struct _pthread
, tsd
);
2050 data
->mach_thread_self_offset
= __TSD_MACH_THREAD_SELF
* sizeof(void *);
2052 int rv
= __bsdthread_register(thread_start
, start_wqthread
, (int)PTHREAD_SIZE
,
2053 (void*)data
, (uintptr_t)sizeof(*data
), data
->dispatch_queue_offset
);
2056 int required_features
=
2057 PTHREAD_FEATURE_FINEPRIO
|
2058 PTHREAD_FEATURE_BSDTHREADCTL
|
2059 PTHREAD_FEATURE_SETSELF
|
2060 PTHREAD_FEATURE_QOS_MAINTENANCE
|
2061 PTHREAD_FEATURE_QOS_DEFAULT
;
2062 if ((rv
& required_features
) != required_features
) {
2063 PTHREAD_INTERNAL_CRASH(rv
, "Missing required kernel support");
2065 __pthread_supported_features
= rv
;
2069 * TODO: differentiate between (-1, EINVAL) after fork (which has the side
2070 * effect of resetting the child's stack_addr_hint before bailing out) and
2071 * (-1, EINVAL) because of invalid arguments. We'd probably like to treat
2072 * the latter as fatal.
2074 * <rdar://problem/36451838>
2077 pthread_priority_t main_qos
= (pthread_priority_t
)data
->main_qos
;
2079 if (_pthread_priority_thread_qos(main_qos
) != THREAD_QOS_UNSPECIFIED
) {
2080 _pthread_set_main_qos(main_qos
);
2081 main_thread()->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = main_qos
;
2084 if (data
->stack_addr_hint
) {
2085 __pthread_stack_hint
= data
->stack_addr_hint
;
2088 if (__libdispatch_workerfunction
!= NULL
) {
2089 // prepare the kernel for workq action
2090 (void)__workq_open();
2096 _pthread_wqthread_legacy_worker_wrap(pthread_priority_t pp
)
2098 /* Old thread priorities are inverted from where we have them in
2099 * the new flexible priority scheme. The highest priority is zero,
2100 * up to 2, with background at 3.
2102 pthread_workqueue_function_t func
= (pthread_workqueue_function_t
)__libdispatch_workerfunction
;
2103 bool overcommit
= (pp
& _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
);
2104 int opts
= overcommit
? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT
: 0;
2106 switch (_pthread_priority_thread_qos(pp
)) {
2107 case THREAD_QOS_USER_INITIATED
:
2108 return (*func
)(WORKQ_HIGH_PRIOQUEUE
, opts
, NULL
);
2109 case THREAD_QOS_LEGACY
:
2110 /* B&I builders can't pass a QOS_CLASS_DEFAULT thread to dispatch, for fear of the QoS being
2111 * picked up by NSThread (et al) and transported around the system. So change the TSD to
2112 * make this thread look like QOS_CLASS_USER_INITIATED even though it will still run as legacy.
2114 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
,
2115 _pthread_priority_make_from_thread_qos(THREAD_QOS_USER_INITIATED
, 0, 0));
2116 return (*func
)(WORKQ_DEFAULT_PRIOQUEUE
, opts
, NULL
);
2117 case THREAD_QOS_UTILITY
:
2118 return (*func
)(WORKQ_LOW_PRIOQUEUE
, opts
, NULL
);
2119 case THREAD_QOS_BACKGROUND
:
2120 return (*func
)(WORKQ_BG_PRIOQUEUE
, opts
, NULL
);
2122 PTHREAD_INTERNAL_CRASH(pp
, "Invalid pthread priority for the legacy interface");
2125 PTHREAD_ALWAYS_INLINE
2126 static inline pthread_priority_t
2127 _pthread_wqthread_priority(int flags
)
2129 pthread_priority_t pp
= 0;
2132 if (flags
& WQ_FLAG_THREAD_KEVENT
) {
2133 pp
|= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG
;
2135 if (flags
& WQ_FLAG_THREAD_EVENT_MANAGER
) {
2136 return pp
| _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG
;
2139 if (flags
& WQ_FLAG_THREAD_OVERCOMMIT
) {
2140 pp
|= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
;
2142 if (flags
& WQ_FLAG_THREAD_PRIO_QOS
) {
2143 qos
= (thread_qos_t
)(flags
& WQ_FLAG_THREAD_PRIO_MASK
);
2144 pp
= _pthread_priority_make_from_thread_qos(qos
, 0, pp
);
2145 } else if (flags
& WQ_FLAG_THREAD_PRIO_SCHED
) {
2146 pp
|= _PTHREAD_PRIORITY_SCHED_PRI_MASK
;
2147 pp
|= (flags
& WQ_FLAG_THREAD_PRIO_MASK
);
2149 PTHREAD_INTERNAL_CRASH(flags
, "Missing priority");
2156 _pthread_wqthread_setup(pthread_t self
, mach_port_t kport
, void *stacklowaddr
,
2159 void *stackaddr
= self
;
2160 size_t stacksize
= (uintptr_t)self
- (uintptr_t)stacklowaddr
;
2162 _pthread_struct_init(self
, &_pthread_attr_default
, stackaddr
, stacksize
,
2163 PTHREAD_ALLOCADDR(stackaddr
, stacksize
),
2164 PTHREAD_ALLOCSIZE(stackaddr
, stacksize
));
2166 _pthread_set_kernel_thread(self
, kport
);
2168 self
->wqkillset
= 0;
2169 self
->tl_joinable
= false;
2170 self
->cancel_state
|= _PTHREAD_CANCEL_INITIALIZED
;
2172 // Update the running thread count and set childrun bit.
2173 bool thread_tsd_base_set
= (bool)(flags
& WQ_FLAG_THREAD_TSD_BASE_SET
);
2174 _pthread_set_self_internal(self
, !thread_tsd_base_set
);
2175 __pthread_add_thread(self
, false);
2176 __pthread_started_thread(self
);
2179 PTHREAD_NORETURN PTHREAD_NOINLINE
2181 _pthread_wqthread_exit(pthread_t self
)
2183 pthread_priority_t pp
;
2186 pp
= (pthread_priority_t
)self
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
];
2187 qos
= _pthread_priority_thread_qos(pp
);
2188 if (qos
== THREAD_QOS_UNSPECIFIED
|| qos
> WORKQ_THREAD_QOS_CLEANUP
) {
2189 // Reset QoS to something low for the cleanup process
2190 pp
= _pthread_priority_make_from_thread_qos(WORKQ_THREAD_QOS_CLEANUP
, 0, 0);
2191 self
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = (void *)pp
;
2194 _pthread_exit(self
, NULL
);
2197 // workqueue entry point from kernel
2199 _pthread_wqthread(pthread_t self
, mach_port_t kport
, void *stacklowaddr
,
2200 void *keventlist
, int flags
, int nkevents
)
2202 if ((flags
& WQ_FLAG_THREAD_REUSE
) == 0) {
2203 _pthread_wqthread_setup(self
, kport
, stacklowaddr
, flags
);
2206 pthread_priority_t pp
;
2207 if (flags
& WQ_FLAG_THREAD_OUTSIDEQOS
) {
2208 self
->wqoutsideqos
= 1;
2209 pp
= _pthread_priority_make_from_thread_qos(THREAD_QOS_LEGACY
, 0,
2210 _PTHREAD_PRIORITY_FALLBACK_FLAG
);
2212 self
->wqoutsideqos
= 0;
2213 pp
= _pthread_wqthread_priority(flags
);
2216 self
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = (void *)pp
;
2218 // avoid spills on the stack hard to keep used stack space minimal
2219 if (nkevents
== WORKQ_EXIT_THREAD_NKEVENT
) {
2221 } else if (flags
& WQ_FLAG_THREAD_WORKLOOP
) {
2222 self
->fun
= (void *(*)(void*))__libdispatch_workloopfunction
;
2223 self
->wq_retop
= WQOPS_THREAD_WORKLOOP_RETURN
;
2224 self
->wq_kqid_ptr
= ((kqueue_id_t
*)keventlist
- 1);
2225 self
->arg
= keventlist
;
2226 self
->wq_nevents
= nkevents
;
2227 } else if (flags
& WQ_FLAG_THREAD_KEVENT
) {
2228 self
->fun
= (void *(*)(void*))__libdispatch_keventfunction
;
2229 self
->wq_retop
= WQOPS_THREAD_KEVENT_RETURN
;
2230 self
->wq_kqid_ptr
= NULL
;
2231 self
->arg
= keventlist
;
2232 self
->wq_nevents
= nkevents
;
2234 self
->fun
= (void *(*)(void*))__libdispatch_workerfunction
;
2235 self
->wq_retop
= WQOPS_THREAD_RETURN
;
2236 self
->wq_kqid_ptr
= NULL
;
2237 self
->arg
= (void *)(uintptr_t)pp
;
2238 self
->wq_nevents
= 0;
2239 if (os_likely(__workq_newapi
)) {
2240 (*__libdispatch_workerfunction
)(pp
);
2242 _pthread_wqthread_legacy_worker_wrap(pp
);
2248 kevent_errors_retry
:
2249 if (self
->wq_retop
== WQOPS_THREAD_WORKLOOP_RETURN
) {
2250 ((pthread_workqueue_function_workloop_t
)self
->fun
)
2251 (self
->wq_kqid_ptr
, &self
->arg
, &self
->wq_nevents
);
2253 ((pthread_workqueue_function_kevent_t
)self
->fun
)
2254 (&self
->arg
, &self
->wq_nevents
);
2256 int rc
= __workq_kernreturn(self
->wq_retop
, self
->arg
, self
->wq_nevents
, 0);
2257 if (os_unlikely(rc
> 0)) {
2258 self
->wq_nevents
= rc
;
2259 goto kevent_errors_retry
;
2261 if (os_unlikely(rc
< 0)) {
2262 PTHREAD_INTERNAL_CRASH(self
->err_no
, "kevent (workloop) failed");
2266 __workq_kernreturn(self
->wq_retop
, NULL
, 0, 0);
2270 _pthread_wqthread_exit(self
);
2274 #pragma mark pthread workqueue API for libdispatch
2277 _Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN
== WQ_KEVENT_LIST_LEN
,
2278 "Kernel and userland should agree on the event list size");
2281 pthread_workqueue_setdispatchoffset_np(int offset
)
2283 __libdispatch_offset
= offset
;
2287 pthread_workqueue_setdispatch_with_workloop_np(pthread_workqueue_function2_t queue_func
,
2288 pthread_workqueue_function_kevent_t kevent_func
,
2289 pthread_workqueue_function_workloop_t workloop_func
)
2292 if (__libdispatch_workerfunction
== NULL
) {
2293 // Check whether the kernel supports new SPIs
2294 res
= __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP
, NULL
, __libdispatch_offset
, kevent_func
!= NULL
? 0x01 : 0x00);
2298 __libdispatch_workerfunction
= queue_func
;
2299 __libdispatch_keventfunction
= kevent_func
;
2300 __libdispatch_workloopfunction
= workloop_func
;
2302 // Prepare the kernel for workq action
2303 (void)__workq_open();
2304 if (__is_threaded
== 0) {
2313 _pthread_workqueue_init_with_workloop(pthread_workqueue_function2_t queue_func
,
2314 pthread_workqueue_function_kevent_t kevent_func
,
2315 pthread_workqueue_function_workloop_t workloop_func
,
2316 int offset
, int flags
)
2322 __workq_newapi
= true;
2323 __libdispatch_offset
= offset
;
2325 int rv
= pthread_workqueue_setdispatch_with_workloop_np(queue_func
, kevent_func
, workloop_func
);
2330 _pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func
,
2331 pthread_workqueue_function_kevent_t kevent_func
,
2332 int offset
, int flags
)
2334 return _pthread_workqueue_init_with_workloop(queue_func
, kevent_func
, NULL
, offset
, flags
);
2338 _pthread_workqueue_init(pthread_workqueue_function2_t func
, int offset
, int flags
)
2340 return _pthread_workqueue_init_with_kevent(func
, NULL
, offset
, flags
);
2344 pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func
)
2346 return pthread_workqueue_setdispatch_with_workloop_np((pthread_workqueue_function2_t
)worker_func
, NULL
, NULL
);
2350 _pthread_workqueue_supported(void)
2352 if (os_unlikely(!__pthread_supported_features
)) {
2353 PTHREAD_INTERNAL_CRASH(0, "libpthread has not been initialized");
2356 return __pthread_supported_features
;
2360 pthread_workqueue_addthreads_np(int queue_priority
, int options
, int numthreads
)
2364 // Cannot add threads without a worker function registered.
2365 if (__libdispatch_workerfunction
== NULL
) {
2369 pthread_priority_t kp
= 0;
2370 int compat_priority
= queue_priority
& WQ_FLAG_THREAD_PRIO_MASK
;
2373 if (options
& WORKQ_ADDTHREADS_OPTION_OVERCOMMIT
) {
2374 flags
= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
;
2377 #pragma clang diagnostic push
2378 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2379 kp
= _pthread_qos_class_encode_workqueue(compat_priority
, flags
);
2380 #pragma clang diagnostic pop
2382 res
= __workq_kernreturn(WQOPS_QUEUE_REQTHREADS
, NULL
, numthreads
, (int)kp
);
2390 _pthread_workqueue_should_narrow(pthread_priority_t pri
)
2392 int res
= __workq_kernreturn(WQOPS_SHOULD_NARROW
, NULL
, (int)pri
, 0);
2400 _pthread_workqueue_addthreads(int numthreads
, pthread_priority_t priority
)
2404 if (__libdispatch_workerfunction
== NULL
) {
2409 // <rdar://problem/37687655> Legacy simulators fail to boot
2411 // Older sims set the deprecated _PTHREAD_PRIORITY_ROOTQUEUE_FLAG wrongly,
2412 // which is aliased to _PTHREAD_PRIORITY_SCHED_PRI_FLAG and that XNU
2413 // validates and rejects.
2415 // As a workaround, forcefully unset this bit that cannot be set here
2417 priority
&= ~_PTHREAD_PRIORITY_SCHED_PRI_FLAG
;
2420 res
= __workq_kernreturn(WQOPS_QUEUE_REQTHREADS
, NULL
, numthreads
, (int)priority
);
2428 _pthread_workqueue_set_event_manager_priority(pthread_priority_t priority
)
2430 int res
= __workq_kernreturn(WQOPS_SET_EVENT_MANAGER_PRIORITY
, NULL
, (int)priority
, 0);
2438 _pthread_workloop_create(uint64_t workloop_id
, uint64_t options
, pthread_attr_t
*attr
)
2440 struct kqueue_workloop_params params
= {
2441 .kqwlp_version
= sizeof(struct kqueue_workloop_params
),
2442 .kqwlp_id
= workloop_id
,
2450 if (attr
->schedset
) {
2451 params
.kqwlp_flags
|= KQ_WORKLOOP_CREATE_SCHED_PRI
;
2452 params
.kqwlp_sched_pri
= attr
->param
.sched_priority
;
2455 if (attr
->policyset
) {
2456 params
.kqwlp_flags
|= KQ_WORKLOOP_CREATE_SCHED_POL
;
2457 params
.kqwlp_sched_pol
= attr
->policy
;
2460 if (attr
->cpupercentset
) {
2461 params
.kqwlp_flags
|= KQ_WORKLOOP_CREATE_CPU_PERCENT
;
2462 params
.kqwlp_cpu_percent
= attr
->cpupercent
;
2463 params
.kqwlp_cpu_refillms
= attr
->refillms
;
2466 int res
= __kqueue_workloop_ctl(KQ_WORKLOOP_CREATE
, 0, ¶ms
,
2475 _pthread_workloop_destroy(uint64_t workloop_id
)
2477 struct kqueue_workloop_params params
= {
2478 .kqwlp_version
= sizeof(struct kqueue_workloop_params
),
2479 .kqwlp_id
= workloop_id
,
2482 int res
= __kqueue_workloop_ctl(KQ_WORKLOOP_DESTROY
, 0, ¶ms
,
2491 #pragma mark Introspection SPI for libpthread.
2494 static pthread_introspection_hook_t _pthread_introspection_hook
;
2496 pthread_introspection_hook_t
2497 pthread_introspection_hook_install(pthread_introspection_hook_t hook
)
2499 pthread_introspection_hook_t prev
;
2500 prev
= _pthread_atomic_xchg_ptr((void**)&_pthread_introspection_hook
, hook
);
2506 _pthread_introspection_hook_callout_thread_create(pthread_t t
)
2508 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_CREATE
, t
, t
,
2513 _pthread_introspection_thread_create(pthread_t t
)
2515 if (os_fastpath(!_pthread_introspection_hook
)) return;
2516 _pthread_introspection_hook_callout_thread_create(t
);
2521 _pthread_introspection_hook_callout_thread_start(pthread_t t
)
2525 if (t
== main_thread()) {
2526 size_t stacksize
= t
->stackaddr
- t
->stackbottom
;
2527 freesize
= stacksize
+ t
->guardsize
;
2528 freeaddr
= t
->stackaddr
- freesize
;
2530 freesize
= t
->freesize
- PTHREAD_SIZE
;
2531 freeaddr
= t
->freeaddr
;
2533 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_START
, t
,
2534 freeaddr
, freesize
);
2538 _pthread_introspection_thread_start(pthread_t t
)
2540 if (os_fastpath(!_pthread_introspection_hook
)) return;
2541 _pthread_introspection_hook_callout_thread_start(t
);
2546 _pthread_introspection_hook_callout_thread_terminate(pthread_t t
)
2550 if (t
== main_thread()) {
2551 size_t stacksize
= t
->stackaddr
- t
->stackbottom
;
2552 freesize
= stacksize
+ t
->guardsize
;
2553 freeaddr
= t
->stackaddr
- freesize
;
2555 freesize
= t
->freesize
- PTHREAD_SIZE
;
2556 freeaddr
= t
->freeaddr
;
2558 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE
, t
,
2559 freeaddr
, freesize
);
2563 _pthread_introspection_thread_terminate(pthread_t t
)
2565 if (os_fastpath(!_pthread_introspection_hook
)) return;
2566 _pthread_introspection_hook_callout_thread_terminate(t
);
2571 _pthread_introspection_hook_callout_thread_destroy(pthread_t t
)
2573 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY
, t
, t
,
2578 _pthread_introspection_thread_destroy(pthread_t t
)
2580 if (os_fastpath(!_pthread_introspection_hook
)) return;
2581 _pthread_introspection_hook_callout_thread_destroy(t
);
2584 #pragma mark libplatform shims
2586 #include <platform/string.h>
2588 // pthread_setup initializes large structures to 0,
2589 // which the compiler turns into a library call to memset.
2591 // To avoid linking against Libc, provide a simple wrapper
2592 // that calls through to the libplatform primitives
2597 memset(void *b
, int c
, size_t len
)
2599 return _platform_memset(b
, c
, len
);
2605 bzero(void *s
, size_t n
)
2607 _platform_bzero(s
, n
);
2613 memcpy(void* a
, const void* b
, unsigned long s
)
2615 return _platform_memmove(a
, b
, s
);