/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * POSIX Pthread Library
 */
#include "workqueue_private.h"
#include "introspection_private.h"
#include "qos_private.h"
#include "tsd_private.h"

#include <mach/mach_init.h>
#include <mach/mach_vm.h>

#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>

#include <machine/vmparam.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
#include <libkern/OSAtomic.h>

#include <platform/string.h>
#include <platform/compat.h>
extern int __sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
		void *newp, size_t newlen);
extern void __exit(int) __attribute__((noreturn));

static void (*exitf)(int) = __exit;
__private_extern__ void* (*_pthread_malloc)(size_t) = NULL;
__private_extern__ void (*_pthread_free)(void *) = NULL;
// This global should be used (carefully) by anyone needing to know if a
// pthread (other than the main thread) has been created.
int __is_threaded = 0;

int __unix_conforming = 0;

// _pthread_list_lock protects _pthread_count, access to the __pthread_head
// list, and the parentcheck, childrun and childexit flags of the pthread
// structure. Externally imported by pthread_cancelable.c.
__private_extern__ _pthread_lock _pthread_list_lock = _PTHREAD_LOCK_INITIALIZER;
__private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
static int _pthread_count = 1;
#if PTHREAD_LAYOUT_SPI

const struct pthread_layout_offsets_s pthread_layout_offsets = {
	.plo_pthread_tsd_base_offset = offsetof(struct _pthread, tsd),
	.plo_pthread_tsd_base_address_offset = 0,
	.plo_pthread_tsd_entry_size = sizeof(((struct _pthread *)NULL)->tsd[0]),
};

#endif // PTHREAD_LAYOUT_SPI
// Mach message notification that a thread needs to be recycled.
typedef struct _pthread_reap_msg_t {
	mach_msg_header_t header;
	pthread_t thread;
	mach_msg_trailer_t trailer;
} pthread_reap_msg_t;
/*
 * The pthread may be offset into a page. In that event, by contract
 * with the kernel, the allocation will extend PTHREAD_SIZE from the
 * start of the next page. There's also one page worth of allocation
 * below stacksize for the guard page. <rdar://problem/19941744>
 */
#define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
#define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size)
#define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize))
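
/*
 * Editorial sketch (not from the original source): for a default,
 * non-custom stack, the single allocation these macros describe is laid
 * out, from low address to high, as
 *
 *   allocaddr                              stackaddr
 *   | guard page | stack (grows down) .... | struct _pthread |
 *   |<--1 page-->|<-------stacksize------->|<--PTHREAD_SIZE->|
 *
 * so PTHREAD_ALLOCADDR() recovers the base of the allocation from the
 * stack top and PTHREAD_ALLOCSIZE() its total length.
 */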
static pthread_attr_t _pthread_attr_default = {0};

// The main thread's pthread_t
static struct _pthread _thread __attribute__((aligned(64))) = {0};

static int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;
// work queue support data
static void (*__libdispatch_workerfunction)(pthread_priority_t) = NULL;
static void (*__libdispatch_keventfunction)(void **events, int *nevents) = NULL;
static int __libdispatch_offset;

// supported feature set
int __pthread_supported_features;
//
// Function prototypes
//

// pthread primitives
static int _pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack);
static int _pthread_deallocate(pthread_t t);

static void _pthread_terminate(pthread_t t);

static void _pthread_struct_init(pthread_t t,
	const pthread_attr_t *attrs,
	void *stack,
	size_t stacksize,
	void *freeaddr,
	size_t freesize);

extern void _pthread_set_self(pthread_t);
static void _pthread_set_self_internal(pthread_t, bool needs_tsd_base_set);

static void _pthread_dealloc_reply_port(pthread_t t);

static inline void __pthread_add_thread(pthread_t t, bool parent, bool from_mach_thread);
static inline int __pthread_remove_thread(pthread_t t, bool child, bool *should_exit);

static int _pthread_find_thread(pthread_t thread);

static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;
static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);

static inline void _pthread_introspection_thread_create(pthread_t t, bool destroy);
static inline void _pthread_introspection_thread_start(pthread_t t);
static inline void _pthread_introspection_thread_terminate(pthread_t t, void *freeaddr, size_t freesize, bool destroy);
static inline void _pthread_introspection_thread_destroy(pthread_t t);

extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *unused, int reuse); // trampoline into _pthread_wqthread
extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int flags); // trampoline into _pthread_start

void pthread_workqueue_atfork_child(void);

static bool __workq_newapi;
/* Compatibility: previous pthread API used WORKQUEUE_OVERCOMMIT to request overcommit threads from
 * the kernel. This definition is kept here, in userspace only, to perform the compatibility shim
 * from old API requests to the new kext conventions.
 */
#define WORKQUEUE_OVERCOMMIT 0x10000
/*
 * Flags field passed to bsdthread_create and back in pthread_start
 * 31  <---------------------------------> 0
 * _________________________________________
 * |  flags(8) | policy(8) | importance(16) |
 * -----------------------------------------
 */
#define PTHREAD_START_CUSTOM		0x01000000
#define PTHREAD_START_SETSCHED		0x02000000
#define PTHREAD_START_DETACHED		0x04000000
#define PTHREAD_START_QOSCLASS		0x08000000
#define PTHREAD_START_TSD_BASE_SET	0x10000000
#define PTHREAD_START_QOSCLASS_MASK 0x00ffffff
#define PTHREAD_START_POLICY_BITSHIFT 16
#define PTHREAD_START_POLICY_MASK 0xff
#define PTHREAD_START_IMPORTANCE_MASK 0xffff
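
/*
 * Illustrative example (not from the original source): a detached thread
 * with an explicit round-robin policy and an arbitrary importance of 31
 * would be packed into the flags word as
 *
 *   unsigned int flags = PTHREAD_START_DETACHED
 *           | PTHREAD_START_SETSCHED
 *           | ((SCHED_RR & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT)
 *           | (31 & PTHREAD_START_IMPORTANCE_MASK);
 *
 * _pthread_start() reverses this with the same masks and shift.
 */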
static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);
extern pthread_t __bsdthread_create(void *(*func)(void *), void *func_arg, void *stack, pthread_t thread, unsigned int flags);
extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int),
		void (*)(pthread_t, mach_port_t, void *, void *, int), int,
		void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), int32_t *, __uint64_t);
extern int __bsdthread_terminate(void *freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
extern __uint64_t __thread_selfid(void);
extern int __pthread_canceled(int);
extern int __pthread_kill(mach_port_t, int);

extern int __workq_open(void);
extern int __workq_kernreturn(int, void *, int, int);
#if defined(__i386__) || defined(__x86_64__)
static const mach_vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#else
#error no PTHREAD_STACK_HINT for this architecture
#endif
// Check that the offsets of the _PTHREAD_STRUCT_DIRECT_*_OFFSET values haven't changed
_Static_assert(offsetof(struct _pthread, tsd) + _PTHREAD_STRUCT_DIRECT_THREADID_OFFSET
		== offsetof(struct _pthread, thread_id),
		"_PTHREAD_STRUCT_DIRECT_THREADID_OFFSET is correct");
// Allocate a thread structure, stack and guard page.
//
// The thread structure may optionally be placed in the same allocation as the
// stack, residing above the top of the stack. This cannot be done if a
// custom stack address is provided.
//
// Similarly the guard page cannot be allocated if a custom stack address is
// provided.
//
// The allocated thread structure is initialized with values that indicate how
// it should be freed.
static int
_pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack)
{
	int res;
	kern_return_t kr;
	pthread_t t = NULL;
	mach_vm_address_t allocaddr = PTHREAD_STACK_HINT;
	size_t allocsize = 0;
	size_t guardsize = 0;
	size_t stacksize = 0;

	PTHREAD_ASSERT(attrs->stacksize >= PTHREAD_STACK_MIN);

	*thread = NULL;
	*stack = NULL;

	// Allocate a pthread structure if necessary

	if (attrs->stackaddr != NULL) {
		PTHREAD_ASSERT(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
		*stack = attrs->stackaddr;
		allocsize = PTHREAD_SIZE;
	} else {
		guardsize = attrs->guardsize;
		stacksize = attrs->stacksize;
		allocsize = stacksize + guardsize + PTHREAD_SIZE;
	}

	kr = mach_vm_map(mach_task_self(),
			&allocaddr,
			allocsize,
			vm_page_size - 1,
			VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE,
			MEMORY_OBJECT_NULL,
			0,
			FALSE,
			VM_PROT_DEFAULT,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS) {
		kr = mach_vm_allocate(mach_task_self(),
				&allocaddr,
				allocsize,
				VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
	}

	if (kr == KERN_SUCCESS) {
		// The stack grows down.
		// Set the guard page at the lowest address of the
		// newly allocated stack. Return the highest address
		// of the stack.
		if (guardsize) {
			(void)mach_vm_protect(mach_task_self(), allocaddr, guardsize, FALSE, VM_PROT_NONE);
		}

		// Thread structure resides at the top of the stack.
		t = (void *)(allocaddr + stacksize + guardsize);
		if (attrs->stackaddr == NULL) {
			// Returns the top of the stack.
			*stack = t;
		}
	}

	if (t != NULL) {
		_pthread_struct_init(t, attrs,
				*stack, attrs->stacksize,
				allocaddr, allocsize);
		*thread = t;
		res = 0;
	} else {
		res = EAGAIN;
	}
	return res;
}
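
/*
 * Usage sketch for the allocator above (illustration only, not part of
 * the original file). With default attributes one mapping covers guard
 * page, stack and thread structure; with a caller-supplied stack only
 * the structure is mapped and no guard page is created:
 *
 *   pthread_t t;
 *   void *stack;
 *   _pthread_allocate(&t, &_pthread_attr_default, &stack); // stack + struct
 *
 *   pthread_attr_t a;
 *   pthread_attr_init(&a);
 *   pthread_attr_setstackaddr(&a, custom_top);  // page-aligned stack top
 *   _pthread_allocate(&t, &a, &stack);          // struct only
 */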
static int
_pthread_deallocate(pthread_t t)
{
	// Don't free the main thread.
	if (t != &_thread) {
		kern_return_t ret;
		ret = mach_vm_deallocate(mach_task_self(), t->freeaddr, t->freesize);
		PTHREAD_ASSERT(ret == KERN_SUCCESS);
	}
	return 0;
}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreturn-stack-address"

PTHREAD_NOINLINE
static void*
_current_stack_address(void)
{
	int a;
	return &a;
}

#pragma clang diagnostic pop
// Terminates the thread if called from the currently running thread.
PTHREAD_NORETURN PTHREAD_NOINLINE
static void
_pthread_terminate(pthread_t t)
{
	PTHREAD_ASSERT(t == pthread_self());

	uintptr_t freeaddr = (uintptr_t)t->freeaddr;
	size_t freesize = t->freesize;

	// the size of just the stack
	size_t freesize_stack = t->freesize;

	// We usually pass our structure+stack to bsdthread_terminate to free, but
	// if we get told to keep the pthread_t structure around then we need to
	// adjust the free size and addr in the pthread_t to just refer to the
	// structure and not the stack. If we do end up deallocating the
	// structure, this is useless work since no one can read the result, but we
	// can't do it after the call to pthread_remove_thread because it isn't
	// safe to dereference t after that.
	if ((void*)t > t->freeaddr && (void*)t < t->freeaddr + t->freesize){
		// Check to ensure the pthread structure itself is part of the
		// allocation described by freeaddr/freesize, in which case we split and
		// only deallocate the area below the pthread structure. In the event of a
		// custom stack, the freeaddr/size will be the pthread structure itself, in
		// which case we shouldn't free anything (the final else case).
		freesize_stack = trunc_page((uintptr_t)t - (uintptr_t)freeaddr);

		// describe just the remainder for deallocation when the pthread_t goes away
		t->freeaddr += freesize_stack;
		t->freesize -= freesize_stack;
	} else if (t == &_thread){
		freeaddr = t->stackaddr - pthread_get_stacksize_np(t);
		uintptr_t stackborder = trunc_page((uintptr_t)_current_stack_address());
		freesize_stack = stackborder - freeaddr;
	} else {
		freesize_stack = 0;
	}

	mach_port_t kport = _pthread_kernel_thread(t);
	semaphore_t joinsem = t->joiner_notify;

	_pthread_dealloc_reply_port(t);

	// After the call to __pthread_remove_thread, it is not safe to
	// dereference the pthread_t structure.
	bool destroy, should_exit;
	destroy = (__pthread_remove_thread(t, true, &should_exit) != EBUSY);

	if (!destroy || t == &_thread) {
		// Use the adjusted freesize of just the stack that we computed above.
		freesize = freesize_stack;
	}

	// Check if there is nothing to free because the thread has a custom
	// stack allocation and is joinable.
	if (freesize == 0) {
		freeaddr = 0;
	}
	_pthread_introspection_thread_terminate(t, freeaddr, freesize, destroy);
	if (should_exit) {
		exitf(0);
	}

	__bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
	PTHREAD_ABORT("thread %p didn't terminate", t);
}
int
pthread_attr_destroy(pthread_attr_t *attr)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		attr->sig = 0;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*detachstate = attr->detached;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*inheritsched = attr->inherit;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*param = attr->param;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*policy = attr->policy;
		ret = 0;
	}
	return ret;
}
// Default stack size is 512KB; independent of the main thread's stack size.
static const size_t DEFAULT_STACK_SIZE = 512 * 1024;
int
pthread_attr_init(pthread_attr_t *attr)
{
	attr->stacksize = DEFAULT_STACK_SIZE;
	attr->stackaddr = NULL;
	attr->sig = _PTHREAD_ATTR_SIG;
	attr->param.sched_priority = default_priority;
	attr->param.quantum = 10; /* quantum isn't public yet */
	attr->detached = PTHREAD_CREATE_JOINABLE;
	attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
	attr->policy = _PTHREAD_DEFAULT_POLICY;
	attr->fastpath = 1;
	attr->schedset = 0;
	attr->guardsize = vm_page_size;
	attr->qosclass = _pthread_priority_make_newest(QOS_CLASS_DEFAULT, 0, 0);
	return 0;
}
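
/*
 * Caller-side sketch of the attribute lifecycle against these defaults
 * (illustration only, not part of the original file):
 *
 *   pthread_attr_t attr;
 *   pthread_attr_init(&attr);                      // 512KB stack, joinable
 *   pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 *   pthread_attr_setstacksize(&attr, 1024 * 1024); // page multiple, >= PTHREAD_STACK_MIN
 *   // ... pthread_create(..., &attr, ...) ...
 *   pthread_attr_destroy(&attr);                   // clears attr->sig
 */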
int
pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			(detachstate == PTHREAD_CREATE_JOINABLE ||
			 detachstate == PTHREAD_CREATE_DETACHED)) {
		attr->detached = detachstate;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			(inheritsched == PTHREAD_INHERIT_SCHED ||
			 inheritsched == PTHREAD_EXPLICIT_SCHED)) {
		attr->inherit = inheritsched;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		/* TODO: Validate sched_param fields */
		attr->param = *param;
		attr->schedset = 1;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			(policy == SCHED_OTHER ||
			 policy == SCHED_RR ||
			 policy == SCHED_FIFO)) {
		attr->policy = policy;
		attr->schedset = 1;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_setscope(pthread_attr_t *attr, int scope)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		if (scope == PTHREAD_SCOPE_SYSTEM) {
			// No attribute yet for the scope.
			ret = 0;
		} else if (scope == PTHREAD_SCOPE_PROCESS) {
			ret = ENOTSUP;
		}
	}
	return ret;
}
int
pthread_attr_getscope(const pthread_attr_t *attr, int *scope)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*scope = PTHREAD_SCOPE_SYSTEM;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = attr->stackaddr;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			((uintptr_t)stackaddr % vm_page_size) == 0) {
		attr->stackaddr = stackaddr;
		attr->fastpath = 0;
		attr->guardsize = 0;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stacksize = attr->stacksize;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			(stacksize % vm_page_size) == 0 &&
			stacksize >= PTHREAD_STACK_MIN) {
		attr->stacksize = stacksize;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
		*stacksize = attr->stacksize;
		ret = 0;
	}
	return ret;
}
// Per SUSv3, the stackaddr is the base address, the lowest addressable byte
// address. This is not the same as in pthread_attr_setstackaddr.
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			((uintptr_t)stackaddr % vm_page_size) == 0 &&
			(stacksize % vm_page_size) == 0 &&
			stacksize >= PTHREAD_STACK_MIN) {
		attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
		attr->stacksize = stacksize;
		ret = 0;
	}
	return ret;
}
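
/*
 * Illustration of the two conventions (sketch, not from the original
 * file): for the same page-aligned region [base, base + size), the older
 * call takes the high end while the SUSv3 call takes the low end and
 * converts internally.
 *
 *   char *base = valloc(size);                      // page-aligned
 *   pthread_attr_setstackaddr(&attr, base + size);  // expects the top
 *   pthread_attr_setstack(&attr, base, size);       // expects the base
 *
 * Both leave attr->stackaddr pointing at base + size.
 */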
int
pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		/* Guardsize of 0 is valid, it means no guard */
		if ((guardsize % vm_page_size) == 0) {
			attr->guardsize = guardsize;
			ret = 0;
		}
	}
	return ret;
}
int
pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*guardsize = attr->guardsize;
		ret = 0;
	}
	return ret;
}
/*
 * Create and start execution of a new thread.
 */
PTHREAD_NOINLINE
static void
_pthread_body(pthread_t self, bool needs_tsd_base_set)
{
	_pthread_set_self_internal(self, needs_tsd_base_set);
	__pthread_add_thread(self, false, false);
	void *result = (self->fun)(self->arg);

	_pthread_exit(self, result);
}
void
_pthread_start(pthread_t self,
		mach_port_t kport,
		void *(*fun)(void *),
		void *arg,
		size_t stacksize,
		unsigned int pflags)
{
	if ((pflags & PTHREAD_START_CUSTOM) == 0) {
		void *stackaddr = self;
		_pthread_struct_init(self, &_pthread_attr_default,
				stackaddr, stacksize,
				PTHREAD_ALLOCADDR(stackaddr, stacksize), PTHREAD_ALLOCSIZE(stackaddr, stacksize));

		if (pflags & PTHREAD_START_SETSCHED) {
			self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
			self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
		}

		if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
			self->detached &= ~PTHREAD_CREATE_JOINABLE;
			self->detached |= PTHREAD_CREATE_DETACHED;
		}
	}

	if ((pflags & PTHREAD_START_QOSCLASS) != 0) {
		/* The QoS class is cached in the TSD of the pthread, so to reflect the
		 * class that the kernel brought us up at, the TSD must be primed from the
		 * flags parameter.
		 */
		self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (pflags & PTHREAD_START_QOSCLASS_MASK);
	} else {
		/* Give the thread a default QoS tier, of zero. */
		self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
	}

	bool thread_tsd_bsd_set = (bool)(pflags & PTHREAD_START_TSD_BASE_SET);

	_pthread_set_kernel_thread(self, kport);
	self->fun = fun;
	self->arg = arg;

	_pthread_body(self, !thread_tsd_bsd_set);
}
static void
_pthread_struct_init(pthread_t t,
		const pthread_attr_t *attrs,
		void *stackaddr,
		size_t stacksize,
		void *freeaddr,
		size_t freesize)
{
	PTHREAD_ASSERT(t->sig != _PTHREAD_SIG);

	t->sig = _PTHREAD_SIG;
	t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = t;
	t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
	_PTHREAD_LOCK_INIT(t->lock);

	t->stackaddr = stackaddr;
	t->stacksize = stacksize;
	t->freeaddr = freeaddr;
	t->freesize = freesize;

	t->guardsize = attrs->guardsize;
	t->detached = attrs->detached;
	t->inherit = attrs->inherit;
	t->policy = attrs->policy;
	t->schedset = attrs->schedset;
	t->param = attrs->param;
	t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
}
/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
	return __is_threaded;
}

/* Non-portable public API to know whether this process has (or had) at least
 * one thread apart from the main thread. There could be a race if a thread is
 * in the process of being created at the time of the call. It does not tell
 * whether there is more than one thread at this point in time.
 */
int
pthread_is_threaded_np(void)
{
	return __is_threaded;
}
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
	mach_port_t kport = MACH_PORT_NULL;

	if (t == pthread_self()) {
		/*
		 * If the call is on self, return the kernel port. We cannot
		 * add this bypass for main thread as it might have exited,
		 * and we should not return stale port info.
		 */
		kport = _pthread_kernel_thread(t);
	} else {
		(void)_pthread_lookup_thread(t, &kport, 0);
	}

	return kport;
}
pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
	struct _pthread *p = NULL;

	/* No need to wait as mach port is already known */
	_PTHREAD_LOCK(_pthread_list_lock);

	TAILQ_FOREACH(p, &__pthread_head, plist) {
		if (_pthread_kernel_thread(p) == kernel_thread) {
			break;
		}
	}

	_PTHREAD_UNLOCK(_pthread_list_lock);

	return p;
}
size_t
pthread_get_stacksize_np(pthread_t t)
{
	int ret;
	size_t size = 0;

	if (t == NULL) {
		return ESRCH; // XXX bug?
	}

#if !defined(__arm__) && !defined(__arm64__)
	// The default rlimit based allocations will be provided with a stacksize
	// of the current limit and a freesize of the max. However, custom
	// allocations will just have the guard page to free. If we aren't in the
	// latter case, call into rlimit to determine the current stack size. In
	// the event that the current limit == max limit then we'll fall down the
	// fast path, but since it's unlikely that the limit is going to be lowered
	// after it's been changed to the max, we should be fine.
	//
	// Of course, on arm rlim_cur == rlim_max and there's only the one guard
	// page. So, we can skip all this there.
	if (t == &_thread && t->stacksize + vm_page_size != t->freesize) {
		// We want to call getrlimit() just once, as it's relatively expensive
		static size_t rlimit_stack;

		if (rlimit_stack == 0) {
			struct rlimit limit;
			int ret = getrlimit(RLIMIT_STACK, &limit);
			if (ret == 0) {
				rlimit_stack = (size_t) limit.rlim_cur;
			}
		}

		if (rlimit_stack == 0 || rlimit_stack > t->freesize) {
			return t->stacksize;
		} else {
			return rlimit_stack;
		}
	}
#endif /* !defined(__arm__) && !defined(__arm64__) */

	if (t == pthread_self() || t == &_thread) {
		return t->stacksize;
	}

	_PTHREAD_LOCK(_pthread_list_lock);

	ret = _pthread_find_thread(t);
	if (ret == 0) {
		size = t->stacksize;
	} else {
		size = ret; // XXX bug?
	}

	_PTHREAD_UNLOCK(_pthread_list_lock);

	return size;
}
void *
pthread_get_stackaddr_np(pthread_t t)
{
	int ret;
	void *addr = NULL;

	if (t == NULL) {
		return (void *)(uintptr_t)ESRCH; // XXX bug?
	}

	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == &_thread) {
		return t->stackaddr;
	}

	_PTHREAD_LOCK(_pthread_list_lock);

	ret = _pthread_find_thread(t);
	if (ret == 0) {
		addr = t->stackaddr;
	} else {
		addr = (void *)(uintptr_t)ret; // XXX bug?
	}

	_PTHREAD_UNLOCK(_pthread_list_lock);

	return addr;
}
static mach_port_t
_pthread_reply_port(pthread_t t)
{
	void *p;
	if (t == NULL) {
		p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY);
	} else {
		p = t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY];
	}
	return (mach_port_t)(uintptr_t)p;
}
static void
_pthread_set_reply_port(pthread_t t, mach_port_t reply_port)
{
	void *p = (void *)(uintptr_t)reply_port;
	if (t == NULL) {
		_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY, p);
	} else {
		t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY] = p;
	}
}
static void
_pthread_dealloc_reply_port(pthread_t t)
{
	mach_port_t reply_port = _pthread_reply_port(t);
	if (reply_port != MACH_PORT_NULL) {
		mig_dealloc_reply_port(reply_port);
	}
}
pthread_t
pthread_main_thread_np(void)
{
	return &_thread;
}

/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
	pthread_t self = pthread_self();

	return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}
/* if we are passed in a pthread_t that is NULL, then we return
 * the current thread's thread_id. So folks don't have to call
 * pthread_self, in addition to us doing it, if they just want
 * their thread_id.
 */
int
pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
{
	int res = 0;
	pthread_t self = pthread_self();

	if (thread_id == NULL) {
		return EINVAL;
	}

	if (thread == NULL || thread == self) {
		*thread_id = self->thread_id;
	} else {
		_PTHREAD_LOCK(_pthread_list_lock);
		res = _pthread_find_thread(thread);
		if (res == 0) {
			*thread_id = thread->thread_id;
		}
		_PTHREAD_UNLOCK(_pthread_list_lock);
	}
	return res;
}
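
/*
 * Usage sketch (not from the original file): passing NULL asks for the
 * calling thread's id and avoids the list lookup.
 *
 *   uint64_t tid;
 *   pthread_threadid_np(NULL, &tid);   // current thread, fast path
 *   pthread_threadid_np(other, &tid);  // takes _pthread_list_lock
 */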
int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
	int res;

	if (thread == NULL) {
		return ESRCH;
	}

	_PTHREAD_LOCK(_pthread_list_lock);
	res = _pthread_find_thread(thread);
	if (res == 0) {
		strlcpy(threadname, thread->pthread_name, len);
	}
	_PTHREAD_UNLOCK(_pthread_list_lock);
	return res;
}
int
pthread_setname_np(const char *name)
{
	int res;
	pthread_t self = pthread_self();

	size_t len = 0;
	if (name != NULL) {
		len = strlen(name);
	}

	/* prototype is in pthread_internals.h */
	res = __proc_info(5, getpid(), 2, (uint64_t)0, (void*)name, (int)len);
	if (res == 0) {
		if (len > 0) {
			strlcpy(self->pthread_name, name, MAXTHREADNAMESIZE);
		} else {
			bzero(self->pthread_name, MAXTHREADNAMESIZE);
		}
	}
	return res;
}
PTHREAD_ALWAYS_INLINE
static inline void
__pthread_add_thread(pthread_t t, bool parent, bool from_mach_thread)
{
	bool should_deallocate = false;
	bool should_add = true;

	if (from_mach_thread){
		_PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock);
	} else {
		_PTHREAD_LOCK(_pthread_list_lock);
	}

	// The parent and child threads race to add the thread to the list.
	// When called by the parent:
	// - set parentcheck to true
	// - back off if childrun is true
	// When called by the child:
	// - set childrun to true
	// - back off if parentcheck is true
	if (parent) {
		t->parentcheck = 1;
		if (t->childrun) {
			// child got here first, don't add.
			should_add = false;
		}

		// If the child exits before we check in then it has to keep
		// the thread structure memory alive so our dereferences above
		// are valid. If it's a detached thread, then no joiner will
		// deallocate the thread structure itself. So we do it here.
		if (t->childexit) {
			should_add = false;
			should_deallocate = ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED);
		}
	} else {
		t->childrun = 1;
		if (t->parentcheck) {
			// Parent got here first, don't add.
			should_add = false;
		}
		if (t->wqthread) {
			// Work queue threads have no parent. Simulate.
			t->parentcheck = 1;
		}
	}

	if (should_add) {
		TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
		_pthread_count++;
	}

	if (from_mach_thread){
		_PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock);
	} else {
		_PTHREAD_UNLOCK(_pthread_list_lock);
	}

	if (parent) {
		if (!from_mach_thread) {
			// PR-26275485: Mach threads will likely crash trying to run
			// introspection code. Since the fall out from the introspection
			// code not seeing the injected thread is likely less than crashing
			// in the introspection code, just don't make the call.
			_pthread_introspection_thread_create(t, should_deallocate);
		}
		if (should_deallocate) {
			_pthread_deallocate(t);
		}
	} else {
		_pthread_introspection_thread_start(t);
	}
}
// <rdar://problem/12544957> must always inline this function to avoid epilogues
// Returns EBUSY if the thread structure should be kept alive (is joinable).
// Returns ESRCH if the thread structure is no longer valid (was detached).
PTHREAD_ALWAYS_INLINE
static inline int
__pthread_remove_thread(pthread_t t, bool child, bool *should_exit)
{
	int ret = 0;

	bool should_remove = true;

	_PTHREAD_LOCK(_pthread_list_lock);

	// When a thread removes itself:
	// - Set the childexit flag indicating that the thread has exited.
	// - Return false if parentcheck is zero (must keep structure)
	// - If the thread is joinable, keep it on the list so that
	//   the join operation succeeds. Still decrement the running
	//   thread count so that we exit if no threads are running.
	// - Update the running thread count.
	// When another thread removes a joinable thread:
	// - CAREFUL not to dereference the thread before verifying that the
	//   reference is still valid using _pthread_find_thread().
	// - Remove the thread from the list.

	if (child) {
		t->childexit = 1;
		if (t->parentcheck == 0) {
			ret = EBUSY;
		}
		if ((t->detached & PTHREAD_CREATE_JOINABLE) != 0) {
			ret = EBUSY;
			should_remove = false;
		}
		*should_exit = (--_pthread_count <= 0);
	} else {
		ret = _pthread_find_thread(t);
		if (ret == 0) {
			// If we found a thread but it's not joinable, bail.
			if ((t->detached & PTHREAD_CREATE_JOINABLE) == 0) {
				should_remove = false;
				ret = ESRCH;
			}
		} else {
			should_remove = false;
		}
	}
	if (should_remove) {
		TAILQ_REMOVE(&__pthread_head, t, plist);
	}

	_PTHREAD_UNLOCK(_pthread_list_lock);

	return ret;
}
static int
_pthread_create(pthread_t *thread,
	const pthread_attr_t *attr,
	void *(*start_routine)(void *),
	void *arg,
	bool from_mach_thread)
{
	pthread_t t = NULL;
	unsigned int flags = 0;

	pthread_attr_t *attrs = (pthread_attr_t *)attr;
	if (attrs == NULL) {
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}

	if (attrs->detached == PTHREAD_CREATE_DETACHED) {
		flags |= PTHREAD_START_DETACHED;
	}

	if (attrs->schedset != 0) {
		flags |= PTHREAD_START_SETSCHED;
		flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
		flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
	} else if (attrs->qosclass != 0) {
		flags |= PTHREAD_START_QOSCLASS;
		flags |= (attrs->qosclass & PTHREAD_START_QOSCLASS_MASK);
	}

	__is_threaded = 1;

	void *stack;

	if (attrs->fastpath) {
		// kernel will allocate thread and stack, pass stacksize.
		stack = (void *)attrs->stacksize;
	} else {
		// allocate the thread and its stack
		flags |= PTHREAD_START_CUSTOM;

		int res;
		res = _pthread_allocate(&t, attrs, &stack);
		if (res) {
			return res;
		}

		t->arg = arg;
		t->fun = start_routine;
	}

	pthread_t t2;
	t2 = __bsdthread_create(start_routine, arg, stack, t, flags);
	if (t2 == (pthread_t)-1) {
		if (flags & PTHREAD_START_CUSTOM) {
			// free the thread and stack if we allocated it
			_pthread_deallocate(t);
		}
		return EAGAIN;
	}
	if (t == NULL) {
		t = t2;
	}

	__pthread_add_thread(t, true, from_mach_thread);

	// n.b. if a thread is created detached and exits, t will be invalid
	*thread = t;
	return 0;
}
int
pthread_create(pthread_t *thread,
	const pthread_attr_t *attr,
	void *(*start_routine)(void *),
	void *arg)
{
	return _pthread_create(thread, attr, start_routine, arg, false);
}

int
pthread_create_from_mach_thread(pthread_t *thread,
	const pthread_attr_t *attr,
	void *(*start_routine)(void *),
	void *arg)
{
	return _pthread_create(thread, attr, start_routine, arg, true);
}
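
/*
 * Minimal caller-side sketch of the path above (illustration only;
 * worker is a hypothetical function):
 *
 *   static void *worker(void *arg) { return arg; }
 *
 *   pthread_t t;
 *   if (pthread_create(&t, NULL, worker, NULL) == 0) {
 *       pthread_join(t, NULL);
 *   }
 *
 * With the default attributes attrs->fastpath is set, so the kernel
 * allocates the stack and the new thread enters through _pthread_start.
 */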
static void
_pthread_suspended_body(pthread_t self)
{
	_pthread_set_self(self);
	__pthread_add_thread(self, false, false);
	_pthread_exit(self, (self->fun)(self->arg));
}
int
pthread_create_suspended_np(pthread_t *thread,
	const pthread_attr_t *attr,
	void *(*start_routine)(void *),
	void *arg)
{
	int res;
	void *stack;
	mach_port_t kernel_thread = MACH_PORT_NULL;

	const pthread_attr_t *attrs = attr;
	if (attrs == NULL) {
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}

	pthread_t t;
	res = _pthread_allocate(&t, attrs, &stack);
	if (res) {
		return res;
	}

	*thread = t;

	kern_return_t kr;
	kr = thread_create(mach_task_self(), &kernel_thread);
	if (kr != KERN_SUCCESS) {
		//PTHREAD_ABORT("thread_create() failed: %d", kern_res);
		return EINVAL; /* Need better error here? */
	}

	_pthread_set_kernel_thread(t, kernel_thread);
	(void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);

	__is_threaded = 1;

	t->arg = arg;
	t->fun = start_routine;

	__pthread_add_thread(t, true, false);

	// Set up a suspended thread.
	_pthread_setup(t, _pthread_suspended_body, stack, 1, 0);
	return res;
}
int
pthread_detach(pthread_t thread)
{
	int res;
	bool join = false;
	semaphore_t sema = SEMAPHORE_NULL;

	res = _pthread_lookup_thread(thread, NULL, 1);
	if (res) {
		return res; // Not a valid thread to detach.
	}

	_PTHREAD_LOCK(thread->lock);
	if (thread->detached & PTHREAD_CREATE_JOINABLE) {
		if (thread->detached & _PTHREAD_EXITED) {
			// Join the thread if it's already exited.
			join = true;
		} else {
			thread->detached &= ~PTHREAD_CREATE_JOINABLE;
			thread->detached |= PTHREAD_CREATE_DETACHED;
			sema = thread->joiner_notify;
		}
	} else {
		res = EINVAL;
	}
	_PTHREAD_UNLOCK(thread->lock);

	if (join) {
		pthread_join(thread, NULL);
	} else if (sema) {
		semaphore_signal(sema);
	}

	return res;
}
int
pthread_kill(pthread_t th, int sig)
{
	if (sig < 0 || sig > NSIG) {
		return EINVAL;
	}

	mach_port_t kport = MACH_PORT_NULL;
	if (_pthread_lookup_thread(th, &kport, 0) != 0) {
		return ESRCH; // Not a valid thread.
	}

	// Don't signal workqueue threads.
	if (th->wqthread != 0 && th->wqkillset == 0) {
		return ENOTSUP;
	}

	int ret = __pthread_kill(kport, sig);

	if (ret == -1) {
		ret = errno;
	}
	return ret;
}
int
__pthread_workqueue_setkill(int enable)
{
	pthread_t self = pthread_self();

	_PTHREAD_LOCK(self->lock);
	self->wqkillset = enable ? 1 : 0;
	_PTHREAD_UNLOCK(self->lock);

	return 0;
}
static void *
__pthread_get_exit_value(pthread_t t, int conforming)
{
	const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
	void *value = t->exit_value;
	if (conforming) {
		if ((t->cancel_state & flags) == flags) {
			value = PTHREAD_CANCELED;
		}
	}
	return value;
}
/* For compatibility... */

pthread_t
_pthread_self(void) {
	return pthread_self();
}
/*
 * Terminate a thread.
 */
int __disable_threadsignal(int);

PTHREAD_NORETURN
static void
_pthread_exit(pthread_t self, void *value_ptr)
{
	struct __darwin_pthread_handler_rec *handler;

	// Disable signal delivery while we clean up
	__disable_threadsignal(1);

	// Set cancel state to disable and type to deferred
	_pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);

	while ((handler = self->__cleanup_stack) != 0) {
		(handler->__routine)(handler->__arg);
		self->__cleanup_stack = handler->__next;
	}
	_pthread_tsd_cleanup(self);

	_PTHREAD_LOCK(self->lock);
	self->detached |= _PTHREAD_EXITED;
	self->exit_value = value_ptr;

	if ((self->detached & PTHREAD_CREATE_JOINABLE) &&
			self->joiner_notify == SEMAPHORE_NULL) {
		self->joiner_notify = (semaphore_t)os_get_cached_semaphore();
	}
	_PTHREAD_UNLOCK(self->lock);

	// Clear per-thread semaphore cache
	os_put_cached_semaphore(SEMAPHORE_NULL);

	_pthread_terminate(self);
}
void
pthread_exit(void *value_ptr)
{
	pthread_t self = pthread_self();
	if (self->wqthread == 0) {
		_pthread_exit(self, value_ptr);
	} else {
		PTHREAD_ABORT("pthread_exit() may only be called against threads created via pthread_create()");
	}
}
int
pthread_getschedparam(pthread_t thread,
		int *policy,
		struct sched_param *param)
{
	int ret;

	if (thread == NULL) {
		return ESRCH;
	}

	_PTHREAD_LOCK(_pthread_list_lock);

	ret = _pthread_find_thread(thread);
	if (ret == 0) {
		if (policy) {
			*policy = thread->policy;
		}
		if (param) {
			*param = thread->param;
		}
	}

	_PTHREAD_UNLOCK(_pthread_list_lock);

	return ret;
}
static int
pthread_setschedparam_internal(pthread_t thread,
		mach_port_t kport,
		int policy,
		const struct sched_param *param)
{
	policy_base_data_t bases;
	policy_base_t base;
	mach_msg_type_number_t count;
	kern_return_t ret;

	switch (policy) {
		case SCHED_OTHER:
			bases.ts.base_priority = param->sched_priority;
			base = (policy_base_t)&bases.ts;
			count = POLICY_TIMESHARE_BASE_COUNT;
			break;
		case SCHED_FIFO:
			bases.fifo.base_priority = param->sched_priority;
			base = (policy_base_t)&bases.fifo;
			count = POLICY_FIFO_BASE_COUNT;
			break;
		case SCHED_RR:
			bases.rr.base_priority = param->sched_priority;
			/* quantum isn't public yet */
			bases.rr.quantum = param->quantum;
			base = (policy_base_t)&bases.rr;
			count = POLICY_RR_BASE_COUNT;
			break;
		default:
			return EINVAL;
	}
	ret = thread_policy(kport, policy, base, count, TRUE);
	return (ret != KERN_SUCCESS) ? EINVAL : 0;
}
int
pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
{
	mach_port_t kport = MACH_PORT_NULL;
	int res;
	int bypass = 1;

	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == &_thread) {
		kport = _pthread_kernel_thread(t);
	} else {
		bypass = 0;
		(void)_pthread_lookup_thread(t, &kport, 0);
	}

	res = pthread_setschedparam_internal(t, kport, policy, param);
	if (res == 0) {
		if (bypass == 0) {
			// Ensure the thread is still valid.
			_PTHREAD_LOCK(_pthread_list_lock);
			res = _pthread_find_thread(t);
			if (res == 0) {
				t->policy = policy;
				t->param = *param;
			}
			_PTHREAD_UNLOCK(_pthread_list_lock);
		} else {
			t->policy = policy;
			t->param = *param;
		}
	}
	return res;
}
int
sched_get_priority_min(int policy)
{
	return default_priority - 16;
}

int
sched_get_priority_max(int policy)
{
	return default_priority + 16;
}
int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}
/*
 * Force LLVM not to optimise this to a call to __pthread_set_self, if it does
 * then _pthread_set_self won't be bound when secondary threads try and start up.
 */
PTHREAD_NOINLINE
void
_pthread_set_self(pthread_t p)
{
	return _pthread_set_self_internal(p, true);
}

PTHREAD_ALWAYS_INLINE
static inline void
_pthread_set_self_internal(pthread_t p, bool needs_tsd_base_set)
{
	if (p == NULL) {
		p = &_thread;
	}

	uint64_t tid = __thread_selfid();
	if (tid == -1ull) {
		PTHREAD_ABORT("failed to set thread_id");
	}

	p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = p;
	p->tsd[_PTHREAD_TSD_SLOT_ERRNO] = &p->err_no;
	p->thread_id = tid;

	if (needs_tsd_base_set) {
		_thread_set_tsd_base(&p->tsd[0]);
	}
}
struct _pthread_once_context {
	pthread_once_t *pthread_once;
	void (*routine)(void);
};

static void
__pthread_once_handler(void *context)
{
	struct _pthread_once_context *ctx = context;
	pthread_cleanup_push((void*)__os_once_reset, &ctx->pthread_once->once);
	ctx->routine();
	pthread_cleanup_pop(0);
	ctx->pthread_once->sig = _PTHREAD_ONCE_SIG;
}

int
pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
	struct _pthread_once_context ctx = { once_control, init_routine };
	do {
		os_once(&once_control->once, &ctx, __pthread_once_handler);
	} while (once_control->sig == _PTHREAD_ONCE_SIG_init);
	return 0;
}
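
/*
 * Usage sketch (illustration only; init_tables is a hypothetical name).
 * os_once() runs the handler exactly once; the loop above only re-enters
 * if the initializer was interrupted before the signature was set.
 *
 *   static pthread_once_t once = PTHREAD_ONCE_INIT;
 *   static void init_tables(void) { ... }
 *
 *   pthread_once(&once, init_tables);   // safe from any thread
 */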
void
_pthread_testcancel(pthread_t thread, int isconforming)
{
	const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);

	_PTHREAD_LOCK(thread->lock);
	bool canceled = ((thread->cancel_state & flags) == flags);
	_PTHREAD_UNLOCK(thread->lock);

	if (canceled) {
		pthread_exit(isconforming ? PTHREAD_CANCELED : 0);
	}
}
int
_pthread_exit_if_canceled(int error)
{
	if (__unix_conforming && ((error & 0xff) == EINTR) && (__pthread_canceled(0) == 0)) {
		pthread_t self = pthread_self();
		if (self != NULL) {
			self->cancel_error = error;
		}
		pthread_exit(PTHREAD_CANCELED);
	}
	return error;
}
int
pthread_getconcurrency(void)
{
	return pthread_concurrency;
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0) {
		return EINVAL;
	}
	pthread_concurrency = new_level;
	return 0;
}
static unsigned long
_pthread_strtoul(const char *p, const char **endptr, int base)
{
	uintptr_t val = 0;

	// Expect hex string starting with "0x"
	if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') {
		p += 2;
		while (1) {
			char c = *p;
			if ('0' <= c && c <= '9') {
				val = (val << 4) + (c - '0');
			} else if ('a' <= c && c <= 'f') {
				val = (val << 4) + (c - 'a' + 10);
			} else if ('A' <= c && c <= 'F') {
				val = (val << 4) + (c - 'A' + 10);
			} else {
				break;
			}
			++p;
		}
	}

	*endptr = (char *)p;
	return val;
}
static int
parse_main_stack_params(const char *apple[],
			void **stackaddr,
			size_t *stacksize,
			void **allocaddr,
			size_t *allocsize)
{
	const char *p = _simple_getenv(apple, "main_stack");
	if (!p) return 0;

	int ret = 0;
	const char *s = p;

	*stackaddr = _pthread_strtoul(s, &s, 16);
	if (*s != ',') goto out;

	*stacksize = _pthread_strtoul(s + 1, &s, 16);
	if (*s != ',') goto out;

	*allocaddr = _pthread_strtoul(s + 1, &s, 16);
	if (*s != ',') goto out;

	*allocsize = _pthread_strtoul(s + 1, &s, 16);
	if (*s != ',' && *s != 0) goto out;

	ret = 1;
out:
	bzero((char *)p, strlen(p));
	return ret;
}
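
/*
 * The parser above accepts an apple[] entry of the form (hypothetical
 * values, shown for illustration only):
 *
 *   main_stack=0x7fff5fc00000,0x800000,0x7fff5f3ff000,0x801000
 *
 * i.e. four 0x-prefixed hex fields: stack top, stack size, allocation
 * base, allocation size. The string is bzero'ed after parsing so the
 * values do not linger in the environment.
 */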
#if !defined(VARIANT_STATIC)
void *
malloc(size_t sz)
{
	if (_pthread_malloc) {
		return _pthread_malloc(sz);
	} else {
		return NULL;
	}
}

void
free(void *p)
{
	if (_pthread_free) {
		_pthread_free(p);
	}
}
#endif // VARIANT_STATIC
/*
 * Perform package initialization - called automatically when application starts
 */
struct ProgramVars; /* forward reference */

int
__pthread_init(const struct _libpthread_functions *pthread_funcs,
	       const char *envp[] __unused,
	       const char *apple[],
	       const struct ProgramVars *vars __unused)
{
	// Save our provided pushed-down functions
	if (pthread_funcs) {
		exitf = pthread_funcs->exit;

		if (pthread_funcs->version >= 2) {
			_pthread_malloc = pthread_funcs->malloc;
			_pthread_free = pthread_funcs->free;
		}
	}

	//
	// Get host information
	//

	kern_return_t kr;
	host_flavor_t flavor = HOST_PRIORITY_INFO;
	mach_msg_type_number_t count = HOST_PRIORITY_INFO_COUNT;
	host_priority_info_data_t priority_info;
	host_t host = mach_host_self();
	kr = host_info(host, flavor, (host_info_t)&priority_info, &count);
	if (kr != KERN_SUCCESS) {
		PTHREAD_ABORT("host_info(mach_host_self(), ...) failed: %s", mach_error_string(kr));
	}
	default_priority = priority_info.user_priority;
	min_priority = priority_info.minimum_priority;
	max_priority = priority_info.maximum_priority;

	mach_port_deallocate(mach_task_self(), host);

	//
	// Set up the main thread structure
	//

	// Get the address and size of the main thread's stack from the kernel.
	void *stackaddr = 0;
	size_t stacksize = 0;
	void *allocaddr = 0;
	size_t allocsize = 0;
	if (!parse_main_stack_params(apple, &stackaddr, &stacksize, &allocaddr, &allocsize) ||
			stackaddr == NULL || stacksize == 0) {
		// Fall back to previous behavior.
		size_t len = sizeof(stackaddr);
		int mib[] = { CTL_KERN, KERN_USRSTACK };
		if (__sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0) {
#if defined(__LP64__)
			stackaddr = (void *)USRSTACK64;
#else
			stackaddr = (void *)USRSTACK;
#endif
		}
		stacksize = DFLSSIZ;
		allocaddr = 0;
		allocsize = 0;
	}

	pthread_t thread = &_thread;
	pthread_attr_init(&_pthread_attr_default);
	_pthread_struct_init(thread, &_pthread_attr_default,
			stackaddr, stacksize,
			allocaddr, allocsize);
	thread->detached = PTHREAD_CREATE_JOINABLE;

	// Finish initialization with common code that is reinvoked on the
	// child side of a fork.

	// Finishes initialization of main thread attributes.
	// Initializes the thread list and add the main thread.
	// Calls _pthread_set_self() to prepare the main thread for execution.
	__pthread_fork_child_internal(thread);

	// Set up kernel entry points with __bsdthread_register.
	pthread_workqueue_atfork_child();

	// Have pthread_key do its init envvar checks.
	_pthread_key_global_init(envp);

	return 0;
}
PTHREAD_NOEXPORT void
__pthread_fork_child_internal(pthread_t p)
{
	TAILQ_INIT(&__pthread_head);
	_PTHREAD_LOCK_INIT(_pthread_list_lock);

	// Re-use the main thread's static storage if no thread was provided.
	if (p == NULL) {
		if (_thread.tsd[0] != 0) {
			bzero(&_thread, sizeof(struct _pthread));
		}
		p = &_thread;
	}

	_PTHREAD_LOCK_INIT(p->lock);
	_pthread_set_kernel_thread(p, mach_thread_self());
	_pthread_set_reply_port(p, mach_reply_port());
	p->__cleanup_stack = NULL;
	p->joiner_notify = SEMAPHORE_NULL;
	p->joiner = MACH_PORT_NULL;
	p->detached |= _PTHREAD_CREATE_PARENT;
	p->tsd[__TSD_SEMAPHORE_CACHE] = SEMAPHORE_NULL;

	// Initialize the list of threads with the new main thread.
	TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
	_pthread_count = 1;

	_pthread_set_self(p);
	_pthread_introspection_thread_start(p);
}
/*
 * Query/update the cancelability 'state' of a thread
 */
PTHREAD_NOEXPORT int
_pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
{
	pthread_t self;

	switch (state) {
		case PTHREAD_CANCEL_ENABLE:
			if (conforming) {
				__pthread_canceled(1);
			}
			break;
		case PTHREAD_CANCEL_DISABLE:
			if (conforming) {
				__pthread_canceled(2);
			}
			break;
		default:
			return EINVAL;
	}

	self = pthread_self();
	_PTHREAD_LOCK(self->lock);
	if (oldstate) {
		*oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
	}
	self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state |= state;
	_PTHREAD_UNLOCK(self->lock);
	if (!conforming) {
		_pthread_testcancel(self, 0); /* See if we need to 'die' now... */
	}
	return 0;
}
/* When a thread exits set the cancellation state to DISABLE and DEFERRED */
static void
_pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming)
{
	_PTHREAD_LOCK(self->lock);
	self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
	self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
	if (value_ptr == PTHREAD_CANCELED) {
		self->detached |= _PTHREAD_WASCANCEL;
	}
	_PTHREAD_UNLOCK(self->lock);
}
int
_pthread_join_cleanup(pthread_t thread, void **value_ptr, int conforming)
{
	// Returns ESRCH if the thread was not created joinable.
	int ret = __pthread_remove_thread(thread, false, NULL);
	if (ret != 0) {
		return ret;
	}

	if (value_ptr) {
		*value_ptr = __pthread_get_exit_value(thread, conforming);
	}

	_pthread_introspection_thread_destroy(thread);
	_pthread_deallocate(thread);

	return 0;
}
/* ALWAYS called with list lock and return with list lock */
static int
_pthread_find_thread(pthread_t thread)
{
	if (thread != NULL) {
		pthread_t p;
loop:
		TAILQ_FOREACH(p, &__pthread_head, plist) {
			if (p == thread) {
				if (_pthread_kernel_thread(thread) == MACH_PORT_NULL) {
					_PTHREAD_UNLOCK(_pthread_list_lock);
					sched_yield();
					_PTHREAD_LOCK(_pthread_list_lock);
					goto loop;
				}
				return 0;
			}
		}
	}
	return ESRCH;
}
int
_pthread_lookup_thread(pthread_t thread, mach_port_t *portp, int only_joinable)
{
	mach_port_t kport = MACH_PORT_NULL;
	int ret;

	if (thread == NULL) {
		return ESRCH;
	}

	_PTHREAD_LOCK(_pthread_list_lock);

	ret = _pthread_find_thread(thread);
	if (ret == 0) {
		// Fail if we only want joinable threads and the thread found is
		// not in the detached state.
		if (only_joinable != 0 && (thread->detached & PTHREAD_CREATE_DETACHED) != 0) {
			ret = EINVAL;
		} else {
			kport = _pthread_kernel_thread(thread);
		}
	}

	_PTHREAD_UNLOCK(_pthread_list_lock);

	if (portp != NULL) {
		*portp = kport;
	}

	return ret;
}
void
_pthread_clear_qos_tsd(mach_port_t thread_port)
{
	if (thread_port == MACH_PORT_NULL || (uintptr_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF) == thread_port) {
		/* Clear the current thread's TSD, that can be done inline. */
		_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0));
	} else {
		pthread_t p;

		_PTHREAD_LOCK(_pthread_list_lock);

		TAILQ_FOREACH(p, &__pthread_head, plist) {
			mach_port_t kp = _pthread_kernel_thread(p);
			if (thread_port == kp) {
				p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
				break;
			}
		}

		_PTHREAD_UNLOCK(_pthread_list_lock);
	}
}
/***** pthread workqueue support routines *****/

PTHREAD_NOEXPORT void
pthread_workqueue_atfork_child(void)
{
	struct _pthread_registration_data data = {};
	data.version = sizeof(struct _pthread_registration_data);
	data.dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
	data.tsd_offset = offsetof(struct _pthread, tsd);

	int rv = __bsdthread_register(thread_start,
			start_wqthread, (int)PTHREAD_SIZE,
			(void*)&data, (uintptr_t)sizeof(data),
			data.dispatch_queue_offset);

	if (rv > 0) {
		__pthread_supported_features = rv;
	}

	pthread_priority_t main_qos = (pthread_priority_t)data.main_qos;

	if (_pthread_priority_get_qos_newest(main_qos) != QOS_CLASS_UNSPECIFIED) {
		_pthread_set_main_qos(main_qos);
		_thread.tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = main_qos;
	}

	if (__libdispatch_workerfunction != NULL) {
		// prepare the kernel for workq action
		(void)__workq_open();
	}
}
// workqueue entry point from kernel
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr, void *keventlist, int flags, int nkevents)
{
	PTHREAD_ASSERT(flags & WQ_FLAG_THREAD_NEWSPI);

	int thread_reuse = flags & WQ_FLAG_THREAD_REUSE;
	int thread_class = flags & WQ_FLAG_THREAD_PRIOMASK;
	int overcommit = (flags & WQ_FLAG_THREAD_OVERCOMMIT) != 0;
	int kevent = flags & WQ_FLAG_THREAD_KEVENT;
	PTHREAD_ASSERT((!kevent) || (__libdispatch_keventfunction != NULL));

	pthread_priority_t priority = 0;
	unsigned long priority_flags = 0;

	if (overcommit)
		priority_flags |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
	if (flags & WQ_FLAG_THREAD_EVENT_MANAGER)
		priority_flags |= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
	if (kevent)
		priority_flags |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;

	if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_MAINTENANCE) == 0) {
		priority = _pthread_priority_make_version2(thread_class, 0, priority_flags);
	} else {
		priority = _pthread_priority_make_newest(thread_class, 0, priority_flags);
	}

	if (thread_reuse == 0) {
		// New thread created by kernel, needs initialization.
		void *stackaddr = self;
		size_t stacksize = (uintptr_t)self - (uintptr_t)stacklowaddr;

		_pthread_struct_init(self, &_pthread_attr_default,
				stackaddr, stacksize,
				PTHREAD_ALLOCADDR(stackaddr, stacksize), PTHREAD_ALLOCSIZE(stackaddr, stacksize));

		_pthread_set_kernel_thread(self, kport);
		self->wqthread = 1;
		self->wqkillset = 0;

		// Not a joinable thread.
		self->detached &= ~PTHREAD_CREATE_JOINABLE;
		self->detached |= PTHREAD_CREATE_DETACHED;

		// Update the running thread count and set childrun bit.
		bool thread_tsd_base_set = (bool)(flags & WQ_FLAG_THREAD_TSD_BASE_SET);
		_pthread_set_self_internal(self, !thread_tsd_base_set);
		_pthread_introspection_thread_create(self, false);
		__pthread_add_thread(self, false, false);
	}

	// If we're running with fine-grained priority, we also need to
	// set this thread to have the QoS class provided to us by the kernel
	if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
		_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, _pthread_priority_make_newest(thread_class, 0, priority_flags));
	}

	PTHREAD_ASSERT(self);
	PTHREAD_ASSERT(self == pthread_self());

	if (kevent){
		self->fun = (void *(*)(void*))__libdispatch_keventfunction;
	} else {
		self->fun = (void *(*)(void *))__libdispatch_workerfunction;
	}
	self->arg = (void *)(uintptr_t)thread_class;

	if (kevent && keventlist && nkevents > 0){
kevent_errors_retry:
		(*__libdispatch_keventfunction)(&keventlist, &nkevents);

		int errors_out = __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, keventlist, nkevents, 0);
		if (errors_out > 0){
			nkevents = errors_out;
			goto kevent_errors_retry;
		} else if (errors_out < 0){
			PTHREAD_ABORT("kevent return produced an error: %d", errno);
		}
		goto thexit;
	} else if (kevent) {
		(*__libdispatch_keventfunction)(NULL, NULL);

		__workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, NULL, 0, 0);
		goto thexit;
	}

	if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
		if (!__workq_newapi) {
			/* Old thread priorities are inverted from where we have them in
			 * the new flexible priority scheme. The highest priority is zero,
			 * up to 2, with background at 3.
			 */
			pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;

			int opts = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;

			if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_DEFAULT) == 0) {
				/* Dirty hack to support kernels that don't have QOS_CLASS_DEFAULT. */
				switch (thread_class) {
					case QOS_CLASS_USER_INTERACTIVE:
						thread_class = QOS_CLASS_USER_INITIATED;
						break;
					case QOS_CLASS_USER_INITIATED:
						thread_class = QOS_CLASS_DEFAULT;
						break;
					default:
						break;
				}
			}

			switch (thread_class) {
				/* QOS_CLASS_USER_INTERACTIVE is not currently requested, for old dispatch priority compatibility */
				case QOS_CLASS_USER_INITIATED:
					(*func)(WORKQ_HIGH_PRIOQUEUE, opts, NULL);
					break;

				case QOS_CLASS_DEFAULT:
					/* B&I builders can't pass a QOS_CLASS_DEFAULT thread to dispatch, for fear of the QoS being
					 * picked up by NSThread (et al) and transported around the system. So change the TSD to
					 * make this thread look like QOS_CLASS_USER_INITIATED even though it will still run as legacy.
					 */
					_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, _pthread_priority_make_newest(QOS_CLASS_USER_INITIATED, 0, 0));
					(*func)(WORKQ_DEFAULT_PRIOQUEUE, opts, NULL);
					break;

				case QOS_CLASS_UTILITY:
					(*func)(WORKQ_LOW_PRIOQUEUE, opts, NULL);
					break;

				case QOS_CLASS_BACKGROUND:
					(*func)(WORKQ_BG_PRIOQUEUE, opts, NULL);
					break;

				/* Legacy dispatch does not use QOS_CLASS_MAINTENANCE, so no need to handle it here */
			}
		} else {
			/* "New" API, where dispatch is expecting to be given the thread priority */
			(*__libdispatch_workerfunction)(priority);
		}
	} else {
		/* We're the new library running on an old kext, so thread_class is really the workq priority. */
		pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;
		int options = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;

		(*func)(thread_class, options, NULL);
	}

	__workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);

thexit:
	{
		pthread_priority_t current_priority = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS);
		if ((current_priority & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) ||
				(_pthread_priority_get_qos_newest(current_priority) > WQ_THREAD_CLEANUP_QOS)) {
			// Reset QoS to something low for the cleanup process
			pthread_priority_t priority = _pthread_priority_make_newest(WQ_THREAD_CLEANUP_QOS, 0, 0);
			_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, priority);
		}
	}

	_pthread_exit(self, NULL);
}
/***** pthread workqueue API for libdispatch *****/

void
pthread_workqueue_setdispatchoffset_np(int offset)
{
	__libdispatch_offset = offset;
}

static int
pthread_workqueue_setdispatch_with_kevent_np(pthread_workqueue_function2_t queue_func, pthread_workqueue_function_kevent_t kevent_func)
{
	int res = EBUSY;
	if (__libdispatch_workerfunction == NULL) {
		// Check whether the kernel supports new SPIs
		res = __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP, NULL, __libdispatch_offset, kevent_func != NULL ? 0x01 : 0x00);
		if (res == -1){
			res = ENOTSUP;
		} else {
			__libdispatch_workerfunction = queue_func;
			__libdispatch_keventfunction = kevent_func;

			// Prepare the kernel for workq action
			(void)__workq_open();
			if (__is_threaded == 0) {
				__is_threaded = 1;
			}
		}
	}
	return res;
}
int
_pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func, pthread_workqueue_function_kevent_t kevent_func, int offset, int flags)
{
	if (flags != 0) {
		return ENOTSUP;
	}

	__workq_newapi = true;
	__libdispatch_offset = offset;

	int rv = pthread_workqueue_setdispatch_with_kevent_np(queue_func, kevent_func);
	return rv;
}

int
_pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flags)
{
	return _pthread_workqueue_init_with_kevent(func, NULL, offset, flags);
}
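
/*
 * Caller-side sketch (illustration only; queue_entry and kevent_entry
 * are hypothetical names): libdispatch hands its entry points to
 * libpthread at startup roughly like this.
 *
 *   static void queue_entry(pthread_priority_t prio) { ... }
 *   static void kevent_entry(void **events, int *nevents) { ... }
 *
 *   _pthread_workqueue_init_with_kevent(queue_entry, kevent_entry,
 *           dispatch_queue_offset, 0);
 *
 * After registration, _pthread_wqthread() routes every kernel-created
 * worker into one of the two registered functions.
 */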
int
pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func)
{
	return pthread_workqueue_setdispatch_with_kevent_np((pthread_workqueue_function2_t)worker_func, NULL);
}

int
_pthread_workqueue_supported(void)
{
	return __pthread_supported_features;
}
int
pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads)
{
	int res = 0;

	// Cannot add threads without a worker function registered.
	if (__libdispatch_workerfunction == NULL) {
		return EPERM;
	}

	pthread_priority_t kp = 0;

	if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
		/* The new kernel API takes the new QoS class + relative priority style of
		 * priority. This entry point is here for compatibility with old libdispatch
		 * versions (ie. the simulator). We request the corresponding new bracket
		 * from the kernel, then on the way out run all dispatch queues that were
		 * requested.
		 */

		int compat_priority = queue_priority & WQ_FLAG_THREAD_PRIOMASK;
		int flags = 0;

		/* To make sure the library does not issue more threads to dispatch than
		 * were requested, the total number of active requests is recorded.
		 */
		if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
			flags = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
		}

		kp = _pthread_qos_class_encode_workqueue(compat_priority, flags);
	} else {
		/* Running on the old kernel, queue_priority is what we pass directly to
		 * the syscall.
		 */
		kp = queue_priority & WQ_FLAG_THREAD_PRIOMASK;

		if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
			kp |= WORKQUEUE_OVERCOMMIT;
		}
	}

	res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)kp);
	if (res == -1) {
		res = errno;
	}
	return res;
}
int
_pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
{
	int res = 0;

	if (__libdispatch_workerfunction == NULL) {
		return EPERM;
	}

	if ((__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) == 0) {
		return ENOTSUP;
	}

	res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)priority);
	if (res == -1) {
		res = errno;
	}
	return res;
}
int
_pthread_workqueue_set_event_manager_priority(pthread_priority_t priority)
{
	int res = __workq_kernreturn(WQOPS_SET_EVENT_MANAGER_PRIORITY, NULL, (int)priority, 0);
	if (res == -1) {
		res = errno;
	}
	return res;
}
/*
 * Introspection SPI for libpthread.
 */

static pthread_introspection_hook_t _pthread_introspection_hook;

pthread_introspection_hook_t
pthread_introspection_hook_install(pthread_introspection_hook_t hook)
{
	if (os_slowpath(!hook)) {
		PTHREAD_ABORT("pthread_introspection_hook_install was passed NULL");
	}
	pthread_introspection_hook_t prev;
	prev = __sync_swap(&_pthread_introspection_hook, hook);
	return prev;
}
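
/*
 * Hook sketch (illustration only; my_hook is a hypothetical name). A
 * hook receives one of the PTHREAD_INTROSPECTION_THREAD_* events plus
 * the address/size reported by the callouts below.
 *
 *   static void my_hook(unsigned int event, pthread_t thread,
 *           void *addr, size_t size)
 *   {
 *       if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
 *           // thread structure lives at [addr, addr + size)
 *       }
 *   }
 *
 *   pthread_introspection_hook_install(my_hook);
 */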
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_create(pthread_t t, bool destroy)
{
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_CREATE, t, t,
			PTHREAD_SIZE);
	if (!destroy) return;
	_pthread_introspection_thread_destroy(t);
}

static inline void
_pthread_introspection_thread_create(pthread_t t, bool destroy)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_create(t, destroy);
}
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_start(pthread_t t)
{
	size_t freesize;
	void *freeaddr;
	if (t == &_thread) {
		freesize = t->stacksize + t->guardsize;
		freeaddr = t->stackaddr - freesize;
	} else {
		freesize = t->freesize - PTHREAD_SIZE;
		freeaddr = t->freeaddr;
	}
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_START, t,
			freeaddr, freesize);
}

static inline void
_pthread_introspection_thread_start(pthread_t t)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_start(t);
}
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_terminate(pthread_t t,
		void *freeaddr, size_t freesize, bool destroy)
{
	if (destroy && freesize) {
		freesize -= PTHREAD_SIZE;
	}
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE, t,
			freeaddr, freesize);
	if (!destroy) return;
	_pthread_introspection_thread_destroy(t);
}

static inline void
_pthread_introspection_thread_terminate(pthread_t t, void *freeaddr,
		size_t freesize, bool destroy)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_terminate(t, freeaddr, freesize,
			destroy);
}
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_destroy(pthread_t t)
{
	if (t == &_thread) return;
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY, t, t,
			PTHREAD_SIZE);
}

static inline void
_pthread_introspection_thread_destroy(pthread_t t)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_destroy(t);
}