2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 * POSIX Pthread Library
54 #include "workqueue_private.h"
55 #include "introspection_private.h"
56 #include "qos_private.h"
62 #include <mach/mach_init.h>
63 #include <mach/mach_vm.h>
65 #include <sys/resource.h>
66 #include <sys/sysctl.h>
67 #include <sys/queue.h>
69 #include <machine/vmparam.h>
70 #define __APPLE_API_PRIVATE
71 #include <machine/cpu_capabilities.h>
72 #include <libkern/OSAtomic.h>
75 #include <platform/string.h>
76 #include <platform/compat.h>
78 extern int __sysctl(int *name
, u_int namelen
, void *oldp
, size_t *oldlenp
,
79 void *newp
, size_t newlen
);
80 extern void __exit(int) __attribute__((noreturn
));
82 static void (*exitf
)(int) = __exit
;
83 __private_extern__
void* (*_pthread_malloc
)(size_t) = NULL
;
84 __private_extern__
void (*_pthread_free
)(void *) = NULL
;
90 // This global should be used (carefully) by anyone needing to know if a
91 // pthread (other than the main thread) has been created.
92 int __is_threaded
= 0;
94 int __unix_conforming
= 0;
96 // _pthread_list_lock protects _pthread_count, access to the __pthread_head
97 // list, and the parentcheck, childrun and childexit flags of the pthread
98 // structure. Externally imported by pthread_cancelable.c.
99 __private_extern__ pthread_lock_t _pthread_list_lock
= LOCK_INITIALIZER
;
100 __private_extern__
struct __pthread_list __pthread_head
= TAILQ_HEAD_INITIALIZER(__pthread_head
);
101 static int _pthread_count
= 1;
103 #if PTHREAD_LAYOUT_SPI
105 const struct pthread_layout_offsets_s pthread_layout_offsets
= {
107 .plo_pthread_tsd_base_offset
= offsetof(struct _pthread
, tsd
),
108 .plo_pthread_tsd_base_address_offset
= 0,
109 .plo_pthread_tsd_entry_size
= sizeof(((struct _pthread
*)NULL
)->tsd
[0]),
112 #endif // PTHREAD_LAYOUT_SPI
118 // Mach message notification that a thread needs to be recycled.
119 typedef struct _pthread_reap_msg_t
{
120 mach_msg_header_t header
;
122 mach_msg_trailer_t trailer
;
123 } pthread_reap_msg_t
;
125 #define pthreadsize ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
126 static pthread_attr_t _pthread_attr_default
= {0};
128 // The main thread's pthread_t
129 static struct _pthread _thread
__attribute__((aligned(4096))) = {0};
131 static int default_priority
;
132 static int max_priority
;
133 static int min_priority
;
134 static int pthread_concurrency
;
136 // work queue support data
137 static void (*__libdispatch_workerfunction
)(pthread_priority_t
) = NULL
;
138 static void (*__libdispatch_keventfunction
)(void **events
, int *nevents
) = NULL
;
139 static int __libdispatch_offset
;
141 // supported feature set
142 int __pthread_supported_features
;
145 // Function prototypes
148 // pthread primitives
149 static int _pthread_allocate(pthread_t
*thread
, const pthread_attr_t
*attrs
, void **stack
);
150 static int _pthread_deallocate(pthread_t t
);
152 static void _pthread_terminate(pthread_t t
);
154 static void _pthread_struct_init(pthread_t t
,
155 const pthread_attr_t
*attrs
,
160 extern void _pthread_set_self(pthread_t
);
162 static void _pthread_dealloc_reply_port(pthread_t t
);
164 static inline void __pthread_add_thread(pthread_t t
, bool parent
);
165 static inline int __pthread_remove_thread(pthread_t t
, bool child
, bool *should_exit
);
167 static int _pthread_find_thread(pthread_t thread
);
169 static void _pthread_exit(pthread_t self
, void *value_ptr
) __dead2
;
170 static void _pthread_setcancelstate_exit(pthread_t self
, void *value_ptr
, int conforming
);
172 static inline void _pthread_introspection_thread_create(pthread_t t
, bool destroy
);
173 static inline void _pthread_introspection_thread_start(pthread_t t
);
174 static inline void _pthread_introspection_thread_terminate(pthread_t t
, void *freeaddr
, size_t freesize
, bool destroy
);
175 static inline void _pthread_introspection_thread_destroy(pthread_t t
);
177 extern void start_wqthread(pthread_t self
, mach_port_t kport
, void *stackaddr
, void *unused
, int reuse
); // trampoline into _pthread_wqthread
178 extern void thread_start(pthread_t self
, mach_port_t kport
, void *(*fun
)(void *), void * funarg
, size_t stacksize
, unsigned int flags
); // trampoline into _pthread_start
180 void pthread_workqueue_atfork_child(void);
182 static bool __workq_newapi
;
184 /* Compatibility: previous pthread API used WORKQUEUE_OVERCOMMIT to request overcommit threads from
185 * the kernel. This definition is kept here, in userspace only, to perform the compatibility shim
186 * from old API requests to the new kext conventions.
188 #define WORKQUEUE_OVERCOMMIT 0x10000
191 * Flags field passed to bsdthread_create and back in pthread_start
192 31 <---------------------------------> 0
193 _________________________________________
194 | flags(8) | policy(8) | importance(16) |
195 -----------------------------------------
198 #define PTHREAD_START_CUSTOM 0x01000000
199 #define PTHREAD_START_SETSCHED 0x02000000
200 #define PTHREAD_START_DETACHED 0x04000000
201 #define PTHREAD_START_QOSCLASS 0x08000000
202 #define PTHREAD_START_QOSCLASS_MASK 0xffffff
203 #define PTHREAD_START_POLICY_BITSHIFT 16
204 #define PTHREAD_START_POLICY_MASK 0xff
205 #define PTHREAD_START_IMPORTANCE_MASK 0xffff
207 static int pthread_setschedparam_internal(pthread_t
, mach_port_t
, int, const struct sched_param
*);
208 extern pthread_t
__bsdthread_create(void *(*func
)(void *), void * func_arg
, void * stack
, pthread_t thread
, unsigned int flags
);
209 extern int __bsdthread_register(void (*)(pthread_t
, mach_port_t
, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t
, mach_port_t
, void *, void *, int), int,void (*)(pthread_t
, mach_port_t
, void *(*)(void *), void *, size_t, unsigned int), int32_t *,__uint64_t
);
210 extern int __bsdthread_terminate(void * freeaddr
, size_t freesize
, mach_port_t kport
, mach_port_t joinsem
);
211 extern __uint64_t
__thread_selfid( void );
212 extern int __pthread_canceled(int);
213 extern int __pthread_kill(mach_port_t
, int);
215 extern int __workq_open(void);
216 extern int __workq_kernreturn(int, void *, int, int);
218 #if defined(__i386__) || defined(__x86_64__)
219 static const mach_vm_address_t PTHREAD_STACK_HINT
= 0xB0000000;
221 #error no PTHREAD_STACK_HINT for this architecture
224 #if defined(__i386__) && defined(static_assert)
225 // Check for regression of <rdar://problem/13249323>
226 static_assert(offsetof(struct _pthread
, err_no
) == 68);
229 // Allocate a thread structure, stack and guard page.
231 // The thread structure may optionally be placed in the same allocation as the
232 // stack, residing above the top of the stack. This cannot be done if a
233 // custom stack address is provided.
235 // Similarly the guard page cannot be allocated if a custom stack address is
238 // The allocated thread structure is initialized with values that indicate how
239 // it should be freed.
242 _pthread_allocate(pthread_t
*thread
, const pthread_attr_t
*attrs
, void **stack
)
247 mach_vm_address_t allocaddr
= PTHREAD_STACK_HINT
;
248 size_t allocsize
= 0;
249 size_t guardsize
= 0;
250 size_t stacksize
= 0;
252 PTHREAD_ASSERT(attrs
->stacksize
>= PTHREAD_STACK_MIN
);
257 // Allocate a pthread structure if necessary
259 if (attrs
->stackaddr
!= NULL
) {
260 PTHREAD_ASSERT(((uintptr_t)attrs
->stackaddr
% vm_page_size
) == 0);
261 *stack
= attrs
->stackaddr
;
262 allocsize
= pthreadsize
;
264 guardsize
= attrs
->guardsize
;
265 stacksize
= attrs
->stacksize
;
266 allocsize
= stacksize
+ guardsize
+ pthreadsize
;
269 kr
= mach_vm_map(mach_task_self(),
273 VM_MAKE_TAG(VM_MEMORY_STACK
)| VM_FLAGS_ANYWHERE
,
281 if (kr
!= KERN_SUCCESS
) {
282 kr
= mach_vm_allocate(mach_task_self(),
285 VM_MAKE_TAG(VM_MEMORY_STACK
)| VM_FLAGS_ANYWHERE
);
288 if (kr
== KERN_SUCCESS
) {
289 // The stack grows down.
290 // Set the guard page at the lowest address of the
291 // newly allocated stack. Return the highest address
294 (void)mach_vm_protect(mach_task_self(), allocaddr
, guardsize
, FALSE
, VM_PROT_NONE
);
297 // Thread structure resides at the top of the stack.
298 t
= (void *)(allocaddr
+ stacksize
+ guardsize
);
300 // Returns the top of the stack.
306 _pthread_struct_init(t
, attrs
, *stack
, attrs
->stacksize
, 0);
307 t
->freeaddr
= (void *)allocaddr
;
308 t
->freesize
= allocsize
;
318 _pthread_deallocate(pthread_t t
)
320 // Don't free the main thread.
323 ret
= mach_vm_deallocate(mach_task_self(), t
->freeaddr
, t
->freesize
);
324 PTHREAD_ASSERT(ret
== KERN_SUCCESS
);
329 // Terminates the thread if called from the currently running thread.
332 _pthread_terminate(pthread_t t
)
334 PTHREAD_ASSERT(t
== pthread_self());
336 uintptr_t freeaddr
= (uintptr_t)t
->freeaddr
;
337 size_t freesize
= t
->freesize
;
339 mach_port_t kport
= _pthread_kernel_thread(t
);
340 semaphore_t joinsem
= t
->joiner_notify
;
342 _pthread_dealloc_reply_port(t
);
344 // If the pthread_t sticks around after the __bsdthread_terminate, we'll
345 // need to free it later
347 // After the call to __pthread_remove_thread, it is only safe to
348 // dereference the pthread_t structure if EBUSY has been returned.
350 bool destroy
, should_exit
;
351 destroy
= (__pthread_remove_thread(t
, true, &should_exit
) != EBUSY
);
354 // Don't free the main thread.
356 } else if (!destroy
) {
357 // We were told to keep the pthread_t structure around. In the common
358 // case, the pthread structure itself is part of the allocation
359 // described by freeaddr/freesize, in which case we need to split and
360 // only deallocate the area below the pthread structure. In the event
361 // of a custom stack, the freeaddr/size will be the pthread structure
362 // itself, in which case we shouldn't free anything.
363 if ((void*)t
> t
->freeaddr
&& (void*)t
< t
->freeaddr
+ t
->freesize
){
364 freesize
= trunc_page((uintptr_t)t
- (uintptr_t)freeaddr
);
365 t
->freeaddr
+= freesize
;
366 t
->freesize
-= freesize
;
374 _pthread_introspection_thread_terminate(t
, freeaddr
, freesize
, destroy
);
379 __bsdthread_terminate((void *)freeaddr
, freesize
, kport
, joinsem
);
380 PTHREAD_ABORT("thread %p didn't terminate", t
);
384 pthread_attr_destroy(pthread_attr_t
*attr
)
387 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
395 pthread_attr_getdetachstate(const pthread_attr_t
*attr
, int *detachstate
)
398 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
399 *detachstate
= attr
->detached
;
406 pthread_attr_getinheritsched(const pthread_attr_t
*attr
, int *inheritsched
)
409 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
410 *inheritsched
= attr
->inherit
;
417 pthread_attr_getschedparam(const pthread_attr_t
*attr
, struct sched_param
*param
)
420 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
421 *param
= attr
->param
;
428 pthread_attr_getschedpolicy(const pthread_attr_t
*attr
, int *policy
)
431 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
432 *policy
= attr
->policy
;
438 // Default stack size is 512KB; independent of the main thread's stack size.
439 static const size_t DEFAULT_STACK_SIZE
= 512 * 1024;
442 pthread_attr_init(pthread_attr_t
*attr
)
444 attr
->stacksize
= DEFAULT_STACK_SIZE
;
445 attr
->stackaddr
= NULL
;
446 attr
->sig
= _PTHREAD_ATTR_SIG
;
447 attr
->param
.sched_priority
= default_priority
;
448 attr
->param
.quantum
= 10; /* quantum isn't public yet */
449 attr
->detached
= PTHREAD_CREATE_JOINABLE
;
450 attr
->inherit
= _PTHREAD_DEFAULT_INHERITSCHED
;
451 attr
->policy
= _PTHREAD_DEFAULT_POLICY
;
454 attr
->guardsize
= vm_page_size
;
455 attr
->qosclass
= _pthread_priority_make_newest(QOS_CLASS_DEFAULT
, 0, 0);
460 pthread_attr_setdetachstate(pthread_attr_t
*attr
, int detachstate
)
463 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
464 (detachstate
== PTHREAD_CREATE_JOINABLE
||
465 detachstate
== PTHREAD_CREATE_DETACHED
)) {
466 attr
->detached
= detachstate
;
473 pthread_attr_setinheritsched(pthread_attr_t
*attr
, int inheritsched
)
476 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
477 (inheritsched
== PTHREAD_INHERIT_SCHED
||
478 inheritsched
== PTHREAD_EXPLICIT_SCHED
)) {
479 attr
->inherit
= inheritsched
;
486 pthread_attr_setschedparam(pthread_attr_t
*attr
, const struct sched_param
*param
)
489 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
490 /* TODO: Validate sched_param fields */
491 attr
->param
= *param
;
499 pthread_attr_setschedpolicy(pthread_attr_t
*attr
, int policy
)
502 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
503 (policy
== SCHED_OTHER
||
504 policy
== SCHED_RR
||
505 policy
== SCHED_FIFO
)) {
506 attr
->policy
= policy
;
514 pthread_attr_setscope(pthread_attr_t
*attr
, int scope
)
517 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
518 if (scope
== PTHREAD_SCOPE_SYSTEM
) {
519 // No attribute yet for the scope.
521 } else if (scope
== PTHREAD_SCOPE_PROCESS
) {
529 pthread_attr_getscope(const pthread_attr_t
*attr
, int *scope
)
532 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
533 *scope
= PTHREAD_SCOPE_SYSTEM
;
540 pthread_attr_getstackaddr(const pthread_attr_t
*attr
, void **stackaddr
)
543 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
544 *stackaddr
= attr
->stackaddr
;
551 pthread_attr_setstackaddr(pthread_attr_t
*attr
, void *stackaddr
)
554 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
555 ((uintptr_t)stackaddr
% vm_page_size
) == 0) {
556 attr
->stackaddr
= stackaddr
;
565 pthread_attr_getstacksize(const pthread_attr_t
*attr
, size_t *stacksize
)
568 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
569 *stacksize
= attr
->stacksize
;
576 pthread_attr_setstacksize(pthread_attr_t
*attr
, size_t stacksize
)
579 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
580 (stacksize
% vm_page_size
) == 0 &&
581 stacksize
>= PTHREAD_STACK_MIN
) {
582 attr
->stacksize
= stacksize
;
589 pthread_attr_getstack(const pthread_attr_t
*attr
, void **stackaddr
, size_t * stacksize
)
592 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
593 *stackaddr
= (void *)((uintptr_t)attr
->stackaddr
- attr
->stacksize
);
594 *stacksize
= attr
->stacksize
;
600 // Per SUSv3, the stackaddr is the base address, the lowest addressable byte
601 // address. This is not the same as in pthread_attr_setstackaddr.
603 pthread_attr_setstack(pthread_attr_t
*attr
, void *stackaddr
, size_t stacksize
)
606 if (attr
->sig
== _PTHREAD_ATTR_SIG
&&
607 ((uintptr_t)stackaddr
% vm_page_size
) == 0 &&
608 (stacksize
% vm_page_size
) == 0 &&
609 stacksize
>= PTHREAD_STACK_MIN
) {
610 attr
->stackaddr
= (void *)((uintptr_t)stackaddr
+ stacksize
);
611 attr
->stacksize
= stacksize
;
619 pthread_attr_setguardsize(pthread_attr_t
*attr
, size_t guardsize
)
622 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
623 /* Guardsize of 0 is valid, ot means no guard */
624 if ((guardsize
% vm_page_size
) == 0) {
625 attr
->guardsize
= guardsize
;
634 pthread_attr_getguardsize(const pthread_attr_t
*attr
, size_t *guardsize
)
637 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
638 *guardsize
= attr
->guardsize
;
646 * Create and start execution of a new thread.
650 _pthread_body(pthread_t self
)
652 _pthread_set_self(self
);
653 __pthread_add_thread(self
, false);
654 _pthread_exit(self
, (self
->fun
)(self
->arg
));
658 _pthread_start(pthread_t self
, mach_port_t kport
, void *(*fun
)(void *), void *arg
, size_t stacksize
, unsigned int pflags
)
660 if ((pflags
& PTHREAD_START_CUSTOM
) == 0) {
661 uintptr_t stackaddr
= self
;
662 _pthread_struct_init(self
, &_pthread_attr_default
, stackaddr
, stacksize
, 1);
664 if (pflags
& PTHREAD_START_SETSCHED
) {
665 self
->policy
= ((pflags
>> PTHREAD_START_POLICY_BITSHIFT
) & PTHREAD_START_POLICY_MASK
);
666 self
->param
.sched_priority
= (pflags
& PTHREAD_START_IMPORTANCE_MASK
);
669 if ((pflags
& PTHREAD_START_DETACHED
) == PTHREAD_START_DETACHED
) {
670 self
->detached
&= ~PTHREAD_CREATE_JOINABLE
;
671 self
->detached
|= PTHREAD_CREATE_DETACHED
;
675 if ((pflags
& PTHREAD_START_QOSCLASS
) != 0) {
676 /* The QoS class is cached in the TSD of the pthread, so to reflect the
677 * class that the kernel brought us up at, the TSD must be primed from the
680 self
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = (pflags
& PTHREAD_START_QOSCLASS_MASK
);
682 /* Give the thread a default QoS tier, of zero. */
683 self
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED
, 0, 0);
686 _pthread_set_kernel_thread(self
, kport
);
694 _pthread_struct_init(pthread_t t
,
695 const pthread_attr_t
*attrs
,
700 t
->sig
= _PTHREAD_SIG
;
701 t
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_SELF
] = t
;
702 t
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED
, 0, 0);
705 t
->stacksize
= stacksize
;
706 t
->stackaddr
= stackaddr
;
708 t
->kernalloc
= kernalloc
;
711 * The pthread may be offset into a page. In that event, by contract
712 * with the kernel, the allocation will extend pthreadsize from the
713 * start of the next page. There's also one page worth of allocation
714 * below stacksize for the guard page. <rdar://problem/19941744>
716 t
->freeaddr
= (stackaddr
- stacksize
) - vm_page_size
;
717 t
->freesize
= (round_page((uintptr_t)stackaddr
) + pthreadsize
) - (uintptr_t)t
->freeaddr
;
720 t
->guardsize
= attrs
->guardsize
;
721 t
->detached
= attrs
->detached
;
722 t
->inherit
= attrs
->inherit
;
723 t
->policy
= attrs
->policy
;
724 t
->schedset
= attrs
->schedset
;
725 t
->param
= attrs
->param
;
726 t
->cancel_state
= PTHREAD_CANCEL_ENABLE
| PTHREAD_CANCEL_DEFERRED
;
729 /* Need to deprecate this in future */
731 _pthread_is_threaded(void)
733 return __is_threaded
;
946 /* Non-portable public API to know whether this process has (had) at least one thread
947 * apart from the main thread. There could be a race if a thread is in the process of
948 * being created at the time of the call. It does not tell whether there is more than one thread
949 * at this point in time.
742 pthread_is_threaded_np(void)
744 return __is_threaded
;
748 pthread_mach_thread_np(pthread_t t
)
750 mach_port_t kport
= MACH_PORT_NULL
;
752 if (t
== pthread_self()) {
754 * If the call is on self, return the kernel port. We cannot
755 * add this bypass for main thread as it might have exited,
756 * and we should not return stale port info.
758 kport
= _pthread_kernel_thread(t
);
760 (void)_pthread_lookup_thread(t
, &kport
, 0);
767 pthread_from_mach_thread_np(mach_port_t kernel_thread
)
769 struct _pthread
*p
= NULL
;
771 /* No need to wait as mach port is already known */
772 LOCK(_pthread_list_lock
);
774 TAILQ_FOREACH(p
, &__pthread_head
, plist
) {
775 if (_pthread_kernel_thread(p
) == kernel_thread
) {
780 UNLOCK(_pthread_list_lock
);
786 pthread_get_stacksize_np(pthread_t t
)
792 return ESRCH
; // XXX bug?
795 // since the main thread will not get de-allocated from underneath us
796 if (t
== pthread_self() || t
== &_thread
) {
800 LOCK(_pthread_list_lock
);
802 ret
= _pthread_find_thread(t
);
806 size
= ret
; // XXX bug?
809 UNLOCK(_pthread_list_lock
);
815 pthread_get_stackaddr_np(pthread_t t
)
821 return (void *)(uintptr_t)ESRCH
; // XXX bug?
824 // since the main thread will not get de-allocated from underneath us
825 if (t
== pthread_self() || t
== &_thread
) {
829 LOCK(_pthread_list_lock
);
831 ret
= _pthread_find_thread(t
);
835 addr
= (void *)(uintptr_t)ret
; // XXX bug?
838 UNLOCK(_pthread_list_lock
);
844 _pthread_reply_port(pthread_t t
)
848 p
= _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY
);
850 p
= t
->tsd
[_PTHREAD_TSD_SLOT_MIG_REPLY
];
852 return (mach_port_t
)(uintptr_t)p
;
856 _pthread_set_reply_port(pthread_t t
, mach_port_t reply_port
)
858 void *p
= (void *)(uintptr_t)reply_port
;
860 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY
, p
);
862 t
->tsd
[_PTHREAD_TSD_SLOT_MIG_REPLY
] = p
;
867 _pthread_dealloc_reply_port(pthread_t t
)
869 mach_port_t reply_port
= _pthread_reply_port(t
);
870 if (reply_port
!= MACH_PORT_NULL
) {
871 mig_dealloc_reply_port(reply_port
);
876 pthread_main_thread_np(void)
881 /* returns non-zero if the current thread is the main thread */
883 pthread_main_np(void)
885 pthread_t self
= pthread_self();
887 return ((self
->detached
& _PTHREAD_CREATE_PARENT
) == _PTHREAD_CREATE_PARENT
);
891 /* if we are passed in a pthread_t that is NULL, then we return
892 the current thread's thread_id. So folks don't have to call
893 pthread_self, in addition to us doing it, if they just want
897 pthread_threadid_np(pthread_t thread
, uint64_t *thread_id
)
900 pthread_t self
= pthread_self();
902 if (thread_id
== NULL
) {
906 if (thread
== NULL
|| thread
== self
) {
907 *thread_id
= self
->thread_id
;
909 LOCK(_pthread_list_lock
);
910 res
= _pthread_find_thread(thread
);
912 *thread_id
= thread
->thread_id
;
914 UNLOCK(_pthread_list_lock
);
920 pthread_getname_np(pthread_t thread
, char *threadname
, size_t len
)
924 if (thread
== NULL
) {
928 LOCK(_pthread_list_lock
);
929 res
= _pthread_find_thread(thread
);
931 strlcpy(threadname
, thread
->pthread_name
, len
);
933 UNLOCK(_pthread_list_lock
);
938 pthread_setname_np(const char *name
)
941 pthread_t self
= pthread_self();
948 /* prototype is in pthread_internals.h */
949 res
= __proc_info(5, getpid(), 2, (uint64_t)0, (void*)name
, (int)len
);
952 strlcpy(self
->pthread_name
, name
, MAXTHREADNAMESIZE
);
954 bzero(self
->pthread_name
, MAXTHREADNAMESIZE
);
961 PTHREAD_ALWAYS_INLINE
963 __pthread_add_thread(pthread_t t
, bool parent
)
965 bool should_deallocate
= false;
966 bool should_add
= true;
968 LOCK(_pthread_list_lock
);
970 // The parent and child threads race to add the thread to the list.
971 // When called by the parent:
972 // - set parentcheck to true
973 // - back off if childrun is true
974 // When called by the child:
975 // - set childrun to true
976 // - back off if parentcheck is true
980 // child got here first, don't add.
984 // If the child exits before we check in then it has to keep
985 // the thread structure memory alive so our dereferences above
986 // are valid. If it's a detached thread, then no joiner will
987 // deallocate the thread structure itself. So we do it here.
990 should_deallocate
= ((t
->detached
& PTHREAD_CREATE_DETACHED
) == PTHREAD_CREATE_DETACHED
);
994 if (t
->parentcheck
) {
995 // Parent got here first, don't add.
999 // Work queue threads have no parent. Simulate.
1005 TAILQ_INSERT_TAIL(&__pthread_head
, t
, plist
);
1009 UNLOCK(_pthread_list_lock
);
1012 _pthread_introspection_thread_create(t
, should_deallocate
);
1013 if (should_deallocate
) {
1014 _pthread_deallocate(t
);
1017 _pthread_introspection_thread_start(t
);
1021 // <rdar://problem/12544957> must always inline this function to avoid epilogues
1022 // Returns EBUSY if the thread structure should be kept alive (is joinable).
1023 // Returns ESRCH if the thread structure is no longer valid (was detached).
1024 PTHREAD_ALWAYS_INLINE
1026 __pthread_remove_thread(pthread_t t
, bool child
, bool *should_exit
)
1030 bool should_remove
= true;
1032 LOCK(_pthread_list_lock
);
1034 // When a thread removes itself:
1035 // - Set the childexit flag indicating that the thread has exited.
1036 // - Return false if parentcheck is zero (must keep structure)
1037 // - If the thread is joinable, keep it on the list so that
1038 // the join operation succeeds. Still decrement the running
1039 // thread count so that we exit if no threads are running.
1040 // - Update the running thread count.
1041 // When another thread removes a joinable thread:
1042 // - CAREFUL not to dereference the thread before verifying that the
1043 // reference is still valid using _pthread_find_thread().
1044 // - Remove the thread from the list.
1048 if (t
->parentcheck
== 0) {
1051 if ((t
->detached
& PTHREAD_CREATE_JOINABLE
) != 0) {
1053 should_remove
= false;
1055 *should_exit
= (--_pthread_count
<= 0);
1057 ret
= _pthread_find_thread(t
);
1059 // If we found a thread but it's not joinable, bail.
1060 if ((t
->detached
& PTHREAD_CREATE_JOINABLE
) == 0) {
1061 should_remove
= false;
1066 if (should_remove
) {
1067 TAILQ_REMOVE(&__pthread_head
, t
, plist
);
1070 UNLOCK(_pthread_list_lock
);
1076 pthread_create(pthread_t
*thread
,
1077 const pthread_attr_t
*attr
,
1078 void *(*start_routine
)(void *),
1082 unsigned int flags
= 0;
1084 pthread_attr_t
*attrs
= (pthread_attr_t
*)attr
;
1085 if (attrs
== NULL
) {
1086 attrs
= &_pthread_attr_default
;
1087 } else if (attrs
->sig
!= _PTHREAD_ATTR_SIG
) {
1091 if (attrs
->detached
== PTHREAD_CREATE_DETACHED
) {
1092 flags
|= PTHREAD_START_DETACHED
;
1095 if (attrs
->schedset
!= 0) {
1096 flags
|= PTHREAD_START_SETSCHED
;
1097 flags
|= ((attrs
->policy
& PTHREAD_START_POLICY_MASK
) << PTHREAD_START_POLICY_BITSHIFT
);
1098 flags
|= (attrs
->param
.sched_priority
& PTHREAD_START_IMPORTANCE_MASK
);
1099 } else if (attrs
->qosclass
!= 0) {
1100 flags
|= PTHREAD_START_QOSCLASS
;
1101 flags
|= (attrs
->qosclass
& PTHREAD_START_QOSCLASS_MASK
);
1108 if (attrs
->fastpath
) {
1109 // kernel will allocate thread and stack, pass stacksize.
1110 stack
= (void *)attrs
->stacksize
;
1112 // allocate the thread and its stack
1113 flags
|= PTHREAD_START_CUSTOM
;
1116 res
= _pthread_allocate(&t
, attrs
, &stack
);
1122 t
->fun
= start_routine
;
1126 t2
= __bsdthread_create(start_routine
, arg
, stack
, t
, flags
);
1127 if (t2
== (pthread_t
)-1) {
1128 if (flags
& PTHREAD_START_CUSTOM
) {
1129 // free the thread and stack if we allocated it
1130 _pthread_deallocate(t
);
1138 __pthread_add_thread(t
, true);
1140 // XXX if a thread is created detached and exits, t will be invalid
1146 pthread_create_suspended_np(pthread_t
*thread
,
1147 const pthread_attr_t
*attr
,
1148 void *(*start_routine
)(void *),
1153 mach_port_t kernel_thread
= MACH_PORT_NULL
;
1155 const pthread_attr_t
*attrs
= attr
;
1156 if (attrs
== NULL
) {
1157 attrs
= &_pthread_attr_default
;
1158 } else if (attrs
->sig
!= _PTHREAD_ATTR_SIG
) {
1163 res
= _pthread_allocate(&t
, attrs
, &stack
);
1171 kr
= thread_create(mach_task_self(), &kernel_thread
);
1172 if (kr
!= KERN_SUCCESS
) {
1173 //PTHREAD_ABORT("thread_create() failed: %d", kern_res);
1174 return EINVAL
; /* Need better error here? */
1177 _pthread_set_kernel_thread(t
, kernel_thread
);
1178 (void)pthread_setschedparam_internal(t
, kernel_thread
, t
->policy
, &t
->param
);
1183 t
->fun
= start_routine
;
1185 __pthread_add_thread(t
, true);
1187 // Set up a suspended thread.
1188 _pthread_setup(t
, _pthread_body
, stack
, 1, 0);
1193 pthread_detach(pthread_t thread
)
1197 semaphore_t sema
= SEMAPHORE_NULL
;
1199 res
= _pthread_lookup_thread(thread
, NULL
, 1);
1201 return res
; // Not a valid thread to detach.
1205 if (thread
->detached
& PTHREAD_CREATE_JOINABLE
) {
1206 if (thread
->detached
& _PTHREAD_EXITED
) {
1207 // Join the thread if it's already exited.
1210 thread
->detached
&= ~PTHREAD_CREATE_JOINABLE
;
1211 thread
->detached
|= PTHREAD_CREATE_DETACHED
;
1212 sema
= thread
->joiner_notify
;
1217 UNLOCK(thread
->lock
);
1220 pthread_join(thread
, NULL
);
1222 semaphore_signal(sema
);
1229 pthread_kill(pthread_t th
, int sig
)
1231 if (sig
< 0 || sig
> NSIG
) {
1235 mach_port_t kport
= MACH_PORT_NULL
;
1236 if (_pthread_lookup_thread(th
, &kport
, 0) != 0) {
1237 return ESRCH
; // Not a valid thread.
1240 // Don't signal workqueue threads.
1241 if (th
->wqthread
!= 0 && th
->wqkillset
== 0) {
1245 int ret
= __pthread_kill(kport
, sig
);
1254 __pthread_workqueue_setkill(int enable
)
1256 pthread_t self
= pthread_self();
1259 self
->wqkillset
= enable
? 1 : 0;
1266 __pthread_get_exit_value(pthread_t t
, int conforming
)
1268 const int flags
= (PTHREAD_CANCEL_ENABLE
|_PTHREAD_CANCEL_PENDING
);
1269 void *value
= t
->exit_value
;
1271 if ((t
->cancel_state
& flags
) == flags
) {
1272 value
= PTHREAD_CANCELED
;
1278 /* For compatibility... */
1281 _pthread_self(void) {
1282 return pthread_self();
1286 * Terminate a thread.
1288 int __disable_threadsignal(int);
1292 _pthread_exit(pthread_t self
, void *value_ptr
)
1294 struct __darwin_pthread_handler_rec
*handler
;
1296 // Disable signal delivery while we clean up
1297 __disable_threadsignal(1);
1299 // Set cancel state to disable and type to deferred
1300 _pthread_setcancelstate_exit(self
, value_ptr
, __unix_conforming
);
1302 while ((handler
= self
->__cleanup_stack
) != 0) {
1303 (handler
->__routine
)(handler
->__arg
);
1304 self
->__cleanup_stack
= handler
->__next
;
1306 _pthread_tsd_cleanup(self
);
1309 self
->detached
|= _PTHREAD_EXITED
;
1310 self
->exit_value
= value_ptr
;
1312 if ((self
->detached
& PTHREAD_CREATE_JOINABLE
) &&
1313 self
->joiner_notify
== SEMAPHORE_NULL
) {
1314 self
->joiner_notify
= (semaphore_t
)os_get_cached_semaphore();
1318 // Clear per-thread semaphore cache
1319 os_put_cached_semaphore(SEMAPHORE_NULL
);
1321 _pthread_terminate(self
);
1325 pthread_exit(void *value_ptr
)
1327 pthread_t self
= pthread_self();
1328 if (self
->wqthread
== 0) {
1329 _pthread_exit(self
, value_ptr
);
1331 PTHREAD_ABORT("pthread_exit() may only be called against threads created via pthread_create()");
1336 pthread_getschedparam(pthread_t thread
,
1338 struct sched_param
*param
)
1342 if (thread
== NULL
) {
1346 LOCK(_pthread_list_lock
);
1348 ret
= _pthread_find_thread(thread
);
1351 *policy
= thread
->policy
;
1354 *param
= thread
->param
;
1358 UNLOCK(_pthread_list_lock
);
1364 pthread_setschedparam_internal(pthread_t thread
,
1367 const struct sched_param
*param
)
1369 policy_base_data_t bases
;
1371 mach_msg_type_number_t count
;
1376 bases
.ts
.base_priority
= param
->sched_priority
;
1377 base
= (policy_base_t
)&bases
.ts
;
1378 count
= POLICY_TIMESHARE_BASE_COUNT
;
1381 bases
.fifo
.base_priority
= param
->sched_priority
;
1382 base
= (policy_base_t
)&bases
.fifo
;
1383 count
= POLICY_FIFO_BASE_COUNT
;
1386 bases
.rr
.base_priority
= param
->sched_priority
;
1387 /* quantum isn't public yet */
1388 bases
.rr
.quantum
= param
->quantum
;
1389 base
= (policy_base_t
)&bases
.rr
;
1390 count
= POLICY_RR_BASE_COUNT
;
1395 ret
= thread_policy(kport
, policy
, base
, count
, TRUE
);
1396 return (ret
!= KERN_SUCCESS
) ? EINVAL
: 0;
1400 pthread_setschedparam(pthread_t t
, int policy
, const struct sched_param
*param
)
1402 mach_port_t kport
= MACH_PORT_NULL
;
1406 // since the main thread will not get de-allocated from underneath us
1407 if (t
== pthread_self() || t
== &_thread
) {
1408 kport
= _pthread_kernel_thread(t
);
1411 (void)_pthread_lookup_thread(t
, &kport
, 0);
1414 res
= pthread_setschedparam_internal(t
, kport
, policy
, param
);
1417 // Ensure the thread is still valid.
1418 LOCK(_pthread_list_lock
);
1419 res
= _pthread_find_thread(t
);
1424 UNLOCK(_pthread_list_lock
);
1434 sched_get_priority_min(int policy
)
1436 return default_priority
- 16;
1440 sched_get_priority_max(int policy
)
1442 return default_priority
+ 16;
1446 pthread_equal(pthread_t t1
, pthread_t t2
)
1451 // Force LLVM not to optimise this to a call to __pthread_set_self, if it does
1452 // then _pthread_set_self won't be bound when secondary threads try and start up.
1455 _pthread_set_self(pthread_t p
)
1457 extern void __pthread_set_self(void *);
1463 uint64_t tid
= __thread_selfid();
1465 PTHREAD_ABORT("failed to set thread_id");
1468 p
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_SELF
] = p
;
1469 p
->tsd
[_PTHREAD_TSD_SLOT_ERRNO
] = &p
->err_no
;
1471 __pthread_set_self(&p
->tsd
[0]);
1474 struct _pthread_once_context
{
1475 pthread_once_t
*pthread_once
;
1476 void (*routine
)(void);
1480 __pthread_once_handler(void *context
)
1482 struct _pthread_once_context
*ctx
= context
;
1483 pthread_cleanup_push((void*)__os_once_reset
, &ctx
->pthread_once
->once
);
1485 pthread_cleanup_pop(0);
1486 ctx
->pthread_once
->sig
= _PTHREAD_ONCE_SIG
;
1490 pthread_once(pthread_once_t
*once_control
, void (*init_routine
)(void))
1492 struct _pthread_once_context ctx
= { once_control
, init_routine
};
1494 os_once(&once_control
->once
, &ctx
, __pthread_once_handler
);
1495 } while (once_control
->sig
== _PTHREAD_ONCE_SIG_init
);
1500 _pthread_testcancel(pthread_t thread
, int isconforming
)
1502 const int flags
= (PTHREAD_CANCEL_ENABLE
|_PTHREAD_CANCEL_PENDING
);
1505 bool canceled
= ((thread
->cancel_state
& flags
) == flags
);
1506 UNLOCK(thread
->lock
);
1509 pthread_exit(isconforming
? PTHREAD_CANCELED
: 0);
1514 _pthread_exit_if_canceled(int error
)
1516 if (__unix_conforming
&& ((error
& 0xff) == EINTR
) && (__pthread_canceled(0) == 0)) {
1517 pthread_t self
= pthread_self();
1519 self
->cancel_error
= error
;
1521 pthread_exit(PTHREAD_CANCELED
);
1526 pthread_getconcurrency(void)
1528 return pthread_concurrency
;
1532 pthread_setconcurrency(int new_level
)
1534 if (new_level
< 0) {
1537 pthread_concurrency
= new_level
;
1542 _pthread_set_pfz(uintptr_t address
)
1546 #if !defined(PTHREAD_TARGET_EOS) && !defined(VARIANT_DYLD)
1550 if (_pthread_malloc
) {
1551 return _pthread_malloc(sz
);
1560 if (_pthread_free
) {
1567 * Perform package initialization - called automatically when application starts
1569 struct ProgramVars
; /* forward reference */
1572 __pthread_init(const struct _libpthread_functions
*pthread_funcs
, const char *envp
[] __unused
,
1573 const char *apple
[] __unused
, const struct ProgramVars
*vars __unused
)
1575 // Save our provided pushed-down functions
1576 if (pthread_funcs
) {
1577 exitf
= pthread_funcs
->exit
;
1579 if (pthread_funcs
->version
>= 2) {
1580 _pthread_malloc
= pthread_funcs
->malloc
;
1581 _pthread_free
= pthread_funcs
->free
;
1586 // Get host information
1590 host_flavor_t flavor
= HOST_PRIORITY_INFO
;
1591 mach_msg_type_number_t count
= HOST_PRIORITY_INFO_COUNT
;
1592 host_priority_info_data_t priority_info
;
1593 host_t host
= mach_host_self();
1594 kr
= host_info(host
, flavor
, (host_info_t
)&priority_info
, &count
);
1595 if (kr
!= KERN_SUCCESS
) {
1596 PTHREAD_ABORT("host_info(mach_host_self(), ...) failed: %s", mach_error_string(kr
));
1598 default_priority
= priority_info
.user_priority
;
1599 min_priority
= priority_info
.minimum_priority
;
1600 max_priority
= priority_info
.maximum_priority
;
1602 mach_port_deallocate(mach_task_self(), host
);
1605 // Set up the main thread structure
1609 size_t stacksize
= DFLSSIZ
;
1610 size_t len
= sizeof(stackaddr
);
1611 int mib
[] = { CTL_KERN
, KERN_USRSTACK
};
1612 if (__sysctl(mib
, 2, &stackaddr
, &len
, NULL
, 0) != 0) {
1613 stackaddr
= (void *)USRSTACK
;
1616 pthread_t thread
= &_thread
;
1617 pthread_attr_init(&_pthread_attr_default
);
1618 _pthread_struct_init(thread
, &_pthread_attr_default
, stackaddr
, stacksize
, 0);
1619 thread
->detached
= PTHREAD_CREATE_JOINABLE
;
1621 // Finish initialization with common code that is reinvoked on the
1622 // child side of a fork.
1624 // Finishes initialization of main thread attributes.
1625 // Initializes the thread list and add the main thread.
1626 // Calls _pthread_set_self() to prepare the main thread for execution.
1627 __pthread_fork_child_internal(thread
);
1629 // Set up kernel entry points with __bsdthread_register.
1630 pthread_workqueue_atfork_child();
1632 // Have pthread_key do its init envvar checks.
1633 _pthread_key_global_init(envp
);
1645 PTHREAD_NOEXPORT
void
1646 __pthread_fork_child_internal(pthread_t p
)
1648 TAILQ_INIT(&__pthread_head
);
1649 LOCK_INIT(_pthread_list_lock
);
1651 // Re-use the main thread's static storage if no thread was provided.
1653 if (_thread
.tsd
[0] != 0) {
1654 bzero(&_thread
, sizeof(struct _pthread
));
1660 _pthread_set_kernel_thread(p
, mach_thread_self());
1661 _pthread_set_reply_port(p
, mach_reply_port());
1662 p
->__cleanup_stack
= NULL
;
1663 p
->joiner_notify
= SEMAPHORE_NULL
;
1664 p
->joiner
= MACH_PORT_NULL
;
1665 p
->detached
|= _PTHREAD_CREATE_PARENT
;
1666 p
->tsd
[__TSD_SEMAPHORE_CACHE
] = SEMAPHORE_NULL
;
1668 // Initialize the list of threads with the new main thread.
1669 TAILQ_INSERT_HEAD(&__pthread_head
, p
, plist
);
1672 _pthread_set_self(p
);
1673 _pthread_introspection_thread_start(p
);
1677 * Query/update the cancelability 'state' of a thread
1679 PTHREAD_NOEXPORT
int
1680 _pthread_setcancelstate_internal(int state
, int *oldstate
, int conforming
)
1685 case PTHREAD_CANCEL_ENABLE
:
1687 __pthread_canceled(1);
1690 case PTHREAD_CANCEL_DISABLE
:
1692 __pthread_canceled(2);
1699 self
= pthread_self();
1702 *oldstate
= self
->cancel_state
& _PTHREAD_CANCEL_STATE_MASK
;
1704 self
->cancel_state
&= ~_PTHREAD_CANCEL_STATE_MASK
;
1705 self
->cancel_state
|= state
;
1708 _pthread_testcancel(self
, 0); /* See if we need to 'die' now... */
1713 /* When a thread exits set the cancellation state to DISABLE and DEFERRED */
1715 _pthread_setcancelstate_exit(pthread_t self
, void * value_ptr
, int conforming
)
1718 self
->cancel_state
&= ~(_PTHREAD_CANCEL_STATE_MASK
| _PTHREAD_CANCEL_TYPE_MASK
);
1719 self
->cancel_state
|= (PTHREAD_CANCEL_DISABLE
| PTHREAD_CANCEL_DEFERRED
);
1720 if (value_ptr
== PTHREAD_CANCELED
) {
1722 self
->detached
|= _PTHREAD_WASCANCEL
;
1729 _pthread_join_cleanup(pthread_t thread
, void ** value_ptr
, int conforming
)
1731 // Returns ESRCH if the thread was not created joinable.
1732 int ret
= __pthread_remove_thread(thread
, false, NULL
);
1738 *value_ptr
= __pthread_get_exit_value(thread
, conforming
);
1740 _pthread_introspection_thread_destroy(thread
);
1741 _pthread_deallocate(thread
);
1745 /* ALWAYS called with list lock and return with list lock */
1747 _pthread_find_thread(pthread_t thread
)
1749 if (thread
!= NULL
) {
1752 TAILQ_FOREACH(p
, &__pthread_head
, plist
) {
1754 if (_pthread_kernel_thread(thread
) == MACH_PORT_NULL
) {
1755 UNLOCK(_pthread_list_lock
);
1757 LOCK(_pthread_list_lock
);
1768 _pthread_lookup_thread(pthread_t thread
, mach_port_t
*portp
, int only_joinable
)
1770 mach_port_t kport
= MACH_PORT_NULL
;
1773 if (thread
== NULL
) {
1777 LOCK(_pthread_list_lock
);
1779 ret
= _pthread_find_thread(thread
);
1781 // Fail if we only want joinable threads and the thread found is
1782 // not in the detached state.
1783 if (only_joinable
!= 0 && (thread
->detached
& PTHREAD_CREATE_DETACHED
) != 0) {
1786 kport
= _pthread_kernel_thread(thread
);
1790 UNLOCK(_pthread_list_lock
);
1792 if (portp
!= NULL
) {
1800 _pthread_clear_qos_tsd(mach_port_t thread_port
)
1802 if (thread_port
== MACH_PORT_NULL
|| (uintptr_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF
) == thread_port
) {
1803 /* Clear the current thread's TSD, that can be done inline. */
1804 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
, _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED
, 0, 0));
1808 LOCK(_pthread_list_lock
);
1810 TAILQ_FOREACH(p
, &__pthread_head
, plist
) {
1811 mach_port_t kp
= _pthread_kernel_thread(p
);
1812 if (thread_port
== kp
) {
1813 p
->tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED
, 0, 0);
1818 UNLOCK(_pthread_list_lock
);
1822 /***** pthread workqueue support routines *****/
1824 PTHREAD_NOEXPORT
void
1825 pthread_workqueue_atfork_child(void)
1827 struct _pthread_registration_data data
= {
1828 .dispatch_queue_offset
= __PTK_LIBDISPATCH_KEY0
* sizeof(void *),
1831 int rv
= __bsdthread_register(thread_start
,
1835 (uintptr_t)sizeof(data
),
1836 data
.dispatch_queue_offset
);
1839 __pthread_supported_features
= rv
;
1842 if (_pthread_priority_get_qos_newest(data
.main_qos
) != QOS_CLASS_UNSPECIFIED
) {
1843 _pthread_set_main_qos(data
.main_qos
);
1844 _thread
.tsd
[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
] = data
.main_qos
;
1847 if (__libdispatch_workerfunction
!= NULL
) {
1848 // prepare the kernel for workq action
1849 (void)__workq_open();
1853 // workqueue entry point from kernel
1855 _pthread_wqthread(pthread_t self
, mach_port_t kport
, void *stacklowaddr
, void *keventlist
, int flags
, int nkevents
)
1857 PTHREAD_ASSERT(flags
& WQ_FLAG_THREAD_NEWSPI
);
1859 int thread_reuse
= flags
& WQ_FLAG_THREAD_REUSE
;
1860 int thread_class
= flags
& WQ_FLAG_THREAD_PRIOMASK
;
1861 int overcommit
= (flags
& WQ_FLAG_THREAD_OVERCOMMIT
) != 0;
1862 int kevent
= flags
& WQ_FLAG_THREAD_KEVENT
;
1863 PTHREAD_ASSERT((!kevent
) || (__libdispatch_keventfunction
!= NULL
));
1865 pthread_priority_t priority
= 0;
1866 unsigned long priority_flags
= 0;
1869 priority_flags
|= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
;
1870 if (flags
& WQ_FLAG_THREAD_EVENT_MANAGER
)
1871 priority_flags
|= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG
;
1873 if ((__pthread_supported_features
& PTHREAD_FEATURE_QOS_MAINTENANCE
) == 0) {
1874 priority
= _pthread_priority_make_version2(thread_class
, 0, priority_flags
);
1876 priority
= _pthread_priority_make_newest(thread_class
, 0, priority_flags
);
1879 if (thread_reuse
== 0) {
1880 // New thread created by kernel, needs initialization.
1881 size_t stacksize
= (uintptr_t)self
- (uintptr_t)stacklowaddr
;
1882 _pthread_struct_init(self
, &_pthread_attr_default
, (void*)self
, stacksize
, 1);
1884 _pthread_set_kernel_thread(self
, kport
);
1886 self
->wqkillset
= 0;
1888 // Not a joinable thread.
1889 self
->detached
&= ~PTHREAD_CREATE_JOINABLE
;
1890 self
->detached
|= PTHREAD_CREATE_DETACHED
;
1892 // Update the running thread count and set childrun bit.
1893 // XXX this should be consolidated with pthread_body().
1894 _pthread_set_self(self
);
1895 _pthread_introspection_thread_create(self
, false);
1896 __pthread_add_thread(self
, false);
1899 // If we're running with fine-grained priority, we also need to
1900 // set this thread to have the QoS class provided to use by the kernel
1901 if (__pthread_supported_features
& PTHREAD_FEATURE_FINEPRIO
) {
1902 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
, _pthread_priority_make_newest(thread_class
, 0, priority_flags
));
1906 PTHREAD_ASSERT(self
);
1907 PTHREAD_ASSERT(self
== pthread_self());
1911 self
->fun
= (void *(*)(void*))__libdispatch_keventfunction
;
1913 self
->fun
= (void *(*)(void *))__libdispatch_workerfunction
;
1915 self
->arg
= (void *)(uintptr_t)thread_class
;
1917 if (kevent
&& keventlist
){
1918 kevent_errors_retry
:
1919 (*__libdispatch_keventfunction
)(&keventlist
, &nkevents
);
1921 int errors_out
= __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN
, keventlist
, nkevents
, 0);
1922 if (errors_out
> 0){
1923 nkevents
= errors_out
;
1924 goto kevent_errors_retry
;
1925 } else if (errors_out
< 0){
1926 PTHREAD_ABORT("kevent return produced an error: %d", errno
);
1928 _pthread_exit(self
, NULL
);
1930 (*__libdispatch_keventfunction
)(NULL
, NULL
);
1932 __workq_kernreturn(WQOPS_THREAD_RETURN
, NULL
, 0, 0);
1933 _pthread_exit(self
, NULL
);
1936 if (__pthread_supported_features
& PTHREAD_FEATURE_FINEPRIO
) {
1937 if (!__workq_newapi
) {
1938 /* Old thread priorities are inverted from where we have them in
1939 * the new flexible priority scheme. The highest priority is zero,
1940 * up to 2, with background at 3.
1942 pthread_workqueue_function_t func
= (pthread_workqueue_function_t
)__libdispatch_workerfunction
;
1944 int opts
= overcommit
? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT
: 0;
1946 if ((__pthread_supported_features
& PTHREAD_FEATURE_QOS_DEFAULT
) == 0) {
1947 /* Dirty hack to support kernels that don't have QOS_CLASS_DEFAULT. */
1948 switch (thread_class
) {
1949 case QOS_CLASS_USER_INTERACTIVE
:
1950 thread_class
= QOS_CLASS_USER_INITIATED
;
1952 case QOS_CLASS_USER_INITIATED
:
1953 thread_class
= QOS_CLASS_DEFAULT
;
1960 switch (thread_class
) {
1961 /* QOS_CLASS_USER_INTERACTIVE is not currently requested by for old dispatch priority compatibility */
1962 case QOS_CLASS_USER_INITIATED
:
1963 (*func
)(WORKQ_HIGH_PRIOQUEUE
, opts
, NULL
);
1966 case QOS_CLASS_DEFAULT
:
1967 /* B&I builders can't pass a QOS_CLASS_DEFAULT thread to dispatch, for fear of the QoS being
1968 * picked up by NSThread (et al) and transported around the system. So change the TSD to
1969 * make this thread look like QOS_CLASS_USER_INITIATED even though it will still run as legacy.
1971 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS
, _pthread_priority_make_newest(QOS_CLASS_USER_INITIATED
, 0, 0));
1972 (*func
)(WORKQ_DEFAULT_PRIOQUEUE
, opts
, NULL
);
1975 case QOS_CLASS_UTILITY
:
1976 (*func
)(WORKQ_LOW_PRIOQUEUE
, opts
, NULL
);
1979 case QOS_CLASS_BACKGROUND
:
1980 (*func
)(WORKQ_BG_PRIOQUEUE
, opts
, NULL
);
1983 /* Legacy dispatch does not use QOS_CLASS_MAINTENANCE, so no need to handle it here */
1987 /* "New" API, where dispatch is expecting to be given the thread priority */
1988 (*__libdispatch_workerfunction
)(priority
);
1991 /* We're the new library running on an old kext, so thread_class is really the workq priority. */
1992 pthread_workqueue_function_t func
= (pthread_workqueue_function_t
)__libdispatch_workerfunction
;
1993 int options
= overcommit
? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT
: 0;
1994 (*func
)(thread_class
, options
, NULL
);
1997 __workq_kernreturn(WQOPS_THREAD_RETURN
, NULL
, 0, 0);
1998 _pthread_exit(self
, NULL
);
2001 /***** pthread workqueue API for libdispatch *****/
2004 pthread_workqueue_setdispatchoffset_np(int offset
)
2006 __libdispatch_offset
= offset
;
2010 pthread_workqueue_setdispatch_with_kevent_np(pthread_workqueue_function2_t queue_func
, pthread_workqueue_function_kevent_t kevent_func
)
2013 if (__libdispatch_workerfunction
== NULL
) {
2014 // Check whether the kernel supports new SPIs
2015 res
= __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP
, NULL
, __libdispatch_offset
, kevent_func
!= NULL
? 0x01 : 0x00);
2019 __libdispatch_workerfunction
= queue_func
;
2020 __libdispatch_keventfunction
= kevent_func
;
2022 // Prepare the kernel for workq action
2023 (void)__workq_open();
2024 if (__is_threaded
== 0) {
2033 _pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func
, pthread_workqueue_function_kevent_t kevent_func
, int offset
, int flags
)
2039 __workq_newapi
= true;
2040 __libdispatch_offset
= offset
;
2042 int rv
= pthread_workqueue_setdispatch_with_kevent_np(queue_func
, kevent_func
);
2047 _pthread_workqueue_init(pthread_workqueue_function2_t func
, int offset
, int flags
)
2049 return _pthread_workqueue_init_with_kevent(func
, NULL
, offset
, flags
);
2053 pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func
)
2055 return pthread_workqueue_setdispatch_with_kevent_np((pthread_workqueue_function2_t
)worker_func
, NULL
);
2059 _pthread_workqueue_supported(void)
2061 return __pthread_supported_features
;
2065 pthread_workqueue_addthreads_np(int queue_priority
, int options
, int numthreads
)
2069 // Cannot add threads without a worker function registered.
2070 if (__libdispatch_workerfunction
== NULL
) {
2074 pthread_priority_t kp
= 0;
2076 if (__pthread_supported_features
& PTHREAD_FEATURE_FINEPRIO
) {
2077 /* The new kernel API takes the new QoS class + relative priority style of
2078 * priority. This entry point is here for compatibility with old libdispatch
2079 * versions (ie. the simulator). We request the corresponding new bracket
2080 * from the kernel, then on the way out run all dispatch queues that were
2084 int compat_priority
= queue_priority
& WQ_FLAG_THREAD_PRIOMASK
;
2087 /* To make sure the library does not issue more threads to dispatch than
2088 * were requested, the total number of active requests is recorded in
2091 if (options
& WORKQ_ADDTHREADS_OPTION_OVERCOMMIT
) {
2092 flags
= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
;
2095 kp
= _pthread_qos_class_encode_workqueue(compat_priority
, flags
);
2098 /* Running on the old kernel, queue_priority is what we pass directly to
2101 kp
= queue_priority
& WQ_FLAG_THREAD_PRIOMASK
;
2103 if (options
& WORKQ_ADDTHREADS_OPTION_OVERCOMMIT
) {
2104 kp
|= WORKQUEUE_OVERCOMMIT
;
2108 res
= __workq_kernreturn(WQOPS_QUEUE_REQTHREADS
, NULL
, numthreads
, (int)kp
);
2116 _pthread_workqueue_addthreads(int numthreads
, pthread_priority_t priority
)
2120 if (__libdispatch_workerfunction
== NULL
) {
2124 if ((__pthread_supported_features
& PTHREAD_FEATURE_FINEPRIO
) == 0) {
2128 res
= __workq_kernreturn(WQOPS_QUEUE_REQTHREADS
, NULL
, numthreads
, (int)priority
);
2136 _pthread_workqueue_set_event_manager_priority(pthread_priority_t priority
)
2138 int res
= __workq_kernreturn(WQOPS_SET_EVENT_MANAGER_PRIORITY
, NULL
, (int)priority
, 0);
2146 * Introspection SPI for libpthread.
2149 static pthread_introspection_hook_t _pthread_introspection_hook
;
2151 pthread_introspection_hook_t
2152 pthread_introspection_hook_install(pthread_introspection_hook_t hook
)
2154 if (os_slowpath(!hook
)) {
2155 PTHREAD_ABORT("pthread_introspection_hook_install was passed NULL");
2157 pthread_introspection_hook_t prev
;
2158 prev
= __sync_swap(&_pthread_introspection_hook
, hook
);
2164 _pthread_introspection_hook_callout_thread_create(pthread_t t
, bool destroy
)
2166 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_CREATE
, t
, t
,
2168 if (!destroy
) return;
2169 _pthread_introspection_thread_destroy(t
);
2173 _pthread_introspection_thread_create(pthread_t t
, bool destroy
)
2175 if (os_fastpath(!_pthread_introspection_hook
)) return;
2176 _pthread_introspection_hook_callout_thread_create(t
, destroy
);
2181 _pthread_introspection_hook_callout_thread_start(pthread_t t
)
2185 if (t
== &_thread
) {
2186 freesize
= t
->stacksize
+ t
->guardsize
;
2187 freeaddr
= t
->stackaddr
- freesize
;
2189 freesize
= t
->freesize
- pthreadsize
;
2190 freeaddr
= t
->freeaddr
;
2192 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_START
, t
,
2193 freeaddr
, freesize
);
2197 _pthread_introspection_thread_start(pthread_t t
)
2199 if (os_fastpath(!_pthread_introspection_hook
)) return;
2200 _pthread_introspection_hook_callout_thread_start(t
);
2205 _pthread_introspection_hook_callout_thread_terminate(pthread_t t
,
2206 void *freeaddr
, size_t freesize
, bool destroy
)
2208 if (destroy
&& freesize
) {
2209 freesize
-= pthreadsize
;
2211 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE
, t
,
2212 freeaddr
, freesize
);
2213 if (!destroy
) return;
2214 _pthread_introspection_thread_destroy(t
);
2218 _pthread_introspection_thread_terminate(pthread_t t
, void *freeaddr
,
2219 size_t freesize
, bool destroy
)
2221 if (os_fastpath(!_pthread_introspection_hook
)) return;
2222 _pthread_introspection_hook_callout_thread_terminate(t
, freeaddr
, freesize
,
2228 _pthread_introspection_hook_callout_thread_destroy(pthread_t t
)
2230 if (t
== &_thread
) return;
2231 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY
, t
, t
,
2236 _pthread_introspection_thread_destroy(pthread_t t
)
2238 if (os_fastpath(!_pthread_introspection_hook
)) return;
2239 _pthread_introspection_hook_callout_thread_destroy(t
);