/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * POSIX Pthread Library
 */
#include "workqueue_private.h"
#include "introspection_private.h"
#include "qos_private.h"
#include "tsd_private.h"

#include <mach/mach_init.h>
#include <mach/mach_vm.h>

#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>

#include <machine/vmparam.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>

#include <platform/string.h>
#include <platform/compat.h>
extern int __sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
        void *newp, size_t newlen);
extern void __exit(int) __attribute__((noreturn));
extern int __pthread_kill(mach_port_t, int);
extern struct _pthread _thread;
extern int default_priority;

static void (*exitf)(int) = __exit;
PTHREAD_NOEXPORT void* (*_pthread_malloc)(size_t) = NULL;
PTHREAD_NOEXPORT void (*_pthread_free)(void *) = NULL;

int _pthread_debuglog;
uint64_t _pthread_debugstart;
// This global should be used (carefully) by anyone needing to know if a
// pthread (other than the main thread) has been created.
int __is_threaded = 0;

int __unix_conforming = 0;

// _pthread_list_lock protects _pthread_count, access to the __pthread_head
// list, and the parentcheck, childrun and childexit flags of the pthread
// structure. Externally imported by pthread_cancelable.c.
PTHREAD_NOEXPORT _pthread_lock _pthread_list_lock = _PTHREAD_LOCK_INITIALIZER;
PTHREAD_NOEXPORT struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
static int _pthread_count = 1;
#if PTHREAD_LAYOUT_SPI

const struct pthread_layout_offsets_s pthread_layout_offsets = {
    .plo_pthread_tsd_base_offset = offsetof(struct _pthread, tsd),
    .plo_pthread_tsd_base_address_offset = 0,
    .plo_pthread_tsd_entry_size = sizeof(((struct _pthread *)NULL)->tsd[0]),
};

#endif // PTHREAD_LAYOUT_SPI
// Mach message notification that a thread needs to be recycled.
typedef struct _pthread_reap_msg_t {
    mach_msg_header_t header;
    mach_msg_trailer_t trailer;
} pthread_reap_msg_t;
/*
 * The pthread may be offset into a page. In that event, by contract
 * with the kernel, the allocation will extend PTHREAD_SIZE from the
 * start of the next page. There's also one page worth of allocation
 * below stacksize for the guard page. <rdar://problem/19941744>
 */
#define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
#define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size)
#define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize))
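/*
 * Illustrative layout of a library-made allocation (low to high addresses),
 * under the contract described above -- a sketch, not normative:
 *
 *   allocaddr                                      allocaddr + allocsize
 *   | guard page | stack (grows down toward guard) | struct _pthread |
 *
 * PTHREAD_ALLOCADDR()/PTHREAD_ALLOCSIZE() recover that whole span from the
 * stack pointer handed back to callers plus the requested stack size.
 */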
static pthread_attr_t _pthread_attr_default = { };

// The main thread's pthread_t
PTHREAD_NOEXPORT struct _pthread _thread __attribute__((aligned(64))) = { };

PTHREAD_NOEXPORT int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;
// work queue support data
static void (*__libdispatch_workerfunction)(pthread_priority_t) = NULL;
static void (*__libdispatch_keventfunction)(void **events, int *nevents) = NULL;
static void (*__libdispatch_workloopfunction)(uint64_t *workloop_id, void **events, int *nevents) = NULL;
static int __libdispatch_offset;

// supported feature set
int __pthread_supported_features;
static bool __workq_newapi;
// Function prototypes

// pthread primitives
static int _pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack);
static int _pthread_deallocate(pthread_t t);

static void _pthread_terminate_invoke(pthread_t t);

static inline void _pthread_struct_init(pthread_t t,
        const pthread_attr_t *attrs,
        void *stackaddr, size_t stacksize,
        void *freeaddr, size_t freesize);

static inline void _pthread_set_self_internal(pthread_t, bool needs_tsd_base_set);

static void _pthread_dealloc_reply_port(pthread_t t);
static void _pthread_dealloc_special_reply_port(pthread_t t);

static inline void __pthread_add_thread(pthread_t t, const pthread_attr_t *attr, bool parent, bool from_mach_thread);
static inline int __pthread_remove_thread(pthread_t t, bool child, bool *should_exit);

static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;

static inline void _pthread_introspection_thread_create(pthread_t t, bool destroy);
static inline void _pthread_introspection_thread_start(pthread_t t);
static inline void _pthread_introspection_thread_terminate(pthread_t t, void *freeaddr, size_t freesize, bool destroy);
static inline void _pthread_introspection_thread_destroy(pthread_t t);

extern void _pthread_set_self(pthread_t);
extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *unused, int reuse); // trampoline into _pthread_wqthread
extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int flags); // trampoline into _pthread_start
/* Compatibility: previous pthread API used WORKQUEUE_OVERCOMMIT to request overcommit threads from
 * the kernel. This definition is kept here, in userspace only, to perform the compatibility shim
 * from old API requests to the new kext conventions.
 */
#define WORKQUEUE_OVERCOMMIT 0x10000

/*
 * Flags field passed to bsdthread_create and back in pthread_start
 * 31  <---------------------------------> 0
 * _________________________________________
 * | flags(8) | policy(8) | importance(16) |
 * -----------------------------------------
 */
#define PTHREAD_START_CUSTOM		0x01000000
#define PTHREAD_START_SETSCHED		0x02000000
#define PTHREAD_START_DETACHED		0x04000000
#define PTHREAD_START_QOSCLASS		0x08000000
#define PTHREAD_START_TSD_BASE_SET	0x10000000
#define PTHREAD_START_QOSCLASS_MASK	0x00ffffff
#define PTHREAD_START_POLICY_BITSHIFT	16
#define PTHREAD_START_POLICY_MASK	0xff
#define PTHREAD_START_IMPORTANCE_MASK	0xffff
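/*
 * Example encoding (illustrative values only): a detached thread created
 * with an explicit SCHED_RR policy at priority 31 would travel to the
 * kernel as
 *
 *   flags = PTHREAD_START_DETACHED
 *         | PTHREAD_START_SETSCHED
 *         | ((SCHED_RR & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT)
 *         | (31 & PTHREAD_START_IMPORTANCE_MASK);
 *
 * mirroring how _pthread_create() packs the word below.
 */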
static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);

extern pthread_t __bsdthread_create(void *(*func)(void *), void *func_arg, void *stack, pthread_t thread, unsigned int flags);
extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int),
        void (*)(pthread_t, mach_port_t, void *, void *, int), int,
        void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), int32_t *, __uint64_t);
extern int __bsdthread_terminate(void *freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
extern __uint64_t __thread_selfid(void);

extern int __workq_open(void);
extern int __workq_kernreturn(int, void *, int, int);
#if defined(__i386__) || defined(__x86_64__)
static const mach_vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#else
#error no PTHREAD_STACK_HINT for this architecture
#endif
// Check that the offsets of the _PTHREAD_STRUCT_DIRECT_*_OFFSET values haven't changed
_Static_assert(offsetof(struct _pthread, tsd) + _PTHREAD_STRUCT_DIRECT_THREADID_OFFSET
        == offsetof(struct _pthread, thread_id),
        "_PTHREAD_STRUCT_DIRECT_THREADID_OFFSET is correct");
// Allocate a thread structure, stack and guard page.
//
// The thread structure may optionally be placed in the same allocation as the
// stack, residing above the top of the stack. This cannot be done if a
// custom stack address is provided.
//
// Similarly the guard page cannot be allocated if a custom stack address is
// provided.
//
// The allocated thread structure is initialized with values that indicate how
// it should be freed.
static int
_pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack)
{
    int res;
    kern_return_t kr;
    pthread_t t = NULL;
    mach_vm_address_t allocaddr = PTHREAD_STACK_HINT;
    size_t allocsize = 0;
    size_t guardsize = 0;
    size_t stacksize = 0;

    PTHREAD_ASSERT(attrs->stacksize >= PTHREAD_STACK_MIN);

    *thread = NULL;
    *stack = NULL;

    // Allocate a pthread structure if necessary

    if (attrs->stackaddr != NULL) {
        PTHREAD_ASSERT(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
        *stack = attrs->stackaddr;
        allocsize = PTHREAD_SIZE;
    } else {
        guardsize = attrs->guardsize;
        stacksize = attrs->stacksize;
        allocsize = stacksize + guardsize + PTHREAD_SIZE;
    }

    kr = mach_vm_map(mach_task_self(),
            &allocaddr,
            allocsize,
            vm_page_size - 1,
            VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE,
            MEMORY_OBJECT_NULL,
            0,
            FALSE,
            VM_PROT_DEFAULT,
            VM_PROT_ALL,
            VM_INHERIT_DEFAULT);

    if (kr != KERN_SUCCESS) {
        kr = mach_vm_allocate(mach_task_self(),
                &allocaddr,
                allocsize,
                VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
    }

    if (kr == KERN_SUCCESS) {
        // The stack grows down.
        // Set the guard page at the lowest address of the
        // newly allocated stack. Return the highest address
        // of the stack.
        if (guardsize) {
            (void)mach_vm_protect(mach_task_self(), allocaddr, guardsize, FALSE, VM_PROT_NONE);
        }

        // Thread structure resides at the top of the stack.
        t = (void *)(allocaddr + stacksize + guardsize);
        if (stacksize) {
            // Returns the top of the stack.
            *stack = t;
        }
    }

    if (t != NULL) {
        _pthread_struct_init(t, attrs,
                *stack, attrs->stacksize,
                allocaddr, allocsize);
        *thread = t;
        res = 0;
    } else {
        res = EAGAIN;
    }
    return res;
}
static int
_pthread_deallocate(pthread_t t)
{
    // Don't free the main thread.
    if (t != &_thread) {
        kern_return_t ret;
        ret = mach_vm_deallocate(mach_task_self(), t->freeaddr, t->freesize);
        PTHREAD_ASSERT(ret == KERN_SUCCESS);
    }
    return 0;
}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreturn-stack-address"

PTHREAD_NOINLINE
static void *
_pthread_current_stack_address(void)
{
    int a;
    return &a;
}

#pragma clang diagnostic pop
// Terminates the thread if called from the currently running thread.
PTHREAD_NORETURN PTHREAD_NOINLINE PTHREAD_NOT_TAIL_CALLED
static void
_pthread_terminate(pthread_t t)
{
    PTHREAD_ASSERT(t == pthread_self());

    uintptr_t freeaddr = (uintptr_t)t->freeaddr;
    size_t freesize = t->freesize;

    // the size of just the stack
    size_t freesize_stack = t->freesize;

    // We usually pass our structure+stack to bsdthread_terminate to free, but
    // if we get told to keep the pthread_t structure around then we need to
    // adjust the free size and addr in the pthread_t to just refer to the
    // structure and not the stack. If we do end up deallocating the
    // structure, this is useless work since no one can read the result, but we
    // can't do it after the call to pthread_remove_thread because it isn't
    // safe to dereference t after that.
    if ((void*)t > t->freeaddr && (void*)t < t->freeaddr + t->freesize) {
        // Check to ensure the pthread structure itself is part of the
        // allocation described by freeaddr/freesize, in which case we split and
        // only deallocate the area below the pthread structure. In the event of a
        // custom stack, the freeaddr/size will be the pthread structure itself, in
        // which case we shouldn't free anything (the final else case).
        freesize_stack = trunc_page((uintptr_t)t - (uintptr_t)freeaddr);

        // describe just the remainder for deallocation when the pthread_t goes away
        t->freeaddr += freesize_stack;
        t->freesize -= freesize_stack;
    } else if (t == &_thread) {
        freeaddr = t->stackaddr - pthread_get_stacksize_np(t);
        uintptr_t stackborder = trunc_page((uintptr_t)_pthread_current_stack_address());
        freesize_stack = stackborder - freeaddr;
    } else {
        freesize_stack = 0;
    }

    mach_port_t kport = _pthread_kernel_thread(t);
    semaphore_t joinsem = t->joiner_notify;

    _pthread_dealloc_special_reply_port(t);
    _pthread_dealloc_reply_port(t);

    // After the call to __pthread_remove_thread, it is not safe to
    // dereference the pthread_t structure.

    bool destroy, should_exit;
    destroy = (__pthread_remove_thread(t, true, &should_exit) != EBUSY);

    if (!destroy || t == &_thread) {
        // Use the adjusted freesize of just the stack that we computed above.
        freesize = freesize_stack;
    }

    // Check if there is nothing to free because the thread has a custom
    // stack allocation and is joinable.
    if (freesize == 0) {
        freeaddr = 0;
    }
    _pthread_introspection_thread_terminate(t, freeaddr, freesize, destroy);
    if (should_exit) {
        exitf(0);
    }

    __bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
    PTHREAD_ABORT("thread %p didn't terminate", t);
}
PTHREAD_NORETURN
static void
_pthread_terminate_invoke(pthread_t t)
{
    _pthread_terminate(t);
}
int
pthread_attr_destroy(pthread_attr_t *attr)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        attr->sig = 0;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *detachstate = attr->detached;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *inheritsched = attr->inherit;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *param = attr->param;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *policy = attr->policy;
        ret = 0;
    }
    return ret;
}
// Default stack size is 512KB; independent of the main thread's stack size.
static const size_t DEFAULT_STACK_SIZE = 512 * 1024;

int
pthread_attr_init(pthread_attr_t *attr)
{
    attr->stacksize = DEFAULT_STACK_SIZE;
    attr->stackaddr = NULL;
    attr->sig = _PTHREAD_ATTR_SIG;
    attr->param.sched_priority = default_priority;
    attr->param.quantum = 10; /* quantum isn't public yet */
    attr->detached = PTHREAD_CREATE_JOINABLE;
    attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
    attr->policy = _PTHREAD_DEFAULT_POLICY;
    attr->fastpath = 1;
    attr->schedset = 0;
    attr->guardsize = vm_page_size;
    attr->qosclass = _pthread_priority_make_newest(QOS_CLASS_DEFAULT, 0, 0);
    return 0;
}
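/*
 * Caller-side sketch (illustrative, not part of this file): attributes are
 * only honored between init and destroy, and pthread_create() snapshots
 * them at creation time.
 *
 *   pthread_attr_t attr;
 *   pthread_attr_init(&attr);
 *   pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 *   pthread_create(&tid, &attr, worker, NULL);   // worker/tid hypothetical
 *   pthread_attr_destroy(&attr);
 */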
int
pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            (detachstate == PTHREAD_CREATE_JOINABLE ||
             detachstate == PTHREAD_CREATE_DETACHED)) {
        attr->detached = detachstate;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            (inheritsched == PTHREAD_INHERIT_SCHED ||
             inheritsched == PTHREAD_EXPLICIT_SCHED)) {
        attr->inherit = inheritsched;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        /* TODO: Validate sched_param fields */
        attr->param = *param;
        attr->schedset = 1;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            (policy == SCHED_OTHER ||
             policy == SCHED_RR ||
             policy == SCHED_FIFO)) {
        attr->policy = policy;
        attr->schedset = 1;
        ret = 0;
    }
    return ret;
}
int
pthread_attr_setscope(pthread_attr_t *attr, int scope)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        if (scope == PTHREAD_SCOPE_SYSTEM) {
            // No attribute yet for the scope.
            ret = 0;
        } else if (scope == PTHREAD_SCOPE_PROCESS) {
            ret = ENOTSUP;
        }
    }
    return ret;
}

int
pthread_attr_getscope(const pthread_attr_t *attr, int *scope)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *scope = PTHREAD_SCOPE_SYSTEM;
        ret = 0;
    }
    return ret;
}
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = attr->stackaddr;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            ((uintptr_t)stackaddr % vm_page_size) == 0) {
        attr->stackaddr = stackaddr;
        attr->fastpath = 0;
        attr->guardsize = 0;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stacksize = attr->stacksize;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            (stacksize % vm_page_size) == 0 &&
            stacksize >= PTHREAD_STACK_MIN) {
        attr->stacksize = stacksize;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
        *stacksize = attr->stacksize;
        ret = 0;
    }
    return ret;
}
// Per SUSv3, the stackaddr is the base address, the lowest addressable byte
// address. This is not the same as in pthread_attr_setstackaddr.
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            ((uintptr_t)stackaddr % vm_page_size) == 0 &&
            (stacksize % vm_page_size) == 0 &&
            stacksize >= PTHREAD_STACK_MIN) {
        attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
        attr->stacksize = stacksize;
        attr->fastpath = 0;
        ret = 0;
    }
    return ret;
}
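/*
 * Worked example (hypothetical addresses): for a 64KB stack whose base is
 * 0x10000, pthread_attr_setstack(attr, (void *)0x10000, 0x10000) stores
 * attr->stackaddr = 0x20000 -- the top, since stacks grow down -- whereas
 * pthread_attr_setstackaddr() stores the supplied pointer unmodified.
 */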
int
pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        /* Guardsize of 0 is valid, it means no guard */
        if ((guardsize % vm_page_size) == 0) {
            attr->guardsize = guardsize;
            ret = 0;
        }
    }
    return ret;
}

int
pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *guardsize = attr->guardsize;
        ret = 0;
    }
    return ret;
}
/*
 * Create and start execution of a new thread.
 */
PTHREAD_NOINLINE PTHREAD_NORETURN
static void
_pthread_body(pthread_t self, bool needs_tsd_base_set)
{
    _pthread_set_self_internal(self, needs_tsd_base_set);
    __pthread_add_thread(self, NULL, false, false);
    void *result = (self->fun)(self->arg);

    _pthread_exit(self, result);
}
PTHREAD_NORETURN
void
_pthread_start(pthread_t self,
        mach_port_t kport,
        void *(*fun)(void *),
        void *arg,
        size_t stacksize,
        unsigned int pflags)
{
    if ((pflags & PTHREAD_START_CUSTOM) == 0) {
        void *stackaddr = self;
        _pthread_struct_init(self, &_pthread_attr_default,
                stackaddr, stacksize,
                PTHREAD_ALLOCADDR(stackaddr, stacksize), PTHREAD_ALLOCSIZE(stackaddr, stacksize));

        if (pflags & PTHREAD_START_SETSCHED) {
            self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
            self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
        }

        if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
            self->detached &= ~PTHREAD_CREATE_JOINABLE;
            self->detached |= PTHREAD_CREATE_DETACHED;
        }
    }

    if ((pflags & PTHREAD_START_QOSCLASS) != 0) {
        /* The QoS class is cached in the TSD of the pthread, so to reflect the
         * class that the kernel brought us up at, the TSD must be primed from the
         * flags parameter.
         */
        self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (pflags & PTHREAD_START_QOSCLASS_MASK);
    } else {
        /* Give the thread a default QoS tier, of zero. */
        self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
    }

    bool thread_tsd_bsd_set = (bool)(pflags & PTHREAD_START_TSD_BASE_SET);

    PTHREAD_ASSERT(MACH_PORT_VALID(kport));
    PTHREAD_ASSERT(_pthread_kernel_thread(self) == kport);

    // will mark the thread initialized
    _pthread_markcancel_if_canceled(self, kport);

    self->fun = fun;
    self->arg = arg;

    _pthread_body(self, !thread_tsd_bsd_set);
}
PTHREAD_ALWAYS_INLINE
static inline void
_pthread_struct_init(pthread_t t,
        const pthread_attr_t *attrs,
        void *stackaddr, size_t stacksize,
        void *freeaddr, size_t freesize)
{
    PTHREAD_ASSERT(t->sig != _PTHREAD_SIG);

    t->sig = _PTHREAD_SIG;
    t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = t;
    t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
    _PTHREAD_LOCK_INIT(t->lock);

    t->stackaddr = stackaddr;
    t->stacksize = stacksize;
    t->freeaddr = freeaddr;
    t->freesize = freesize;

    t->guardsize = attrs->guardsize;
    t->detached = attrs->detached;
    t->inherit = attrs->inherit;
    t->policy = attrs->policy;
    t->schedset = attrs->schedset;
    t->param = attrs->param;
    t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
}
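/*
 * Note: tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] (slot 0) holds the thread's own
 * address; _pthread_main_thread_init() below relies on a non-zero tsd[0] to
 * detect that the static main-thread storage was already initialized once.
 */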
/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
    return __is_threaded;
}

/* Non-portable public api to know whether this process has (had) at least one
 * thread apart from the main thread. There could be a race if a thread is in
 * the process of creation at the time of the call. It does not tell whether
 * there are more than one thread at this point of time.
 */
int
pthread_is_threaded_np(void)
{
    return __is_threaded;
}
PTHREAD_NOEXPORT_VARIANT
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
    mach_port_t kport = MACH_PORT_NULL;
    (void)_pthread_is_valid(t, 0, &kport);
    return kport;
}
PTHREAD_NOEXPORT_VARIANT
pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
    struct _pthread *p = NULL;

    /* No need to wait as mach port is already known */
    _PTHREAD_LOCK(_pthread_list_lock);

    TAILQ_FOREACH(p, &__pthread_head, plist) {
        if (_pthread_kernel_thread(p) == kernel_thread) {
            break;
        }
    }

    _PTHREAD_UNLOCK(_pthread_list_lock);

    return p;
}
PTHREAD_NOEXPORT_VARIANT
size_t
pthread_get_stacksize_np(pthread_t t)
{
    size_t size = 0;

    if (t == NULL) {
        return ESRCH; // XXX bug?
    }

#if !defined(__arm__) && !defined(__arm64__)
    // The default rlimit based allocations will be provided with a stacksize
    // of the current limit and a freesize of the max. However, custom
    // allocations will just have the guard page to free. If we aren't in the
    // latter case, call into rlimit to determine the current stack size. In
    // the event that the current limit == max limit then we'll fall down the
    // fast path, but since it's unlikely that the limit is going to be lowered
    // after it's been changed to the max, we should be fine.
    //
    // Of course, on arm rlim_cur == rlim_max and there's only the one guard
    // page. So, we can skip all this there.
    if (t == &_thread && t->stacksize + vm_page_size != t->freesize) {
        // We want to call getrlimit() just once, as it's relatively expensive
        static size_t rlimit_stack;

        if (rlimit_stack == 0) {
            struct rlimit limit;
            int ret = getrlimit(RLIMIT_STACK, &limit);

            if (ret == 0) {
                rlimit_stack = (size_t) limit.rlim_cur;
            }
        }

        if (rlimit_stack == 0 || rlimit_stack > t->freesize) {
            return t->stacksize;
        } else {
            return rlimit_stack;
        }
    }
#endif /* !defined(__arm__) && !defined(__arm64__) */

    if (t == pthread_self() || t == &_thread) {
        return t->stacksize;
    }

    _PTHREAD_LOCK(_pthread_list_lock);

    if (_pthread_is_valid_locked(t)) {
        size = t->stacksize;
    } else {
        size = ESRCH; // XXX bug?
    }

    _PTHREAD_UNLOCK(_pthread_list_lock);

    return size;
}
PTHREAD_NOEXPORT_VARIANT
void *
pthread_get_stackaddr_np(pthread_t t)
{
    void *addr = NULL;

    if (t == NULL) {
        return (void *)(uintptr_t)ESRCH; // XXX bug?
    }

    // since the main thread will not get de-allocated from underneath us
    if (t == pthread_self() || t == &_thread) {
        return t->stackaddr;
    }

    _PTHREAD_LOCK(_pthread_list_lock);

    if (_pthread_is_valid_locked(t)) {
        addr = t->stackaddr;
    } else {
        addr = (void *)(uintptr_t)ESRCH; // XXX bug?
    }

    _PTHREAD_UNLOCK(_pthread_list_lock);

    return addr;
}
static mach_port_t
_pthread_reply_port(pthread_t t)
{
    void *p;
    if (t == NULL) {
        p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY);
    } else {
        p = t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY];
    }
    return (mach_port_t)(uintptr_t)p;
}

static void
_pthread_set_reply_port(pthread_t t, mach_port_t reply_port)
{
    void *p = (void *)(uintptr_t)reply_port;
    if (t == NULL) {
        _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY, p);
    } else {
        t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY] = p;
    }
}

static void
_pthread_dealloc_reply_port(pthread_t t)
{
    mach_port_t reply_port = _pthread_reply_port(t);
    if (reply_port != MACH_PORT_NULL) {
        mig_dealloc_reply_port(reply_port);
    }
}

static mach_port_t
_pthread_special_reply_port(pthread_t t)
{
    void *p;
    if (t == NULL) {
        p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY);
    } else {
        p = t->tsd[_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY];
    }
    return (mach_port_t)(uintptr_t)p;
}

static void
_pthread_dealloc_special_reply_port(pthread_t t)
{
    mach_port_t special_reply_port = _pthread_special_reply_port(t);
    if (special_reply_port != MACH_PORT_NULL) {
        mach_port_mod_refs(mach_task_self(), special_reply_port,
                MACH_PORT_RIGHT_RECEIVE, -1);
    }
}
pthread_t
pthread_main_thread_np(void)
{
    return &_thread;
}

/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
    pthread_t self = pthread_self();

    return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}
/*
 * If we are passed in a pthread_t that is NULL, then we return the current
 * thread's thread_id. So folks don't have to call pthread_self, in addition
 * to us doing it, if they just want their thread_id.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
{
    int res = 0;
    pthread_t self = pthread_self();

    if (thread_id == NULL) {
        return EINVAL;
    }

    if (thread == NULL || thread == self) {
        *thread_id = self->thread_id;
    } else {
        _PTHREAD_LOCK(_pthread_list_lock);
        if (!_pthread_is_valid_locked(thread)) {
            res = ESRCH;
        } else if (thread->thread_id == 0) {
            res = EINVAL;
        } else {
            *thread_id = thread->thread_id;
        }
        _PTHREAD_UNLOCK(_pthread_list_lock);
    }
    return res;
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
    int res = 0;

    if (thread == NULL) {
        return ESRCH;
    }

    _PTHREAD_LOCK(_pthread_list_lock);
    if (_pthread_is_valid_locked(thread)) {
        strlcpy(threadname, thread->pthread_name, len);
    } else {
        res = ESRCH;
    }
    _PTHREAD_UNLOCK(_pthread_list_lock);
    return res;
}
int
pthread_setname_np(const char *name)
{
    int res;
    pthread_t self = pthread_self();

    size_t len = 0;
    if (name != NULL) {
        len = strlen(name);
    }

    /* prototype is in pthread_internals.h */
    res = __proc_info(5, getpid(), 2, (uint64_t)0, (void*)name, (int)len);
    if (res == 0) {
        if (len > 0) {
            strlcpy(self->pthread_name, name, MAXTHREADNAMESIZE);
        } else {
            bzero(self->pthread_name, MAXTHREADNAMESIZE);
        }
    }
    return res;
}
PTHREAD_ALWAYS_INLINE
static inline void
__pthread_add_thread(pthread_t t, const pthread_attr_t *attrs,
        bool parent, bool from_mach_thread)
{
    bool should_deallocate = false;
    bool should_add = true;

    mach_port_t kport = _pthread_kernel_thread(t);
    if (os_slowpath(!MACH_PORT_VALID(kport))) {
        PTHREAD_CLIENT_CRASH(kport,
                "Unable to allocate thread port, possible port leak");
    }

    if (from_mach_thread) {
        _PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock);
    } else {
        _PTHREAD_LOCK(_pthread_list_lock);
    }

    // The parent and child threads race to add the thread to the list.
    // When called by the parent:
    // - set parentcheck to true
    // - back off if childrun is true
    // When called by the child:
    // - set childrun to true
    // - back off if parentcheck is true
    if (parent) {
        t->parentcheck = 1;
        if (t->childrun) {
            // child got here first, don't add.
            should_add = false;
        }

        // If the child exits before we check in then it has to keep
        // the thread structure memory alive so our dereferences above
        // are valid. If it's a detached thread, then no joiner will
        // deallocate the thread structure itself. So we do it here.
        if (t->childexit) {
            should_add = false;
            should_deallocate = ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED);
        }
    } else {
        t->childrun = 1;
        if (t->parentcheck) {
            // Parent got here first, don't add.
            should_add = false;
        }
        if (t->wqthread) {
            // Work queue threads have no parent. Simulate.
            t->parentcheck = 1;
        }
    }

    if (should_add) {
        TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
        _pthread_count++;

        /*
         * Set some initial values which we know in the pthread structure in
         * case folks try to get the values before the thread can set them.
         */
        if (parent && attrs && attrs->schedset == 0) {
            t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = attrs->qosclass;
        }
    }

    if (from_mach_thread) {
        _PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock);
    } else {
        _PTHREAD_UNLOCK(_pthread_list_lock);
    }

    if (parent) {
        if (!from_mach_thread) {
            // PR-26275485: Mach threads will likely crash trying to run
            // introspection code. Since the fall out from the introspection
            // code not seeing the injected thread is likely less than crashing
            // in the introspection code, just don't make the call.
            _pthread_introspection_thread_create(t, should_deallocate);
        }
        if (should_deallocate) {
            _pthread_deallocate(t);
        }
    } else {
        _pthread_introspection_thread_start(t);
    }
}
// <rdar://problem/12544957> must always inline this function to avoid epilogues
// Returns EBUSY if the thread structure should be kept alive (is joinable).
// Returns ESRCH if the thread structure is no longer valid (was detached).
PTHREAD_ALWAYS_INLINE
static inline int
__pthread_remove_thread(pthread_t t, bool child, bool *should_exit)
{
    int ret = 0;

    bool should_remove = true;

    _PTHREAD_LOCK(_pthread_list_lock);

    // When a thread removes itself:
    // - Set the childexit flag indicating that the thread has exited.
    // - Return false if parentcheck is zero (must keep structure)
    // - If the thread is joinable, keep it on the list so that
    //   the join operation succeeds. Still decrement the running
    //   thread count so that we exit if no threads are running.
    // - Update the running thread count.
    // When another thread removes a joinable thread:
    // - CAREFUL not to dereference the thread before verifying that the
    //   reference is still valid using _pthread_is_valid_locked().
    // - Remove the thread from the list.

    if (child) {
        t->childexit = 1;
        if (t->parentcheck == 0) {
            ret = EBUSY;
        }
        if ((t->detached & PTHREAD_CREATE_JOINABLE) != 0) {
            should_remove = false;
        }
        *should_exit = (--_pthread_count <= 0);
    } else if (!_pthread_is_valid_locked(t)) {
        ret = ESRCH;
        should_remove = false;
    } else if ((t->detached & PTHREAD_CREATE_JOINABLE) == 0) {
        // If we found a thread but it's not joinable, bail.
        ret = ESRCH;
        should_remove = false;
    } else if (t->parentcheck == 0) {
        // If we're not the child thread *and* the parent has not finished
        // creating the thread yet, then we are another thread that's joining
        // and we cannot deallocate the pthread.
        ret = EBUSY;
    }
    if (should_remove) {
        TAILQ_REMOVE(&__pthread_head, t, plist);
    }

    _PTHREAD_UNLOCK(_pthread_list_lock);

    return ret;
}
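/*
 * Caller-side sketch (cf. _pthread_join_cleanup below): a joiner treats
 * EBUSY as "the parent is still registering the thread -- keep the pthread_t
 * alive", ESRCH as "detached or already gone -- don't touch it", and only a
 * plain removal permits _pthread_deallocate().
 */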
static int
_pthread_create(pthread_t *thread,
        const pthread_attr_t *attr,
        void *(*start_routine)(void *),
        void *arg,
        bool from_mach_thread)
{
    pthread_t t = NULL;
    unsigned int flags = 0;

    pthread_attr_t *attrs = (pthread_attr_t *)attr;
    if (attrs == NULL) {
        attrs = &_pthread_attr_default;
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }

    if (attrs->detached == PTHREAD_CREATE_DETACHED) {
        flags |= PTHREAD_START_DETACHED;
    }

    if (attrs->schedset != 0) {
        flags |= PTHREAD_START_SETSCHED;
        flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
        flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
    } else if (attrs->qosclass != 0) {
        flags |= PTHREAD_START_QOSCLASS;
        flags |= (attrs->qosclass & PTHREAD_START_QOSCLASS_MASK);
    }

    __is_threaded = 1;

    void *stack;
    if (attrs->fastpath) {
        // kernel will allocate thread and stack, pass stacksize.
        stack = (void *)attrs->stacksize;
    } else {
        // allocate the thread and its stack
        flags |= PTHREAD_START_CUSTOM;

        int res;
        res = _pthread_allocate(&t, attrs, &stack);
        if (res) {
            return res;
        }

        t->arg = arg;
        t->fun = start_routine;
    }

    pthread_t t2;
    t2 = __bsdthread_create(start_routine, arg, stack, t, flags);
    if (t2 == (pthread_t)-1) {
        if (errno == EMFILE) {
            PTHREAD_CLIENT_CRASH(0,
                    "Unable to allocate thread port, possible port leak");
        }
        if (flags & PTHREAD_START_CUSTOM) {
            // free the thread and stack if we allocated it
            _pthread_deallocate(t);
        }
        return EAGAIN;
    }
    if (t == NULL) {
        t = t2;
    }

    __pthread_add_thread(t, attrs, true, from_mach_thread);

    // n.b. if a thread is created detached and exits, t will be invalid
    *thread = t;
    return 0;
}
int
pthread_create(pthread_t *thread,
        const pthread_attr_t *attr,
        void *(*start_routine)(void *),
        void *arg)
{
    return _pthread_create(thread, attr, start_routine, arg, false);
}

int
pthread_create_from_mach_thread(pthread_t *thread,
        const pthread_attr_t *attr,
        void *(*start_routine)(void *),
        void *arg)
{
    return _pthread_create(thread, attr, start_routine, arg, true);
}
PTHREAD_NORETURN
static void
_pthread_suspended_body(pthread_t self)
{
    _pthread_set_self(self);
    __pthread_add_thread(self, NULL, false, false);
    _pthread_exit(self, (self->fun)(self->arg));
}
int
pthread_create_suspended_np(pthread_t *thread,
        const pthread_attr_t *attr,
        void *(*start_routine)(void *),
        void *arg)
{
    int res;
    void *stack;
    mach_port_t kernel_thread = MACH_PORT_NULL;

    const pthread_attr_t *attrs = attr;
    if (attrs == NULL) {
        attrs = &_pthread_attr_default;
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }

    pthread_t t;
    res = _pthread_allocate(&t, attrs, &stack);
    if (res) {
        return res;
    }

    __is_threaded = 1;

    kern_return_t kr;
    kr = thread_create(mach_task_self(), &kernel_thread);
    if (kr != KERN_SUCCESS) {
        //PTHREAD_ABORT("thread_create() failed: %d", kern_res);
        return EINVAL; /* Need better error here? */
    }

    _pthread_set_kernel_thread(t, kernel_thread);
    (void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);

    t->arg = arg;
    t->fun = start_routine;
    t->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
    __pthread_add_thread(t, NULL, true, false);

    // Set up a suspended thread.
    _pthread_setup(t, _pthread_suspended_body, stack, 1, 0);

    *thread = t;
    return res;
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_detach(pthread_t thread)
{
    int res = 0;
    bool join = false;
    semaphore_t sema = SEMAPHORE_NULL;

    if (!_pthread_is_valid(thread, PTHREAD_IS_VALID_LOCK_THREAD, NULL)) {
        return ESRCH; // Not a valid thread to detach.
    }

    if ((thread->detached & PTHREAD_CREATE_DETACHED) ||
            !(thread->detached & PTHREAD_CREATE_JOINABLE)) {
        res = EINVAL;
    } else if (thread->detached & _PTHREAD_EXITED) {
        // Join the thread if it's already exited.
        join = true;
    } else {
        thread->detached &= ~PTHREAD_CREATE_JOINABLE;
        thread->detached |= PTHREAD_CREATE_DETACHED;
        sema = thread->joiner_notify;
    }

    _PTHREAD_UNLOCK(thread->lock);

    if (join) {
        pthread_join(thread, NULL);
    } else if (sema) {
        semaphore_signal(sema);
    }

    return res;
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_kill(pthread_t th, int sig)
{
    if (sig < 0 || sig > NSIG) {
        return EINVAL;
    }

    mach_port_t kport = MACH_PORT_NULL;
    if (!_pthread_is_valid(th, 0, &kport)) {
        return ESRCH; // Not a valid thread.
    }

    // Don't signal workqueue threads.
    if (th->wqthread != 0 && th->wqkillset == 0) {
        return ENOTSUP;
    }

    int ret = __pthread_kill(kport, sig);

    if (ret == -1) {
        ret = errno;
    }
    return ret;
}
PTHREAD_NOEXPORT_VARIANT
int
__pthread_workqueue_setkill(int enable)
{
    pthread_t self = pthread_self();

    _PTHREAD_LOCK(self->lock);
    self->wqkillset = enable ? 1 : 0;
    _PTHREAD_UNLOCK(self->lock);

    return 0;
}
/* For compatibility... */

pthread_t
_pthread_self(void) {
    return pthread_self();
}
/*
 * Terminate a thread.
 */
int __disable_threadsignal(int);

PTHREAD_NORETURN
static void
_pthread_exit(pthread_t self, void *value_ptr)
{
    struct __darwin_pthread_handler_rec *handler;

    // Disable signal delivery while we clean up
    __disable_threadsignal(1);

    // Set cancel state to disable and type to deferred
    _pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);

    while ((handler = self->__cleanup_stack) != 0) {
        (handler->__routine)(handler->__arg);
        self->__cleanup_stack = handler->__next;
    }
    _pthread_tsd_cleanup(self);

    _PTHREAD_LOCK(self->lock);
    self->detached |= _PTHREAD_EXITED;
    self->exit_value = value_ptr;

    if ((self->detached & PTHREAD_CREATE_JOINABLE) &&
            self->joiner_notify == SEMAPHORE_NULL) {
        self->joiner_notify = (semaphore_t)os_get_cached_semaphore();
    }
    _PTHREAD_UNLOCK(self->lock);

    // Clear per-thread semaphore cache
    os_put_cached_semaphore(SEMAPHORE_NULL);

    _pthread_terminate_invoke(self);
}
void
pthread_exit(void *value_ptr)
{
    pthread_t self = pthread_self();
    if (self->wqthread == 0) {
        _pthread_exit(self, value_ptr);
    } else {
        PTHREAD_ABORT("pthread_exit() may only be called against threads created via pthread_create()");
    }
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_getschedparam(pthread_t thread,
        int *policy,
        struct sched_param *param)
{
    int ret = 0;

    if (thread == NULL) {
        return ESRCH;
    }

    _PTHREAD_LOCK(_pthread_list_lock);

    if (_pthread_is_valid_locked(thread)) {
        if (policy) {
            *policy = thread->policy;
        }
        if (param) {
            *param = thread->param;
        }
    } else {
        ret = ESRCH;
    }

    _PTHREAD_UNLOCK(_pthread_list_lock);

    return ret;
}
PTHREAD_ALWAYS_INLINE
static inline int
pthread_setschedparam_internal(pthread_t thread,
        mach_port_t kport,
        int policy,
        const struct sched_param *param)
{
    policy_base_data_t bases;
    policy_base_t base;
    mach_msg_type_number_t count;
    kern_return_t ret;

    switch (policy) {
        case SCHED_OTHER:
            bases.ts.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.ts;
            count = POLICY_TIMESHARE_BASE_COUNT;
            break;
        case SCHED_FIFO:
            bases.fifo.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.fifo;
            count = POLICY_FIFO_BASE_COUNT;
            break;
        case SCHED_RR:
            bases.rr.base_priority = param->sched_priority;
            /* quantum isn't public yet */
            bases.rr.quantum = param->quantum;
            base = (policy_base_t)&bases.rr;
            count = POLICY_RR_BASE_COUNT;
            break;
        default:
            return EINVAL;
    }
    ret = thread_policy(kport, policy, base, count, TRUE);
    return (ret != KERN_SUCCESS) ? EINVAL : 0;
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
{
    mach_port_t kport = MACH_PORT_NULL;
    int res;
    int bypass = 1;

    // since the main thread will not get de-allocated from underneath us
    if (t == pthread_self() || t == &_thread) {
        kport = _pthread_kernel_thread(t);
    } else {
        bypass = 0;
        (void)_pthread_is_valid(t, 0, &kport);
    }

    res = pthread_setschedparam_internal(t, kport, policy, param);
    if (res == 0) {
        if (bypass == 0) {
            // Ensure the thread is still valid.
            _PTHREAD_LOCK(_pthread_list_lock);
            if (_pthread_is_valid_locked(t)) {
                t->policy = policy;
                t->param = *param;
            } else {
                res = ESRCH;
            }
            _PTHREAD_UNLOCK(_pthread_list_lock);
        } else {
            t->policy = policy;
            t->param = *param;
        }
    }
    return res;
}
int
sched_get_priority_min(int policy)
{
    return default_priority - 16;
}

int
sched_get_priority_max(int policy)
{
    return default_priority + 16;
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
    return (t1 == t2);
}

/*
 * Force LLVM not to optimise this to a call to __pthread_set_self, if it does
 * then _pthread_set_self won't be bound when secondary threads try and start up.
 */
PTHREAD_NOINLINE
void
_pthread_set_self(pthread_t p)
{
    return _pthread_set_self_internal(p, true);
}

PTHREAD_ALWAYS_INLINE
static inline void
_pthread_set_self_internal(pthread_t p, bool needs_tsd_base_set)
{
    uint64_t tid = __thread_selfid();
    if (tid == -1ull) {
        PTHREAD_ABORT("failed to set thread_id");
    }

    p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = p;
    p->tsd[_PTHREAD_TSD_SLOT_ERRNO] = &p->err_no;
    p->thread_id = tid;

    if (needs_tsd_base_set) {
        _thread_set_tsd_base(&p->tsd[0]);
    }
}
// <rdar://problem/28984807> pthread_once should have an acquire barrier
PTHREAD_ALWAYS_INLINE
static inline void
_os_once_acquire(os_once_t *predicate, void *context, os_function_t function)
{
    if (OS_EXPECT(os_atomic_load(predicate, acquire), ~0l) != ~0l) {
        _os_once(predicate, context, function);
        OS_COMPILER_CAN_ASSUME(*predicate == ~0l);
    }
}

struct _pthread_once_context {
    pthread_once_t *pthread_once;
    void (*routine)(void);
};

static void
__pthread_once_handler(void *context)
{
    struct _pthread_once_context *ctx = context;
    pthread_cleanup_push((void*)__os_once_reset, &ctx->pthread_once->once);
    ctx->routine();
    pthread_cleanup_pop(0);
    ctx->pthread_once->sig = _PTHREAD_ONCE_SIG;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
    struct _pthread_once_context ctx = { once_control, init_routine };
    do {
        _os_once_acquire(&once_control->once, &ctx, __pthread_once_handler);
    } while (once_control->sig == _PTHREAD_ONCE_SIG_init);
    return 0;
}
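/*
 * Usage sketch (illustrative): the acquire load above guarantees that a
 * thread observing the completed predicate also observes every write made
 * by init_routine.
 *
 *   static pthread_once_t once = PTHREAD_ONCE_INIT;
 *   static int shared;
 *   static void init_shared(void) { shared = 42; }  // hypothetical initializer
 *   ...
 *   pthread_once(&once, init_shared);  // `shared` is valid after this returns
 */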
int
pthread_getconcurrency(void)
{
    return pthread_concurrency;
}

int
pthread_setconcurrency(int new_level)
{
    if (new_level < 0) {
        return EINVAL;
    }
    pthread_concurrency = new_level;
    return 0;
}
static unsigned long
_pthread_strtoul(const char *p, const char **endptr, int base)
{
    uintptr_t val = 0;

    // Expect hex string starting with "0x"
    if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') {
        p += 2;
        while (1) {
            char c = *p;
            if ('0' <= c && c <= '9') {
                val = (val << 4) + (c - '0');
            } else if ('a' <= c && c <= 'f') {
                val = (val << 4) + (c - 'a' + 10);
            } else if ('A' <= c && c <= 'F') {
                val = (val << 4) + (c - 'A' + 10);
            } else {
                break;
            }
            ++p;
        }
    }

    *endptr = (char *)p;
    return val;
}
static int
parse_main_stack_params(const char *apple[],
        void **stackaddr,
        size_t *stacksize,
        void **allocaddr,
        size_t *allocsize)
{
    const char *p = _simple_getenv(apple, "main_stack");
    if (!p) return 0;

    int ret = 0;
    const char *s = p;

    *stackaddr = (void *)_pthread_strtoul(s, &s, 16);
    if (*s != ',') goto out;

    *stacksize = _pthread_strtoul(s + 1, &s, 16);
    if (*s != ',') goto out;

    *allocaddr = (void *)_pthread_strtoul(s + 1, &s, 16);
    if (*s != ',') goto out;

    *allocsize = _pthread_strtoul(s + 1, &s, 16);
    if (*s != ',' && *s != 0) goto out;

    ret = 1;
out:
    bzero((char *)p, strlen(p));
    return ret;
}
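/*
 * Example (hypothetical values): the kernel/dyld can pass something like
 *
 *   main_stack=0x7ffeefc00000,0x800000,0x7ffee7bff000,0x8001000
 *
 * i.e. stack top, stack size, allocation base and allocation size in hex;
 * the parser above splits on commas and then scrubs the string.
 */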
#if !defined(VARIANT_STATIC)
void *
malloc(size_t sz)
{
    if (_pthread_malloc) {
        return _pthread_malloc(sz);
    } else {
        return NULL;
    }
}

void
free(void *p)
{
    if (_pthread_free) {
        _pthread_free(p);
    }
}
#endif // VARIANT_STATIC
/*
 * Perform package initialization - called automatically when application starts
 */
struct ProgramVars; /* forward reference */

int
__pthread_init(const struct _libpthread_functions *pthread_funcs,
        const char *envp[] __unused,
        const char *apple[],
        const struct ProgramVars *vars __unused)
{
    // Save our provided pushed-down functions
    if (pthread_funcs) {
        exitf = pthread_funcs->exit;

        if (pthread_funcs->version >= 2) {
            _pthread_malloc = pthread_funcs->malloc;
            _pthread_free = pthread_funcs->free;
        }
    }

    //
    // Get host information
    //

    kern_return_t kr;
    host_flavor_t flavor = HOST_PRIORITY_INFO;
    mach_msg_type_number_t count = HOST_PRIORITY_INFO_COUNT;
    host_priority_info_data_t priority_info;
    host_t host = mach_host_self();
    kr = host_info(host, flavor, (host_info_t)&priority_info, &count);
    if (kr != KERN_SUCCESS) {
        PTHREAD_ABORT("host_info(mach_host_self(), ...) failed: %s", mach_error_string(kr));
    } else {
        default_priority = priority_info.user_priority;
        min_priority = priority_info.minimum_priority;
        max_priority = priority_info.maximum_priority;
    }
    mach_port_deallocate(mach_task_self(), host);

    //
    // Set up the main thread structure
    //

    // Get the address and size of the main thread's stack from the kernel.
    void *stackaddr = 0;
    size_t stacksize = 0;
    void *allocaddr = 0;
    size_t allocsize = 0;
    if (!parse_main_stack_params(apple, &stackaddr, &stacksize, &allocaddr, &allocsize) ||
            stackaddr == NULL || stacksize == 0) {
        // Fall back to previous behavior.
        size_t len = sizeof(stackaddr);
        int mib[] = { CTL_KERN, KERN_USRSTACK };
        if (__sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0) {
#if defined(__LP64__)
            stackaddr = (void *)USRSTACK64;
#else
            stackaddr = (void *)USRSTACK;
#endif
        }
        stacksize = DFLSSIZ;
    }

    pthread_t thread = &_thread;
    pthread_attr_init(&_pthread_attr_default);
    _pthread_struct_init(thread, &_pthread_attr_default,
            stackaddr, stacksize,
            allocaddr, allocsize);
    thread->detached = PTHREAD_CREATE_JOINABLE;

    // Finish initialization with common code that is reinvoked on the
    // child side of a fork.
    //
    // Finishes initialization of main thread attributes.
    // Initializes the thread list and add the main thread.
    // Calls _pthread_set_self() to prepare the main thread for execution.
    _pthread_main_thread_init(thread);

    struct _pthread_registration_data registration_data;
    // Set up kernel entry points with __bsdthread_register.
    _pthread_bsdthread_init(&registration_data);

    // Have pthread_key and pthread_mutex do their init envvar checks.
    _pthread_key_global_init(envp);
    _pthread_mutex_global_init(envp, &registration_data);

#if PTHREAD_DEBUG_LOG
    _SIMPLE_STRING path = _simple_salloc();
    _simple_sprintf(path, "/var/tmp/libpthread.%d.log", getpid());
    _pthread_debuglog = open(_simple_string(path),
            O_WRONLY | O_APPEND | O_CREAT | O_NOFOLLOW | O_CLOEXEC, 0666);
    _simple_sfree(path);
    _pthread_debugstart = mach_absolute_time();
#endif

    return 0;
}
PTHREAD_NOEXPORT void
_pthread_main_thread_init(pthread_t p)
{
    TAILQ_INIT(&__pthread_head);
    _PTHREAD_LOCK_INIT(_pthread_list_lock);

    // Re-use the main thread's static storage if no thread was provided.
    if (p == NULL) {
        if (_thread.tsd[0] != 0) {
            bzero(&_thread, sizeof(struct _pthread));
        }
        p = &_thread;
    }

    _PTHREAD_LOCK_INIT(p->lock);
    _pthread_set_kernel_thread(p, mach_thread_self());
    _pthread_set_reply_port(p, mach_reply_port());
    p->__cleanup_stack = NULL;
    p->joiner_notify = SEMAPHORE_NULL;
    p->joiner = MACH_PORT_NULL;
    p->detached |= _PTHREAD_CREATE_PARENT;
    p->tsd[__TSD_SEMAPHORE_CACHE] = (void*)SEMAPHORE_NULL;
    p->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;

    // Initialize the list of threads with the new main thread.
    TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
    _pthread_count = 1;

    _pthread_set_self(p);
    _pthread_introspection_thread_start(p);
}
int
_pthread_join_cleanup(pthread_t thread, void **value_ptr, int conforming)
{
    int ret = __pthread_remove_thread(thread, false, NULL);
    if (ret != 0 && ret != EBUSY) {
        // Returns ESRCH if the thread was not created joinable.
        return ret;
    }

    if (value_ptr) {
        *value_ptr = _pthread_get_exit_value(thread, conforming);
    }
    _pthread_introspection_thread_destroy(thread);

    // __pthread_remove_thread returns EBUSY if the parent has not
    // finished creating the thread (and is still expecting the pthread_t
    // to stay alive).
    if (ret != EBUSY) {
        _pthread_deallocate(thread);
    }
    return 0;
}
void
pthread_yield_np(void)
{
    sched_yield();
}
PTHREAD_NOEXPORT_VARIANT
void
_pthread_clear_qos_tsd(mach_port_t thread_port)
{
    if (thread_port == MACH_PORT_NULL || (uintptr_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF) == thread_port) {
        /* Clear the current thread's TSD, that can be done inline. */
        _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0));
    } else {
        pthread_t p;

        _PTHREAD_LOCK(_pthread_list_lock);

        TAILQ_FOREACH(p, &__pthread_head, plist) {
            mach_port_t kp = _pthread_kernel_thread(p);
            if (thread_port == kp) {
                p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
                break;
            }
        }

        _PTHREAD_UNLOCK(_pthread_list_lock);
    }
}
/***** pthread workqueue support routines *****/

PTHREAD_NOEXPORT void
_pthread_bsdthread_init(struct _pthread_registration_data *data)
{
    bzero(data, sizeof(*data));
    data->version = sizeof(struct _pthread_registration_data);
    data->dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
    data->return_to_kernel_offset = __TSD_RETURN_TO_KERNEL * sizeof(void *);
    data->tsd_offset = offsetof(struct _pthread, tsd);
    data->mach_thread_self_offset = __TSD_MACH_THREAD_SELF * sizeof(void *);

    int rv = __bsdthread_register(thread_start,
            start_wqthread, (int)PTHREAD_SIZE,
            (void*)data, (uintptr_t)sizeof(*data),
            data->dispatch_queue_offset);

    if (rv > 0) {
        if ((rv & PTHREAD_FEATURE_QOS_DEFAULT) == 0) {
            PTHREAD_INTERNAL_CRASH(rv,
                    "Missing required support for QOS_CLASS_DEFAULT");
        }
        if ((rv & PTHREAD_FEATURE_QOS_MAINTENANCE) == 0) {
            PTHREAD_INTERNAL_CRASH(rv,
                    "Missing required support for QOS_CLASS_MAINTENANCE");
        }
        __pthread_supported_features = rv;
    }

    /*
     * TODO: differentiate between (-1, EINVAL) after fork (which has the side
     * effect of resetting the child's stack_addr_hint before bailing out) and
     * (-1, EINVAL) because of invalid arguments. We'd probably like to treat
     * the latter as fatal.
     *
     * <rdar://problem/36451838>
     */

    pthread_priority_t main_qos = (pthread_priority_t)data->main_qos;

    if (_pthread_priority_get_qos_newest(main_qos) != QOS_CLASS_UNSPECIFIED) {
        _pthread_set_main_qos(main_qos);
        _thread.tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = main_qos;
    }

    if (__libdispatch_workerfunction != NULL) {
        // prepare the kernel for workq action
        (void)__workq_open();
    }
}
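/*
 * Note (restating the handshake above): a positive __bsdthread_register
 * return is a feature bitmask; later entry points test it the same way,
 * e.g. (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) selects
 * the fine-grained priority encoding in the workqueue paths below.
 */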
// workqueue entry point from kernel
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr, void *keventlist, int flags, int nkevents)
{
    PTHREAD_ASSERT(flags & WQ_FLAG_THREAD_NEWSPI);

    bool thread_reuse = flags & WQ_FLAG_THREAD_REUSE;
    bool overcommit = flags & WQ_FLAG_THREAD_OVERCOMMIT;
    bool kevent = flags & WQ_FLAG_THREAD_KEVENT;
    bool workloop = (flags & WQ_FLAG_THREAD_WORKLOOP) &&
            __libdispatch_workloopfunction != NULL;
    PTHREAD_ASSERT((!kevent) || (__libdispatch_keventfunction != NULL));
    PTHREAD_ASSERT(!workloop || kevent);

    pthread_priority_t priority = 0;
    unsigned long priority_flags = 0;

    if (overcommit)
        priority_flags |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
    if (flags & WQ_FLAG_THREAD_EVENT_MANAGER)
        priority_flags |= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
    if (kevent)
        priority_flags |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;

    int thread_class = flags & WQ_FLAG_THREAD_PRIOMASK;
    priority = _pthread_priority_make_newest(thread_class, 0, priority_flags);

    if (!thread_reuse) {
        // New thread created by kernel, needs initialization.
        void *stackaddr = self;
        size_t stacksize = (uintptr_t)self - (uintptr_t)stacklowaddr;

        _pthread_struct_init(self, &_pthread_attr_default,
                stackaddr, stacksize,
                PTHREAD_ALLOCADDR(stackaddr, stacksize), PTHREAD_ALLOCSIZE(stackaddr, stacksize));

        _pthread_set_kernel_thread(self, kport);
        self->wqthread = 1;
        self->wqkillset = 0;
        self->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;

        // Not a joinable thread.
        self->detached &= ~PTHREAD_CREATE_JOINABLE;
        self->detached |= PTHREAD_CREATE_DETACHED;

        // Update the running thread count and set childrun bit.
        bool thread_tsd_base_set = (bool)(flags & WQ_FLAG_THREAD_TSD_BASE_SET);
        _pthread_set_self_internal(self, !thread_tsd_base_set);
        _pthread_introspection_thread_create(self, false);
        __pthread_add_thread(self, NULL, false, false);
    }

    // If we're running with fine-grained priority, we also need to
    // set this thread to have the QoS class provided to us by the kernel
    if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
        _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, _pthread_priority_make_newest(thread_class, 0, priority_flags));
    }

    PTHREAD_ASSERT(self);
    PTHREAD_ASSERT(self == pthread_self());

    if (workloop) {
        self->fun = (void *(*)(void*))__libdispatch_workloopfunction;
    } else if (kevent) {
        self->fun = (void *(*)(void*))__libdispatch_keventfunction;
    } else {
        self->fun = (void *(*)(void*))__libdispatch_workerfunction;
    }
    self->arg = (void *)(uintptr_t)thread_class;

    if (kevent && keventlist && nkevents > 0) {
        int errors_out;
kevent_errors_retry:

        if (workloop) {
            kqueue_id_t kevent_id = *(kqueue_id_t *)((char*)keventlist - sizeof(kqueue_id_t));
            kqueue_id_t kevent_id_in = kevent_id;
            (__libdispatch_workloopfunction)(&kevent_id, &keventlist, &nkevents);
            PTHREAD_ASSERT(kevent_id == kevent_id_in || nkevents == 0);
            errors_out = __workq_kernreturn(WQOPS_THREAD_WORKLOOP_RETURN, keventlist, nkevents, 0);
        } else {
            (__libdispatch_keventfunction)(&keventlist, &nkevents);
            errors_out = __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, keventlist, nkevents, 0);
        }

        if (errors_out > 0) {
            nkevents = errors_out;
            goto kevent_errors_retry;
        } else if (errors_out < 0) {
            PTHREAD_ABORT("kevent return produced an error: %d", errno);
        }
        goto thexit;
    } else if (kevent) {
        if (workloop) {
            (__libdispatch_workloopfunction)(0, NULL, NULL);
            __workq_kernreturn(WQOPS_THREAD_WORKLOOP_RETURN, NULL, 0, -1);
        } else {
            (__libdispatch_keventfunction)(NULL, NULL);
            __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, NULL, 0, 0);
        }
        goto thexit;
    }

    if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
        if (!__workq_newapi) {
            /* Old thread priorities are inverted from where we have them in
             * the new flexible priority scheme. The highest priority is zero,
             * up to 2, with background at 3.
             */
            pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;

            int opts = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;

            if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_DEFAULT) == 0) {
                /* Dirty hack to support kernels that don't have QOS_CLASS_DEFAULT. */
                switch (thread_class) {
                    case QOS_CLASS_USER_INTERACTIVE:
                        thread_class = QOS_CLASS_USER_INITIATED;
                        break;
                    case QOS_CLASS_USER_INITIATED:
                        thread_class = QOS_CLASS_DEFAULT;
                        break;
                    default:
                        break;
                }
            }

            switch (thread_class) {
                /* QOS_CLASS_USER_INTERACTIVE is not currently requested, for old dispatch priority compatibility */
                case QOS_CLASS_USER_INITIATED:
                    (*func)(WORKQ_HIGH_PRIOQUEUE, opts, NULL);
                    break;

                case QOS_CLASS_DEFAULT:
                    /* B&I builders can't pass a QOS_CLASS_DEFAULT thread to dispatch, for fear of the QoS being
                     * picked up by NSThread (et al) and transported around the system. So change the TSD to
                     * make this thread look like QOS_CLASS_USER_INITIATED even though it will still run as legacy.
                     */
                    _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, _pthread_priority_make_newest(QOS_CLASS_USER_INITIATED, 0, 0));
                    (*func)(WORKQ_DEFAULT_PRIOQUEUE, opts, NULL);
                    break;

                case QOS_CLASS_UTILITY:
                    (*func)(WORKQ_LOW_PRIOQUEUE, opts, NULL);
                    break;

                case QOS_CLASS_BACKGROUND:
                    (*func)(WORKQ_BG_PRIOQUEUE, opts, NULL);
                    break;

                /* Legacy dispatch does not use QOS_CLASS_MAINTENANCE, so no need to handle it here */
            }
        } else {
            /* "New" API, where dispatch is expecting to be given the thread priority */
            (*__libdispatch_workerfunction)(priority);
        }
    } else {
        /* We're the new library running on an old kext, so thread_class is really the workq priority. */
        pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;
        int options = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;
        (*func)(thread_class, options, NULL);
    }

    __workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);

thexit:
    {
        pthread_priority_t current_priority = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS);
        if ((current_priority & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) ||
                (_pthread_priority_get_qos_newest(current_priority) > WQ_THREAD_CLEANUP_QOS)) {
            // Reset QoS to something low for the cleanup process
            priority = _pthread_priority_make_newest(WQ_THREAD_CLEANUP_QOS, 0, 0);
            _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, priority);
        }
    }

    _pthread_exit(self, NULL);
}
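/*
 * Lifecycle sketch (summary of the function above): the kernel re-enters
 * here with WQ_FLAG_THREAD_REUSE set for parked workers, so only a
 * first-time entry initializes the pthread_t; every pass ends either in a
 * __workq_kernreturn(WQOPS_THREAD_*_RETURN, ...) park or in _pthread_exit().
 */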
/***** pthread workqueue API for libdispatch *****/

_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN == WQ_KEVENT_LIST_LEN,
        "Kernel and userland should agree on the event list size");

void
pthread_workqueue_setdispatchoffset_np(int offset)
{
    __libdispatch_offset = offset;
}

static int
pthread_workqueue_setdispatch_with_workloop_np(pthread_workqueue_function2_t queue_func,
        pthread_workqueue_function_kevent_t kevent_func,
        pthread_workqueue_function_workloop_t workloop_func)
{
    int res = EBUSY;
    if (__libdispatch_workerfunction == NULL) {
        // Check whether the kernel supports new SPIs
        res = __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP, NULL, __libdispatch_offset, kevent_func != NULL ? 0x01 : 0x00);
        if (res == -1) {
            res = ENOTSUP;
        } else {
            __libdispatch_workerfunction = queue_func;
            __libdispatch_keventfunction = kevent_func;
            __libdispatch_workloopfunction = workloop_func;

            // Prepare the kernel for workq action
            (void)__workq_open();
            if (__is_threaded == 0) {
                __is_threaded = 1;
            }
        }
    }
    return res;
}

int
_pthread_workqueue_init_with_workloop(pthread_workqueue_function2_t queue_func,
        pthread_workqueue_function_kevent_t kevent_func,
        pthread_workqueue_function_workloop_t workloop_func,
        int offset, int flags)
{
    if (flags != 0) {
        return ENOTSUP;
    }

    __workq_newapi = true;
    __libdispatch_offset = offset;

    int rv = pthread_workqueue_setdispatch_with_workloop_np(queue_func, kevent_func, workloop_func);
    return rv;
}

int
_pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func,
        pthread_workqueue_function_kevent_t kevent_func,
        int offset, int flags)
{
    return _pthread_workqueue_init_with_workloop(queue_func, kevent_func, NULL, offset, flags);
}

int
_pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flags)
{
    return _pthread_workqueue_init_with_kevent(func, NULL, offset, flags);
}

int
pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func)
{
    return pthread_workqueue_setdispatch_with_workloop_np((pthread_workqueue_function2_t)worker_func, NULL, NULL);
}
int
_pthread_workqueue_supported(void)
{
    if (os_unlikely(!__pthread_supported_features)) {
        PTHREAD_INTERNAL_CRASH(0, "libpthread has not been initialized");
    }

    return __pthread_supported_features;
}
int
pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads)
{
    int res = 0;

    // Cannot add threads without a worker function registered.
    if (__libdispatch_workerfunction == NULL) {
        return EPERM;
    }

    pthread_priority_t kp = 0;

    if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
        /* The new kernel API takes the new QoS class + relative priority style of
         * priority. This entry point is here for compatibility with old libdispatch
         * versions (ie. the simulator). We request the corresponding new bracket
         * from the kernel, then on the way out run all dispatch queues that were
         * requested.
         */

        int compat_priority = queue_priority & WQ_FLAG_THREAD_PRIOMASK;
        int flags = 0;

        /* To make sure the library does not issue more threads to dispatch than
         * were requested, the total number of active requests is recorded in
         * the priority.
         */
        if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
            flags = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
        }

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
        kp = _pthread_qos_class_encode_workqueue(compat_priority, flags);
#pragma clang diagnostic pop

    } else {
        /* Running on the old kernel, queue_priority is what we pass directly to
         * the syscall.
         */
        kp = queue_priority & WQ_FLAG_THREAD_PRIOMASK;

        if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
            kp |= WORKQUEUE_OVERCOMMIT;
        }
    }

    res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)kp);
    if (res == -1) {
        res = errno;
    }
    return res;
}

bool
_pthread_workqueue_should_narrow(pthread_priority_t pri)
{
    int res = __workq_kernreturn(WQOPS_SHOULD_NARROW, NULL, (int)pri, 0);
    if (res == -1) {
        return false;
    }
    return res;
}
int
_pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
{
    int res = 0;

    if (__libdispatch_workerfunction == NULL) {
        return EPERM;
    }

    if ((__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) == 0) {
        return ENOTSUP;
    }

    res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)priority);
    if (res == -1) {
        res = errno;
    }
    return res;
}

int
_pthread_workqueue_set_event_manager_priority(pthread_priority_t priority)
{
    int res = __workq_kernreturn(WQOPS_SET_EVENT_MANAGER_PRIORITY, NULL, (int)priority, 0);
    if (res == -1) {
        res = errno;
    }
    return res;
}
/*
 * Introspection SPI for libpthread.
 */

static pthread_introspection_hook_t _pthread_introspection_hook;

pthread_introspection_hook_t
pthread_introspection_hook_install(pthread_introspection_hook_t hook)
{
    pthread_introspection_hook_t prev;
    prev = _pthread_atomic_xchg_ptr((void**)&_pthread_introspection_hook, hook);
    return prev;
}
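/*
 * Usage sketch (client side, illustrative): a tool installs a hook and
 * chains to the previous one, which this SPI returns:
 *
 *   static pthread_introspection_hook_t prev_hook;  // hypothetical
 *   static void my_hook(unsigned int event, pthread_t t, void *addr, size_t size)
 *   {
 *       // ... observe the event, then chain ...
 *       if (prev_hook) prev_hook(event, t, addr, size);
 *   }
 *   ...
 *   prev_hook = pthread_introspection_hook_install(my_hook);
 */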
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_create(pthread_t t, bool destroy)
{
    _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_CREATE, t, t,
            PTHREAD_SIZE);
    if (!destroy) return;
    _pthread_introspection_thread_destroy(t);
}

static inline void
_pthread_introspection_thread_create(pthread_t t, bool destroy)
{
    if (os_fastpath(!_pthread_introspection_hook)) return;
    _pthread_introspection_hook_callout_thread_create(t, destroy);
}

PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_start(pthread_t t)
{
    size_t freesize;
    void *freeaddr;
    if (t == &_thread) {
        freesize = t->stacksize + t->guardsize;
        freeaddr = t->stackaddr - freesize;
    } else {
        freesize = t->freesize - PTHREAD_SIZE;
        freeaddr = t->freeaddr;
    }
    _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_START, t,
            freeaddr, freesize);
}

static inline void
_pthread_introspection_thread_start(pthread_t t)
{
    if (os_fastpath(!_pthread_introspection_hook)) return;
    _pthread_introspection_hook_callout_thread_start(t);
}

PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_terminate(pthread_t t,
        void *freeaddr, size_t freesize, bool destroy)
{
    if (destroy && freesize) {
        freesize -= PTHREAD_SIZE;
    }
    _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE, t,
            freeaddr, freesize);
    if (!destroy) return;
    _pthread_introspection_thread_destroy(t);
}

static inline void
_pthread_introspection_thread_terminate(pthread_t t, void *freeaddr,
        size_t freesize, bool destroy)
{
    if (os_fastpath(!_pthread_introspection_hook)) return;
    _pthread_introspection_hook_callout_thread_terminate(t, freeaddr, freesize,
            destroy);
}

PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_destroy(pthread_t t)
{
    if (t == &_thread) return;
    _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY, t, t,
            PTHREAD_SIZE);
}

static inline void
_pthread_introspection_thread_destroy(pthread_t t)
{
    if (os_fastpath(!_pthread_introspection_hook)) return;
    _pthread_introspection_hook_callout_thread_destroy(t);
}