/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 *
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * POSIX Pthread Library
 */

#include "workqueue_private.h"
#include "introspection_private.h"
#include "qos_private.h"
#include "tsd_private.h"
#include "pthread/stack_np.h"
#include "offsets.h" // included to validate the offsets at build time

#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <mach/mach_sync_ipc.h>

#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/ulock.h>

#include <machine/vmparam.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>

#include <platform/string.h>
#include <platform/compat.h>
extern int __sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
		void *newp, size_t newlen);
extern void __exit(int) __attribute__((noreturn));
extern int __pthread_kill(mach_port_t, int);

extern void _pthread_joiner_wake(pthread_t thread);

#if !VARIANT_DYLD
PTHREAD_NOEXPORT extern struct _pthread *_main_thread_ptr;
#define main_thread() (_main_thread_ptr)
#endif // VARIANT_DYLD
// Default stack size is 512KB; independent of the main thread's stack size.
#define DEFAULT_STACK_SIZE (size_t)(512 * 1024)

/*
 * The pthread may be offset into a page.  In that event, by contract
 * with the kernel, the allocation will extend PTHREAD_SIZE from the
 * start of the next page.  There's also one page worth of allocation
 * below stacksize for the guard page. <rdar://problem/19941744>
 */
#define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
#define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size)
#define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize))
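
/*
 * Illustrative sketch (not part of the original source): how the macros above
 * describe a thread allocation for a hypothetical 512KB stack. The concrete
 * addresses are assumptions for the example only.
 */
#if 0
static void
_pthread_layout_example(void)
{
	uintptr_t stackaddr = 0x70000000;      // hypothetical stack top (stack grows down)
	size_t stacksize = DEFAULT_STACK_SIZE;
	// Guard page sits below the stack; the pthread struct lives above the stack top.
	uintptr_t allocaddr = PTHREAD_ALLOCADDR(stackaddr, stacksize);
	size_t allocsize = PTHREAD_ALLOCSIZE(stackaddr, stacksize);
	// allocaddr == stackaddr - stacksize - vm_page_size,
	// allocsize == guard page + stack + PTHREAD_SIZE for the struct.
	(void)allocaddr;
	(void)allocsize;
}
#endif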
static const pthread_attr_t _pthread_attr_default = {
	.sig       = _PTHREAD_ATTR_SIG,
	.detached  = PTHREAD_CREATE_JOINABLE,
	.inherit   = _PTHREAD_DEFAULT_INHERITSCHED,
	.policy    = _PTHREAD_DEFAULT_POLICY,
	.defaultguardpage = true,
	// compile time constant for _pthread_default_priority(0)
	.qosclass  = (1U << (THREAD_QOS_LEGACY - 1 + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT)) |
			((uint8_t)-1 & _PTHREAD_PRIORITY_PRIORITY_MASK),
};
#if PTHREAD_LAYOUT_SPI

const struct pthread_layout_offsets_s pthread_layout_offsets = {
	.plo_pthread_tsd_base_offset = offsetof(struct _pthread, tsd),
	.plo_pthread_tsd_base_address_offset = 0,
	.plo_pthread_tsd_entry_size = sizeof(((struct _pthread *)NULL)->tsd[0]),
};

#endif // PTHREAD_LAYOUT_SPI
//
// Global exported variables
//

// This global should be used (carefully) by anyone needing to know if a
// pthread (other than the main thread) has been created.
int __is_threaded = 0;
int __unix_conforming = 0;

//
// Global internal variables
//

// _pthread_list_lock protects _pthread_count, access to the __pthread_head
// list. Externally imported by pthread_cancelable.c.
struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
_pthread_lock _pthread_list_lock = _PTHREAD_LOCK_INITIALIZER;

#if VARIANT_DYLD
// The main thread's pthread_t
struct _pthread _main_thread __attribute__((aligned(64))) = { };
#define main_thread() (&_main_thread)
#else // VARIANT_DYLD
struct _pthread *_main_thread_ptr;
#endif // VARIANT_DYLD
#if PTHREAD_DEBUG_LOG
int _pthread_debuglog;
uint64_t _pthread_debugstart;
#endif

//
// Global static variables
//
static bool __workq_newapi;
static uint8_t default_priority;
#if !VARIANT_DYLD
static uint8_t max_priority;
static uint8_t min_priority;
#endif // !VARIANT_DYLD
static int _pthread_count = 1;
static int pthread_concurrency;
static uintptr_t _pthread_ptr_munge_token;

static void (*exitf)(int) = __exit;
#if !VARIANT_DYLD
static void *(*_pthread_malloc)(size_t) = NULL;
static void (*_pthread_free)(void *) = NULL;
#endif // !VARIANT_DYLD
// work queue support data

static void
__pthread_invalid_keventfunction(void **events, int *nevents)
{
	PTHREAD_CLIENT_CRASH(0, "Invalid kqworkq setup");
}

static void
__pthread_invalid_workloopfunction(uint64_t *workloop_id, void **events, int *nevents)
{
	PTHREAD_CLIENT_CRASH(0, "Invalid kqwl setup");
}

static pthread_workqueue_function2_t __libdispatch_workerfunction;
static pthread_workqueue_function_kevent_t __libdispatch_keventfunction = &__pthread_invalid_keventfunction;
static pthread_workqueue_function_workloop_t __libdispatch_workloopfunction = &__pthread_invalid_workloopfunction;
static int __libdispatch_offset;
static int __pthread_supported_features; // supported feature set

#if defined(__i386__) || defined(__x86_64__)
static mach_vm_address_t __pthread_stack_hint = 0xB0000000;
#else
#error no __pthread_stack_hint for this architecture
#endif
//
// Function prototypes
//

// pthread primitives
static inline void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs,
		void *stack, size_t stacksize, void *freeaddr, size_t freesize);

#if VARIANT_DYLD
static void _pthread_set_self_dyld(void);
#endif // VARIANT_DYLD
static inline void _pthread_set_self_internal(pthread_t, bool needs_tsd_base_set);

static void _pthread_dealloc_reply_port(pthread_t t);
static void _pthread_dealloc_special_reply_port(pthread_t t);

static inline void __pthread_started_thread(pthread_t t);

static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;

static inline void _pthread_introspection_thread_create(pthread_t t);
static inline void _pthread_introspection_thread_start(pthread_t t);
static inline void _pthread_introspection_thread_terminate(pthread_t t);
static inline void _pthread_introspection_thread_destroy(pthread_t t);

extern void _pthread_set_self(pthread_t);
extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *unused, int reuse); // trampoline into _pthread_wqthread
extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int flags); // trampoline into _pthread_start
/*
 * Flags field passed to bsdthread_create and back in pthread_start
 * 31  <---------------------------------> 0
 * _________________________________________
 * | flags(8) | policy(8) | importance(16) |
 * -----------------------------------------
 */
#define PTHREAD_START_CUSTOM            0x01000000 // <rdar://problem/34501401>
#define PTHREAD_START_SETSCHED          0x02000000
// was PTHREAD_START_DETACHED           0x04000000
#define PTHREAD_START_QOSCLASS          0x08000000
#define PTHREAD_START_TSD_BASE_SET      0x10000000
#define PTHREAD_START_SUSPENDED         0x20000000
#define PTHREAD_START_QOSCLASS_MASK     0x00ffffff
#define PTHREAD_START_POLICY_BITSHIFT   16
#define PTHREAD_START_POLICY_MASK       0xff
#define PTHREAD_START_IMPORTANCE_MASK   0xffff
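
/*
 * Illustrative sketch (not part of the original source): how the flags word is
 * packed when an explicit scheduling policy is requested, mirroring the
 * packing done in _pthread_create() below. The policy and priority values are
 * assumptions for the example only.
 */
#if 0
static unsigned int
_pthread_start_flags_example(void)
{
	unsigned int flags = PTHREAD_START_CUSTOM;
	int policy = SCHED_RR;     // hypothetical explicit policy
	int importance = 31;       // hypothetical sched_priority
	flags |= PTHREAD_START_SETSCHED;
	flags |= ((policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
	flags |= (importance & PTHREAD_START_IMPORTANCE_MASK);
	return flags;              // | flags(8) | policy(8) | importance(16) |
}
#endif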
#if (!defined(__OPEN_SOURCE__) && TARGET_OS_OSX) || OS_VARIANT_RESOLVED // 40703288
static int pthread_setschedparam_internal(pthread_t, mach_port_t, int,
		const struct sched_param *);
#endif

extern pthread_t __bsdthread_create(void *(*func)(void *), void *func_arg, void *stack, pthread_t thread, unsigned int flags);
extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, void *, int), int, void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), int32_t *, __uint64_t);
extern int __bsdthread_terminate(void *freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
extern __uint64_t __thread_selfid(void);
#if defined(__LP64__)
_Static_assert(offsetof(struct _pthread, tsd) == 224, "TSD LP64 offset");
#else
_Static_assert(offsetof(struct _pthread, tsd) == 176, "TSD ILP32 offset");
#endif
_Static_assert(offsetof(struct _pthread, tsd) + _PTHREAD_STRUCT_DIRECT_THREADID_OFFSET
		== offsetof(struct _pthread, thread_id),
		"_PTHREAD_STRUCT_DIRECT_THREADID_OFFSET is correct");
#pragma mark pthread attrs

_Static_assert(sizeof(struct _pthread_attr_t) == sizeof(__darwin_pthread_attr_t),
		"internal pthread_attr_t == external pthread_attr_t");
int
pthread_attr_destroy(pthread_attr_t *attr)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		attr->sig = 0;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*detachstate = attr->detached;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*inheritsched = attr->inherit;
		ret = 0;
	}
	return ret;
}
static PTHREAD_ALWAYS_INLINE void
_pthread_attr_get_schedparam(const pthread_attr_t *attr,
		struct sched_param *param)
{
	if (attr->schedset) {
		*param = attr->param;
	} else {
		param->sched_priority = default_priority;
		param->quantum = 10; /* quantum isn't public yet */
	}
}
int
pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		_pthread_attr_get_schedparam(attr, param);
		ret = 0;
	}
	return ret;
}
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*policy = attr->policy;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_init(pthread_attr_t *attr)
{
	*attr = _pthread_attr_default;
	return 0;
}
int
pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			(detachstate == PTHREAD_CREATE_JOINABLE ||
			detachstate == PTHREAD_CREATE_DETACHED)) {
		attr->detached = detachstate;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			(inheritsched == PTHREAD_INHERIT_SCHED ||
			inheritsched == PTHREAD_EXPLICIT_SCHED)) {
		attr->inherit = inheritsched;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		/* TODO: Validate sched_param fields */
		attr->param = *param;
		attr->schedset = 1;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG && (policy == SCHED_OTHER ||
			policy == SCHED_RR || policy == SCHED_FIFO)) {
		if (!_PTHREAD_POLICY_IS_FIXEDPRI(policy)) {
			/* non-fixedpri policy should remove cpupercent */
			attr->cpupercentset = 0;
		}
		attr->policy = policy;
		attr->policyset = 1;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_setscope(pthread_attr_t *attr, int scope)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		if (scope == PTHREAD_SCOPE_SYSTEM) {
			// No attribute yet for the scope.
			ret = 0;
		} else if (scope == PTHREAD_SCOPE_PROCESS) {
			ret = ENOTSUP;
		}
	}
	return ret;
}
int
pthread_attr_getscope(const pthread_attr_t *attr, int *scope)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*scope = PTHREAD_SCOPE_SYSTEM;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = attr->stackaddr;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			((uintptr_t)stackaddr % vm_page_size) == 0) {
		attr->stackaddr = stackaddr;
		attr->defaultguardpage = false;
		attr->guardsize = 0;
		ret = 0;
	}
	return ret;
}
static size_t
_pthread_attr_stacksize(const pthread_attr_t *attr)
{
	return attr->stacksize ? attr->stacksize : DEFAULT_STACK_SIZE;
}
int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stacksize = _pthread_attr_stacksize(attr);
		ret = 0;
	}
	return ret;
}
int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			(stacksize % vm_page_size) == 0 &&
			stacksize >= PTHREAD_STACK_MIN) {
		attr->stacksize = stacksize;
		ret = 0;
	}
	return ret;
}
int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
		*stacksize = _pthread_attr_stacksize(attr);
		ret = 0;
	}
	return ret;
}
// Per SUSv3, the stackaddr is the base address, the lowest addressable byte
// address. This is not the same as in pthread_attr_setstackaddr.
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			((uintptr_t)stackaddr % vm_page_size) == 0 &&
			(stacksize % vm_page_size) == 0 &&
			stacksize >= PTHREAD_STACK_MIN) {
		attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
		attr->stacksize = stacksize;
		ret = 0;
	}
	return ret;
}
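
/*
 * Usage sketch (illustrative only, not part of the original source): per the
 * SUSv3 note above, pthread_attr_setstack() takes the lowest addressable byte
 * of a page-aligned region plus its size, whereas the legacy
 * pthread_attr_setstackaddr() takes the (higher) initial stack pointer. The
 * region size is an assumption for the example; posix_memalign() needs <stdlib.h>.
 */
#if 0
static void
_pthread_setstack_example(void)
{
	size_t size = 16 * vm_page_size;             // hypothetical custom stack size
	void *base = NULL;
	posix_memalign(&base, vm_page_size, size);   // page-aligned lowest byte

	pthread_attr_t attr;
	pthread_attr_init(&attr);
	pthread_attr_setstack(&attr, base, size);    // SUSv3: base address + size
	// The equivalent legacy calls would be:
	//   pthread_attr_setstackaddr(&attr, (char *)base + size); // top of stack
	//   pthread_attr_setstacksize(&attr, size);
}
#endif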
int
pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG && (guardsize % vm_page_size) == 0) {
		/* Guardsize of 0 is valid, means no guard */
		attr->defaultguardpage = false;
		attr->guardsize = guardsize;
		ret = 0;
	}
	return ret;
}
static size_t
_pthread_attr_guardsize(const pthread_attr_t *attr)
{
	return attr->defaultguardpage ? vm_page_size : attr->guardsize;
}
int
pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*guardsize = _pthread_attr_guardsize(attr);
		ret = 0;
	}
	return ret;
}
int
pthread_attr_setcpupercent_np(pthread_attr_t *attr, int percent,
		unsigned long refillms)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG && percent < UINT8_MAX &&
			refillms < _PTHREAD_ATTR_REFILLMS_MAX && attr->policyset &&
			_PTHREAD_POLICY_IS_FIXEDPRI(attr->policy)) {
		attr->cpupercent = percent;
		attr->refillms = (uint32_t)(refillms & 0x00ffffff);
		attr->cpupercentset = 1;
		ret = 0;
	}
	return ret;
}
#pragma mark pthread lifetime

// Allocate a thread structure, stack and guard page.
//
// The thread structure may optionally be placed in the same allocation as the
// stack, residing above the top of the stack. This cannot be done if a
// custom stack address is provided.
//
// Similarly the guard page cannot be allocated if a custom stack address is
// provided.
//
// The allocated thread structure is initialized with values that indicate how
// it should be freed.
static pthread_t
_pthread_allocate(const pthread_attr_t *attrs, void **stack)
{
	mach_vm_address_t allocaddr = __pthread_stack_hint;
	size_t allocsize, guardsize, stacksize;
	kern_return_t kr;
	pthread_t t;

	PTHREAD_ASSERT(attrs->stacksize == 0 ||
			attrs->stacksize >= PTHREAD_STACK_MIN);

	// Allocate a pthread structure if necessary

	if (attrs->stackaddr != NULL) {
		PTHREAD_ASSERT(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
		allocsize = PTHREAD_SIZE;
		guardsize = 0;
		// <rdar://problem/42588315> if the attrs struct specifies a custom
		// stack address but not a custom size, using ->stacksize here instead
		// of _pthread_attr_stacksize stores stacksize as zero, indicating
		// that the stack size is unknown.
		stacksize = attrs->stacksize;
	} else {
		guardsize = _pthread_attr_guardsize(attrs);
		stacksize = _pthread_attr_stacksize(attrs) + PTHREAD_T_OFFSET;
		allocsize = stacksize + guardsize + PTHREAD_SIZE;
		allocsize = mach_vm_round_page(allocsize);
	}

	kr = mach_vm_map(mach_task_self(), &allocaddr, allocsize, vm_page_size - 1,
			VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
			0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS) {
		kr = mach_vm_allocate(mach_task_self(), &allocaddr, allocsize,
				VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
	}
	if (kr != KERN_SUCCESS) {
		*stack = NULL;
		return NULL;
	}

	// The stack grows down.
	// Set the guard page at the lowest address of the
	// newly allocated stack. Return the highest address
	// of the stack.
	if (guardsize) {
		(void)mach_vm_protect(mach_task_self(), allocaddr, guardsize,
				FALSE, VM_PROT_NONE);
	}

	// Thread structure resides at the top of the stack (when using a
	// custom stack, allocsize == PTHREAD_SIZE, so this places the pthread_t
	// at allocaddr).
	t = (pthread_t)(allocaddr + allocsize - PTHREAD_SIZE);
	if (attrs->stackaddr) {
		*stack = attrs->stackaddr;
	} else {
		*stack = t;
	}

	_pthread_struct_init(t, attrs, *stack, stacksize, allocaddr, allocsize);
	return t;
}
PTHREAD_NOINLINE
static void
_pthread_deallocate(pthread_t t, bool from_mach_thread)
{
	kern_return_t ret;

	// Don't free the main thread.
	if (t != main_thread()) {
		if (!from_mach_thread) { // see __pthread_add_thread
			_pthread_introspection_thread_destroy(t);
		}
		ret = mach_vm_deallocate(mach_task_self(), t->freeaddr, t->freesize);
		PTHREAD_ASSERT(ret == KERN_SUCCESS);
	}
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreturn-stack-address"

PTHREAD_NOINLINE
static void *
_pthread_current_stack_address(void)
{
	int a;
	return &a;
}

#pragma clang diagnostic pop
void
_pthread_joiner_wake(pthread_t thread)
{
	uint32_t *exit_gate = &thread->tl_exit_gate;

	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, exit_gate, 0);
		if (ret == 0 || ret == -ENOENT) {
			return;
		}
		if (ret != -EINTR) {
			PTHREAD_INTERNAL_CRASH(-ret, "pthread_join() wake failure");
		}
	}
}
// Terminates the thread if called from the currently running thread.
PTHREAD_NORETURN PTHREAD_NOINLINE PTHREAD_NOT_TAIL_CALLED
static void
_pthread_terminate(pthread_t t, void *exit_value)
{
	PTHREAD_ASSERT(t == pthread_self());

	_pthread_introspection_thread_terminate(t);

	uintptr_t freeaddr = (uintptr_t)t->freeaddr;
	size_t freesize = t->freesize;
	bool should_exit;

	// the size of just the stack
	size_t freesize_stack = t->freesize;

	// We usually pass our structure+stack to bsdthread_terminate to free, but
	// if we get told to keep the pthread_t structure around then we need to
	// adjust the free size and addr in the pthread_t to just refer to the
	// structure and not the stack.  If we do end up deallocating the
	// structure, this is useless work since no one can read the result, but we
	// can't do it after the call to pthread_remove_thread because it isn't
	// safe to dereference t after that.
	if ((void*)t > t->freeaddr && (void*)t < t->freeaddr + t->freesize){
		// Check to ensure the pthread structure itself is part of the
		// allocation described by freeaddr/freesize, in which case we split and
		// only deallocate the area below the pthread structure.  In the event of a
		// custom stack, the freeaddr/size will be the pthread structure itself, in
		// which case we shouldn't free anything (the final else case).
		freesize_stack = trunc_page((uintptr_t)t - (uintptr_t)freeaddr);

		// describe just the remainder for deallocation when the pthread_t goes away
		t->freeaddr += freesize_stack;
		t->freesize -= freesize_stack;
	} else if (t == main_thread()) {
		freeaddr = t->stackaddr - pthread_get_stacksize_np(t);
		uintptr_t stackborder = trunc_page((uintptr_t)_pthread_current_stack_address());
		freesize_stack = stackborder - freeaddr;
	} else {
		freesize_stack = 0;
	}

	mach_port_t kport = _pthread_kernel_thread(t);
	bool keep_thread_struct = false, needs_wake = false;
	semaphore_t custom_stack_sema = MACH_PORT_NULL;

	_pthread_dealloc_special_reply_port(t);
	_pthread_dealloc_reply_port(t);

	_PTHREAD_LOCK(_pthread_list_lock);

	// This piece of code interacts with pthread_join. It will always:
	// - set tl_exit_gate to MACH_PORT_DEAD (thread exited)
	// - set tl_exit_value to the value passed to pthread_exit()
	// - decrement _pthread_count, so that we can exit the process when all
	//   threads exited even if not all of them were joined.
	t->tl_exit_gate = MACH_PORT_DEAD;
	t->tl_exit_value = exit_value;
	should_exit = (--_pthread_count <= 0);

	// If we see a joiner, we prepost that the join has to succeed,
	// and the joiner is committed to finish (even if it was canceled)
	if (t->tl_join_ctx) {
		custom_stack_sema = _pthread_joiner_prepost_wake(t); // unsets tl_joinable
		needs_wake = true;
	}

	// Joinable threads that have no joiner yet are kept on the thread list
	// so that pthread_join() can later discover the thread when it is joined,
	// and will have to do the pthread_t cleanup.
	if (t->tl_joinable) {
		t->tl_joiner_cleans_up = keep_thread_struct = true;
	} else {
		TAILQ_REMOVE(&__pthread_head, t, tl_plist);
	}

	_PTHREAD_UNLOCK(_pthread_list_lock);

	if (needs_wake) {
		// When we found a waiter, we want to drop the very contended list lock
		// before we do the syscall in _pthread_joiner_wake(). Then, we decide
		// who gets to cleanup the pthread_t between the joiner and the exiting
		// thread:
		// - the joiner tries to set tl_join_ctx to NULL
		// - the exiting thread tries to set tl_joiner_cleans_up to true
		// Whoever does it first commits the other guy to cleanup the pthread_t
		_pthread_joiner_wake(t);
		_PTHREAD_LOCK(_pthread_list_lock);
		if (t->tl_join_ctx) {
			t->tl_joiner_cleans_up = true;
			keep_thread_struct = true;
		}
		_PTHREAD_UNLOCK(_pthread_list_lock);
	}

	//
	// /!\ dereferencing `t` past this point is not safe /!\
	//

	if (keep_thread_struct || t == main_thread()) {
		// Use the adjusted freesize of just the stack that we computed above.
		freesize = freesize_stack;
	} else {
		_pthread_introspection_thread_destroy(t);
	}

	// Check if there is nothing to free because the thread has a custom
	// stack allocation and is joinable.
	if (freesize == 0) {
		freeaddr = 0;
	}
	if (should_exit) {
		exitf(0);
	}
	__bsdthread_terminate((void *)freeaddr, freesize, kport, custom_stack_sema);
	PTHREAD_INTERNAL_CRASH(t, "thread didn't terminate");
}
PTHREAD_NORETURN
static void
_pthread_terminate_invoke(pthread_t t, void *exit_value)
{
	void *p = NULL;
	// <rdar://problem/25688492> During pthread termination there is a race
	// between pthread_join and pthread_terminate; if the joiner is responsible
	// for cleaning up the pthread_t struct, then it may destroy some part of the
	// stack with it on 16k OSes. So that this doesn't cause _pthread_terminate()
	// to crash because its stack has been removed from under its feet, just make
	// sure termination happens in a part of the stack that is not on the same
	// page as the pthread_t.
	if (trunc_page((uintptr_t)__builtin_frame_address(0)) ==
			trunc_page((uintptr_t)t)) {
		p = alloca(PTHREAD_T_OFFSET);
	}
	// And this __asm__ volatile is needed to stop the compiler from optimising
	// away the alloca() completely.
	__asm__ volatile ("" : : "r"(p) );
	_pthread_terminate(t, exit_value);
}
#pragma mark pthread start / body

/*
 * Create and start execution of a new thread.
 */
PTHREAD_NOINLINE PTHREAD_NORETURN
static void
_pthread_body(pthread_t self, bool needs_tsd_base_set)
{
	_pthread_set_self_internal(self, needs_tsd_base_set);
	__pthread_started_thread(self);
	_pthread_exit(self, (self->fun)(self->arg));
}
void
_pthread_start(pthread_t self, mach_port_t kport,
		__unused void *(*fun)(void *), __unused void *arg,
		__unused size_t stacksize, unsigned int pflags)
{
	bool thread_tsd_bsd_set = (bool)(pflags & PTHREAD_START_TSD_BASE_SET);

	if (os_unlikely(pflags & PTHREAD_START_SUSPENDED)) {
		PTHREAD_INTERNAL_CRASH(0,
				"kernel without PTHREAD_START_SUSPENDED support");
	}

	PTHREAD_ASSERT(MACH_PORT_VALID(kport));
	PTHREAD_ASSERT(_pthread_kernel_thread(self) == kport);

	// will mark the thread initialized
	_pthread_markcancel_if_canceled(self, kport);

	_pthread_body(self, !thread_tsd_bsd_set);
}
PTHREAD_ALWAYS_INLINE
static inline void
_pthread_struct_init(pthread_t t, const pthread_attr_t *attrs,
		void *stackaddr, size_t stacksize, void *freeaddr, size_t freesize)
{
	PTHREAD_ASSERT(t->sig != _PTHREAD_SIG);

	t->sig = _PTHREAD_SIG;
	t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = t;
	t->tsd[_PTHREAD_TSD_SLOT_ERRNO] = &t->err_no;
	if (attrs->schedset == 0) {
		t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = attrs->qosclass;
	} else {
		t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] =
				_pthread_unspecified_priority();
	}
	t->tsd[_PTHREAD_TSD_SLOT_PTR_MUNGE] = _pthread_ptr_munge_token;
	t->tl_has_custom_stack = (attrs->stackaddr != NULL);

	_PTHREAD_LOCK_INIT(t->lock);

	t->stackaddr = stackaddr;
	t->stackbottom = stackaddr - stacksize;
	t->freeaddr = freeaddr;
	t->freesize = freesize;

	t->guardsize = _pthread_attr_guardsize(attrs);
	t->tl_joinable = (attrs->detached == PTHREAD_CREATE_JOINABLE);
	t->inherit = attrs->inherit;
	t->tl_policy = attrs->policy;
	t->schedset = attrs->schedset;
	_pthread_attr_get_schedparam(attrs, &t->tl_param);
	t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
}
#pragma mark pthread public interface

/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
	return __is_threaded;
}

/* Non-portable public api to know whether this process has (had) at least one
 * thread apart from the main thread. There could be a race if there is a
 * thread in the process of creation at the time of the call. It does not tell
 * whether there are more than one thread at this point of time.
 */
int
pthread_is_threaded_np(void)
{
	return __is_threaded;
}
PTHREAD_NOEXPORT_VARIANT
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
	mach_port_t kport = MACH_PORT_NULL;
	(void)_pthread_is_valid(t, &kport);
	return kport;
}
PTHREAD_NOEXPORT_VARIANT
pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
	struct _pthread *p = NULL;

	/* No need to wait as mach port is already known */
	_PTHREAD_LOCK(_pthread_list_lock);

	TAILQ_FOREACH(p, &__pthread_head, tl_plist) {
		if (_pthread_kernel_thread(p) == kernel_thread) {
			break;
		}
	}

	_PTHREAD_UNLOCK(_pthread_list_lock);

	return p;
}
PTHREAD_NOEXPORT_VARIANT
size_t
pthread_get_stacksize_np(pthread_t t)
{
	size_t size = 0;
	size_t stacksize = t->stackaddr - t->stackbottom;

	if (t == NULL) {
		return ESRCH; // XXX bug?
	}

#if !defined(__arm__) && !defined(__arm64__)
	// The default rlimit based allocations will be provided with a stacksize
	// of the current limit and a freesize of the max.  However, custom
	// allocations will just have the guard page to free.  If we aren't in the
	// latter case, call into rlimit to determine the current stack size.  In
	// the event that the current limit == max limit then we'll fall down the
	// fast path, but since it's unlikely that the limit is going to be lowered
	// after it's been change to the max, we should be fine.
	//
	// Of course, on arm rlim_cur == rlim_max and there's only the one guard
	// page.  So, we can skip all this there.
	if (t == main_thread() && stacksize + vm_page_size != t->freesize) {
		// We want to call getrlimit() just once, as it's relatively expensive
		static size_t rlimit_stack;

		if (rlimit_stack == 0) {
			struct rlimit limit;
			int ret = getrlimit(RLIMIT_STACK, &limit);

			if (ret == 0) {
				rlimit_stack = (size_t) limit.rlim_cur;
			}
		}

		if (rlimit_stack == 0 || rlimit_stack > t->freesize) {
			return stacksize;
		} else {
			return rlimit_stack;
		}
	}
#endif /* !defined(__arm__) && !defined(__arm64__) */

	if (t == pthread_self() || t == main_thread()) {
		size = stacksize;
		goto out;
	}

	if (_pthread_validate_thread_and_list_lock(t)) {
		size = stacksize;
		_PTHREAD_UNLOCK(_pthread_list_lock);
	} else {
		size = ESRCH; // XXX bug?
	}

out:
	// <rdar://problem/42588315> binary compatibility issues force us to return
	// DEFAULT_STACK_SIZE here when we do not know the size of the stack
	return size ? size : DEFAULT_STACK_SIZE;
}
PTHREAD_NOEXPORT_VARIANT
void *
pthread_get_stackaddr_np(pthread_t t)
{
	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == main_thread()) {
		return t->stackaddr;
	}

	if (!_pthread_validate_thread_and_list_lock(t)) {
		return (void *)(uintptr_t)ESRCH; // XXX bug?
	}

	void *addr = t->stackaddr;
	_PTHREAD_UNLOCK(_pthread_list_lock);
	return addr;
}
static mach_port_t
_pthread_reply_port(pthread_t t)
{
	void *p;
	if (t == NULL) {
		p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY);
	} else {
		p = t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY];
	}
	return (mach_port_t)(uintptr_t)p;
}

static void
_pthread_set_reply_port(pthread_t t, mach_port_t reply_port)
{
	void *p = (void *)(uintptr_t)reply_port;
	if (t == NULL) {
		_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY, p);
	} else {
		t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY] = p;
	}
}
static void
_pthread_dealloc_reply_port(pthread_t t)
{
	mach_port_t reply_port = _pthread_reply_port(t);
	if (reply_port != MACH_PORT_NULL) {
		mig_dealloc_reply_port(reply_port);
	}
}
static mach_port_t
_pthread_special_reply_port(pthread_t t)
{
	void *p;
	if (t == NULL) {
		p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY);
	} else {
		p = t->tsd[_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY];
	}
	return (mach_port_t)(uintptr_t)p;
}
static void
_pthread_dealloc_special_reply_port(pthread_t t)
{
	mach_port_t special_reply_port = _pthread_special_reply_port(t);
	if (special_reply_port != MACH_PORT_NULL) {
		thread_destruct_special_reply_port(special_reply_port,
				THREAD_SPECIAL_REPLY_PORT_ALL);
	}
}
pthread_t
pthread_main_thread_np(void)
{
	return main_thread();
}

/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
	return pthread_self() == main_thread();
}
/*
 * if we are passed in a pthread_t that is NULL, then we return the current
 * thread's thread_id. So folks don't have to call pthread_self, in addition to
 * us doing it, if they just want their thread_id.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
{
	int res = 0;
	pthread_t self = pthread_self();

	if (thread_id == NULL) {
		return EINVAL;
	}

	if (thread == NULL || thread == self) {
		*thread_id = self->thread_id;
	} else if (!_pthread_validate_thread_and_list_lock(thread)) {
		res = ESRCH;
	} else {
		if (thread->thread_id == 0) {
			res = EINVAL;
		} else {
			*thread_id = thread->thread_id;
		}
		_PTHREAD_UNLOCK(_pthread_list_lock);
	}
	return res;
}
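
/*
 * Usage sketch (illustrative only, not part of the original source): passing
 * NULL as the thread, per the comment above, returns the calling thread's id
 * without an extra pthread_self() call.
 */
#if 0
static uint64_t
_pthread_threadid_example(void)
{
	uint64_t tid = 0;
	if (pthread_threadid_np(NULL, &tid) != 0) {
		tid = 0;   // invalid / unavailable
	}
	return tid;
}
#endif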
PTHREAD_NOEXPORT_VARIANT
int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
	if (thread == pthread_self()) {
		strlcpy(threadname, thread->pthread_name, len);
		return 0;
	}

	if (!_pthread_validate_thread_and_list_lock(thread)) {
		return ESRCH;
	}

	strlcpy(threadname, thread->pthread_name, len);
	_PTHREAD_UNLOCK(_pthread_list_lock);
	return 0;
}
int
pthread_setname_np(const char *name)
{
	int res;
	pthread_t self = pthread_self();

	size_t len = 0;
	if (name != NULL) {
		len = strlen(name);
	}

	/* prototype is in pthread_internals.h */
	res = __proc_info(5, getpid(), 2, (uint64_t)0, (void*)name, (int)len);
	if (res == 0) {
		if (len > 0) {
			strlcpy(self->pthread_name, name, MAXTHREADNAMESIZE);
		} else {
			bzero(self->pthread_name, MAXTHREADNAMESIZE);
		}
	}
	return res;
}
PTHREAD_ALWAYS_INLINE
static inline void
__pthread_add_thread(pthread_t t, bool from_mach_thread)
{
	if (from_mach_thread) {
		_PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock);
	} else {
		_PTHREAD_LOCK(_pthread_list_lock);
	}

	TAILQ_INSERT_TAIL(&__pthread_head, t, tl_plist);
	_pthread_count++;

	if (from_mach_thread) {
		_PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock);
	} else {
		_PTHREAD_UNLOCK(_pthread_list_lock);
	}

	if (!from_mach_thread) {
		// PR-26275485: Mach threads will likely crash trying to run
		// introspection code.  Since the fall out from the introspection
		// code not seeing the injected thread is likely less than crashing
		// in the introspection code, just don't make the call.
		_pthread_introspection_thread_create(t);
	}
}
PTHREAD_ALWAYS_INLINE
static inline void
__pthread_undo_add_thread(pthread_t t, bool from_mach_thread)
{
	if (from_mach_thread) {
		_PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock);
	} else {
		_PTHREAD_LOCK(_pthread_list_lock);
	}

	TAILQ_REMOVE(&__pthread_head, t, tl_plist);
	_pthread_count--;

	if (from_mach_thread) {
		_PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock);
	} else {
		_PTHREAD_UNLOCK(_pthread_list_lock);
	}
}
PTHREAD_ALWAYS_INLINE
static inline void
__pthread_started_thread(pthread_t t)
{
	mach_port_t kport = _pthread_kernel_thread(t);
	if (os_slowpath(!MACH_PORT_VALID(kport))) {
		PTHREAD_CLIENT_CRASH(kport,
				"Unable to allocate thread port, possible port leak");
	}
	_pthread_introspection_thread_start(t);
}
#define _PTHREAD_CREATE_NONE              0x0
#define _PTHREAD_CREATE_FROM_MACH_THREAD  0x1
#define _PTHREAD_CREATE_SUSPENDED         0x2

static int
_pthread_create(pthread_t *thread, const pthread_attr_t *attrs,
		void *(*start_routine)(void *), void *arg, unsigned int create_flags)
{
	pthread_t t = NULL;
	void *stack = NULL;
	bool from_mach_thread = (create_flags & _PTHREAD_CREATE_FROM_MACH_THREAD);

	if (attrs == NULL) {
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}

	unsigned int flags = PTHREAD_START_CUSTOM;
	if (attrs->schedset != 0) {
		struct sched_param p;
		_pthread_attr_get_schedparam(attrs, &p);
		flags |= PTHREAD_START_SETSCHED;
		flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
		flags |= (p.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
	} else if (attrs->qosclass != 0) {
		flags |= PTHREAD_START_QOSCLASS;
		flags |= (attrs->qosclass & PTHREAD_START_QOSCLASS_MASK);
	}
	if (create_flags & _PTHREAD_CREATE_SUSPENDED) {
		flags |= PTHREAD_START_SUSPENDED;
	}

	__is_threaded = 1;

	t = _pthread_allocate(attrs, &stack);
	if (t == NULL) {
		return EAGAIN;
	}

	t->arg = arg;
	t->fun = start_routine;
	__pthread_add_thread(t, from_mach_thread);

	if (__bsdthread_create(start_routine, arg, stack, t, flags) ==
			(pthread_t)-1) {
		if (errno == EMFILE) {
			PTHREAD_CLIENT_CRASH(0,
					"Unable to allocate thread port, possible port leak");
		}
		__pthread_undo_add_thread(t, from_mach_thread);
		_pthread_deallocate(t, from_mach_thread);
		return EAGAIN;
	}

	if (create_flags & _PTHREAD_CREATE_SUSPENDED) {
		_pthread_markcancel_if_canceled(t, _pthread_kernel_thread(t));
	}

	// n.b. if a thread is created detached and exits, t will be invalid
	*thread = t;
	return 0;
}
int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
	unsigned int flags = _PTHREAD_CREATE_NONE;
	return _pthread_create(thread, attr, start_routine, arg, flags);
}
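
/*
 * Usage sketch (illustrative only, not part of the original source): creating
 * a joinable thread and joining it. As the note above says, a detached
 * thread's pthread_t becomes invalid once the thread exits, so it must not be
 * joined.
 */
#if 0
static void *
_example_worker(void *arg)
{
	return arg;   // hypothetical work
}

static void
_pthread_create_example(void)
{
	pthread_t th;
	void *result = NULL;
	if (pthread_create(&th, NULL, _example_worker, (void *)42) == 0) {
		pthread_join(th, &result);   // safe: the thread is joinable
	}
}
#endif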
int
pthread_create_from_mach_thread(pthread_t *thread, const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
	unsigned int flags = _PTHREAD_CREATE_FROM_MACH_THREAD;
	return _pthread_create(thread, attr, start_routine, arg, flags);
}
#if !defined(__OPEN_SOURCE__) && TARGET_OS_OSX // 40703288
/* Functions defined in machine-dependent files. */
PTHREAD_NOEXPORT void _pthread_setup_suspended(pthread_t th, void (*f)(pthread_t), void *sp);

PTHREAD_NORETURN
static void
_pthread_suspended_body(pthread_t self)
{
	_pthread_set_self(self);
	__pthread_started_thread(self);
	_pthread_exit(self, (self->fun)(self->arg));
}

static int
_pthread_create_suspended_np(pthread_t *thread, const pthread_attr_t *attrs,
		void *(*start_routine)(void *), void *arg)
{
	pthread_t t;
	void *stack;
	mach_port_t kernel_thread = MACH_PORT_NULL;

	if (attrs == NULL) {
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}

	t = _pthread_allocate(attrs, &stack);
	if (t == NULL) {
		return EAGAIN;
	}

	if (thread_create(mach_task_self(), &kernel_thread) != KERN_SUCCESS) {
		_pthread_deallocate(t, false);
		return EAGAIN;
	}

	_pthread_set_kernel_thread(t, kernel_thread);
	(void)pthread_setschedparam_internal(t, kernel_thread,
			t->tl_policy, &t->tl_param);

	__is_threaded = 1;

	t->arg = arg;
	t->fun = start_routine;
	t->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
	__pthread_add_thread(t, false);

	// Set up a suspended thread.
	_pthread_setup_suspended(t, _pthread_suspended_body, stack);
	*thread = t;
	return 0;
}
#endif // !defined(__OPEN_SOURCE__) && TARGET_OS_OSX
int
pthread_create_suspended_np(pthread_t *thread, const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
#if !defined(__OPEN_SOURCE__) && TARGET_OS_OSX // 40703288
	if (_os_xbs_chrooted) {
		return _pthread_create_suspended_np(thread, attr, start_routine, arg);
	}
#endif
	unsigned int flags = _PTHREAD_CREATE_SUSPENDED;
	return _pthread_create(thread, attr, start_routine, arg, flags);
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_detach(pthread_t thread)
{
	int res = 0;
	bool join = false, wake = false;

	if (!_pthread_validate_thread_and_list_lock(thread)) {
		return ESRCH;
	}

	if (!thread->tl_joinable) {
		res = EINVAL;
	} else if (thread->tl_exit_gate == MACH_PORT_DEAD) {
		// Join the thread if it's already exited.
		join = true;
	} else {
		thread->tl_joinable = false; // _pthread_joiner_prepost_wake uses this
		if (thread->tl_join_ctx) {
			(void)_pthread_joiner_prepost_wake(thread);
			wake = true;
		}
	}
	_PTHREAD_UNLOCK(_pthread_list_lock);

	if (join) {
		pthread_join(thread, NULL);
	} else if (wake) {
		_pthread_joiner_wake(thread);
	}
	return res;
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_kill(pthread_t th, int sig)
{
	if (sig < 0 || sig > NSIG) {
		return EINVAL;
	}

	mach_port_t kport = MACH_PORT_NULL;
	if (!_pthread_is_valid(th, &kport)) {
		return ESRCH; // Not a valid thread.
	}

	// Don't signal workqueue threads.
	if (th->wqthread != 0 && th->wqkillset == 0) {
		return ENOTSUP;
	}

	int ret = __pthread_kill(kport, sig);

	if (ret == -1) {
		ret = errno;
	}
	return ret;
}
PTHREAD_NOEXPORT_VARIANT
int
__pthread_workqueue_setkill(int enable)
{
	pthread_t self = pthread_self();

	_PTHREAD_LOCK(self->lock);
	self->wqkillset = enable ? 1 : 0;
	_PTHREAD_UNLOCK(self->lock);

	return 0;
}
/* For compatibility... */
pthread_t
_pthread_self(void)
{
	return pthread_self();
}
/*
 * Terminate a thread.
 */
extern int __disable_threadsignal(int);

PTHREAD_NORETURN
static void
_pthread_exit(pthread_t self, void *exit_value)
{
	struct __darwin_pthread_handler_rec *handler;

	// Disable signal delivery while we clean up
	__disable_threadsignal(1);

	// Set cancel state to disable and type to deferred
	_pthread_setcancelstate_exit(self, exit_value);

	while ((handler = self->__cleanup_stack) != 0) {
		(handler->__routine)(handler->__arg);
		self->__cleanup_stack = handler->__next;
	}
	_pthread_tsd_cleanup(self);

	// Clear per-thread semaphore cache
	os_put_cached_semaphore(SEMAPHORE_NULL);

	_pthread_terminate_invoke(self, exit_value);
}
void
pthread_exit(void *exit_value)
{
	pthread_t self = pthread_self();
	if (os_unlikely(self->wqthread)) {
		PTHREAD_CLIENT_CRASH(0, "pthread_exit() called from a thread "
				"not created by pthread_create()");
	}
	_pthread_exit(self, exit_value);
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_getschedparam(pthread_t thread, int *policy, struct sched_param *param)
{
	if (!_pthread_validate_thread_and_list_lock(thread)) {
		return ESRCH;
	}

	if (policy) *policy = thread->tl_policy;
	if (param) *param = thread->tl_param;
	_PTHREAD_UNLOCK(_pthread_list_lock);
	return 0;
}
PTHREAD_ALWAYS_INLINE
static inline int
pthread_setschedparam_internal(pthread_t thread, mach_port_t kport, int policy,
		const struct sched_param *param)
{
	policy_base_data_t bases;
	policy_base_t base;
	mach_msg_type_number_t count;
	kern_return_t ret;

	switch (policy) {
	case SCHED_OTHER:
		bases.ts.base_priority = param->sched_priority;
		base = (policy_base_t)&bases.ts;
		count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	case SCHED_FIFO:
		bases.fifo.base_priority = param->sched_priority;
		base = (policy_base_t)&bases.fifo;
		count = POLICY_FIFO_BASE_COUNT;
		break;
	case SCHED_RR:
		bases.rr.base_priority = param->sched_priority;
		/* quantum isn't public yet */
		bases.rr.quantum = param->quantum;
		base = (policy_base_t)&bases.rr;
		count = POLICY_RR_BASE_COUNT;
		break;
	default:
		return EINVAL;
	}
	ret = thread_policy(kport, policy, base, count, TRUE);
	return (ret != KERN_SUCCESS) ? EINVAL : 0;
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
{
	mach_port_t kport = MACH_PORT_NULL;
	int bypass = 1;

	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == main_thread()) {
		kport = _pthread_kernel_thread(t);
	} else {
		bypass = 0;
		if (!_pthread_is_valid(t, &kport)) {
			return ESRCH;
		}
	}

	int res = pthread_setschedparam_internal(t, kport, policy, param);
	if (res) return res;

	if (bypass) {
		_PTHREAD_LOCK(_pthread_list_lock);
	} else if (!_pthread_validate_thread_and_list_lock(t)) {
		// Ensure the thread is still valid.
		return ESRCH;
	}

	t->tl_policy = policy;
	t->tl_param = *param;
	_PTHREAD_UNLOCK(_pthread_list_lock);
	return 0;
}
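
/*
 * Usage sketch (illustrative only, not part of the original source): moving
 * the calling thread to a fixed-priority FIFO policy via the public API above.
 * The priority value is an assumption for the example.
 */
#if 0
static void
_pthread_setschedparam_example(void)
{
	struct sched_param param = { .sched_priority = 45 };   // hypothetical priority
	(void)pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
}
#endif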
int
sched_get_priority_min(int policy)
{
	return default_priority - 16;
}

int
sched_get_priority_max(int policy)
{
	return default_priority + 16;
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}
/*
 * Force LLVM not to optimise this to a call to __pthread_set_self, if it does
 * then _pthread_set_self won't be bound when secondary threads try and start up.
 */
PTHREAD_NOINLINE
void
_pthread_set_self(pthread_t p)
{
#if VARIANT_DYLD
	if (os_likely(!p)) {
		return _pthread_set_self_dyld();
	}
#endif // VARIANT_DYLD
	_pthread_set_self_internal(p, true);
}
#if VARIANT_DYLD
// _pthread_set_self_dyld is noinline+noexport to allow the option for
// static libsyscall to adopt this as the entry point from mach_init if
// desired.
PTHREAD_NOINLINE PTHREAD_NOEXPORT
static void
_pthread_set_self_dyld(void)
{
	pthread_t p = main_thread();
	p->thread_id = __thread_selfid();

	if (os_unlikely(p->thread_id == -1ull)) {
		PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
	}

	// <rdar://problem/40930651> pthread self and the errno address are the
	// bare minimum TSD setup that dyld needs to actually function.  Without
	// this, TSD access will fail and crash if it uses bits of Libc prior to
	// library initialization. __pthread_init will finish the initialization
	// during library init.
	p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = p;
	p->tsd[_PTHREAD_TSD_SLOT_ERRNO] = &p->err_no;
	_thread_set_tsd_base(&p->tsd[0]);
}
#endif // VARIANT_DYLD
PTHREAD_ALWAYS_INLINE
static inline void
_pthread_set_self_internal(pthread_t p, bool needs_tsd_base_set)
{
	p->thread_id = __thread_selfid();

	if (os_unlikely(p->thread_id == -1ull)) {
		PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
	}

	if (needs_tsd_base_set) {
		_thread_set_tsd_base(&p->tsd[0]);
	}
}
// <rdar://problem/28984807> pthread_once should have an acquire barrier
PTHREAD_ALWAYS_INLINE
static inline void
_os_once_acquire(os_once_t *predicate, void *context, os_function_t function)
{
	if (OS_EXPECT(os_atomic_load(predicate, acquire), ~0l) != ~0l) {
		_os_once(predicate, context, function);
		OS_COMPILER_CAN_ASSUME(*predicate == ~0l);
	}
}
struct _pthread_once_context {
	pthread_once_t *pthread_once;
	void (*routine)(void);
};

static void
__pthread_once_handler(void *context)
{
	struct _pthread_once_context *ctx = context;
	pthread_cleanup_push((void*)__os_once_reset, &ctx->pthread_once->once);
	ctx->routine();
	pthread_cleanup_pop(0);
	ctx->pthread_once->sig = _PTHREAD_ONCE_SIG;
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
	struct _pthread_once_context ctx = { once_control, init_routine };
	do {
		_os_once_acquire(&once_control->once, &ctx, __pthread_once_handler);
	} while (once_control->sig == _PTHREAD_ONCE_SIG_init);
	return 0;
}
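
/*
 * Usage sketch (illustrative only, not part of the original source): a typical
 * one-time initializer guarded by pthread_once(). The acquire barrier noted
 * above guarantees that callers observe everything _example_init() wrote once
 * pthread_once() returns.
 */
#if 0
static pthread_once_t _example_once = PTHREAD_ONCE_INIT;
static int _example_state;

static void
_example_init(void)
{
	_example_state = 1;   // runs exactly once, even with concurrent callers
}

static int
_example_get_state(void)
{
	pthread_once(&_example_once, _example_init);
	return _example_state;
}
#endif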
int
pthread_getconcurrency(void)
{
	return pthread_concurrency;
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0) {
		return EINVAL;
	}
	pthread_concurrency = new_level;
	return 0;
}
#if !defined(VARIANT_STATIC)
void *
malloc(size_t sz)
{
	if (_pthread_malloc) {
		return _pthread_malloc(sz);
	} else {
		return NULL;
	}
}

void
free(void *p)
{
	if (_pthread_free) {
		_pthread_free(p);
	}
}
#endif // VARIANT_STATIC
/*
 * Perform package initialization - called automatically when application starts
 */
struct ProgramVars; /* forward reference */
#if !VARIANT_DYLD
static unsigned long
_pthread_strtoul(const char *p, const char **endptr, int base)
{
	unsigned long val = 0;

	// Expect hex string starting with "0x"
	if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') {
		p += 2;
		while (1) {
			char c = *p;
			if ('0' <= c && c <= '9') {
				val = (val << 4) + (c - '0');
			} else if ('a' <= c && c <= 'f') {
				val = (val << 4) + (c - 'a' + 10);
			} else if ('A' <= c && c <= 'F') {
				val = (val << 4) + (c - 'A' + 10);
			} else {
				break;
			}
			++p;
		}
	}

	*endptr = (char *)p;
	return val;
}
static int
parse_main_stack_params(const char *apple[],
		void **stackaddr,
		size_t *stacksize,
		void **allocaddr,
		size_t *allocsize)
{
	const char *p = _simple_getenv(apple, "main_stack");
	if (!p) return 0;

	int ret = 0;
	const char *s = p;

	*stackaddr = (void *)_pthread_strtoul(s, &s, 16);
	if (*s != ',') goto out;

	*stacksize = _pthread_strtoul(s + 1, &s, 16);
	if (*s != ',') goto out;

	*allocaddr = (void *)_pthread_strtoul(s + 1, &s, 16);
	if (*s != ',') goto out;

	*allocsize = _pthread_strtoul(s + 1, &s, 16);
	if (*s != ',' && *s != 0) goto out;

	ret = 1;
out:
	bzero((char *)p, strlen(p));
	return ret;
}
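
/*
 * Illustrative sketch (not part of the original source): the "main_stack"
 * entry parsed above is a comma-separated hex 4-tuple in the apple[] vector
 * (stack top, stack size, allocation base, allocation size). The values below
 * are invented for the example.
 */
#if 0
static void
_parse_main_stack_example(void)
{
	char entry[] = "main_stack=0x7ffee4000000,0x800000,0x7ffee3bff000,0x801000";
	const char *apple[] = { entry, NULL };
	void *stackaddr = NULL, *allocaddr = NULL;
	size_t stacksize = 0, allocsize = 0;
	parse_main_stack_params(apple, &stackaddr, &stacksize, &allocaddr, &allocsize);
	// Note: the function bzeroes the parsed entry after reading it.
}
#endif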
static void
parse_ptr_munge_params(const char *envp[], const char *apple[])
{
	const char *p, *s;
	p = _simple_getenv(apple, "ptr_munge");
	if (p) {
		_pthread_ptr_munge_token = _pthread_strtoul(p, &s, 16);
		bzero((char *)p, strlen(p));
	}

	if (_pthread_ptr_munge_token) return;

	p = _simple_getenv(envp, "PTHREAD_PTR_MUNGE_TOKEN");
	if (p) {
		uintptr_t t = _pthread_strtoul(p, &s, 16);
		if (t) _pthread_ptr_munge_token = t;
	}
}
1833 __pthread_init(const struct _libpthread_functions 
*pthread_funcs
, 
1834                 const char *envp
[], const char *apple
[], 
1835                 const struct ProgramVars 
*vars __unused
) 
1837         // Save our provided pushed-down functions 
1838         if (pthread_funcs
) { 
1839                 exitf 
= pthread_funcs
->exit
; 
1841                 if (pthread_funcs
->version 
>= 2) { 
1842                         _pthread_malloc 
= pthread_funcs
->malloc
; 
1843                         _pthread_free 
= pthread_funcs
->free
; 
1848         // Get host information 
1852         host_flavor_t flavor 
= HOST_PRIORITY_INFO
; 
1853         mach_msg_type_number_t count 
= HOST_PRIORITY_INFO_COUNT
; 
1854         host_priority_info_data_t priority_info
; 
1855         host_t host 
= mach_host_self(); 
1856         kr 
= host_info(host
, flavor
, (host_info_t
)&priority_info
, &count
); 
1857         if (kr 
!= KERN_SUCCESS
) { 
1858                 PTHREAD_INTERNAL_CRASH(kr
, "host_info() failed"); 
1860                 default_priority 
= (uint8_t)priority_info
.user_priority
; 
1861                 min_priority 
= (uint8_t)priority_info
.minimum_priority
; 
1862                 max_priority 
= (uint8_t)priority_info
.maximum_priority
; 
1864         mach_port_deallocate(mach_task_self(), host
); 
	//
	// Set up the main thread structure
	//

	// Get the address and size of the main thread's stack from the kernel.
	void *stackaddr = 0;
	size_t stacksize = 0;
	void *allocaddr = 0;
	size_t allocsize = 0;
	if (!parse_main_stack_params(apple, &stackaddr, &stacksize, &allocaddr, &allocsize) ||
			stackaddr == NULL || stacksize == 0) {
		// Fall back to previous behavior.
		size_t len = sizeof(stackaddr);
		int mib[] = { CTL_KERN, KERN_USRSTACK };
		if (__sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0) {
#if defined(__LP64__)
			stackaddr = (void *)USRSTACK64;
#else
			stackaddr = (void *)USRSTACK;
#endif
		}
		stacksize = DFLSSIZ;
		allocaddr = 0;
		allocsize = 0;
	}
	// Initialize random ptr_munge token from the kernel.
	parse_ptr_munge_params(envp, apple);

	// libpthread.a in dyld "owns" the main thread structure itself and sets
	// up the tsd to point to it. So take the pthread_self() from there
	// and make it our main thread pointer.
	pthread_t thread = (pthread_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_PTHREAD_SELF);
	PTHREAD_ASSERT(thread);
	_main_thread_ptr = thread;

	PTHREAD_ASSERT(_pthread_attr_default.qosclass ==
			_pthread_default_priority(0));
	_pthread_struct_init(thread, &_pthread_attr_default,
			stackaddr, stacksize, allocaddr, allocsize);
	thread->tl_joinable = true;

	// Finish initialization with common code that is reinvoked on the
	// child side of a fork.
	//
	// Finishes initialization of main thread attributes.
	// Initializes the thread list and adds the main thread.
	// Calls _pthread_set_self() to prepare the main thread for execution.
	_pthread_main_thread_init(thread);

	struct _pthread_registration_data registration_data;
	// Set up kernel entry points with __bsdthread_register.
	_pthread_bsdthread_init(&registration_data);

	// Have pthread_key and pthread_mutex do their init envvar checks.
	_pthread_key_global_init(envp);
	_pthread_mutex_global_init(envp, &registration_data);

#if PTHREAD_DEBUG_LOG
	_SIMPLE_STRING path = _simple_salloc();
	_simple_sprintf(path, "/var/tmp/libpthread.%d.log", getpid());
	_pthread_debuglog = open(_simple_string(path),
			O_WRONLY | O_APPEND | O_CREAT | O_NOFOLLOW | O_CLOEXEC, 0666);
	_simple_sfree(path);
	_pthread_debugstart = mach_absolute_time();
#endif // PTHREAD_DEBUG_LOG
}
#endif // !VARIANT_DYLD
PTHREAD_NOEXPORT
void
_pthread_main_thread_init(pthread_t p)
{
	TAILQ_INIT(&__pthread_head);
	_PTHREAD_LOCK_INIT(_pthread_list_lock);
	_PTHREAD_LOCK_INIT(p->lock);
	_pthread_set_kernel_thread(p, mach_thread_self());
	_pthread_set_reply_port(p, mach_reply_port());
	p->__cleanup_stack = NULL;
	p->tl_join_ctx = NULL;
	p->tl_exit_gate = MACH_PORT_NULL;
	p->tsd[__TSD_SEMAPHORE_CACHE] = (void*)SEMAPHORE_NULL;
	p->tsd[__TSD_MACH_SPECIAL_REPLY] = 0;
	p->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;

	// Initialize the list of threads with the new main thread.
	TAILQ_INSERT_HEAD(&__pthread_head, p, tl_plist);
	_pthread_count = 1;

	_pthread_introspection_thread_start(p);
}
void
pthread_yield_np(void)
{
	sched_yield();
}
// Libsystem knows about this symbol and exports it to libsyscall
PTHREAD_NOEXPORT_VARIANT
void
_pthread_clear_qos_tsd(mach_port_t thread_port)
{
	if (thread_port == MACH_PORT_NULL || (uintptr_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF) == thread_port) {
		/* Clear the current thread's TSD, that can be done inline. */
		_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS,
				_pthread_unspecified_priority());
	} else {
		pthread_t p;

		_PTHREAD_LOCK(_pthread_list_lock);

		TAILQ_FOREACH(p, &__pthread_head, tl_plist) {
			mach_port_t kp = _pthread_kernel_thread(p);
			if (thread_port == kp) {
				p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] =
						_pthread_unspecified_priority();
				break;
			}
		}

		_PTHREAD_UNLOCK(_pthread_list_lock);
	}
}
#pragma mark pthread/stack_np.h public interface


#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__arm64__)
typedef uintptr_t frame_data_addr_t;

struct frame_data {
	frame_data_addr_t frame_addr_next;
	frame_data_addr_t ret_addr;
};
#else
#error ********** Unimplemented architecture
#endif

uintptr_t
pthread_stack_frame_decode_np(uintptr_t frame_addr, uintptr_t *return_addr)
{
	struct frame_data *frame = (struct frame_data *)frame_addr;

	if (return_addr) {
		*return_addr = (uintptr_t)frame->ret_addr;
	}

	return (uintptr_t)frame->frame_addr_next;
}
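/*
 * Illustrative usage sketch (not part of this file): walking a few frames of
 * the current call stack with the stack_np.h interface. Assumes frame
 * pointers are present (e.g. no -fomit-frame-pointer) and uses a simplified
 * stop condition.
 *
 *	#include <pthread/stack_np.h>
 *
 *	uintptr_t frame = (uintptr_t)__builtin_frame_address(0);
 *	for (int i = 0; i < 8 && frame != 0; i++) {
 *		uintptr_t ret = 0;
 *		// Returns the caller's frame; stores the return address in ret.
 *		frame = pthread_stack_frame_decode_np(frame, &ret);
 *		if (ret == 0) break;
 *	}
 */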
#pragma mark pthread workqueue support routines


PTHREAD_NOEXPORT
void
_pthread_bsdthread_init(struct _pthread_registration_data *data)
{
	bzero(data, sizeof(*data));
	data->version = sizeof(struct _pthread_registration_data);
	data->dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
	data->return_to_kernel_offset = __TSD_RETURN_TO_KERNEL * sizeof(void *);
	data->tsd_offset = offsetof(struct _pthread, tsd);
	data->mach_thread_self_offset = __TSD_MACH_THREAD_SELF * sizeof(void *);

	int rv = __bsdthread_register(thread_start, start_wqthread, (int)PTHREAD_SIZE,
			(void*)data, (uintptr_t)sizeof(*data), data->dispatch_queue_offset);

	if (rv > 0) {
		int required_features =
				PTHREAD_FEATURE_FINEPRIO |
				PTHREAD_FEATURE_BSDTHREADCTL |
				PTHREAD_FEATURE_SETSELF |
				PTHREAD_FEATURE_QOS_MAINTENANCE |
				PTHREAD_FEATURE_QOS_DEFAULT;
		if ((rv & required_features) != required_features) {
			PTHREAD_INTERNAL_CRASH(rv, "Missing required kernel support");
		}
		__pthread_supported_features = rv;
	}

	/*
	 * TODO: differentiate between (-1, EINVAL) after fork (which has the side
	 * effect of resetting the child's stack_addr_hint before bailing out) and
	 * (-1, EINVAL) because of invalid arguments.  We'd probably like to treat
	 * the latter as fatal.
	 *
	 * <rdar://problem/36451838>
	 */

	pthread_priority_t main_qos = (pthread_priority_t)data->main_qos;

	if (_pthread_priority_thread_qos(main_qos) != THREAD_QOS_UNSPECIFIED) {
		_pthread_set_main_qos(main_qos);
		main_thread()->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (void *)main_qos;
	}

	if (data->stack_addr_hint) {
		__pthread_stack_hint = data->stack_addr_hint;
	}

	if (__libdispatch_workerfunction != NULL) {
		// prepare the kernel for workq action
		(void)__workq_open();
	}
}
static void
_pthread_wqthread_legacy_worker_wrap(pthread_priority_t pp)
{
	/* Old thread priorities are inverted from where we have them in
	 * the new flexible priority scheme. The highest priority is zero,
	 * up to 2, with background at 3.
	 */
	pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;
	bool overcommit = (pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
	int opts = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;

	switch (_pthread_priority_thread_qos(pp)) {
	case THREAD_QOS_USER_INITIATED:
		return (*func)(WORKQ_HIGH_PRIOQUEUE, opts, NULL);
	case THREAD_QOS_LEGACY:
		/* B&I builders can't pass a QOS_CLASS_DEFAULT thread to dispatch, for fear of the QoS being
		 * picked up by NSThread (et al) and transported around the system. So change the TSD to
		 * make this thread look like QOS_CLASS_USER_INITIATED even though it will still run as legacy.
		 */
		_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS,
				_pthread_priority_make_from_thread_qos(THREAD_QOS_USER_INITIATED, 0, 0));
		return (*func)(WORKQ_DEFAULT_PRIOQUEUE, opts, NULL);
	case THREAD_QOS_UTILITY:
		return (*func)(WORKQ_LOW_PRIOQUEUE, opts, NULL);
	case THREAD_QOS_BACKGROUND:
		return (*func)(WORKQ_BG_PRIOQUEUE, opts, NULL);
	}
	PTHREAD_INTERNAL_CRASH(pp, "Invalid pthread priority for the legacy interface");
}
PTHREAD_ALWAYS_INLINE
static inline pthread_priority_t
_pthread_wqthread_priority(int flags)
{
	pthread_priority_t pp = 0;
	thread_qos_t qos;

	if (flags & WQ_FLAG_THREAD_KEVENT) {
		pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
	}
	if (flags & WQ_FLAG_THREAD_EVENT_MANAGER) {
		return pp | _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
	}

	if (flags & WQ_FLAG_THREAD_OVERCOMMIT) {
		pp |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
	}
	if (flags & WQ_FLAG_THREAD_PRIO_QOS) {
		qos = (thread_qos_t)(flags & WQ_FLAG_THREAD_PRIO_MASK);
		pp = _pthread_priority_make_from_thread_qos(qos, 0, pp);
	} else if (flags & WQ_FLAG_THREAD_PRIO_SCHED) {
		pp |= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
		pp |= (flags & WQ_FLAG_THREAD_PRIO_MASK);
	} else {
		PTHREAD_INTERNAL_CRASH(flags, "Missing priority");
	}
	return pp;
}
static void
_pthread_wqthread_setup(pthread_t self, mach_port_t kport, void *stacklowaddr,
		int flags)
{
	void *stackaddr = self;
	size_t stacksize = (uintptr_t)self - (uintptr_t)stacklowaddr;

	_pthread_struct_init(self, &_pthread_attr_default, stackaddr, stacksize,
			PTHREAD_ALLOCADDR(stackaddr, stacksize),
			PTHREAD_ALLOCSIZE(stackaddr, stacksize));

	_pthread_set_kernel_thread(self, kport);
	self->wqthread = 1;
	self->wqkillset = 0;
	self->tl_joinable = false;
	self->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;

	// Update the running thread count and set childrun bit.
	bool thread_tsd_base_set = (bool)(flags & WQ_FLAG_THREAD_TSD_BASE_SET);
	_pthread_set_self_internal(self, !thread_tsd_base_set);
	__pthread_add_thread(self, false);
	__pthread_started_thread(self);
}
PTHREAD_NORETURN PTHREAD_NOINLINE
static void
_pthread_wqthread_exit(pthread_t self)
{
	pthread_priority_t pp;
	thread_qos_t qos;

	pp = (pthread_priority_t)self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS];
	qos = _pthread_priority_thread_qos(pp);
	if (qos == THREAD_QOS_UNSPECIFIED || qos > WORKQ_THREAD_QOS_CLEANUP) {
		// Reset QoS to something low for the cleanup process
		pp = _pthread_priority_make_from_thread_qos(WORKQ_THREAD_QOS_CLEANUP, 0, 0);
		self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (void *)pp;
	}

	_pthread_exit(self, NULL);
}
// workqueue entry point from kernel
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr,
		void *keventlist, int flags, int nkevents)
{
	if ((flags & WQ_FLAG_THREAD_REUSE) == 0) {
		_pthread_wqthread_setup(self, kport, stacklowaddr, flags);
	}

	pthread_priority_t pp;
	if (flags & WQ_FLAG_THREAD_OUTSIDEQOS) {
		self->wqoutsideqos = 1;
		pp = _pthread_priority_make_from_thread_qos(THREAD_QOS_LEGACY, 0,
				_PTHREAD_PRIORITY_FALLBACK_FLAG);
	} else {
		self->wqoutsideqos = 0;
		pp = _pthread_wqthread_priority(flags);
	}

	self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (void *)pp;

	// Avoid spills on the stack; try hard to keep used stack space minimal.
	if (nkevents == WORKQ_EXIT_THREAD_NKEVENT) {
		goto exit;
	} else if (flags & WQ_FLAG_THREAD_WORKLOOP) {
		self->fun = (void *(*)(void*))__libdispatch_workloopfunction;
		self->wq_retop = WQOPS_THREAD_WORKLOOP_RETURN;
		self->wq_kqid_ptr = ((kqueue_id_t *)keventlist - 1);
		self->arg = keventlist;
		self->wq_nevents = nkevents;
	} else if (flags & WQ_FLAG_THREAD_KEVENT) {
		self->fun = (void *(*)(void*))__libdispatch_keventfunction;
		self->wq_retop = WQOPS_THREAD_KEVENT_RETURN;
		self->wq_kqid_ptr = NULL;
		self->arg = keventlist;
		self->wq_nevents = nkevents;
	} else {
		self->fun = (void *(*)(void*))__libdispatch_workerfunction;
		self->wq_retop = WQOPS_THREAD_RETURN;
		self->wq_kqid_ptr = NULL;
		self->arg = (void *)(uintptr_t)pp;
		self->wq_nevents = 0;
		if (os_likely(__workq_newapi)) {
			(*__libdispatch_workerfunction)(pp);
		} else {
			_pthread_wqthread_legacy_worker_wrap(pp);
		}
		goto just_return;
	}

	if (nkevents > 0) {
kevent_errors_retry:
		if (self->wq_retop == WQOPS_THREAD_WORKLOOP_RETURN) {
			((pthread_workqueue_function_workloop_t)self->fun)
					(self->wq_kqid_ptr, &self->arg, &self->wq_nevents);
		} else {
			((pthread_workqueue_function_kevent_t)self->fun)
					(&self->arg, &self->wq_nevents);
		}
		int rc = __workq_kernreturn(self->wq_retop, self->arg, self->wq_nevents, 0);
		if (os_unlikely(rc > 0)) {
			self->wq_nevents = rc;
			goto kevent_errors_retry;
		}
		if (os_unlikely(rc < 0)) {
			PTHREAD_INTERNAL_CRASH(self->err_no, "kevent (workloop) failed");
		}
	} else {
just_return:
		__workq_kernreturn(self->wq_retop, NULL, 0, 0);
	}

exit:
	_pthread_wqthread_exit(self);
}
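/*
 * Summary (derived from the branches above): plain worker threads invoke the
 * flat worker function once with their priority and then return to the
 * kernel with WQOPS_THREAD_RETURN; kevent and workloop threads call their
 * handler and loop through __workq_kernreturn() with
 * WQOPS_THREAD_KEVENT_RETURN / WQOPS_THREAD_WORKLOOP_RETURN while the kernel
 * keeps handing back events, before falling through to
 * _pthread_wqthread_exit().
 */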
#pragma mark pthread workqueue API for libdispatch


_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN == WQ_KEVENT_LIST_LEN,
		"Kernel and userland should agree on the event list size");

void
pthread_workqueue_setdispatchoffset_np(int offset)
{
	__libdispatch_offset = offset;
}

static int
pthread_workqueue_setdispatch_with_workloop_np(pthread_workqueue_function2_t queue_func,
		pthread_workqueue_function_kevent_t kevent_func,
		pthread_workqueue_function_workloop_t workloop_func)
{
	int res = EBUSY;
	if (__libdispatch_workerfunction == NULL) {
		// Check whether the kernel supports new SPIs
		res = __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP, NULL, __libdispatch_offset, kevent_func != NULL ? 0x01 : 0x00);
		if (res == -1) {
			res = ENOTSUP;
		} else {
			__libdispatch_workerfunction = queue_func;
			__libdispatch_keventfunction = kevent_func;
			__libdispatch_workloopfunction = workloop_func;

			// Prepare the kernel for workq action
			(void)__workq_open();
			if (__is_threaded == 0) {
				__is_threaded = 1;
			}
		}
	}
	return res;
}
int
_pthread_workqueue_init_with_workloop(pthread_workqueue_function2_t queue_func,
		pthread_workqueue_function_kevent_t kevent_func,
		pthread_workqueue_function_workloop_t workloop_func,
		int offset, int flags)
{
	if (flags != 0) {
		return ENOTSUP;
	}

	__workq_newapi = true;
	__libdispatch_offset = offset;

	int rv = pthread_workqueue_setdispatch_with_workloop_np(queue_func,
			kevent_func, workloop_func);
	return rv;
}

int
_pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func,
		pthread_workqueue_function_kevent_t kevent_func,
		int offset, int flags)
{
	return _pthread_workqueue_init_with_workloop(queue_func, kevent_func, NULL, offset, flags);
}

int
_pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flags)
{
	return _pthread_workqueue_init_with_kevent(func, NULL, offset, flags);
}
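/*
 * Illustrative sketch (not part of this file): libdispatch is the intended
 * client of this SPI. A minimal registration that only uses the flat worker
 * function could look roughly like the following; `my_worker` is a
 * hypothetical callback and error handling is elided.
 *
 *	static void my_worker(pthread_priority_t priority)
 *	{
 *		// drain work at the requested priority
 *	}
 *
 *	int rc = _pthread_workqueue_init(my_worker, 0, 0);
 *	// On success the kernel may later spawn workqueue threads that enter
 *	// _pthread_wqthread() and call back into my_worker().
 */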
int
pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func)
{
	return pthread_workqueue_setdispatch_with_workloop_np(
			(pthread_workqueue_function2_t)worker_func, NULL, NULL);
}

int
_pthread_workqueue_supported(void)
{
	if (os_unlikely(!__pthread_supported_features)) {
		PTHREAD_INTERNAL_CRASH(0, "libpthread has not been initialized");
	}

	return __pthread_supported_features;
}
int
pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads)
{
	int res = 0;

	// Cannot add threads without a worker function registered.
	if (__libdispatch_workerfunction == NULL) {
		return EPERM;
	}

	pthread_priority_t kp = 0;
	int compat_priority = queue_priority & WQ_FLAG_THREAD_PRIO_MASK;
	int flags = 0;

	if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
		flags = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
	}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
	kp = _pthread_qos_class_encode_workqueue(compat_priority, flags);
#pragma clang diagnostic pop

	res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)kp);
	if (res == -1) {
		res = errno;
	}
	return res;
}

bool
_pthread_workqueue_should_narrow(pthread_priority_t pri)
{
	int res = __workq_kernreturn(WQOPS_SHOULD_NARROW, NULL, (int)pri, 0);
	if (res == -1) {
		return false;
	}
	return res;
}
int
_pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
{
	int res = 0;

	if (__libdispatch_workerfunction == NULL) {
		return EPERM;
	}

	// <rdar://problem/37687655> Legacy simulators fail to boot
	//
	// Older sims set the deprecated _PTHREAD_PRIORITY_ROOTQUEUE_FLAG wrongly,
	// which is aliased to _PTHREAD_PRIORITY_SCHED_PRI_FLAG and that XNU
	// validates and rejects.
	//
	// As a workaround, forcefully unset this bit that cannot be set here
	// anyway.
	priority &= ~_PTHREAD_PRIORITY_SCHED_PRI_FLAG;

	res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)priority);
	if (res == -1) {
		res = errno;
	}
	return res;
}

int
_pthread_workqueue_set_event_manager_priority(pthread_priority_t priority)
{
	int res = __workq_kernreturn(WQOPS_SET_EVENT_MANAGER_PRIORITY, NULL, (int)priority, 0);
	if (res == -1) {
		res = errno;
	}
	return res;
}
int
_pthread_workloop_create(uint64_t workloop_id, uint64_t options, pthread_attr_t *attr)
{
	struct kqueue_workloop_params params = {
		.kqwlp_version = sizeof(struct kqueue_workloop_params),
		.kqwlp_id = workloop_id,
		.kqwlp_flags = 0,
	};

	if (!attr) {
		return EINVAL;
	}

	if (attr->schedset) {
		params.kqwlp_flags |= KQ_WORKLOOP_CREATE_SCHED_PRI;
		params.kqwlp_sched_pri = attr->param.sched_priority;
	}

	if (attr->policyset) {
		params.kqwlp_flags |= KQ_WORKLOOP_CREATE_SCHED_POL;
		params.kqwlp_sched_pol = attr->policy;
	}

	if (attr->cpupercentset) {
		params.kqwlp_flags |= KQ_WORKLOOP_CREATE_CPU_PERCENT;
		params.kqwlp_cpu_percent = attr->cpupercent;
		params.kqwlp_cpu_refillms = attr->refillms;
	}

	int res = __kqueue_workloop_ctl(KQ_WORKLOOP_CREATE, 0, &params,
			sizeof(params));
	if (res == -1) {
		res = errno;
	}
	return res;
}

int
_pthread_workloop_destroy(uint64_t workloop_id)
{
	struct kqueue_workloop_params params = {
		.kqwlp_version = sizeof(struct kqueue_workloop_params),
		.kqwlp_id = workloop_id,
	};

	int res = __kqueue_workloop_ctl(KQ_WORKLOOP_DESTROY, 0, &params,
			sizeof(params));
	if (res == -1) {
		res = errno;
	}
	return res;
}
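/*
 * Illustrative sketch (not part of this file): creating a kernel workloop
 * with a fixed scheduling priority and tearing it down again. The workloop
 * id and priority are arbitrary values chosen for illustration; error
 * handling is elided.
 *
 *	pthread_attr_t attr;
 *	pthread_attr_init(&attr);
 *	struct sched_param sp = { .sched_priority = 47 };
 *	pthread_attr_setschedparam(&attr, &sp);	// marks the attr's schedset
 *
 *	uint64_t wl_id = 0x1234;
 *	int rc = _pthread_workloop_create(wl_id, 0, &attr);
 *	// ... use the workloop via the kevent/workloop SPIs ...
 *	rc = _pthread_workloop_destroy(wl_id);
 */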
#pragma mark Introspection SPI for libpthread.


static pthread_introspection_hook_t _pthread_introspection_hook;

pthread_introspection_hook_t
pthread_introspection_hook_install(pthread_introspection_hook_t hook)
{
	pthread_introspection_hook_t prev;
	prev = _pthread_atomic_xchg_ptr((void**)&_pthread_introspection_hook, hook);
	return prev;
}
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_create(pthread_t t)
{
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_CREATE, t, t,
			PTHREAD_SIZE);
}

static inline void
_pthread_introspection_thread_create(pthread_t t)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_create(t);
}
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_start(pthread_t t)
{
	size_t freesize;
	void *freeaddr;

	if (t == main_thread()) {
		size_t stacksize = t->stackaddr - t->stackbottom;
		freesize = stacksize + t->guardsize;
		freeaddr = t->stackaddr - freesize;
	} else {
		freesize = t->freesize - PTHREAD_SIZE;
		freeaddr = t->freeaddr;
	}
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_START, t,
			freeaddr, freesize);
}

static inline void
_pthread_introspection_thread_start(pthread_t t)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_start(t);
}
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_terminate(pthread_t t)
{
	size_t freesize;
	void *freeaddr;

	if (t == main_thread()) {
		size_t stacksize = t->stackaddr - t->stackbottom;
		freesize = stacksize + t->guardsize;
		freeaddr = t->stackaddr - freesize;
	} else {
		freesize = t->freesize - PTHREAD_SIZE;
		freeaddr = t->freeaddr;
	}
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE, t,
			freeaddr, freesize);
}

static inline void
_pthread_introspection_thread_terminate(pthread_t t)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_terminate(t);
}
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_destroy(pthread_t t)
{
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY, t, t,
			PTHREAD_SIZE);
}

static inline void
_pthread_introspection_thread_destroy(pthread_t t)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_destroy(t);
}
#pragma mark libplatform shims

#include <platform/string.h>

// pthread_setup initializes large structures to 0,
// which the compiler turns into a library call to memset.
//
// To avoid linking against Libc, provide a simple wrapper
// that calls through to the libplatform primitives

PTHREAD_NOEXPORT
void *
memset(void *b, int c, size_t len)
{
	return _platform_memset(b, c, len);
}

PTHREAD_NOEXPORT
void
bzero(void *s, size_t n)
{
	_platform_bzero(s, n);
}

PTHREAD_NOEXPORT
void *
memcpy(void* a, const void* b, unsigned long s)
{
	return _platform_memmove(a, b, s);
}